/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/radeon_drm.h>

#include "radeon_asic.h"
#include "radeon_mode.h"
#include "radeon_ucode.h"
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
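/*
 * Note: crtc_offsets[] is indexed by CRTC number and holds each CRTC
 * register block's offset relative to CRTC0, so per-CRTC display registers
 * can be addressed as, e.g., AVIVO_D1CRTC_CONTROL + crtc_offsets[i]
 * (see r600_is_display_hung() below).
 */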
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	bpc = radeon_get_monitor_bpc(connector);
	dither = radeon_connector->dither;

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (dither == RADEON_FMT_DITHER_ENABLE)
		/* XXX sort out optimal dither settings */
		tmp |= FMT_SPATIAL_DITHER_EN;
	else
		tmp |= FMT_TRUNCATE_EN;

	if (dither == RADEON_FMT_DITHER_ENABLE)
		/* XXX sort out optimal dither settings */
		tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
	else
		tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}
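/*
 * Note (assumption): the two dither/truncate variants above differ only in
 * the *_DEPTH flags; in the full function they are selected according to
 * the panel depth returned by radeon_get_monitor_bpc() before the final
 * FMT_BIT_DEPTH_CONTROL write.
 */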
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	return actual_temp * 1000;
}
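/*
 * Illustrative example: a raw ASIC_T field of 45 (0x2d) read from
 * CG_THERMAL_STATUS is reported as 45 * 1000 = 45000 millidegrees C.
 */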
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index;
						rdev->pm.requested_power_state_index = i;
				if (rdev->pm.current_power_state_index == 0)
					rdev->pm.requested_power_state_index =
						rdev->pm.num_power_states - 1;
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index - 1;
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index;
						rdev->pm.requested_power_state_index = i;
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
			rdev->pm.requested_clock_mode_index = 0;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
		case DYNPM_ACTION_NONE:
			DRM_ERROR("Requested mode for not defined action\n");
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
		case DYNPM_ACTION_NONE:
			DRM_ERROR("Requested mode for not defined action\n");
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;

		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;

			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;

			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;

			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;

			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;

			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;

			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;

			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;

			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;

			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;

			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)

		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)

		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)

		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)

		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)

		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)

	if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)

	if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)

	if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);

		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);

		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);

		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);

		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);

		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);

	tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
	if (connected)
		tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
	else
		tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
	WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);

	tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
	if (connected)
		tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
	else
		tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
	WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);

	tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
	if (connected)
		tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
	else
		tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
	WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
			 * aux dp channel on imac; this helps (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))

			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HPD1_CONTROL, tmp);
				WREG32(DC_HPD2_CONTROL, tmp);
				WREG32(DC_HPD3_CONTROL, tmp);
				WREG32(DC_HPD4_CONTROL, tmp);
				WREG32(DC_HPD5_CONTROL, tmp);
				WREG32(DC_HPD6_CONTROL, tmp);
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HPD1_CONTROL, 0);
				WREG32(DC_HPD2_CONTROL, 0);
				WREG32(DC_HPD3_CONTROL, 0);
				WREG32(DC_HPD4_CONTROL, 0);
				WREG32(DC_HPD5_CONTROL, 0);
				WREG32(DC_HPD6_CONTROL, 0);
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
*rdev
)
946 if (rdev
->gart
.robj
) {
947 WARN(1, "R600 PCIE GART already initialized\n");
950 /* Initialize common gart structure */
951 r
= radeon_gart_init(rdev
);
954 rdev
->gart
.table_size
= rdev
->gart
.num_gpu_pages
* 8;
955 return radeon_gart_table_vram_alloc(rdev
);
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");

	r = radeon_gart_table_vram_pin(rdev);
	radeon_gart_restore(rdev);

	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
static void r600_agp_enable(struct radeon_device *rdev)
{
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
	      ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
	       S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			       rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			       rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address it has in the CPU
 * (PCI) address space, as some GPUs seem to have issues when it is
 * reprogrammed to a different address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then the VRAM size is limited to the aperture.
 *
 * If AGP is in use, VRAM is placed adjacent to the AGP aperture, as both
 * need to form one contiguous range from the GPU's point of view so that
 * accesses outside of them can be caught (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
			 mc->mc_vram_size >> 20, mc->vram_start,
			 mc->vram_end, mc->real_vram_size >> 20);
	} else {
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
static int r600_mc_init(struct radeon_device *rdev)
{
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
	} else if (tmp & CHANSIZE_MASK) {
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
			 * memory is present.
			 */
			if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
				DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
					 (unsigned long long)rdev->mc.aper_base, k8_addr);
				rdev->mc.aper_base = (resource_size_t)k8_addr;
				rdev->fastfb_working = true;
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->vram_scratch.robj);
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   (void **)&rdev->vram_scratch.ptr);
	radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	if (rdev->vram_scratch.robj == NULL) {
		return;
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}
static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}
static bool r600_is_display_hung(struct radeon_device *rdev)
{
	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */

	rv515_mc_resume(rdev, &save);

	r600_print_gpu_status_regs(rdev);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	reset_mask = r600_gpu_check_soft_reset(rdev);

	r600_set_bios_scratch_engine_hung(rdev, true);

	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	r600_set_bios_scratch_engine_hung(rdev, false);
/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
			      u32 tiling_pipe_num,
			      u32 max_rb_num,
			      u32 total_max_rb_num,
			      u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
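/*
 * Usage example: r600_count_pipe_bits(0x5) returns 2; hweight32() simply
 * counts the set bits, which is how pipe/backend disable bitmasks are
 * turned into counts of remaining units (see r6xx_remap_render_backend()
 * above and r600_gpu_init() below).
 */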
static void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;
	int i, j;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	tmp = R6XX_MAX_BACKENDS -
		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
	if (tmp < rdev->config.r600.max_backends) {
		rdev->config.r600.max_backends = tmp;
	}

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = R6XX_MAX_PIPES -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
	if (tmp < rdev->config.r600.max_pipes) {
		rdev->config.r600.max_pipes = tmp;
	}
	tmp = R6XX_MAX_SIMDS -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	if (tmp < rdev->config.r600.max_simds) {
		rdev->config.r600.max_simds = tmp;
	}

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1912 /* Setup some CP states */
1913 WREG32(CP_QUEUE_THRESHOLDS
, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1914 WREG32(CP_MEQ_THRESHOLDS
, (MEQ_END(0x40) | ROQ_END(0x40)));
1916 WREG32(TA_CNTL_AUX
, (DISABLE_CUBE_ANISO
| SYNC_GRADIENT
|
1917 SYNC_WALKER
| SYNC_ALIGNER
));
1918 /* Setup various GPU states */
1919 if (rdev
->family
== CHIP_RV670
)
1920 WREG32(ARB_GDEC_RD_CNTL
, 0x00000021);
1922 tmp
= RREG32(SX_DEBUG_1
);
1923 tmp
|= SMX_EVENT_RELEASE
;
1924 if ((rdev
->family
> CHIP_R600
))
1925 tmp
|= ENABLE_NEW_SMX_ADDRESS
;
1926 WREG32(SX_DEBUG_1
, tmp
);
1928 if (((rdev
->family
) == CHIP_R600
) ||
1929 ((rdev
->family
) == CHIP_RV630
) ||
1930 ((rdev
->family
) == CHIP_RV610
) ||
1931 ((rdev
->family
) == CHIP_RV620
) ||
1932 ((rdev
->family
) == CHIP_RS780
) ||
1933 ((rdev
->family
) == CHIP_RS880
)) {
1934 WREG32(DB_DEBUG
, PREZ_MUST_WAIT_FOR_POSTZ_DONE
);
1936 WREG32(DB_DEBUG
, 0);
1938 WREG32(DB_WATERMARKS
, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1939 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1941 WREG32(PA_SC_MULTI_CHIP_CNTL
, 0);
1942 WREG32(VGT_NUM_INSTANCES
, 0);
1944 WREG32(SPI_CONFIG_CNTL
, GPR_WRITE_PRIORITY(0));
1945 WREG32(SPI_CONFIG_CNTL_1
, VTX_DONE_DELAY(0));
1947 tmp
= RREG32(SQ_MS_FIFO_SIZES
);
1948 if (((rdev
->family
) == CHIP_RV610
) ||
1949 ((rdev
->family
) == CHIP_RV620
) ||
1950 ((rdev
->family
) == CHIP_RS780
) ||
1951 ((rdev
->family
) == CHIP_RS880
)) {
1952 tmp
= (CACHE_FIFO_SIZE(0xa) |
1953 FETCH_FIFO_HIWATER(0xa) |
1954 DONE_FIFO_HIWATER(0xe0) |
1955 ALU_UPDATE_FIFO_HIWATER(0x8));
1956 } else if (((rdev
->family
) == CHIP_R600
) ||
1957 ((rdev
->family
) == CHIP_RV630
)) {
1958 tmp
&= ~DONE_FIFO_HIWATER(0xff);
1959 tmp
|= DONE_FIFO_HIWATER(0x4);
1961 WREG32(SQ_MS_FIFO_SIZES
, tmp
);
1963 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1964 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1966 sq_config
= RREG32(SQ_CONFIG
);
1967 sq_config
&= ~(PS_PRIO(3) |
1971 sq_config
|= (DX9_CONSTS
|
1978 if ((rdev
->family
) == CHIP_R600
) {
1979 sq_gpr_resource_mgmt_1
= (NUM_PS_GPRS(124) |
1981 NUM_CLAUSE_TEMP_GPRS(4));
1982 sq_gpr_resource_mgmt_2
= (NUM_GS_GPRS(0) |
1984 sq_thread_resource_mgmt
= (NUM_PS_THREADS(136) |
1985 NUM_VS_THREADS(48) |
1988 sq_stack_resource_mgmt_1
= (NUM_PS_STACK_ENTRIES(128) |
1989 NUM_VS_STACK_ENTRIES(128));
1990 sq_stack_resource_mgmt_2
= (NUM_GS_STACK_ENTRIES(0) |
1991 NUM_ES_STACK_ENTRIES(0));
1992 } else if (((rdev
->family
) == CHIP_RV610
) ||
1993 ((rdev
->family
) == CHIP_RV620
) ||
1994 ((rdev
->family
) == CHIP_RS780
) ||
1995 ((rdev
->family
) == CHIP_RS880
)) {
1996 /* no vertex cache */
1997 sq_config
&= ~VC_ENABLE
;
1999 sq_gpr_resource_mgmt_1
= (NUM_PS_GPRS(44) |
2001 NUM_CLAUSE_TEMP_GPRS(2));
2002 sq_gpr_resource_mgmt_2
= (NUM_GS_GPRS(17) |
2004 sq_thread_resource_mgmt
= (NUM_PS_THREADS(79) |
2005 NUM_VS_THREADS(78) |
2007 NUM_ES_THREADS(31));
2008 sq_stack_resource_mgmt_1
= (NUM_PS_STACK_ENTRIES(40) |
2009 NUM_VS_STACK_ENTRIES(40));
2010 sq_stack_resource_mgmt_2
= (NUM_GS_STACK_ENTRIES(32) |
2011 NUM_ES_STACK_ENTRIES(16));
2012 } else if (((rdev
->family
) == CHIP_RV630
) ||
2013 ((rdev
->family
) == CHIP_RV635
)) {
2014 sq_gpr_resource_mgmt_1
= (NUM_PS_GPRS(44) |
2016 NUM_CLAUSE_TEMP_GPRS(2));
2017 sq_gpr_resource_mgmt_2
= (NUM_GS_GPRS(18) |
2019 sq_thread_resource_mgmt
= (NUM_PS_THREADS(79) |
2020 NUM_VS_THREADS(78) |
2022 NUM_ES_THREADS(31));
2023 sq_stack_resource_mgmt_1
= (NUM_PS_STACK_ENTRIES(40) |
2024 NUM_VS_STACK_ENTRIES(40));
2025 sq_stack_resource_mgmt_2
= (NUM_GS_STACK_ENTRIES(32) |
2026 NUM_ES_STACK_ENTRIES(16));
2027 } else if ((rdev
->family
) == CHIP_RV670
) {
2028 sq_gpr_resource_mgmt_1
= (NUM_PS_GPRS(44) |
2030 NUM_CLAUSE_TEMP_GPRS(2));
2031 sq_gpr_resource_mgmt_2
= (NUM_GS_GPRS(17) |
2033 sq_thread_resource_mgmt
= (NUM_PS_THREADS(79) |
2034 NUM_VS_THREADS(78) |
2036 NUM_ES_THREADS(31));
2037 sq_stack_resource_mgmt_1
= (NUM_PS_STACK_ENTRIES(64) |
2038 NUM_VS_STACK_ENTRIES(64));
2039 sq_stack_resource_mgmt_2
= (NUM_GS_STACK_ENTRIES(64) |
2040 NUM_ES_STACK_ENTRIES(64));
2043 WREG32(SQ_CONFIG
, sq_config
);
2044 WREG32(SQ_GPR_RESOURCE_MGMT_1
, sq_gpr_resource_mgmt_1
);
2045 WREG32(SQ_GPR_RESOURCE_MGMT_2
, sq_gpr_resource_mgmt_2
);
2046 WREG32(SQ_THREAD_RESOURCE_MGMT
, sq_thread_resource_mgmt
);
2047 WREG32(SQ_STACK_RESOURCE_MGMT_1
, sq_stack_resource_mgmt_1
);
2048 WREG32(SQ_STACK_RESOURCE_MGMT_2
, sq_stack_resource_mgmt_2
);
2050 if (((rdev
->family
) == CHIP_RV610
) ||
2051 ((rdev
->family
) == CHIP_RV620
) ||
2052 ((rdev
->family
) == CHIP_RS780
) ||
2053 ((rdev
->family
) == CHIP_RS880
)) {
2054 WREG32(VGT_CACHE_INVALIDATION
, CACHE_INVALIDATION(TC_ONLY
));
2056 WREG32(VGT_CACHE_INVALIDATION
, CACHE_INVALIDATION(VC_AND_TC
));
2059 /* More default values. 2D/3D driver should adjust as needed */
2060 WREG32(PA_SC_AA_SAMPLE_LOCS_2S
, (S0_X(0xc) | S0_Y(0x4) |
2061 S1_X(0x4) | S1_Y(0xc)));
2062 WREG32(PA_SC_AA_SAMPLE_LOCS_4S
, (S0_X(0xe) | S0_Y(0xe) |
2063 S1_X(0x2) | S1_Y(0x2) |
2064 S2_X(0xa) | S2_Y(0x6) |
2065 S3_X(0x6) | S3_Y(0xa)));
2066 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0
, (S0_X(0xe) | S0_Y(0xb) |
2067 S1_X(0x4) | S1_Y(0xc) |
2068 S2_X(0x1) | S2_Y(0x6) |
2069 S3_X(0xa) | S3_Y(0xe)));
2070 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1
, (S4_X(0x6) | S4_Y(0x1) |
2071 S5_X(0x0) | S5_Y(0x0) |
2072 S6_X(0xb) | S6_Y(0x4) |
2073 S7_X(0x7) | S7_Y(0x8)));
2075 WREG32(VGT_STRMOUT_EN
, 0);
2076 tmp
= rdev
->config
.r600
.max_pipes
* 16;
2077 switch (rdev
->family
) {
2093 WREG32(VGT_ES_PER_GS
, 128);
2094 WREG32(VGT_GS_PER_ES
, tmp
);
2095 WREG32(VGT_GS_PER_VS
, 2);
2096 WREG32(VGT_GS_VERTEX_REUSE
, 16);
2098 /* more default values. 2D/3D driver should adjust as needed */
2099 WREG32(PA_SC_LINE_STIPPLE_STATE
, 0);
2100 WREG32(VGT_STRMOUT_EN
, 0);
2102 WREG32(PA_SC_MODE_CNTL
, 0);
2103 WREG32(PA_SC_AA_CONFIG
, 0);
2104 WREG32(PA_SC_LINE_STIPPLE
, 0);
2105 WREG32(SPI_INPUT_Z
, 0);
2106 WREG32(SPI_PS_IN_CONTROL_0
, NUM_INTERP(2));
2107 WREG32(CB_COLOR7_FRAG
, 0);
2109 /* Clear render buffer base addresses */
2110 WREG32(CB_COLOR0_BASE
, 0);
2111 WREG32(CB_COLOR1_BASE
, 0);
2112 WREG32(CB_COLOR2_BASE
, 0);
2113 WREG32(CB_COLOR3_BASE
, 0);
2114 WREG32(CB_COLOR4_BASE
, 0);
2115 WREG32(CB_COLOR5_BASE
, 0);
2116 WREG32(CB_COLOR6_BASE
, 0);
2117 WREG32(CB_COLOR7_BASE
, 0);
2118 WREG32(CB_COLOR7_FRAG
, 0);
2120 switch (rdev
->family
) {
2125 tmp
= TC_L2_SIZE(8);
2129 tmp
= TC_L2_SIZE(4);
2132 tmp
= TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT
;
2135 tmp
= TC_L2_SIZE(0);
2138 WREG32(TC_CNTL
, tmp
);
2140 tmp
= RREG32(HDP_HOST_PATH_CNTL
);
2141 WREG32(HDP_HOST_PATH_CNTL
, tmp
);
2143 tmp
= RREG32(ARB_POP
);
2144 tmp
|= ENABLE_TC128
;
2145 WREG32(ARB_POP
, tmp
);
2147 WREG32(PA_SC_MULTI_CHIP_CNTL
, 0);
2148 WREG32(PA_CL_ENHANCE
, (CLIP_VTX_REORDER_ENA
|
2150 WREG32(PA_SC_ENHANCE
, FORCE_EOV_MAX_CLK_CNT(4095));
2151 WREG32(VC_ENHANCE
, 0);
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
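/*
 * Illustrative sketch (not driver code): the PCIE port accessors above
 * follow the common index/data pair pattern, where one MMIO register
 * selects an internal offset and a second one carries the payload.  A
 * generic, hypothetical helper for that pattern; the idx_reg/data_reg
 * parameters and the readback-to-post-the-write are assumptions modelled
 * on the accessors above, not radeon API.
 */
#if 0
static u32 indexed_rreg(void __iomem *mmio, u32 idx_reg, u32 data_reg, u32 offset)
{
	writel(offset, mmio + idx_reg);	/* select the internal register */
	(void)readl(mmio + idx_reg);	/* read back to post the write */
	return readl(mmio + data_reg);	/* fetch the selected value */
}
#endif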
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
2194 int r600_init_microcode(struct radeon_device
*rdev
)
2196 const char *chip_name
;
2197 const char *rlc_chip_name
;
2198 const char *smc_chip_name
= "RV770";
2199 size_t pfp_req_size
, me_req_size
, rlc_req_size
, smc_req_size
= 0;
2205 switch (rdev
->family
) {
2208 rlc_chip_name
= "R600";
2211 chip_name
= "RV610";
2212 rlc_chip_name
= "R600";
2215 chip_name
= "RV630";
2216 rlc_chip_name
= "R600";
2219 chip_name
= "RV620";
2220 rlc_chip_name
= "R600";
2223 chip_name
= "RV635";
2224 rlc_chip_name
= "R600";
2227 chip_name
= "RV670";
2228 rlc_chip_name
= "R600";
2232 chip_name
= "RS780";
2233 rlc_chip_name
= "R600";
2236 chip_name
= "RV770";
2237 rlc_chip_name
= "R700";
2238 smc_chip_name
= "RV770";
2239 smc_req_size
= ALIGN(RV770_SMC_UCODE_SIZE
, 4);
2242 chip_name
= "RV730";
2243 rlc_chip_name
= "R700";
2244 smc_chip_name
= "RV730";
2245 smc_req_size
= ALIGN(RV730_SMC_UCODE_SIZE
, 4);
2248 chip_name
= "RV710";
2249 rlc_chip_name
= "R700";
2250 smc_chip_name
= "RV710";
2251 smc_req_size
= ALIGN(RV710_SMC_UCODE_SIZE
, 4);
2254 chip_name
= "RV730";
2255 rlc_chip_name
= "R700";
2256 smc_chip_name
= "RV740";
2257 smc_req_size
= ALIGN(RV740_SMC_UCODE_SIZE
, 4);
2260 chip_name
= "CEDAR";
2261 rlc_chip_name
= "CEDAR";
2262 smc_chip_name
= "CEDAR";
2263 smc_req_size
= ALIGN(CEDAR_SMC_UCODE_SIZE
, 4);
2266 chip_name
= "REDWOOD";
2267 rlc_chip_name
= "REDWOOD";
2268 smc_chip_name
= "REDWOOD";
2269 smc_req_size
= ALIGN(REDWOOD_SMC_UCODE_SIZE
, 4);
2272 chip_name
= "JUNIPER";
2273 rlc_chip_name
= "JUNIPER";
2274 smc_chip_name
= "JUNIPER";
2275 smc_req_size
= ALIGN(JUNIPER_SMC_UCODE_SIZE
, 4);
2279 chip_name
= "CYPRESS";
2280 rlc_chip_name
= "CYPRESS";
2281 smc_chip_name
= "CYPRESS";
2282 smc_req_size
= ALIGN(CYPRESS_SMC_UCODE_SIZE
, 4);
2286 rlc_chip_name
= "SUMO";
2290 rlc_chip_name
= "SUMO";
2293 chip_name
= "SUMO2";
2294 rlc_chip_name
= "SUMO";
2299 if (rdev
->family
>= CHIP_CEDAR
) {
2300 pfp_req_size
= EVERGREEN_PFP_UCODE_SIZE
* 4;
2301 me_req_size
= EVERGREEN_PM4_UCODE_SIZE
* 4;
2302 rlc_req_size
= EVERGREEN_RLC_UCODE_SIZE
* 4;
2303 } else if (rdev
->family
>= CHIP_RV770
) {
2304 pfp_req_size
= R700_PFP_UCODE_SIZE
* 4;
2305 me_req_size
= R700_PM4_UCODE_SIZE
* 4;
2306 rlc_req_size
= R700_RLC_UCODE_SIZE
* 4;
2308 pfp_req_size
= R600_PFP_UCODE_SIZE
* 4;
2309 me_req_size
= R600_PM4_UCODE_SIZE
* 12;
2310 rlc_req_size
= R600_RLC_UCODE_SIZE
* 4;
2313 DRM_INFO("Loading %s Microcode\n", chip_name
);
2315 snprintf(fw_name
, sizeof(fw_name
), "radeon/%s_pfp.bin", chip_name
);
2316 err
= request_firmware(&rdev
->pfp_fw
, fw_name
, rdev
->dev
);
2319 if (rdev
->pfp_fw
->size
!= pfp_req_size
) {
2321 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2322 rdev
->pfp_fw
->size
, fw_name
);
2327 snprintf(fw_name
, sizeof(fw_name
), "radeon/%s_me.bin", chip_name
);
2328 err
= request_firmware(&rdev
->me_fw
, fw_name
, rdev
->dev
);
2331 if (rdev
->me_fw
->size
!= me_req_size
) {
2333 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2334 rdev
->me_fw
->size
, fw_name
);
2338 snprintf(fw_name
, sizeof(fw_name
), "radeon/%s_rlc.bin", rlc_chip_name
);
2339 err
= request_firmware(&rdev
->rlc_fw
, fw_name
, rdev
->dev
);
2342 if (rdev
->rlc_fw
->size
!= rlc_req_size
) {
2344 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2345 rdev
->rlc_fw
->size
, fw_name
);
2349 if ((rdev
->family
>= CHIP_RV770
) && (rdev
->family
<= CHIP_HEMLOCK
)) {
2350 snprintf(fw_name
, sizeof(fw_name
), "radeon/%s_smc.bin", smc_chip_name
);
2351 err
= request_firmware(&rdev
->smc_fw
, fw_name
, rdev
->dev
);
2354 "smc: error loading firmware \"%s\"\n",
2356 release_firmware(rdev
->smc_fw
);
2357 rdev
->smc_fw
= NULL
;
2359 } else if (rdev
->smc_fw
->size
!= smc_req_size
) {
2361 "smc: Bogus length %zu in firmware \"%s\"\n",
2362 rdev
->smc_fw
->size
, fw_name
);
2371 "r600_cp: Failed to load firmware \"%s\"\n",
2373 release_firmware(rdev
->pfp_fw
);
2374 rdev
->pfp_fw
= NULL
;
2375 release_firmware(rdev
->me_fw
);
2377 release_firmware(rdev
->rlc_fw
);
2378 rdev
->rlc_fw
= NULL
;
2379 release_firmware(rdev
->smc_fw
);
2380 rdev
->smc_fw
= NULL
;
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	ring->rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	ring->ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}
	return 0;
}
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	u32 rb_bufsz;
	int r;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
}
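/*
 * Illustrative sketch (not driver code): the alignment above rounds a
 * requested ring size up to the next power of two, expressed in units of
 * 8-byte quanta.  A hypothetical user-space model of the same math, using
 * a plain ceil(log2()) in place of the kernel's order_base_2() helper:
 */
#if 0
static unsigned int ceil_log2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)	/* smallest order with 2^order >= n */
		order++;
	return order;
}

static unsigned int aligned_ring_size(unsigned int requested)
{
	unsigned int rb_bufsz = ceil_log2(requested / 8);

	return (1u << (rb_bufsz + 1)) * 4;	/* e.g. 300000 -> 524288 */
}
#endif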
void r600_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}
/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
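/*
 * Illustrative sketch (not driver code): r600_ring_test() above is a
 * poison-then-poll pattern -- seed a scratch register with 0xCAFEDEAD,
 * ask the CP to overwrite it with 0xDEADBEEF, then poll until the value
 * flips or a timeout expires.  A generic, hypothetical poll helper with
 * the same shape:
 */
#if 0
static int poll_for_value(u32 (*read_reg)(u32), u32 reg, u32 expected,
			  unsigned timeout_usecs)
{
	unsigned i;

	for (i = 0; i < timeout_usecs; i++) {
		if (read_reg(reg) == expected)
			return 0;	/* value observed in time */
		udelay(1);
	}
	return -ETIMEDOUT;		/* the GPU never wrote the value back */
}
#endif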
2603 * CP fences/semaphores
2606 void r600_fence_ring_emit(struct radeon_device
*rdev
,
2607 struct radeon_fence
*fence
)
2609 struct radeon_ring
*ring
= &rdev
->ring
[fence
->ring
];
2611 if (rdev
->wb
.use_event
) {
2612 u64 addr
= rdev
->fence_drv
[fence
->ring
].gpu_addr
;
2613 /* flush read cache over gart */
2614 radeon_ring_write(ring
, PACKET3(PACKET3_SURFACE_SYNC
, 3));
2615 radeon_ring_write(ring
, PACKET3_TC_ACTION_ENA
|
2616 PACKET3_VC_ACTION_ENA
|
2617 PACKET3_SH_ACTION_ENA
);
2618 radeon_ring_write(ring
, 0xFFFFFFFF);
2619 radeon_ring_write(ring
, 0);
2620 radeon_ring_write(ring
, 10); /* poll interval */
2621 /* EVENT_WRITE_EOP - flush caches, send int */
2622 radeon_ring_write(ring
, PACKET3(PACKET3_EVENT_WRITE_EOP
, 4));
2623 radeon_ring_write(ring
, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS
) | EVENT_INDEX(5));
2624 radeon_ring_write(ring
, addr
& 0xffffffff);
2625 radeon_ring_write(ring
, (upper_32_bits(addr
) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2626 radeon_ring_write(ring
, fence
->seq
);
2627 radeon_ring_write(ring
, 0);
2629 /* flush read cache over gart */
2630 radeon_ring_write(ring
, PACKET3(PACKET3_SURFACE_SYNC
, 3));
2631 radeon_ring_write(ring
, PACKET3_TC_ACTION_ENA
|
2632 PACKET3_VC_ACTION_ENA
|
2633 PACKET3_SH_ACTION_ENA
);
2634 radeon_ring_write(ring
, 0xFFFFFFFF);
2635 radeon_ring_write(ring
, 0);
2636 radeon_ring_write(ring
, 10); /* poll interval */
2637 radeon_ring_write(ring
, PACKET3(PACKET3_EVENT_WRITE
, 0));
2638 radeon_ring_write(ring
, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT
) | EVENT_INDEX(0));
2639 /* wait for 3D idle clean */
2640 radeon_ring_write(ring
, PACKET3(PACKET3_SET_CONFIG_REG
, 1));
2641 radeon_ring_write(ring
, (WAIT_UNTIL
- PACKET3_SET_CONFIG_REG_OFFSET
) >> 2);
2642 radeon_ring_write(ring
, WAIT_3D_IDLE_bit
| WAIT_3D_IDLECLEAN_bit
);
2643 /* Emit fence sequence & fire IRQ */
2644 radeon_ring_write(ring
, PACKET3(PACKET3_SET_CONFIG_REG
, 1));
2645 radeon_ring_write(ring
, ((rdev
->fence_drv
[fence
->ring
].scratch_reg
- PACKET3_SET_CONFIG_REG_OFFSET
) >> 2));
2646 radeon_ring_write(ring
, fence
->seq
);
2647 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2648 radeon_ring_write(ring
, PACKET0(CP_INT_STATUS
, 0));
2649 radeon_ring_write(ring
, RB_INT_STAT
);
2653 bool r600_semaphore_ring_emit(struct radeon_device
*rdev
,
2654 struct radeon_ring
*ring
,
2655 struct radeon_semaphore
*semaphore
,
2658 uint64_t addr
= semaphore
->gpu_addr
;
2659 unsigned sel
= emit_wait
? PACKET3_SEM_SEL_WAIT
: PACKET3_SEM_SEL_SIGNAL
;
2661 if (rdev
->family
< CHIP_CAYMAN
)
2662 sel
|= PACKET3_SEM_WAIT_ON_SIGNAL
;
2664 radeon_ring_write(ring
, PACKET3(PACKET3_MEM_SEMAPHORE
, 1));
2665 radeon_ring_write(ring
, addr
& 0xffffffff);
2666 radeon_ring_write(ring
, (upper_32_bits(addr
) & 0xff) | sel
);
/**
 * r600_copy_cpdma - copy pages using the CP DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU paging using the CP DMA engine (r6xx+).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_cpdma(struct radeon_device *rdev,
		    uint64_t src_offset, uint64_t dst_offset,
		    unsigned num_gpu_pages,
		    struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.blit_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes, tmp;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		tmp = upper_32_bits(src_offset) & 0xff;
		if (size_in_bytes == 0)
			tmp |= PACKET3_CP_DMA_CP_SYNC;
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
		radeon_ring_write(ring, src_offset & 0xffffffff);
		radeon_ring_write(ring, tmp);
		radeon_ring_write(ring, dst_offset & 0xffffffff);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, cur_size_in_bytes);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
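/*
 * Illustrative sketch (not driver code): the copy above is split into
 * chunks because a single CP_DMA packet can only describe up to 0x1fffff
 * bytes.  A hypothetical helper showing the same chunking arithmetic, e.g.
 * a 4 MiB copy (1024 pages of 4 KiB) needs 3 packets:
 */
#if 0
static unsigned cpdma_num_packets(unsigned num_gpu_pages)
{
	const u64 max_bytes_per_packet = 0x1fffff;	/* CP_DMA byte-count limit */
	u64 size_in_bytes = (u64)num_gpu_pages * 4096;	/* assumes 4 KiB GPU pages */

	return (size_in_bytes + max_bytes_per_packet - 1) / max_bytes_per_packet;
}
#endif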
2750 int r600_set_surface_reg(struct radeon_device
*rdev
, int reg
,
2751 uint32_t tiling_flags
, uint32_t pitch
,
2752 uint32_t offset
, uint32_t obj_size
)
2754 /* FIXME: implement */
2758 void r600_clear_surface_reg(struct radeon_device
*rdev
, int reg
)
2760 /* FIXME: implement */
2763 static int r600_startup(struct radeon_device
*rdev
)
2765 struct radeon_ring
*ring
;
2768 /* enable pcie gen2 link */
2769 r600_pcie_gen2_enable(rdev
);
2771 /* scratch needs to be initialized before MC */
2772 r
= r600_vram_scratch_init(rdev
);
2776 r600_mc_program(rdev
);
2778 if (rdev
->flags
& RADEON_IS_AGP
) {
2779 r600_agp_enable(rdev
);
2781 r
= r600_pcie_gart_enable(rdev
);
2785 r600_gpu_init(rdev
);
2787 /* allocate wb buffer */
2788 r
= radeon_wb_init(rdev
);
2792 r
= radeon_fence_driver_start_ring(rdev
, RADEON_RING_TYPE_GFX_INDEX
);
2794 dev_err(rdev
->dev
, "failed initializing CP fences (%d).\n", r
);
2798 r
= radeon_fence_driver_start_ring(rdev
, R600_RING_TYPE_DMA_INDEX
);
2800 dev_err(rdev
->dev
, "failed initializing DMA fences (%d).\n", r
);
2805 if (!rdev
->irq
.installed
) {
2806 r
= radeon_irq_kms_init(rdev
);
2811 r
= r600_irq_init(rdev
);
2813 DRM_ERROR("radeon: IH init failed (%d).\n", r
);
2814 radeon_irq_kms_fini(rdev
);
2819 ring
= &rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
];
2820 r
= radeon_ring_init(rdev
, ring
, ring
->ring_size
, RADEON_WB_CP_RPTR_OFFSET
,
2821 R600_CP_RB_RPTR
, R600_CP_RB_WPTR
,
2826 ring
= &rdev
->ring
[R600_RING_TYPE_DMA_INDEX
];
2827 r
= radeon_ring_init(rdev
, ring
, ring
->ring_size
, R600_WB_DMA_RPTR_OFFSET
,
2828 DMA_RB_RPTR
, DMA_RB_WPTR
,
2829 DMA_PACKET(DMA_PACKET_NOP
, 0, 0, 0));
2833 r
= r600_cp_load_microcode(rdev
);
2836 r
= r600_cp_resume(rdev
);
2840 r
= r600_dma_resume(rdev
);
2844 r
= radeon_ib_pool_init(rdev
);
2846 dev_err(rdev
->dev
, "IB initialization failed (%d).\n", r
);
2850 r
= r600_audio_init(rdev
);
2852 DRM_ERROR("radeon: audio init failed\n");
2859 void r600_vga_set_state(struct radeon_device
*rdev
, bool state
)
2863 temp
= RREG32(CONFIG_CNTL
);
2864 if (state
== false) {
2870 WREG32(CONFIG_CNTL
, temp
);
2873 int r600_resume(struct radeon_device
*rdev
)
2877 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
2878 * posting will perform necessary task to bring back GPU into good
2882 atom_asic_init(rdev
->mode_info
.atom_context
);
2884 radeon_pm_resume(rdev
);
2886 rdev
->accel_working
= true;
2887 r
= r600_startup(rdev
);
2889 DRM_ERROR("r600 startup failed on resume\n");
2890 rdev
->accel_working
= false;
2897 int r600_suspend(struct radeon_device
*rdev
)
2899 radeon_pm_suspend(rdev
);
2900 r600_audio_fini(rdev
);
2902 r600_dma_stop(rdev
);
2903 r600_irq_suspend(rdev
);
2904 radeon_wb_disable(rdev
);
2905 r600_pcie_gart_disable(rdev
);
2910 /* Plan is to move initialization in that function and use
2911 * helper function so that radeon_device_init pretty much
2912 * do nothing more than calling asic specific function. This
2913 * should also allow to remove a bunch of callback function
2916 int r600_init(struct radeon_device
*rdev
)
2920 if (r600_debugfs_mc_info_init(rdev
)) {
2921 DRM_ERROR("Failed to register debugfs file for mc !\n");
2924 if (!radeon_get_bios(rdev
)) {
2925 if (ASIC_IS_AVIVO(rdev
))
2928 /* Must be an ATOMBIOS */
2929 if (!rdev
->is_atom_bios
) {
2930 dev_err(rdev
->dev
, "Expecting atombios for R600 GPU\n");
2933 r
= radeon_atombios_init(rdev
);
2936 /* Post card if necessary */
2937 if (!radeon_card_posted(rdev
)) {
2939 dev_err(rdev
->dev
, "Card not posted and no BIOS - ignoring\n");
2942 DRM_INFO("GPU not posted. posting now...\n");
2943 atom_asic_init(rdev
->mode_info
.atom_context
);
2945 /* Initialize scratch registers */
2946 r600_scratch_init(rdev
);
2947 /* Initialize surface registers */
2948 radeon_surface_init(rdev
);
2949 /* Initialize clocks */
2950 radeon_get_clock_info(rdev
->ddev
);
2952 r
= radeon_fence_driver_init(rdev
);
2955 if (rdev
->flags
& RADEON_IS_AGP
) {
2956 r
= radeon_agp_init(rdev
);
2958 radeon_agp_disable(rdev
);
2960 r
= r600_mc_init(rdev
);
2963 /* Memory manager */
2964 r
= radeon_bo_init(rdev
);
2968 if (!rdev
->me_fw
|| !rdev
->pfp_fw
|| !rdev
->rlc_fw
) {
2969 r
= r600_init_microcode(rdev
);
2971 DRM_ERROR("Failed to load firmware!\n");
2976 /* Initialize power management */
2977 radeon_pm_init(rdev
);
2979 rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
].ring_obj
= NULL
;
2980 r600_ring_init(rdev
, &rdev
->ring
[RADEON_RING_TYPE_GFX_INDEX
], 1024 * 1024);
2982 rdev
->ring
[R600_RING_TYPE_DMA_INDEX
].ring_obj
= NULL
;
2983 r600_ring_init(rdev
, &rdev
->ring
[R600_RING_TYPE_DMA_INDEX
], 64 * 1024);
2985 rdev
->ih
.ring_obj
= NULL
;
2986 r600_ih_ring_init(rdev
, 64 * 1024);
2988 r
= r600_pcie_gart_init(rdev
);
2992 rdev
->accel_working
= true;
2993 r
= r600_startup(rdev
);
2995 dev_err(rdev
->dev
, "disabling GPU acceleration\n");
2997 r600_dma_fini(rdev
);
2998 r600_irq_fini(rdev
);
2999 radeon_wb_fini(rdev
);
3000 radeon_ib_pool_fini(rdev
);
3001 radeon_irq_kms_fini(rdev
);
3002 r600_pcie_gart_fini(rdev
);
3003 rdev
->accel_working
= false;
3009 void r600_fini(struct radeon_device
*rdev
)
3011 radeon_pm_fini(rdev
);
3012 r600_audio_fini(rdev
);
3014 r600_dma_fini(rdev
);
3015 r600_irq_fini(rdev
);
3016 radeon_wb_fini(rdev
);
3017 radeon_ib_pool_fini(rdev
);
3018 radeon_irq_kms_fini(rdev
);
3019 r600_pcie_gart_fini(rdev
);
3020 r600_vram_scratch_fini(rdev
);
3021 radeon_agp_fini(rdev
);
3022 radeon_gem_fini(rdev
);
3023 radeon_fence_driver_fini(rdev
);
3024 radeon_bo_fini(rdev
);
3025 radeon_atombios_fini(rdev
);
3034 void r600_ring_ib_execute(struct radeon_device
*rdev
, struct radeon_ib
*ib
)
3036 struct radeon_ring
*ring
= &rdev
->ring
[ib
->ring
];
3039 if (ring
->rptr_save_reg
) {
3040 next_rptr
= ring
->wptr
+ 3 + 4;
3041 radeon_ring_write(ring
, PACKET3(PACKET3_SET_CONFIG_REG
, 1));
3042 radeon_ring_write(ring
, ((ring
->rptr_save_reg
-
3043 PACKET3_SET_CONFIG_REG_OFFSET
) >> 2));
3044 radeon_ring_write(ring
, next_rptr
);
3045 } else if (rdev
->wb
.enabled
) {
3046 next_rptr
= ring
->wptr
+ 5 + 4;
3047 radeon_ring_write(ring
, PACKET3(PACKET3_MEM_WRITE
, 3));
3048 radeon_ring_write(ring
, ring
->next_rptr_gpu_addr
& 0xfffffffc);
3049 radeon_ring_write(ring
, (upper_32_bits(ring
->next_rptr_gpu_addr
) & 0xff) | (1 << 18));
3050 radeon_ring_write(ring
, next_rptr
);
3051 radeon_ring_write(ring
, 0);
3054 radeon_ring_write(ring
, PACKET3(PACKET3_INDIRECT_BUFFER
, 2));
3055 radeon_ring_write(ring
,
3059 (ib
->gpu_addr
& 0xFFFFFFFC));
3060 radeon_ring_write(ring
, upper_32_bits(ib
->gpu_addr
) & 0xFF);
3061 radeon_ring_write(ring
, ib
->length_dw
);
3064 int r600_ib_test(struct radeon_device
*rdev
, struct radeon_ring
*ring
)
3066 struct radeon_ib ib
;
3072 r
= radeon_scratch_get(rdev
, &scratch
);
3074 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r
);
3077 WREG32(scratch
, 0xCAFEDEAD);
3078 r
= radeon_ib_get(rdev
, ring
->idx
, &ib
, NULL
, 256);
3080 DRM_ERROR("radeon: failed to get ib (%d).\n", r
);
3083 ib
.ptr
[0] = PACKET3(PACKET3_SET_CONFIG_REG
, 1);
3084 ib
.ptr
[1] = ((scratch
- PACKET3_SET_CONFIG_REG_OFFSET
) >> 2);
3085 ib
.ptr
[2] = 0xDEADBEEF;
3087 r
= radeon_ib_schedule(rdev
, &ib
, NULL
);
3089 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r
);
3092 r
= radeon_fence_wait(ib
.fence
, false);
3094 DRM_ERROR("radeon: fence wait failed (%d).\n", r
);
3097 for (i
= 0; i
< rdev
->usec_timeout
; i
++) {
3098 tmp
= RREG32(scratch
);
3099 if (tmp
== 0xDEADBEEF)
3103 if (i
< rdev
->usec_timeout
) {
3104 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib
.fence
->ring
, i
);
3106 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3111 radeon_ib_free(rdev
, &ib
);
3113 radeon_scratch_free(rdev
, scratch
);
/*
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}
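/*
 * Illustrative sketch (not driver code): because the IH ring size is a
 * power of two, ptr_mask lets the read pointer wrap without a modulo.
 * A hypothetical model of the host-side consumption loop described in
 * the comment above:
 */
#if 0
static void consume_ih_entries(u32 *rptr, u32 wptr, u32 ptr_mask,
			       void (*handle)(u32 dword_index))
{
	while (*rptr != wptr) {
		handle(*rptr / 4);		 /* rptr/wptr are byte offsets */
		*rptr = (*rptr + 16) & ptr_mask; /* one 128-bit IV entry */
	}
}
#endif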
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		mdelay(15);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_resume(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
}
3280 static void r600_disable_interrupt_state(struct radeon_device
*rdev
)
3284 WREG32(CP_INT_CNTL
, CNTX_BUSY_INT_ENABLE
| CNTX_EMPTY_INT_ENABLE
);
3285 tmp
= RREG32(DMA_CNTL
) & ~TRAP_ENABLE
;
3286 WREG32(DMA_CNTL
, tmp
);
3287 WREG32(GRBM_INT_CNTL
, 0);
3288 WREG32(DxMODE_INT_MASK
, 0);
3289 WREG32(D1GRPH_INTERRUPT_CONTROL
, 0);
3290 WREG32(D2GRPH_INTERRUPT_CONTROL
, 0);
3291 if (ASIC_IS_DCE3(rdev
)) {
3292 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL
, 0);
3293 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL
, 0);
3294 tmp
= RREG32(DC_HPD1_INT_CONTROL
) & DC_HPDx_INT_POLARITY
;
3295 WREG32(DC_HPD1_INT_CONTROL
, tmp
);
3296 tmp
= RREG32(DC_HPD2_INT_CONTROL
) & DC_HPDx_INT_POLARITY
;
3297 WREG32(DC_HPD2_INT_CONTROL
, tmp
);
3298 tmp
= RREG32(DC_HPD3_INT_CONTROL
) & DC_HPDx_INT_POLARITY
;
3299 WREG32(DC_HPD3_INT_CONTROL
, tmp
);
3300 tmp
= RREG32(DC_HPD4_INT_CONTROL
) & DC_HPDx_INT_POLARITY
;
3301 WREG32(DC_HPD4_INT_CONTROL
, tmp
);
3302 if (ASIC_IS_DCE32(rdev
)) {
3303 tmp
= RREG32(DC_HPD5_INT_CONTROL
) & DC_HPDx_INT_POLARITY
;
3304 WREG32(DC_HPD5_INT_CONTROL
, tmp
);
3305 tmp
= RREG32(DC_HPD6_INT_CONTROL
) & DC_HPDx_INT_POLARITY
;
3306 WREG32(DC_HPD6_INT_CONTROL
, tmp
);
3307 tmp
= RREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET0
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3308 WREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET0
, tmp
);
3309 tmp
= RREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET1
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3310 WREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET1
, tmp
);
3312 tmp
= RREG32(HDMI0_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3313 WREG32(HDMI0_AUDIO_PACKET_CONTROL
, tmp
);
3314 tmp
= RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3315 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL
, tmp
);
3318 WREG32(DACA_AUTODETECT_INT_CONTROL
, 0);
3319 WREG32(DACB_AUTODETECT_INT_CONTROL
, 0);
3320 tmp
= RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL
) & DC_HOT_PLUG_DETECTx_INT_POLARITY
;
3321 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL
, tmp
);
3322 tmp
= RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL
) & DC_HOT_PLUG_DETECTx_INT_POLARITY
;
3323 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL
, tmp
);
3324 tmp
= RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL
) & DC_HOT_PLUG_DETECTx_INT_POLARITY
;
3325 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL
, tmp
);
3326 tmp
= RREG32(HDMI0_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3327 WREG32(HDMI0_AUDIO_PACKET_CONTROL
, tmp
);
3328 tmp
= RREG32(HDMI1_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3329 WREG32(HDMI1_AUDIO_PACKET_CONTROL
, tmp
);
3333 int r600_irq_init(struct radeon_device
*rdev
)
3337 u32 interrupt_cntl
, ih_cntl
, ih_rb_cntl
;
3340 ret
= r600_ih_ring_alloc(rdev
);
3345 r600_disable_interrupts(rdev
);
3348 if (rdev
->family
>= CHIP_CEDAR
)
3349 ret
= evergreen_rlc_resume(rdev
);
3351 ret
= r600_rlc_resume(rdev
);
3353 r600_ih_ring_fini(rdev
);
3357 /* setup interrupt control */
3358 /* set dummy read address to ring address */
3359 WREG32(INTERRUPT_CNTL2
, rdev
->ih
.gpu_addr
>> 8);
3360 interrupt_cntl
= RREG32(INTERRUPT_CNTL
);
3361 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3362 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3364 interrupt_cntl
&= ~IH_DUMMY_RD_OVERRIDE
;
3365 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3366 interrupt_cntl
&= ~IH_REQ_NONSNOOP_EN
;
3367 WREG32(INTERRUPT_CNTL
, interrupt_cntl
);
3369 WREG32(IH_RB_BASE
, rdev
->ih
.gpu_addr
>> 8);
3370 rb_bufsz
= order_base_2(rdev
->ih
.ring_size
/ 4);
3372 ih_rb_cntl
= (IH_WPTR_OVERFLOW_ENABLE
|
3373 IH_WPTR_OVERFLOW_CLEAR
|
3376 if (rdev
->wb
.enabled
)
3377 ih_rb_cntl
|= IH_WPTR_WRITEBACK_ENABLE
;
3379 /* set the writeback address whether it's enabled or not */
3380 WREG32(IH_RB_WPTR_ADDR_LO
, (rdev
->wb
.gpu_addr
+ R600_WB_IH_WPTR_OFFSET
) & 0xFFFFFFFC);
3381 WREG32(IH_RB_WPTR_ADDR_HI
, upper_32_bits(rdev
->wb
.gpu_addr
+ R600_WB_IH_WPTR_OFFSET
) & 0xFF);
3383 WREG32(IH_RB_CNTL
, ih_rb_cntl
);
3385 /* set rptr, wptr to 0 */
3386 WREG32(IH_RB_RPTR
, 0);
3387 WREG32(IH_RB_WPTR
, 0);
3389 /* Default settings for IH_CNTL (disabled at first) */
3390 ih_cntl
= MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3391 /* RPTR_REARM only works if msi's are enabled */
3392 if (rdev
->msi_enabled
)
3393 ih_cntl
|= RPTR_REARM
;
3394 WREG32(IH_CNTL
, ih_cntl
);
3396 /* force the active interrupt state to all disabled */
3397 if (rdev
->family
>= CHIP_CEDAR
)
3398 evergreen_disable_interrupt_state(rdev
);
3400 r600_disable_interrupt_state(rdev
);
3402 /* at this point everything should be setup correctly to enable master */
3403 pci_set_master(rdev
->pdev
);
3406 r600_enable_interrupts(rdev
);
3411 void r600_irq_suspend(struct radeon_device
*rdev
)
3413 r600_irq_disable(rdev
);
3414 r600_rlc_stop(rdev
);
3417 void r600_irq_fini(struct radeon_device
*rdev
)
3419 r600_irq_suspend(rdev
);
3420 r600_ih_ring_fini(rdev
);
3423 int r600_irq_set(struct radeon_device
*rdev
)
3425 u32 cp_int_cntl
= CNTX_BUSY_INT_ENABLE
| CNTX_EMPTY_INT_ENABLE
;
3427 u32 hpd1
, hpd2
, hpd3
, hpd4
= 0, hpd5
= 0, hpd6
= 0;
3428 u32 grbm_int_cntl
= 0;
3430 u32 d1grph
= 0, d2grph
= 0;
3432 u32 thermal_int
= 0;
3434 if (!rdev
->irq
.installed
) {
3435 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3438 /* don't enable anything if the ih is disabled */
3439 if (!rdev
->ih
.enabled
) {
3440 r600_disable_interrupts(rdev
);
3441 /* force the active interrupt state to all disabled */
3442 r600_disable_interrupt_state(rdev
);
3446 if (ASIC_IS_DCE3(rdev
)) {
3447 hpd1
= RREG32(DC_HPD1_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3448 hpd2
= RREG32(DC_HPD2_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3449 hpd3
= RREG32(DC_HPD3_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3450 hpd4
= RREG32(DC_HPD4_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3451 if (ASIC_IS_DCE32(rdev
)) {
3452 hpd5
= RREG32(DC_HPD5_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3453 hpd6
= RREG32(DC_HPD6_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3454 hdmi0
= RREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET0
) & ~AFMT_AZ_FORMAT_WTRIG_MASK
;
3455 hdmi1
= RREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET1
) & ~AFMT_AZ_FORMAT_WTRIG_MASK
;
3457 hdmi0
= RREG32(HDMI0_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3458 hdmi1
= RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3461 hpd1
= RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3462 hpd2
= RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3463 hpd3
= RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL
) & ~DC_HPDx_INT_EN
;
3464 hdmi0
= RREG32(HDMI0_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3465 hdmi1
= RREG32(HDMI1_AUDIO_PACKET_CONTROL
) & ~HDMI0_AZ_FORMAT_WTRIG_MASK
;
3468 dma_cntl
= RREG32(DMA_CNTL
) & ~TRAP_ENABLE
;
3470 if ((rdev
->family
> CHIP_R600
) && (rdev
->family
< CHIP_RV770
)) {
3471 thermal_int
= RREG32(CG_THERMAL_INT
) &
3472 ~(THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
);
3473 } else if (rdev
->family
>= CHIP_RV770
) {
3474 thermal_int
= RREG32(RV770_CG_THERMAL_INT
) &
3475 ~(THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
);
3477 if (rdev
->irq
.dpm_thermal
) {
3478 DRM_DEBUG("dpm thermal\n");
3479 thermal_int
|= THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
;
3482 if (atomic_read(&rdev
->irq
.ring_int
[RADEON_RING_TYPE_GFX_INDEX
])) {
3483 DRM_DEBUG("r600_irq_set: sw int\n");
3484 cp_int_cntl
|= RB_INT_ENABLE
;
3485 cp_int_cntl
|= TIME_STAMP_INT_ENABLE
;
3488 if (atomic_read(&rdev
->irq
.ring_int
[R600_RING_TYPE_DMA_INDEX
])) {
3489 DRM_DEBUG("r600_irq_set: sw int dma\n");
3490 dma_cntl
|= TRAP_ENABLE
;
3493 if (rdev
->irq
.crtc_vblank_int
[0] ||
3494 atomic_read(&rdev
->irq
.pflip
[0])) {
3495 DRM_DEBUG("r600_irq_set: vblank 0\n");
3496 mode_int
|= D1MODE_VBLANK_INT_MASK
;
3498 if (rdev
->irq
.crtc_vblank_int
[1] ||
3499 atomic_read(&rdev
->irq
.pflip
[1])) {
3500 DRM_DEBUG("r600_irq_set: vblank 1\n");
3501 mode_int
|= D2MODE_VBLANK_INT_MASK
;
3503 if (rdev
->irq
.hpd
[0]) {
3504 DRM_DEBUG("r600_irq_set: hpd 1\n");
3505 hpd1
|= DC_HPDx_INT_EN
;
3507 if (rdev
->irq
.hpd
[1]) {
3508 DRM_DEBUG("r600_irq_set: hpd 2\n");
3509 hpd2
|= DC_HPDx_INT_EN
;
3511 if (rdev
->irq
.hpd
[2]) {
3512 DRM_DEBUG("r600_irq_set: hpd 3\n");
3513 hpd3
|= DC_HPDx_INT_EN
;
3515 if (rdev
->irq
.hpd
[3]) {
3516 DRM_DEBUG("r600_irq_set: hpd 4\n");
3517 hpd4
|= DC_HPDx_INT_EN
;
3519 if (rdev
->irq
.hpd
[4]) {
3520 DRM_DEBUG("r600_irq_set: hpd 5\n");
3521 hpd5
|= DC_HPDx_INT_EN
;
3523 if (rdev
->irq
.hpd
[5]) {
3524 DRM_DEBUG("r600_irq_set: hpd 6\n");
3525 hpd6
|= DC_HPDx_INT_EN
;
3527 if (rdev
->irq
.afmt
[0]) {
3528 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3529 hdmi0
|= HDMI0_AZ_FORMAT_WTRIG_MASK
;
3531 if (rdev
->irq
.afmt
[1]) {
3532 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3533 hdmi1
|= HDMI0_AZ_FORMAT_WTRIG_MASK
;
3536 WREG32(CP_INT_CNTL
, cp_int_cntl
);
3537 WREG32(DMA_CNTL
, dma_cntl
);
3538 WREG32(DxMODE_INT_MASK
, mode_int
);
3539 WREG32(D1GRPH_INTERRUPT_CONTROL
, d1grph
);
3540 WREG32(D2GRPH_INTERRUPT_CONTROL
, d2grph
);
3541 WREG32(GRBM_INT_CNTL
, grbm_int_cntl
);
3542 if (ASIC_IS_DCE3(rdev
)) {
3543 WREG32(DC_HPD1_INT_CONTROL
, hpd1
);
3544 WREG32(DC_HPD2_INT_CONTROL
, hpd2
);
3545 WREG32(DC_HPD3_INT_CONTROL
, hpd3
);
3546 WREG32(DC_HPD4_INT_CONTROL
, hpd4
);
3547 if (ASIC_IS_DCE32(rdev
)) {
3548 WREG32(DC_HPD5_INT_CONTROL
, hpd5
);
3549 WREG32(DC_HPD6_INT_CONTROL
, hpd6
);
3550 WREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET0
, hdmi0
);
3551 WREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET1
, hdmi1
);
3553 WREG32(HDMI0_AUDIO_PACKET_CONTROL
, hdmi0
);
3554 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL
, hdmi1
);
3557 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL
, hpd1
);
3558 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL
, hpd2
);
3559 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL
, hpd3
);
3560 WREG32(HDMI0_AUDIO_PACKET_CONTROL
, hdmi0
);
3561 WREG32(HDMI1_AUDIO_PACKET_CONTROL
, hdmi1
);
3563 if ((rdev
->family
> CHIP_R600
) && (rdev
->family
< CHIP_RV770
)) {
3564 WREG32(CG_THERMAL_INT
, thermal_int
);
3565 } else if (rdev
->family
>= CHIP_RV770
) {
3566 WREG32(RV770_CG_THERMAL_INT
, thermal_int
);
3572 static void r600_irq_ack(struct radeon_device
*rdev
)
3576 if (ASIC_IS_DCE3(rdev
)) {
3577 rdev
->irq
.stat_regs
.r600
.disp_int
= RREG32(DCE3_DISP_INTERRUPT_STATUS
);
3578 rdev
->irq
.stat_regs
.r600
.disp_int_cont
= RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE
);
3579 rdev
->irq
.stat_regs
.r600
.disp_int_cont2
= RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2
);
3580 if (ASIC_IS_DCE32(rdev
)) {
3581 rdev
->irq
.stat_regs
.r600
.hdmi0_status
= RREG32(AFMT_STATUS
+ DCE3_HDMI_OFFSET0
);
3582 rdev
->irq
.stat_regs
.r600
.hdmi1_status
= RREG32(AFMT_STATUS
+ DCE3_HDMI_OFFSET1
);
3584 rdev
->irq
.stat_regs
.r600
.hdmi0_status
= RREG32(HDMI0_STATUS
);
3585 rdev
->irq
.stat_regs
.r600
.hdmi1_status
= RREG32(DCE3_HDMI1_STATUS
);
3588 rdev
->irq
.stat_regs
.r600
.disp_int
= RREG32(DISP_INTERRUPT_STATUS
);
3589 rdev
->irq
.stat_regs
.r600
.disp_int_cont
= RREG32(DISP_INTERRUPT_STATUS_CONTINUE
);
3590 rdev
->irq
.stat_regs
.r600
.disp_int_cont2
= 0;
3591 rdev
->irq
.stat_regs
.r600
.hdmi0_status
= RREG32(HDMI0_STATUS
);
3592 rdev
->irq
.stat_regs
.r600
.hdmi1_status
= RREG32(HDMI1_STATUS
);
3594 rdev
->irq
.stat_regs
.r600
.d1grph_int
= RREG32(D1GRPH_INTERRUPT_STATUS
);
3595 rdev
->irq
.stat_regs
.r600
.d2grph_int
= RREG32(D2GRPH_INTERRUPT_STATUS
);
3597 if (rdev
->irq
.stat_regs
.r600
.d1grph_int
& DxGRPH_PFLIP_INT_OCCURRED
)
3598 WREG32(D1GRPH_INTERRUPT_STATUS
, DxGRPH_PFLIP_INT_CLEAR
);
3599 if (rdev
->irq
.stat_regs
.r600
.d2grph_int
& DxGRPH_PFLIP_INT_OCCURRED
)
3600 WREG32(D2GRPH_INTERRUPT_STATUS
, DxGRPH_PFLIP_INT_CLEAR
);
3601 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D1_VBLANK_INTERRUPT
)
3602 WREG32(D1MODE_VBLANK_STATUS
, DxMODE_VBLANK_ACK
);
3603 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D1_VLINE_INTERRUPT
)
3604 WREG32(D1MODE_VLINE_STATUS
, DxMODE_VLINE_ACK
);
3605 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D2_VBLANK_INTERRUPT
)
3606 WREG32(D2MODE_VBLANK_STATUS
, DxMODE_VBLANK_ACK
);
3607 if (rdev
->irq
.stat_regs
.r600
.disp_int
& LB_D2_VLINE_INTERRUPT
)
3608 WREG32(D2MODE_VLINE_STATUS
, DxMODE_VLINE_ACK
);
3609 if (rdev
->irq
.stat_regs
.r600
.disp_int
& DC_HPD1_INTERRUPT
) {
3610 if (ASIC_IS_DCE3(rdev
)) {
3611 tmp
= RREG32(DC_HPD1_INT_CONTROL
);
3612 tmp
|= DC_HPDx_INT_ACK
;
3613 WREG32(DC_HPD1_INT_CONTROL
, tmp
);
3615 tmp
= RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL
);
3616 tmp
|= DC_HPDx_INT_ACK
;
3617 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL
, tmp
);
3620 if (rdev
->irq
.stat_regs
.r600
.disp_int
& DC_HPD2_INTERRUPT
) {
3621 if (ASIC_IS_DCE3(rdev
)) {
3622 tmp
= RREG32(DC_HPD2_INT_CONTROL
);
3623 tmp
|= DC_HPDx_INT_ACK
;
3624 WREG32(DC_HPD2_INT_CONTROL
, tmp
);
3626 tmp
= RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL
);
3627 tmp
|= DC_HPDx_INT_ACK
;
3628 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL
, tmp
);
3631 if (rdev
->irq
.stat_regs
.r600
.disp_int_cont
& DC_HPD3_INTERRUPT
) {
3632 if (ASIC_IS_DCE3(rdev
)) {
3633 tmp
= RREG32(DC_HPD3_INT_CONTROL
);
3634 tmp
|= DC_HPDx_INT_ACK
;
3635 WREG32(DC_HPD3_INT_CONTROL
, tmp
);
3637 tmp
= RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL
);
3638 tmp
|= DC_HPDx_INT_ACK
;
3639 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL
, tmp
);
3642 if (rdev
->irq
.stat_regs
.r600
.disp_int_cont
& DC_HPD4_INTERRUPT
) {
3643 tmp
= RREG32(DC_HPD4_INT_CONTROL
);
3644 tmp
|= DC_HPDx_INT_ACK
;
3645 WREG32(DC_HPD4_INT_CONTROL
, tmp
);
3647 if (ASIC_IS_DCE32(rdev
)) {
3648 if (rdev
->irq
.stat_regs
.r600
.disp_int_cont2
& DC_HPD5_INTERRUPT
) {
3649 tmp
= RREG32(DC_HPD5_INT_CONTROL
);
3650 tmp
|= DC_HPDx_INT_ACK
;
3651 WREG32(DC_HPD5_INT_CONTROL
, tmp
);
3653 if (rdev
->irq
.stat_regs
.r600
.disp_int_cont2
& DC_HPD6_INTERRUPT
) {
3654 tmp
= RREG32(DC_HPD5_INT_CONTROL
);
3655 tmp
|= DC_HPDx_INT_ACK
;
3656 WREG32(DC_HPD6_INT_CONTROL
, tmp
);
3658 if (rdev
->irq
.stat_regs
.r600
.hdmi0_status
& AFMT_AZ_FORMAT_WTRIG
) {
3659 tmp
= RREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET0
);
3660 tmp
|= AFMT_AZ_FORMAT_WTRIG_ACK
;
3661 WREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET0
, tmp
);
3663 if (rdev
->irq
.stat_regs
.r600
.hdmi1_status
& AFMT_AZ_FORMAT_WTRIG
) {
3664 tmp
= RREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET1
);
3665 tmp
|= AFMT_AZ_FORMAT_WTRIG_ACK
;
3666 WREG32(AFMT_AUDIO_PACKET_CONTROL
+ DCE3_HDMI_OFFSET1
, tmp
);
3669 if (rdev
->irq
.stat_regs
.r600
.hdmi0_status
& HDMI0_AZ_FORMAT_WTRIG
) {
3670 tmp
= RREG32(HDMI0_AUDIO_PACKET_CONTROL
);
3671 tmp
|= HDMI0_AZ_FORMAT_WTRIG_ACK
;
3672 WREG32(HDMI0_AUDIO_PACKET_CONTROL
, tmp
);
3674 if (rdev
->irq
.stat_regs
.r600
.hdmi1_status
& HDMI0_AZ_FORMAT_WTRIG
) {
3675 if (ASIC_IS_DCE3(rdev
)) {
3676 tmp
= RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL
);
3677 tmp
|= HDMI0_AZ_FORMAT_WTRIG_ACK
;
3678 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL
, tmp
);
3680 tmp
= RREG32(HDMI1_AUDIO_PACKET_CONTROL
);
3681 tmp
|= HDMI0_AZ_FORMAT_WTRIG_ACK
;
3682 WREG32(HDMI1_AUDIO_PACKET_CONTROL
, tmp
);
3688 void r600_irq_disable(struct radeon_device
*rdev
)
3690 r600_disable_interrupts(rdev
);
3691 /* Wait and acknowledge irq */
3694 r600_disable_interrupt_state(rdev
);
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing the
		 * interrupts from the last not overwritten vector (wptr + 16).
		 * Hopefully this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
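/*
 * Illustrative sketch (not driver code): on overflow the driver skips one
 * 16-byte vector past the reported write pointer and resumes there, relying
 * on the power-of-two ptr_mask to wrap.  A hypothetical standalone model of
 * that arithmetic:
 */
#if 0
static u32 ih_resume_rptr(u32 wptr, u32 ring_size_bytes)
{
	u32 ptr_mask = ring_size_bytes - 1;	/* ring size must be a power of two */

	return (wptr + 16) & ptr_mask;		/* first vector that is still valid */
}
#endif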
/*
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *    181         -  EOP Interrupt
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
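/*
 * Illustrative sketch (not driver code): a hypothetical decoder for one
 * 128-bit IV entry laid out as four little-endian dwords, matching the
 * field description above (source id in dword 0, source data in dword 1).
 */
#if 0
struct iv_entry_fields {
	u32 src_id;	/* bits [7:0] of the entry */
	u32 src_data;	/* bits [59:32], i.e. the low 28 bits of dword 1 */
};

static struct iv_entry_fields decode_iv_entry(const u32 *ring, u32 rptr_bytes)
{
	u32 ring_index = rptr_bytes / 4;	/* rptr is kept in bytes */
	struct iv_entry_fields e;

	e.src_id   = le32_to_cpu(ring[ring_index]) & 0xff;
	e.src_data = le32_to_cpu(ring[ring_index + 1]) & 0xfffffff;
	return e;
}
#endif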
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
#if defined(CONFIG_DEBUG_FS)
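/*
 * r600_debugfs_mc_info - dump basic status registers to debugfs
 *
 * seq_file callback for the "r600_mc_info" debugfs file; prints the
 * SRBM_STATUS and VM_L2_STATUS registers.
 */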
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif
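/*
 * r600_debugfs_mc_info_init - register the r600 MC debugfs file
 *
 * Registers r600_mc_info_list with the radeon debugfs core when
 * CONFIG_DEBUG_FS is enabled; otherwise it is a no-op.
 */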
int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX chips don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to rendering
 * corruption; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
 * To avoid this, perform the HDP flush directly by writing the
 * register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
	 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards, so just use
	 * the old method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
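/**
 * r600_set_pcie_lanes - program the PCIE link width
 *
 * @rdev: radeon_device pointer
 * @lanes: requested number of PCIE lanes
 *
 * Translates the requested lane count into the matching
 * RADEON_PCIE_LC_LINK_WIDTH_* value and triggers a link
 * reconfiguration.  Not applicable to IGP or non-PCIE parts.
 */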
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
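/**
 * r600_get_pcie_lanes - report the current PCIE link width
 *
 * @rdev: radeon_device pointer
 *
 * Reads RADEON_PCIE_LC_LINK_WIDTH_CNTL and converts the link width
 * field back into a lane count.  Returns 0 for IGP and non-PCIE parts.
 */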
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
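/*
 * r600_pcie_gen2_enable - try to switch the PCIE link to gen 2 speeds
 *
 * Skipped for IGP, non-PCIE, X2 and pre-RV6xx parts, when the bus does
 * not support 5.0/8.0 GT/s, when gen 2 is already enabled, or when it
 * was disabled with radeon.pcie_gen2=0.  Otherwise programs the link
 * width and speed control registers to request a gen 2 speed change.
 */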
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
	    (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);