/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
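/*
 * Select the power state and clock mode that dynamic PM should switch to
 * next, based on the planned action (minimum, downclock, upclock, default)
 * and the number of active CRTCs.  IGP and R600 parts switch between whole
 * power states; later ASICs stay on one power state and step through its
 * clock modes.
 */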
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
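/*
 * Return the index of the instance'th power state of the requested type,
 * or the default power state index if no match is found.
 */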
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}
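/*
 * Fill in the dpms on/off power state and clock mode indices for each PM
 * profile.  RS780/RS880 boards expose two, three or four power states, so
 * the table is populated differently for each case.
 */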
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
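/*
 * Apply the non-clock settings of the requested power state; currently this
 * only programs the SW-controlled VDDC voltage when it differs from the
 * current one.
 */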
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
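/*
 * Program the HPD interrupt polarity so the next interrupt fires on the
 * opposite of the currently sensed state (connect vs. disconnect).
 */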
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
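/*
 * Enable the HPD pins used by this board's connectors, set their interrupt
 * polarity, and enable the matching HPD interrupt sources.
 */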
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
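/*
 * R600 PCIE GART: make sure pending HDP writes have reached VRAM, then
 * invalidate the GART TLB for VM context 0 and wait for the request to
 * complete.
 */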
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
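/*
 * Program the VM L2 cache and L1 TLB clients for GART context 0, point the
 * context at the page table in VRAM, then flush the TLB and mark the GART
 * ready.
 */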
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
			   EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
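/*
 * Program the memory controller apertures (system aperture, FB location,
 * AGP apertures) with the MC stopped, then resume it and disable the VGA
 * renderer so it cannot overwrite our objects.
 */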
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address as in the CPU (PCI)
 * address space, as some GPUs seem to have issues when it is reprogrammed
 * to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * If AGP is in use, VRAM is placed adjacent to the AGP aperture, as the GPU
 * needs to see both as one range so that accesses outside of them can be
 * caught (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT are limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
				(void **)&rdev->vram_scratch.ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}
void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
/* We don't check that the GPU really needs a reset; we simply do the reset.
 * It's up to the caller to determine if the GPU needs one. We might add a
 * helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	struct r100_gpu_lockup *lockup;
	int r;

	if (rdev->family >= CHIP_RV770)
		lockup = &rdev->config.rv770.lockup;
	else
		lockup = &rdev->config.r600.lockup;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
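/*
 * Build the two-bits-per-pipe backend map used in GB_TILING_CONFIG by
 * distributing the enabled render backends across the tile pipes in
 * swizzled order.
 */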
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
1557 void r600_gpu_init(struct radeon_device
*rdev
)
1562 u32 cc_rb_backend_disable
;
1563 u32 cc_gc_shader_pipe_config
;
1567 u32 sq_gpr_resource_mgmt_1
= 0;
1568 u32 sq_gpr_resource_mgmt_2
= 0;
1569 u32 sq_thread_resource_mgmt
= 0;
1570 u32 sq_stack_resource_mgmt_1
= 0;
1571 u32 sq_stack_resource_mgmt_2
= 0;
1573 /* FIXME: implement */
1574 switch (rdev
->family
) {
1576 rdev
->config
.r600
.max_pipes
= 4;
1577 rdev
->config
.r600
.max_tile_pipes
= 8;
1578 rdev
->config
.r600
.max_simds
= 4;
1579 rdev
->config
.r600
.max_backends
= 4;
1580 rdev
->config
.r600
.max_gprs
= 256;
1581 rdev
->config
.r600
.max_threads
= 192;
1582 rdev
->config
.r600
.max_stack_entries
= 256;
1583 rdev
->config
.r600
.max_hw_contexts
= 8;
1584 rdev
->config
.r600
.max_gs_threads
= 16;
1585 rdev
->config
.r600
.sx_max_export_size
= 128;
1586 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1587 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1588 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1592 rdev
->config
.r600
.max_pipes
= 2;
1593 rdev
->config
.r600
.max_tile_pipes
= 2;
1594 rdev
->config
.r600
.max_simds
= 3;
1595 rdev
->config
.r600
.max_backends
= 1;
1596 rdev
->config
.r600
.max_gprs
= 128;
1597 rdev
->config
.r600
.max_threads
= 192;
1598 rdev
->config
.r600
.max_stack_entries
= 128;
1599 rdev
->config
.r600
.max_hw_contexts
= 8;
1600 rdev
->config
.r600
.max_gs_threads
= 4;
1601 rdev
->config
.r600
.sx_max_export_size
= 128;
1602 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1603 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1604 rdev
->config
.r600
.sq_num_cf_insts
= 2;
1610 rdev
->config
.r600
.max_pipes
= 1;
1611 rdev
->config
.r600
.max_tile_pipes
= 1;
1612 rdev
->config
.r600
.max_simds
= 2;
1613 rdev
->config
.r600
.max_backends
= 1;
1614 rdev
->config
.r600
.max_gprs
= 128;
1615 rdev
->config
.r600
.max_threads
= 192;
1616 rdev
->config
.r600
.max_stack_entries
= 128;
1617 rdev
->config
.r600
.max_hw_contexts
= 4;
1618 rdev
->config
.r600
.max_gs_threads
= 4;
1619 rdev
->config
.r600
.sx_max_export_size
= 128;
1620 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1621 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1622 rdev
->config
.r600
.sq_num_cf_insts
= 1;
1625 rdev
->config
.r600
.max_pipes
= 4;
1626 rdev
->config
.r600
.max_tile_pipes
= 4;
1627 rdev
->config
.r600
.max_simds
= 4;
1628 rdev
->config
.r600
.max_backends
= 4;
1629 rdev
->config
.r600
.max_gprs
= 192;
1630 rdev
->config
.r600
.max_threads
= 192;
1631 rdev
->config
.r600
.max_stack_entries
= 256;
1632 rdev
->config
.r600
.max_hw_contexts
= 8;
1633 rdev
->config
.r600
.max_gs_threads
= 16;
1634 rdev
->config
.r600
.sx_max_export_size
= 128;
1635 rdev
->config
.r600
.sx_max_export_pos_size
= 16;
1636 rdev
->config
.r600
.sx_max_export_smx_size
= 128;
1637 rdev
->config
.r600
.sq_num_cf_insts
= 2;
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
		tiling_config |= PIPE_TILING(0);
		tiling_config |= PIPE_TILING(1);
		tiling_config |= PIPE_TILING(2);
		tiling_config |= PIPE_TILING(3);
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
		rdev->config.r600.tiling_group_size = 512;
		rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	rdev->config.r600.tile_config = tiling_config;
	rdev->config.r600.backend_map = backend_map;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
		WREG32(DB_DEBUG, 0);
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	WREG32(SQ_MS_FIFO_SIZES, tmp);
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
	sq_config |= (DX9_CONSTS |
	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
		tmp = TC_L2_SIZE(8);
		tmp = TC_L2_SIZE(4);
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		tmp = TC_L2_SIZE(0);
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);

void r600_cp_stop(struct radeon_device *rdev)
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
int r600_init_microcode(struct radeon_device *rdev)
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");

	switch (rdev->family) {
		rlc_chip_name = "R600";
		chip_name = "RV610";
		rlc_chip_name = "R600";
		chip_name = "RV630";
		rlc_chip_name = "R600";
		chip_name = "RV620";
		rlc_chip_name = "R600";
		chip_name = "RV635";
		rlc_chip_name = "R600";
		chip_name = "RV670";
		rlc_chip_name = "R600";
		chip_name = "RS780";
		rlc_chip_name = "R600";
		chip_name = "RV770";
		rlc_chip_name = "R700";
		chip_name = "RV730";
		rlc_chip_name = "R700";
		chip_name = "RV710";
		rlc_chip_name = "R700";
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		rlc_chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (rdev->pfp_fw->size != pfp_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (rdev->me_fw->size != me_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (rdev->rlc_fw->size != rlc_req_size) {
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);

	platform_device_unregister(pdev);

		       "r600_cp: Failed to load firmware \"%s\"\n",
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
static int r600_cp_load_microcode(struct radeon_device *rdev)
	const __be32 *fw_data;

	if (!rdev->me_fw || !rdev->pfp_fw)
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
int r600_cp_start(struct radeon_device *rdev)
	r = radeon_ring_lock(rdev, 7);
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
int r600_cp_resume(struct radeon_device *rdev)
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
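	/*
	 * Illustrative note (not in the original source): for the 1 MiB CP ring
	 * requested later via r600_ring_init(rdev, 1024 * 1024), the line above
	 * works out to rb_bufsz = drm_order(1048576 / 8) = 17 in the low byte,
	 * and, assuming RADEON_GPU_PAGE_SIZE is 4096, drm_order(4096 / 8) = 9 in
	 * bits 8-15, i.e. roughly tmp = (9 << 8) | 17 before the swap/update bits
	 * are ORed in below.
	 */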
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, rdev->cp.wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);

	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
		rdev->cp.ready = false;
void r600_cp_commit(struct radeon_device *rdev)
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
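	/*
	 * Illustrative note (not in the original source): for the 1 MiB ring
	 * requested by r600_init() via r600_ring_init(rdev, 1024 * 1024),
	 * rb_bufsz = drm_order(1048576 / 8) = 17 and
	 * ring_size = (1 << (17 + 1)) * 4 = 1048576 bytes, so a power-of-two
	 * request is kept as-is; align_mask = 15 is used to pad submissions to
	 * a 16-dword boundary.
	 */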
void r600_cp_fini(struct radeon_device *rdev)
	radeon_ring_fini(rdev);

/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
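	/*
	 * Illustrative note (not in the original source): with num_reg = 7 and
	 * reg_base = SCRATCH_REG0, the loop above hands out SCRATCH_REG0,
	 * SCRATCH_REG0 + 4, ... SCRATCH_REG0 + 24 as individually allocatable
	 * 32-bit scratch registers; r600_ring_test() and r600_ib_test() below
	 * are typical users via radeon_scratch_get()/radeon_scratch_free().
	 */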
int r600_ring_test(struct radeon_device *rdev)
	r = radeon_scratch_get(rdev, &scratch);
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
	radeon_scratch_free(rdev, scratch);
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
	if (rdev->wb.use_event) {
		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
		/* flush read cache over gart */
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(rdev, 0xFFFFFFFF);
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(rdev, addr & 0xffffffff);
		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(rdev, fence->seq);
		radeon_ring_write(rdev, 0);
		/* flush read cache over gart */
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
					PACKET3_VC_ACTION_ENA |
					PACKET3_SH_ACTION_ENA);
		radeon_ring_write(rdev, 0xFFFFFFFF);
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, 10); /* poll interval */
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(rdev, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(rdev, RB_INT_STAT);
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence *fence)
	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_gpu_pages);
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);

void r600_blit_suspend(struct radeon_device *rdev)
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
	/* FIXME: implement */

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
	/* FIXME: implement */
int r600_startup(struct radeon_device *rdev)
	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
			DRM_ERROR("Failed to load firmware!\n");

	r = r600_vram_scratch_init(rdev);

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
		r = r600_pcie_gart_enable(rdev);
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);

	r = r600_irq_init(rdev);
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	r = r600_cp_load_microcode(rdev);
	r = r600_cp_resume(rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state)
	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
	WREG32(CONFIG_CNTL, temp);

int r600_resume(struct radeon_device *rdev)
	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 */
	atom_asic_init(rdev->mode_info.atom_context);

	r = r600_startup(rdev);
		DRM_ERROR("r600 startup failed on resume\n");

	r = r600_ib_test(rdev);
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);

	r = r600_audio_init(rdev);
		DRM_ERROR("radeon: audio resume failed\n");

int r600_suspend(struct radeon_device *rdev)
	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	r600_blit_suspend(rdev);
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 */
int r600_init(struct radeon_device *rdev)
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	/* This don't do much */
	r = radeon_gem_init(rdev);
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
	r = radeon_atombios_init(rdev);
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_fence_driver_init(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
			radeon_agp_disable(rdev);
	r = r600_mc_init(rdev);
	/* Memory manager */
	r = radeon_bo_init(rdev);

	r = radeon_irq_kms_init(rdev);

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
			r = r600_ib_test(rdev);
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;

	r = r600_audio_init(rdev);
		return r; /* TODO error handling */

void r600_fini(struct radeon_device *rdev)
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
int r600_ib_test(struct radeon_device *rdev)
	struct radeon_ib *ib;

	r = radeon_scratch_get(rdev, &scratch);
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);

	r = radeon_ib_schedule(rdev, ib);
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	r = radeon_fence_wait(ib->fence, false);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
/*
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
 * the same as the CP ring buffer, but in reverse. Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and host consumes. As the host irq handler processes interrupts, it
 * increments the rptr. When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
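	/*
	 * Illustrative note (not in the original source): r600_init() calls
	 * r600_ih_ring_init(rdev, 64 * 1024), which gives
	 * rb_bufsz = drm_order(65536 / 4) = 14,
	 * ring_size = (1 << 14) * 4 = 65536 bytes and ptr_mask = 0xffff, so the
	 * rptr/wptr byte offsets wrap with a simple mask.
	 */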
static int r600_ih_ring_alloc(struct radeon_device *rdev)
	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);

static void r600_ih_ring_fini(struct radeon_device *rdev)
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
void r600_rlc_stop(struct radeon_device *rdev)
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);

	WREG32(RLC_CNTL, 0);

static void r600_rlc_start(struct radeon_device *rdev)
	WREG32(RLC_CNTL, RLC_ENABLE);

static int r600_rlc_init(struct radeon_device *rdev)
	const __be32 *fw_data;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);
static void r600_enable_interrupts(struct radeon_device *rdev)
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;

void r600_disable_interrupts(struct radeon_device *rdev)
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
static void r600_disable_interrupt_state(struct radeon_device *rdev)
	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
int r600_irq_init(struct radeon_device *rdev)
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	ret = r600_ih_ring_alloc(rdev);

	r600_disable_interrupts(rdev);

	ret = r600_rlc_init(rdev);
		r600_ih_ring_fini(rdev);

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
		r600_disable_interrupt_state(rdev);

	r600_enable_interrupts(rdev);

void r600_irq_suspend(struct radeon_device *rdev)
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);

void r600_irq_fini(struct radeon_device *rdev)
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
int r600_irq_set(struct radeon_device *rdev)
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 d1grph = 0, d2grph = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
static void r600_irq_ack(struct radeon_device *rdev)
	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);

void r600_irq_disable(struct radeon_device *rdev)
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	r600_disable_interrupt_state(rdev);
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	return (wptr & rdev->ih.ptr_mask);
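/*
 * Illustrative note (not in the original source): each IV ring entry is 16
 * bytes, so the overflow recovery above resumes one entry past the reported
 * write pointer. For example, with ptr_mask = 0xffff and a raw wptr of
 * 0x1234 | RB_OVERFLOW, the new rptr becomes (0x1234 + 16) & 0xffff = 0x1244.
 */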
/* Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *    181         -  EOP Interrupt
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
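/*
 * Illustrative note (not in the original source): r600_irq_process() below
 * pulls these fields out of two consecutive ring dwords, roughly:
 *
 *	ring_index = rptr / 4;
 *	src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
 *	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 *
 * so src_id 1 with src_data 0 is a D1 vblank, src_id 19 covers hotplug/DAC
 * detection, and src_id 181 is the CP end-of-pipe event.
 */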
int r600_irq_process(struct radeon_device *rdev)
	u32 src_id, src_data;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)

	wptr = r600_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);
		spin_unlock_irqrestore(&rdev->ih.lock, flags);

	/* Order reading of wptr vs. reading of IH ring data */

	/* display interrupts */

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		case 1: /* D1 vblank/vline */
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 5: /* D2 vblank/vline */
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
		case 19: /* HPD/DAC hotplug */
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);

		/* wptr/rptr are in bytes! */
		rptr &= rdev->ih.ptr_mask;
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
 * through ring buffer, this leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
 * directly perform HDP flush by writing register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
	/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
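	/*
	 * Illustrative note (not in the original source): only RV770-RV740
	 * parts with a mapped VRAM scratch page and no AGP take the
	 * HDP_DEBUG1-write-plus-framebuffer-readback path above; everything
	 * else falls back to writing 0x1 to
	 * R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL to force the HDP flush.
	 */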
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)

	if (!(rdev->flags & RADEON_IS_PCIE))

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))

	/* FIXME wait for idle */

		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);

int r600_get_pcie_lanes(struct radeon_device *rdev)
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)

	if (!(rdev->flags & RADEON_IS_PCIE))

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))

	/* FIXME wait for idle */

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;

	if (radeon_pcie_gen2 == 0)

	if (rdev->flags & RADEON_IS_IGP)

	if (!(rdev->flags & RADEON_IS_PCIE))

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		link_width_cntl |= LC_UPCONFIGURE_DIS;
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);