/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
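/* The flip sequence above follows the hardware's double-buffer protocol:
 * hold GRPH_UPDATE_LOCK while both surface address pairs are written so
 * they latch atomically, wait for SURFACE_UPDATE_PENDING to assert, then
 * release the lock so the flip can complete during the vblank period.
 * The pending bit is returned so the caller can poll for completion.
 */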
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = 0;

	if (temp & 0x400)
		actual_temp = -256;
	else if (temp & 0x200)
		actual_temp = 255;
	else if (temp & 0x100) {
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;
}
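/* Worked example: a raw reading of 0x1f6 has bit 8 set, so it is sign
 * extended: 0x1f6 | ~0x1ff == -10 in two's complement.  The register
 * counts half degrees, so the reported value is -10 * 1000 / 2 = -5000
 * millidegrees (-5 C).
 */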
int sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	int actual_temp = temp - 49;

	return actual_temp * 1000;
}
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}
/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
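/* Example: two enabled displays on a paired group get tmp = 0 and tmp = 4
 * respectively, i.e. half the line buffer each (3840 * 2 entries on DCE4);
 * a single enabled display takes the whole buffer (7680 * 2).
 */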
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}
struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
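/* Example: wm->yclk = 1000000 (1 GHz effective per pin) with 4 dram
 * channels gives 1000 MHz * 16 bytes/clk = 16000 MB/s raw, which the 0.7
 * efficiency factor above reduces to 11200 MB/s.
 */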
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
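/* i.e. average bandwidth = src_width * bytes_per_pixel * vsc / line_time,
 * with line_time converted from ns to us.  Example: a 1920-wide 32bpp
 * scanout with a 14.8 us line time averages 7680 / 14.8 ~= 519 MB/s.
 */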
static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
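/* The watermark is thus the full data return latency (memory controller,
 * other heads' chunk and cursor traffic, and the dc pipe), padded by any
 * shortfall when the line buffer cannot be refilled within the active
 * display time of one line.
 */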
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}
static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_INFO("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	if (rdev->flags & RADEON_IS_IGP) {
		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
	} else {
		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	}
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
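/* Note the ordering: the VM L2 and L1 TLBs are configured first, then
 * context 0's page table range and base are pointed at the GART table in
 * VRAM, and only after a TLB flush is the GART marked ready for use.
 */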
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
*rdev
)
927 evergreen_pcie_gart_disable(rdev
);
928 radeon_gart_table_vram_free(rdev
);
929 radeon_gart_fini(rdev
);
void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
	      SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	      SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
	      EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	if (rdev->flags & RADEON_IS_IGP) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* set to DX10/11 mode */
	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(rdev, 1);
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
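/* The INDIRECT_BUFFER packet carries three dwords: the dword-aligned low
 * 32 bits of the IB address, the upper 8 address bits, and the IB length
 * in dwords.
 */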
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
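/* Both images are streamed through auto-incrementing ports: writing 0 to
 * the ADDR/WADDR register rewinds the write pointer, and each DATA write
 * then loads one big-endian dword of microcode.
 */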
static int evergreen_cp_start(struct radeon_device *rdev)
{
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(rdev, evergreen_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_write(rdev, 0xc0026900);
	radeon_ring_write(rdev, 0x00000316);
	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(rdev, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev);

	return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
#ifdef __BIG_ENDIAN
	       RB_RPTR_SWAP(2) |
#endif
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}
static void evergreen_program_channel_remap(struct radeon_device *rdev)
{
	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;

	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	case 1:
	case 2:
	case 3:
	default:
		/* default mapping */
		mc_shared_chremap = 0x00fac688;
		break;
	}

	switch (rdev->family) {
	case CHIP_HEMLOCK:
	case CHIP_CYPRESS:
	case CHIP_BARTS:
		tcp_chan_steer_lo = 0x54763210;
		tcp_chan_steer_hi = 0x0000ba98;
		break;
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_TURKS:
	case CHIP_CAICOS:
	default:
		tcp_chan_steer_lo = 0x76543210;
		tcp_chan_steer_hi = 0x0000ba98;
		break;
	}

	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);
1782 mc_shared_chmap
= RREG32(MC_SHARED_CHMAP
);
1783 if (rdev
->flags
& RADEON_IS_IGP
)
1784 mc_arb_ramcfg
= RREG32(FUS_MC_ARB_RAMCFG
);
1786 mc_arb_ramcfg
= RREG32(MC_ARB_RAMCFG
);
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
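
	/* On a couple of boards the render backend map is derived from efuse
	 * straps rather than from the family defaults further below.
	 */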
	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch(efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x0a:
			gb_backend_map = 0x77553300;
			break;
		case 0x50:
			gb_backend_map = 0x77552211;
			break;
		case 0xf0:
			gb_backend_map = 0x77443300;
			break;
		case 0x0f:
			gb_backend_map = 0x66552211;
			break;
		case 0x05:
			gb_backend_map = 0x77552200;
			break;
		case 0xa0:
			gb_backend_map = 0x66442200;
			break;
		case 0xa5:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch(efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_HEMLOCK:
		case CHIP_CYPRESS:
		case CHIP_BARTS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00006420;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;
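
	/* For reference: bits 3:0 hold a log2 encoding, so a consumer of
	 * tile_config can recover the pipe count as, e.g.,
	 * 1 << (tile_config & 0xf).
	 */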
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	evergreen_program_channel_remap(rdev);
	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
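
	/* Program each shader engine in turn: select it through
	 * GRBM_GFX_INDEX, then apply the disable masks computed above. An SE
	 * index equal to the detected engine count gets everything disabled.
	 */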
	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	grbm_gfx_index |= SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);
	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}
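
	/* GPR budgeting below: two clause-temp sets of 4 GPRs are carved out
	 * first, then the remainder is split across the shader types in
	 * 32nds (12/32 PS, 6/32 VS, 4/32 each GS/ES, 3/32 each HS/LS).
	 */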
	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
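
	/* Thread budgeting below: PS gets a fixed count per family, and the
	 * remaining threads are split among the other five shader types (the
	 * divide by six leaves some slack), rounded down to a multiple of 8.
	 */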
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);
	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}
int evergreen_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	if (rdev->flags & RADEON_IS_IGP) {
		/* size in bytes on fusion */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	} else {
		/* size in MB on evergreen */
		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	}
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r700_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
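
/* The soft reset below only touches the gfx blocks; the MC is stopped
 * around it (evergreen_mc_stop/evergreen_mc_resume) so in-flight memory
 * traffic can drain first.
 */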
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	switch (crtc) {
	case 0:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
	case 1:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
	case 2:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
	case 3:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
	case 4:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
	case 5:
		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
	default:
		return 0;
	}
}
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->irq.sw_int) {
		DRM_DEBUG("evergreen_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    rdev->irq.pflip[2]) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    rdev->irq.pflip[3]) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    rdev->irq.pflip[4]) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    rdev->irq.pflip[5]) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}
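
/* evergreen_irq_ack() snapshots the display interrupt status registers
 * into rdev->irq.stat_regs.evergreen and writes back the ack bits, so
 * evergreen_irq_process() can work from a stable copy.
 */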
static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);

	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}

void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing from
		 * the last vector that was not overwritten (wptr + 16),
		 * which should let us catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
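
/* IH ring vectors are 16 bytes: dword 0 carries the source id and
 * dword 1 the source data. That is why the loop below indexes the ring
 * in 32-bit words and advances rptr by 16 per vector.
 */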
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr = evergreen_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	evergreen_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[2])
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[3])
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[4])
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[5])
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
static int evergreen_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	if (!ASIC_IS_DCE5(rdev))
		evergreen_pcie_gen2_enable(rdev);

	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		evergreen_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}
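
/* evergreen_startup() is shared by evergreen_init() and
 * evergreen_resume(), so everything it programs must be safe to redo
 * after a suspend/resume cycle.
 */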
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	return r;
}
int evergreen_suspend(struct radeon_device *rdev)
{
	int r;

	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}

	return 0;
}
int evergreen_copy_blit(struct radeon_device *rdev,
			uint64_t src_offset, uint64_t dst_offset,
			unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	evergreen_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call the
 * asic-specific functions. This should also allow us to remove a bunch
 * of callback functions like vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}
void evergreen_fini(struct radeon_device *rdev)
{
	evergreen_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
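
/* PCIe gen2 is only attempted on discrete, non-X2 PCIE boards, and only
 * when the radeon_pcie_gen2 module parameter allows it; everything else
 * bails out early below.
 */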
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}