/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include "atom-bits.h"

/* rs690, rs740 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rs690, rs740
 *
 * Some of these functions might be used by newer ASICs.
 */
void rs690_gpu_init(struct radeon_device *rdev);
int rs690_mc_wait_for_idle(struct radeon_device *rdev);

int rs690_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	rs400_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
	rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}
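	/*
	 * Illustrative note (not from the original source): the three
	 * gtt_location statements above place the GTT right after VRAM and
	 * round it up to a gtt_size boundary, assuming gtt_size is a power
	 * of two.  For example, with mc_vram_size = 256MB (0x10000000) and
	 * gtt_size = 512MB (0x20000000):
	 *   0x10000000 + 0x1FFFFFFF = 0x2FFFFFFF
	 *   0x2FFFFFFF & ~0x1FFFFFFF = 0x20000000
	 * so the GTT aperture starts at 512MB, aligned to its own size.
	 * Setting vram_location to 0xFFFFFFFF asks radeon_mc_setup() to pick
	 * the VRAM placement itself.
	 */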
	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
	/* FIXME: Does this reg exist on RS480,RS740 ? */
	WREG32(0x310, rdev->mc.vram_location);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}

void rs690_mc_fini(struct radeon_device *rdev)
{
}

/*
 * Global GPU functions
 */
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32_MC(RS690_MC_STATUS);
		if (tmp & RS690_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rs690_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

void rs690_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: HDP same place on rs690 ? */
	r100_hdp_reset(rdev);
	rv515_vga_render_disable(rdev);
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

void rs690_pm_info(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
	void *ptr;
	uint16_t data_offset;
	uint8_t frev, crev;
	fixed20_12 tmp;

	atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
			       &frev, &crev, &data_offset);
	ptr = rdev->mode_info.atom_context->bios + data_offset;
	info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
	info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;

	/* Get various system information from the BIOS */
	switch (crev) {
	case 1:
		tmp.full = rfixed_const(100);
		rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
		rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
		rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
		rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
		break;
	case 2:
		tmp.full = rfixed_const(100);
		rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
		rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
		rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
		rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
		rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
		break;
	default:
		tmp.full = rfixed_const(100);
		/* We assume the slowest possible clock, i.e. worst case */
		rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
		/* FIXME: system clock ? */
		rdev->pm.igp_system_mclk.full = rfixed_const(100);
		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
		rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
		rdev->pm.igp_ht_link_width.full = rfixed_const(8);
		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
		break;
	}
	/* Compute various bandwidths */
	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
	tmp.full = rfixed_const(4);
	rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
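	/*
	 * Illustrative example (not from the original source): if
	 * igp_system_mclk is 400 (MHz), the formula above yields
	 * k8_bandwidth = 400 * 4 = 1600, in the same clock-derived units
	 * used by the other bandwidth figures below.
	 */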
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
	 *              = ht_clk * ht_width / 5
	 */
	tmp.full = rfixed_const(5);
	rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
						rdev->pm.igp_ht_link_width);
	rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
	if (tmp.full < rdev->pm.max_bandwidth.full) {
		/* HT link is a limiting factor */
		rdev->pm.max_bandwidth.full = tmp.full;
	}
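	/*
	 * Illustrative example (not from the original source): a 1000 MHz,
	 * 16-bit HT link gives ht_bandwidth = 1000 * 16 / 5 = 3200, i.e.
	 * 2 transfers/clock * 16 bits / 8 bits-per-byte derated to 80%.
	 */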
	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
	 *                    = (sideport_clk * 14) / 10
	 */
	tmp.full = rfixed_const(14);
	rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
	tmp.full = rfixed_const(10);
	rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
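	/*
	 * Illustrative example (not from the original source): with the
	 * fallback sideport clock of 333 (MHz) used above, this evaluates to
	 * sideport_bandwidth = 333 * 14 / 10 = 466.2, i.e. 2 bytes per clock
	 * derated to 70% efficiency.
	 */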
}

void rs690_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;
	fixed20_12 a;

	rs400_gart_adjust_size(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	/* FIXME: is this correct for RS690/RS740 ? */
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);

	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	a.full = rfixed_const(16);
	/* core_bandwidth = sclk(MHz) * 16 */
	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
			      struct drm_display_mode *mode1,
			      struct drm_display_mode *mode2)
{
	u32 tmp;

	/*
	 * There is a single line buffer shared by both display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning can either be done
	 * manually or via one of four preset allocations specified in bits 1:0:
	 *  0 - line buffer is divided in half and shared between crtc
	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 *  2 - D1 gets the whole buffer
	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual
	 * allocation mode.  In manual allocation mode, D1 always starts at 0,
	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
	 */
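	/*
	 * Illustrative example (not from the original source): with a
	 * 1920-wide mode on D1 and a 1280-wide mode on D2, neither display
	 * exceeds 2560 pixels, so the code below keeps the half/half preset;
	 * only a display wider than 2560 pixels earns the 3/4 allocation.
	 */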
	tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
	tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
	WREG32(DC_LB_MEMORY_SPLIT, tmp);
}

struct rs690_watermark {
	u32 lb_request_fifo_depth;
	fixed20_12 num_line_pair;
	fixed20_12 estimated_width;
	fixed20_12 worst_case_latency;
	fixed20_12 consumption_rate;
	fixed20_12 active_time;
	fixed20_12 dbpp;
	fixed20_12 priority_mark_max;
	fixed20_12 priority_mark;
	fixed20_12 sclk;
};

void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
				  struct radeon_crtc *crtc,
				  struct rs690_watermark *wm)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
	/* FIXME: detect IGP with sideport memory, I don't think there is any
	 * such product available
	 */
	bool sideport = false;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set the priority mark to maximum */
		wm->lb_request_fifo_depth = 4;
		return;
	}
	if (crtc->vsc.full > rfixed_const(2))
		wm->num_line_pair.full = rfixed_const(2);
	else
		wm->num_line_pair.full = rfixed_const(1);

	b.full = rfixed_const(mode->crtc_hdisplay);
	c.full = rfixed_const(256);
	a.full = rfixed_mul(wm->num_line_pair, b);
	request_fifo_depth.full = rfixed_div(a, c);
	if (a.full < rfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
	}
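	/*
	 * Illustrative example (not from the original source): for a
	 * 1920-pixel-wide mode fetched one line pair at a time,
	 * request_fifo_depth = 1 * 1920 / 256 = 7.5, which truncates to an
	 * lb_request_fifo_depth of 7; the clamp above only triggers for very
	 * narrow viewports.
	 */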
	/* Determine consumption rate
	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = rfixed_const(mode->clock);
	b.full = rfixed_const(1000);
	a.full = rfixed_div(a, b);
	pclk.full = rfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = rfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = rfixed_mul(b, crtc->hsc);
		c.full = rfixed_const(2);
		b.full = rfixed_div(b, c);
		consumption_time.full = rfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = rfixed_const(1);
	wm->consumption_rate.full = rfixed_div(a, consumption_time);
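	/*
	 * Illustrative example (not from the original source): a 148.5 MHz
	 * pixel clock (mode->clock = 148500 kHz) gives
	 * pclk = 1000 / 148.5 ~= 6.73 ns per pixel, and with no scaling the
	 * consumption rate is 1 / 6.73 ~= 0.149 pixels per ns.
	 */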
	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  htotal = total number of horizontal pixels
	 *  pclk = pixel clock period(ns)
	 */
	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = rfixed_mul(a, pclk);

	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
	b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = rfixed_mul(line_time, b);
	wm->active_time.full = rfixed_div(wm->active_time, a);
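	/*
	 * Illustrative example (not from the original source): with
	 * crtc_htotal = 2200, crtc_hdisplay = 1920 and the ~6.73 ns pclk
	 * above, line_time ~= 2200 * 6.73 ns ~= 14.8 us and
	 * active_time ~= 14.8 us * 1920 / 2200 ~= 12.9 us.
	 */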
	/* Maximum bandwidth is the minimum bandwidth of all components */
	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
	if (sideport) {
		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
		    rdev->pm.sideport_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
		read_delay_latency.full = rfixed_const(370 * 800 * 1000);
		read_delay_latency.full = rfixed_div(read_delay_latency,
			rdev->pm.igp_sideport_mclk);
	} else {
		if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
		    rdev->pm.k8_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
		if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
		    rdev->pm.ht_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
		read_delay_latency.full = rfixed_const(5000);
	}
	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
	a.full = rfixed_const(16);
	rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
	a.full = rfixed_const(1000);
	rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(ns)
	 */
	a.full = rfixed_const(256 * 13);
	chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
	a.full = rfixed_const(10);
	chunk_time.full = rfixed_div(chunk_time, a);
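	/*
	 * Illustrative example (not from the original source): if
	 * max_bandwidth works out to 1600, then
	 * sclk = 1000 / (1600 * 16) ~= 0.039 ns and
	 * chunk_time = 0.039 * 256 * 13 / 10 ~= 13 ns for one 256-entry
	 * chunk including pipeline delay and inter-chunk gap.
	 */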
	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request (1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (rfixed_trunc(wm->num_line_pair) > 1) {
		a.full = rfixed_const(3);
		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		a.full = rfixed_const(2);
		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	}
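	/*
	 * Illustrative example (not from the original source): for a single
	 * line pair, worst_case_latency = 2 * chunk_time + read_delay_latency,
	 * e.g. 2 * 13 ns + 5000 ns ~= 5026 ns on a system without sideport
	 * memory, where read_delay_latency was set to the 5000 ns constant
	 * above.
	 */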
	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 *  LineTime = total time for one line of display
	 *  ChunkTime = the time it takes the DCP to send one chunk
	 *              of data to the LB which consists of
	 *              pipeline delay and inter chunk gap
	 */
	if ((2 + wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
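	/*
	 * Illustrative example (not from the original source): continuing the
	 * 1920-wide example above (lb_request_fifo_depth = 7,
	 * request_fifo_depth ~= 7.5), 2 + 7 >= 7 holds, so the tolerable
	 * latency is simply one full line time (~14.8 us); the else branch
	 * only shortens it when the FIFO cannot cover the outstanding
	 * requests.
	 */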
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = rfixed_const(4 * 8);

	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = rfixed_const(16);
	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
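	/*
	 * Illustrative example (not from the original source): for a
	 * 1920-pixel viewport, priority_mark_max = 1920 / 16 = 120; the
	 * estimated-width calculation below then pulls the actual mark down
	 * from that ceiling.
	 */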
	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = rfixed_div(estimated_width, consumption_time);
	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = rfixed_const(10);
	} else {
		a.full = rfixed_const(16);
		wm->priority_mark.full = rfixed_div(estimated_width, a);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}

void rs690_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rs690_watermark wm0;
	struct rs690_watermark wm1;
	u32 tmp;
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if (rdev->disp_priority == 2) {
		tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
		if (mode1)
			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
		if (mode0)
			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
		WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
	}
	rs690_line_buffer_adjust(rdev, mode0, mode1);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
		WREG32(DCP_CONTROL, 0);
	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
		WREG32(DCP_CONTROL, 2);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);

	tmp = (wm0.lb_request_fifo_depth - 1);
	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
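	/*
	 * Illustrative note (not from the original source): the register
	 * write above packs both FIFO depths into one word, D1 in the low
	 * 16 bits and D2 in the high 16 bits, each stored as depth - 1.
	 * For example, depths of 7 and 4 are written as 0x00030006.
	 */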
	if (mode0 && mode1) {
		if (rfixed_trunc(wm0.dbpp) > 64)
			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		if (rfixed_trunc(wm1.dbpp) > 64)
			b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			b.full = wm1.num_line_pair.full;
		a.full = a.full + b.full;
		fill_rate.full = rfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm0.active_time);
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		}
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm1.active_time);
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (rfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (rfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
	} else if (mode0) {
		if (rfixed_trunc(wm0.dbpp) > 64)
			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		fill_rate.full = rfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm0.active_time);
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (rfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
	} else {
		if (rfixed_trunc(wm1.dbpp) > 64)
			a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			a.full = wm1.num_line_pair.full;
		fill_rate.full = rfixed_div(wm1.sclk, a);
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm1.active_time);
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		}
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (rfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
		WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
	}
}

/*
 * Indirect register accessors
 */
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
	r = RREG32(RS690_MC_DATA);
	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
	return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RS690_MC_INDEX,
	       RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
	WREG32(RS690_MC_DATA, v);
	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
}