Commit | Line | Data |
---|---|---|
43b3cd99 AD |
1 | /* |
2 | * Copyright 2011 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * Authors: Alex Deucher | |
23 | */ | |
24 | #include "drmP.h" | |
25 | #include "radeon.h" | |
26 | #include "radeon_asic.h" | |
27 | #include "radeon_drm.h" | |
28 | #include "sid.h" | |
29 | #include "atom.h" | |
30 | ||
1bd47d2e AD |
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	/* pull the 9-bit CTF temperature field out of the thermal status reg */
	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	/* bit 9 set: clamp the reading at 255 (presumably an overflow/over-temp
	 * indication from the sensor — TODO confirm against the register spec);
	 * otherwise the low 9 bits are the temperature in degrees C.
	 */
	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	/* scale degrees C to millidegrees for the return value */
	actual_temp = (actual_temp * 1000);

	return actual_temp;
}
49 | ||
43b3cd99 AD |
50 | /* watermark setup */ |
51 | static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, | |
52 | struct radeon_crtc *radeon_crtc, | |
53 | struct drm_display_mode *mode, | |
54 | struct drm_display_mode *other_mode) | |
55 | { | |
56 | u32 tmp; | |
57 | /* | |
58 | * Line Buffer Setup | |
59 | * There are 3 line buffers, each one shared by 2 display controllers. | |
60 | * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between | |
61 | * the display controllers. The paritioning is done via one of four | |
62 | * preset allocations specified in bits 21:20: | |
63 | * 0 - half lb | |
64 | * 2 - whole lb, other crtc must be disabled | |
65 | */ | |
66 | /* this can get tricky if we have two large displays on a paired group | |
67 | * of crtcs. Ideally for multiple large displays we'd assign them to | |
68 | * non-linked crtcs for maximum line buffer allocation. | |
69 | */ | |
70 | if (radeon_crtc->base.enabled && mode) { | |
71 | if (other_mode) | |
72 | tmp = 0; /* 1/2 */ | |
73 | else | |
74 | tmp = 2; /* whole */ | |
75 | } else | |
76 | tmp = 0; | |
77 | ||
78 | WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, | |
79 | DC_LB_MEMORY_CONFIG(tmp)); | |
80 | ||
81 | if (radeon_crtc->base.enabled && mode) { | |
82 | switch (tmp) { | |
83 | case 0: | |
84 | default: | |
85 | return 4096 * 2; | |
86 | case 2: | |
87 | return 8192 * 2; | |
88 | } | |
89 | } | |
90 | ||
91 | /* controller not enabled, so no lb used */ | |
92 | return 0; | |
93 | } | |
94 | ||
95 | static u32 dce6_get_number_of_dram_channels(struct radeon_device *rdev) | |
96 | { | |
97 | u32 tmp = RREG32(MC_SHARED_CHMAP); | |
98 | ||
99 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | |
100 | case 0: | |
101 | default: | |
102 | return 1; | |
103 | case 1: | |
104 | return 2; | |
105 | case 2: | |
106 | return 4; | |
107 | case 3: | |
108 | return 8; | |
109 | case 4: | |
110 | return 3; | |
111 | case 5: | |
112 | return 6; | |
113 | case 6: | |
114 | return 10; | |
115 | case 7: | |
116 | return 12; | |
117 | case 8: | |
118 | return 16; | |
119 | } | |
120 | } | |
121 | ||
/* Per-head parameters consumed by the DCE6 watermark calculations below. */
struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
137 | ||
138 | static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm) | |
139 | { | |
140 | /* Calculate raw DRAM Bandwidth */ | |
141 | fixed20_12 dram_efficiency; /* 0.7 */ | |
142 | fixed20_12 yclk, dram_channels, bandwidth; | |
143 | fixed20_12 a; | |
144 | ||
145 | a.full = dfixed_const(1000); | |
146 | yclk.full = dfixed_const(wm->yclk); | |
147 | yclk.full = dfixed_div(yclk, a); | |
148 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | |
149 | a.full = dfixed_const(10); | |
150 | dram_efficiency.full = dfixed_const(7); | |
151 | dram_efficiency.full = dfixed_div(dram_efficiency, a); | |
152 | bandwidth.full = dfixed_mul(dram_channels, yclk); | |
153 | bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); | |
154 | ||
155 | return dfixed_trunc(bandwidth); | |
156 | } | |
157 | ||
158 | static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm) | |
159 | { | |
160 | /* Calculate DRAM Bandwidth and the part allocated to display. */ | |
161 | fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ | |
162 | fixed20_12 yclk, dram_channels, bandwidth; | |
163 | fixed20_12 a; | |
164 | ||
165 | a.full = dfixed_const(1000); | |
166 | yclk.full = dfixed_const(wm->yclk); | |
167 | yclk.full = dfixed_div(yclk, a); | |
168 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | |
169 | a.full = dfixed_const(10); | |
170 | disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ | |
171 | disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); | |
172 | bandwidth.full = dfixed_mul(dram_channels, yclk); | |
173 | bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); | |
174 | ||
175 | return dfixed_trunc(bandwidth); | |
176 | } | |
177 | ||
178 | static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm) | |
179 | { | |
180 | /* Calculate the display Data return Bandwidth */ | |
181 | fixed20_12 return_efficiency; /* 0.8 */ | |
182 | fixed20_12 sclk, bandwidth; | |
183 | fixed20_12 a; | |
184 | ||
185 | a.full = dfixed_const(1000); | |
186 | sclk.full = dfixed_const(wm->sclk); | |
187 | sclk.full = dfixed_div(sclk, a); | |
188 | a.full = dfixed_const(10); | |
189 | return_efficiency.full = dfixed_const(8); | |
190 | return_efficiency.full = dfixed_div(return_efficiency, a); | |
191 | a.full = dfixed_const(32); | |
192 | bandwidth.full = dfixed_mul(a, sclk); | |
193 | bandwidth.full = dfixed_mul(bandwidth, return_efficiency); | |
194 | ||
195 | return dfixed_trunc(bandwidth); | |
196 | } | |
197 | ||
/* Bytes transferred per DMIF request; a fixed 32 on this hardware. */
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
	return 32;
}
202 | ||
203 | static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm) | |
204 | { | |
205 | /* Calculate the DMIF Request Bandwidth */ | |
206 | fixed20_12 disp_clk_request_efficiency; /* 0.8 */ | |
207 | fixed20_12 disp_clk, sclk, bandwidth; | |
208 | fixed20_12 a, b1, b2; | |
209 | u32 min_bandwidth; | |
210 | ||
211 | a.full = dfixed_const(1000); | |
212 | disp_clk.full = dfixed_const(wm->disp_clk); | |
213 | disp_clk.full = dfixed_div(disp_clk, a); | |
214 | a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2); | |
215 | b1.full = dfixed_mul(a, disp_clk); | |
216 | ||
217 | a.full = dfixed_const(1000); | |
218 | sclk.full = dfixed_const(wm->sclk); | |
219 | sclk.full = dfixed_div(sclk, a); | |
220 | a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm)); | |
221 | b2.full = dfixed_mul(a, sclk); | |
222 | ||
223 | a.full = dfixed_const(10); | |
224 | disp_clk_request_efficiency.full = dfixed_const(8); | |
225 | disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); | |
226 | ||
227 | min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2)); | |
228 | ||
229 | a.full = dfixed_const(min_bandwidth); | |
230 | bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency); | |
231 | ||
232 | return dfixed_trunc(bandwidth); | |
233 | } | |
234 | ||
235 | static u32 dce6_available_bandwidth(struct dce6_wm_params *wm) | |
236 | { | |
237 | /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ | |
238 | u32 dram_bandwidth = dce6_dram_bandwidth(wm); | |
239 | u32 data_return_bandwidth = dce6_data_return_bandwidth(wm); | |
240 | u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm); | |
241 | ||
242 | return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); | |
243 | } | |
244 | ||
245 | static u32 dce6_average_bandwidth(struct dce6_wm_params *wm) | |
246 | { | |
247 | /* Calculate the display mode Average Bandwidth | |
248 | * DisplayMode should contain the source and destination dimensions, | |
249 | * timing, etc. | |
250 | */ | |
251 | fixed20_12 bpp; | |
252 | fixed20_12 line_time; | |
253 | fixed20_12 src_width; | |
254 | fixed20_12 bandwidth; | |
255 | fixed20_12 a; | |
256 | ||
257 | a.full = dfixed_const(1000); | |
258 | line_time.full = dfixed_const(wm->active_time + wm->blank_time); | |
259 | line_time.full = dfixed_div(line_time, a); | |
260 | bpp.full = dfixed_const(wm->bytes_per_pixel); | |
261 | src_width.full = dfixed_const(wm->src_width); | |
262 | bandwidth.full = dfixed_mul(src_width, bpp); | |
263 | bandwidth.full = dfixed_mul(bandwidth, wm->vsc); | |
264 | bandwidth.full = dfixed_div(bandwidth, line_time); | |
265 | ||
266 | return dfixed_trunc(bandwidth); | |
267 | } | |
268 | ||
/*
 * Compute the latency watermark (in ns) for one head: the worst-case data
 * return latency, extended by any extra time needed to refill a line of the
 * line buffer when the fill rate cannot keep up with the active scanout time.
 */
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce6_available_bandwidth(wm);
	/* ns to return one worst-case 512-byte x8 chunk at the available rate */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	/* ns to return a cursor line pair (128 bytes x4) */
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* worst-case wait while the other active heads drain their requests */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads: nothing to wait for */
	if (wm->num_heads == 0)
		return 0;

	/* heavy downscale, many scaler taps, or scaled interlace needs more
	 * source lines per output line
	 */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* this head's share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* dmif-size-limited fill rate over the mc latency window */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* cap again by what the display clock can consume per pixel */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* ns to fill one destination line's worth of source data */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line refill outlasts the active period, pad the watermark */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
331 | ||
332 | static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm) | |
333 | { | |
334 | if (dce6_average_bandwidth(wm) <= | |
335 | (dce6_dram_bandwidth_for_display(wm) / wm->num_heads)) | |
336 | return true; | |
337 | else | |
338 | return false; | |
339 | }; | |
340 | ||
341 | static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm) | |
342 | { | |
343 | if (dce6_average_bandwidth(wm) <= | |
344 | (dce6_available_bandwidth(wm) / wm->num_heads)) | |
345 | return true; | |
346 | else | |
347 | return false; | |
348 | }; | |
349 | ||
350 | static bool dce6_check_latency_hiding(struct dce6_wm_params *wm) | |
351 | { | |
352 | u32 lb_partitions = wm->lb_size / wm->src_width; | |
353 | u32 line_time = wm->active_time + wm->blank_time; | |
354 | u32 latency_tolerant_lines; | |
355 | u32 latency_hiding; | |
356 | fixed20_12 a; | |
357 | ||
358 | a.full = dfixed_const(1); | |
359 | if (wm->vsc.full > a.full) | |
360 | latency_tolerant_lines = 1; | |
361 | else { | |
362 | if (lb_partitions <= (wm->vtaps + 1)) | |
363 | latency_tolerant_lines = 1; | |
364 | else | |
365 | latency_tolerant_lines = 2; | |
366 | } | |
367 | ||
368 | latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); | |
369 | ||
370 | if (dce6_latency_watermark(wm) <= latency_hiding) | |
371 | return true; | |
372 | else | |
373 | return false; | |
374 | } | |
375 | ||
/*
 * Compute and program the watermark and priority registers for one crtc.
 * lb_size is the line buffer allocation returned by dce6_line_buffer_adjust();
 * num_heads is the total count of enabled crtcs.
 */
static void dce6_program_watermarks(struct radeon_device *rdev,
				    struct radeon_crtc *radeon_crtc,
				    u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct dce6_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		/* ns per pixel (assumes mode->clock is in kHz — the DRM
		 * convention); line_time is capped at the 16-bit hw field max
		 */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		/* fill in the watermark parameters from the current state */
		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = dce6_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !dce6_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		/* priority mark A: watermark_a (ns) converted to pixels,
		 * scaled by the horizontal scale ratio, in units of 16
		 */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		/* priority mark B: same conversion for watermark_b */
		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);

}
482 | ||
483 | void dce6_bandwidth_update(struct radeon_device *rdev) | |
484 | { | |
485 | struct drm_display_mode *mode0 = NULL; | |
486 | struct drm_display_mode *mode1 = NULL; | |
487 | u32 num_heads = 0, lb_size; | |
488 | int i; | |
489 | ||
490 | radeon_update_display_priority(rdev); | |
491 | ||
492 | for (i = 0; i < rdev->num_crtc; i++) { | |
493 | if (rdev->mode_info.crtcs[i]->base.enabled) | |
494 | num_heads++; | |
495 | } | |
496 | for (i = 0; i < rdev->num_crtc; i += 2) { | |
497 | mode0 = &rdev->mode_info.crtcs[i]->base.mode; | |
498 | mode1 = &rdev->mode_info.crtcs[i+1]->base.mode; | |
499 | lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1); | |
500 | dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads); | |
501 | lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0); | |
502 | dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads); | |
503 | } | |
504 | } | |
505 |