/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "atom.h"
#include "rs690d.h"

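/*
 * Editorial note: the helper below polls the MC until
 * G_000090_MC_SYSTEM_IDLE reports idle, checking once per microsecond
 * for up to rdev->usec_timeout microseconds.  It returns 0 when the MC
 * is idle and -1 on timeout.
 */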
static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
                if (G_000090_MC_SYSTEM_IDLE(tmp))
                        return 0;
                udelay(1);
        }
        return -1;
}

static void rs690_gpu_init(struct radeon_device *rdev)
{
        /* FIXME: HDP same place on rs690 ? */
        r100_hdp_reset(rdev);
        /* FIXME: is this correct ? */
        r420_pipes_init(rdev);
        if (rs690_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
}

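/*
 * Editorial note: fixed20_12 is radeon's 20.12 fixed-point type;
 * rfixed_const(x) stores x shifted into that format, and rfixed_mul()/
 * rfixed_div() operate on it.  The ATOM tables presumably report clocks
 * in 10 kHz units, which is why the values below are divided by 100 to
 * obtain MHz (e.g. a ulBootUpMemoryClock of 40000 becomes 400 MHz).
 */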
void rs690_pm_info(struct radeon_device *rdev)
{
        int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
        struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
        void *ptr;
        uint16_t data_offset;
        uint8_t frev, crev;
        fixed20_12 tmp;

        atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
                               &frev, &crev, &data_offset);
        ptr = rdev->mode_info.atom_context->bios + data_offset;
        info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
        info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
        /* Get various system information from bios */
        switch (crev) {
        case 1:
                tmp.full = rfixed_const(100);
                rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
                rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
                rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
                rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
                rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
                break;
        case 2:
                tmp.full = rfixed_const(100);
                rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
                rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
                rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
                rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
                rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
                rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
                rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
                break;
        default:
                tmp.full = rfixed_const(100);
                /* We assume the slowest possible clock, i.e. worst case */
                /* DDR 333 MHz */
                rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
                /* FIXME: system clock ? */
                rdev->pm.igp_system_mclk.full = rfixed_const(100);
                rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
                rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
                rdev->pm.igp_ht_link_width.full = rfixed_const(8);
                DRM_ERROR("No integrated system info for your GPU, using safe default\n");
                break;
        }
        /* Compute various bandwidths */
        /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
        tmp.full = rfixed_const(4);
        rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
        /* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
         *              = ht_clk * ht_width / 5
         */
        tmp.full = rfixed_const(5);
        rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
                                                rdev->pm.igp_ht_link_width);
        rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
        if (tmp.full < rdev->pm.max_bandwidth.full) {
                /* HT link is a limiting factor */
                rdev->pm.max_bandwidth.full = tmp.full;
        }
        /* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
         *                    = (sideport_clk * 14) / 10
         */
        tmp.full = rfixed_const(14);
        rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
        tmp.full = rfixed_const(10);
        rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}

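/*
 * Editorial note: VRAM on these IGPs is carved out of system memory
 * (UMA), with an optional dedicated sideport.  The size reported by
 * RADEON_CONFIG_MEMSIZE is taken as both the real and the MC-visible
 * VRAM size, and the CPU-visible aperture comes from PCI BAR 0.
 */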
void rs690_vram_info(struct radeon_device *rdev)
{
        uint32_t tmp;
        fixed20_12 a;

        rs400_gart_adjust_size(rdev);
        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        /* FIXME: is this correct for RS690/RS740 ? */
        tmp = RREG32(RADEON_MEM_CNTL);
        if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
                rdev->mc.vram_width = 128;
        } else {
                rdev->mc.vram_width = 64;
        }
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;

        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
        rs690_pm_info(rdev);
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
        a.full = rfixed_const(100);
        rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
        a.full = rfixed_const(16);
        /* core_bandwidth = sclk(MHz) * 16 */
        rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
                              struct drm_display_mode *mode1,
                              struct drm_display_mode *mode2)
{
        u32 tmp;

        /*
         * Line Buffer Setup
         * There is a single line buffer shared by both display controllers.
         * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
         * the display controllers.  The partitioning can either be done
         * manually or via one of four preset allocations specified in bits 1:0:
         * 0 - line buffer is divided in half and shared between crtc
         * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
         * 2 - D1 gets the whole buffer
         * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
         * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
         * allocation mode.  In manual allocation mode, D1 always starts at 0,
         * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
         */
        tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
        tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
        /* auto */
        if (mode1 && mode2) {
                if (mode1->hdisplay > mode2->hdisplay) {
                        if (mode1->hdisplay > 2560)
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
                        else
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
                } else if (mode2->hdisplay > mode1->hdisplay) {
                        if (mode2->hdisplay > 2560)
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
                        else
                                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
                } else
                        tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
        } else if (mode1) {
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
        } else if (mode2) {
                tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
        }
        WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}

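/*
 * Editorial note: per-CRTC display watermark state, kept in 20.12 fixed
 * point except for the request FIFO depth.  These values are computed by
 * rs690_crtc_bandwidth_compute() and consumed by rs690_bandwidth_update()
 * when programming the priority marks below.
 */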
struct rs690_watermark {
        u32 lb_request_fifo_depth;
        fixed20_12 num_line_pair;
        fixed20_12 estimated_width;
        fixed20_12 worst_case_latency;
        fixed20_12 consumption_rate;
        fixed20_12 active_time;
        fixed20_12 dbpp;
        fixed20_12 priority_mark_max;
        fixed20_12 priority_mark;
        fixed20_12 sclk;
};

void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
                                  struct radeon_crtc *crtc,
                                  struct rs690_watermark *wm)
{
        struct drm_display_mode *mode = &crtc->base.mode;
        fixed20_12 a, b, c;
        fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
        fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
        /* FIXME: detect IGP with sideport memory, I don't think there is any
         * such product available
         */
        bool sideport = false;

        if (!crtc->base.enabled) {
                /* FIXME: wouldn't it be better to set priority mark to maximum */
                wm->lb_request_fifo_depth = 4;
                return;
        }

        if (crtc->vsc.full > rfixed_const(2))
                wm->num_line_pair.full = rfixed_const(2);
        else
                wm->num_line_pair.full = rfixed_const(1);

        b.full = rfixed_const(mode->crtc_hdisplay);
        c.full = rfixed_const(256);
        a.full = rfixed_mul(wm->num_line_pair, b);
        request_fifo_depth.full = rfixed_div(a, c);
        if (a.full < rfixed_const(4)) {
                wm->lb_request_fifo_depth = 4;
        } else {
                wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
        }

        /* Determine consumption rate
         *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
         *  vtaps = number of vertical taps,
         *  vsc = vertical scaling ratio, defined as source/destination
         *  hsc = horizontal scaling ratio, defined as source/destination
         */
        a.full = rfixed_const(mode->clock);
        b.full = rfixed_const(1000);
        a.full = rfixed_div(a, b);
        pclk.full = rfixed_div(b, a);
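        /* Worked example (editorial): mode->clock is in kHz, so for a
         * 108000 kHz (108 MHz) pixel clock a = 108 and
         * pclk = 1000 / 108 ~= 9.26 ns per pixel.
         */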
        if (crtc->rmx_type != RMX_OFF) {
                b.full = rfixed_const(2);
                if (crtc->vsc.full > b.full)
                        b.full = crtc->vsc.full;
                b.full = rfixed_mul(b, crtc->hsc);
                c.full = rfixed_const(2);
                b.full = rfixed_div(b, c);
                consumption_time.full = rfixed_div(pclk, b);
        } else {
                consumption_time.full = pclk.full;
        }
        a.full = rfixed_const(1);
        wm->consumption_rate.full = rfixed_div(a, consumption_time);


        /* Determine line time
         *  LineTime = total time for one line of display
         *  htotal = total number of horizontal pixels
         *  pclk = pixel clock period(ns)
         */
        a.full = rfixed_const(crtc->base.mode.crtc_htotal);
        line_time.full = rfixed_mul(a, pclk);

        /* Determine active time
         *  ActiveTime = time of active region of display within one line,
         *  hactive = total number of horizontal active pixels
         *  htotal = total number of horizontal pixels
         */
        a.full = rfixed_const(crtc->base.mode.crtc_htotal);
        b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
        wm->active_time.full = rfixed_mul(line_time, b);
        wm->active_time.full = rfixed_div(wm->active_time, a);

        /* Maximum bandwidth is the minimum bandwidth of all components */
        rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
        if (sideport) {
                if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
                        rdev->pm.sideport_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
                read_delay_latency.full = rfixed_const(370 * 800 * 1000);
                read_delay_latency.full = rfixed_div(read_delay_latency,
                        rdev->pm.igp_sideport_mclk);
        } else {
                if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
                        rdev->pm.k8_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
                if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
                        rdev->pm.ht_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
                read_delay_latency.full = rfixed_const(5000);
        }

        /* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
        a.full = rfixed_const(16);
        rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
        a.full = rfixed_const(1000);
        rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
        /* Determine chunk time
         * ChunkTime = the time it takes the DCP to send one chunk of data
         * to the LB which consists of pipeline delay and inter chunk gap
         * sclk = system clock(ns)
         */
        a.full = rfixed_const(256 * 13);
        chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
        a.full = rfixed_const(10);
        chunk_time.full = rfixed_div(chunk_time, a);

        /* Determine the worst case latency
         * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
         * WorstCaseLatency = worst case time from urgent to when the MC starts
         *                    to return data
         * READ_DELAY_IDLE_MAX = constant of 1us
         * ChunkTime = time it takes the DCP to send one chunk of data to the LB
         *             which consists of pipeline delay and inter chunk gap
         */
        if (rfixed_trunc(wm->num_line_pair) > 1) {
                a.full = rfixed_const(3);
                wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        } else {
                a.full = rfixed_const(2);
                wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        }

        /* Determine the tolerable latency
         * TolerableLatency = Any given request has only 1 line time
         *                    for the data to be returned
         * LBRequestFifoDepth = Number of chunk requests the LB can
         *                      put into the request FIFO for a display
         * LineTime = total time for one line of display
         * ChunkTime = the time it takes the DCP to send one chunk
         *             of data to the LB which consists of
         *             pipeline delay and inter chunk gap
         */
        if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
                tolerable_latency.full = line_time.full;
        } else {
                tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
                tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
                tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
                tolerable_latency.full = line_time.full - tolerable_latency.full;
        }
        /* We assume worst case 32bits (4 bytes) */
        wm->dbpp.full = rfixed_const(4 * 8);

        /* Determine the maximum priority mark
         *  width = viewport width in pixels
         */
        a.full = rfixed_const(16);
        wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
        wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);

        /* Determine estimated width */
        estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
        estimated_width.full = rfixed_div(estimated_width, consumption_time);
        if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
                wm->priority_mark.full = rfixed_const(10);
        } else {
                a.full = rfixed_const(16);
                wm->priority_mark.full = rfixed_div(estimated_width, a);
                wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
        }
}

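/*
 * Editorial note: the priority mark roughly encodes how early a display
 * controller must be granted memory bandwidth.  Both branches below use
 * priority_mark = worst_case_latency * consumption_rate / (16 * 1000),
 * with an extra (consumption_rate - fill_rate) * active_time term added
 * when a CRTC consumes data faster than the line buffer can be refilled.
 */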
void rs690_bandwidth_update(struct radeon_device *rdev)
{
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
        struct rs690_watermark wm0;
        struct rs690_watermark wm1;
        u32 tmp;
        fixed20_12 priority_mark02, priority_mark12, fill_rate;
        fixed20_12 a, b;

        if (rdev->mode_info.crtcs[0]->base.enabled)
                mode0 = &rdev->mode_info.crtcs[0]->base.mode;
        if (rdev->mode_info.crtcs[1]->base.enabled)
                mode1 = &rdev->mode_info.crtcs[1]->base.mode;
        /*
         * Set display0/1 priority up in the memory controller for
         * modes if the user specifies HIGH for displaypriority
         * option.
         */
        if (rdev->disp_priority == 2) {
                tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
                tmp &= C_000104_MC_DISP0R_INIT_LAT;
                tmp &= C_000104_MC_DISP1R_INIT_LAT;
                if (mode0)
                        tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
                if (mode1)
                        tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
                WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
        }
        rs690_line_buffer_adjust(rdev, mode0, mode1);

        if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
                WREG32(R_006C9C_DCP_CONTROL, 0);
        if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
                WREG32(R_006C9C_DCP_CONTROL, 2);

        rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
        rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);

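        /* Editorial note: LB_MAX_REQ_OUTSTANDING apparently encodes each
         * controller's maximum outstanding requests as depth - 1, with D1
         * in the low half of the register and D2 shifted into bits 31:16.
         */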
        tmp = (wm0.lb_request_fifo_depth - 1);
        tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
        WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);

        if (mode0 && mode1) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
                if (rfixed_trunc(wm1.dbpp) > 64)
                        b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
                else
                        b.full = wm1.num_line_pair.full;
                a.full += b.full;
                fill_rate.full = rfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm0.active_time);
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                }
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm1.active_time);
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
                if (rfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
                if (rfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        } else if (mode0) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
                fill_rate.full = rfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm0.active_time);
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
                if (rfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
                        S_006D48_D2MODE_PRIORITY_A_OFF(1));
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
                        S_006D4C_D2MODE_PRIORITY_B_OFF(1));
        } else {
                if (rfixed_trunc(wm1.dbpp) > 64)
                        a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
                else
                        a.full = wm1.num_line_pair.full;
                fill_rate.full = rfixed_div(wm1.sclk, a);
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm1.active_time);
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        a.full = a.full + b.full;
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                } else {
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                }
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
                if (rfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
                WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
                        S_006548_D1MODE_PRIORITY_A_OFF(1));
                WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
                        S_00654C_D1MODE_PRIORITY_B_OFF(1));
                WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
                WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        }
}

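/*
 * Editorial note: MC registers on this chip are accessed indirectly:
 * the target register number is written to MC_INDEX (with MC_IND_WR_EN
 * set for writes) and the payload moves through MC_DATA.
 */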
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
        r = RREG32(R_00007C_MC_DATA);
        WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
        return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
                S_000078_MC_IND_WR_EN(1));
        WREG32(R_00007C_MC_DATA, v);
        WREG32(R_000078_MC_INDEX, 0x7F);
}

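/*
 * Editorial note: the MC_FB_START/MC_FB_TOP and HDP_FB_START fields
 * apparently hold addresses at 64 KiB granularity, hence the >> 16 on
 * vram_start/vram_end below.
 */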
void rs690_mc_program(struct radeon_device *rdev)
{
        struct rv515_mc_save save;

        /* Stops all mc clients */
        rv515_mc_stop(rdev, &save);

        /* Wait for mc idle */
        if (rs690_mc_wait_for_idle(rdev))
                dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
        /* Program MC, should be a 32-bit limited address space */
        WREG32_MC(R_000100_MCCFG_FB_LOCATION,
                        S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
                        S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
        WREG32(R_000134_HDP_FB_LOCATION,
                S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

        rv515_mc_resume(rdev, &save);
}

static int rs690_startup(struct radeon_device *rdev)
{
        int r;

        rs690_mc_program(rdev);
        /* Resume clock */
        rv515_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        rs690_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        r = rs400_gart_enable(rdev);
        if (r)
                return r;
        /* Enable IRQ */
        rdev->irq.sw_int = true;
        rs600_irq_set(rdev);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

int rs690_resume(struct radeon_device *rdev)
{
        /* Make sure GART is not working */
        rs400_gart_disable(rdev);
        /* Resume clock before doing reset */
        rv515_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        atom_asic_init(rdev->mode_info.atom_context);
        /* Resume clock after posting */
        rv515_clock_startup(rdev);
        return rs690_startup(rdev);
}

int rs690_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        rs600_irq_disable(rdev);
        rs400_gart_disable(rdev);
        return 0;
}

void rs690_fini(struct radeon_device *rdev)
{
        rs690_suspend(rdev);
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        rs400_gart_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_object_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

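/*
 * Editorial note: init order matters here; the GPU is reset and posted
 * via ATOM before any memory-controller or acceleration setup, and if
 * rs690_startup() fails the acceleration paths are torn down again while
 * modesetting stays usable (accel_working = false).
 */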
int rs690_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        rv515_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r)
                        return r;
        } else {
                dev_err(rdev->dev, "Expecting atombios for RS690 GPU\n");
                return -EINVAL;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (!radeon_card_posted(rdev) && rdev->bios) {
                DRM_INFO("GPU not posted. posting now...\n");
                atom_asic_init(rdev->mode_info.atom_context);
        }
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* Get vram information */
        rs690_vram_info(rdev);
        /* Initialize memory controller (also test AGP) */
        r = r420_mc_init(rdev);
        if (r)
                return r;
        rv515_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_object_init(rdev);
        if (r)
                return r;
        r = rs400_gart_init(rdev);
        if (r)
                return r;
        rs600_set_safe_registers(rdev);
        rdev->accel_working = true;
        r = rs690_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                rs690_suspend(rdev);
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                rs400_gart_fini(rdev);
                radeon_irq_kms_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}