/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376

static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);

/* get temperature in millidegrees */
u32 evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	u32 actual_temp = 0;

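	/* decode the raw thermal status: if bit 10 is set report 0,
	 * if bit 9 is set clamp to 255, otherwise bits 8:1 hold the
	 * temperature in degrees C
	 */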
	if ((temp >> 10) & 1)
		actual_temp = 0;
	else if ((temp >> 9) & 1)
		actual_temp = 255;
	else
		actual_temp = (temp >> 1) & 0xff;

	return actual_temp * 1000;
}

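/* apply the non-clock parts of the requested power state; currently this
 * only programs the SW-controlled core voltage (vddc) when it differs
 * from the current one
 */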
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}

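/* evergreen_pm_prepare()/evergreen_pm_finish() bracket a reclock: they gate
 * and then re-enable display read requests on all active CRTCs so the
 * displays stay quiet while clocks are changed
 */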
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}

bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}

void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}

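/* enable the HPD pins used by the attached connectors and unmask their
 * interrupts; evergreen_hpd_fini() below undoes this
 */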
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}

void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}

/* watermark setup */

static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp = 0;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2)
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2)
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
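	/* Example: with a 2880-wide mode paired with a 1920-wide mode on the
	 * other head, the wider head gets the 3/4 allocation (5760 * 2) and
	 * the narrower head gets the remaining 1/4 (1920 * 2).
	 */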
	if (mode && other_mode) {
		if (mode->hdisplay > other_mode->hdisplay) {
			if (mode->hdisplay > 2560)
				tmp = 1; /* 3/4 */
			else
				tmp = 0; /* 1/2 */
		} else if (other_mode->hdisplay > mode->hdisplay) {
			if (other_mode->hdisplay > 2560)
				tmp = 3; /* 1/4 */
			else
				tmp = 0; /* 1/2 */
		} else
			tmp = 0; /* 1/2 */
	} else if (mode)
		tmp = 2; /* whole */
	else if (other_mode)
		tmp = 3; /* 1/4 */

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	switch (tmp) {
	case 0:
	case 4:
	default:
		return 3840 * 2;
	case 1:
	case 5:
		return 5760 * 2;
	case 2:
	case 6:
		return 7680 * 2;
	case 3:
	case 7:
		return 1920 * 2;
	}
}

static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}

struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

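/* The helpers below estimate bandwidths in 20.12 fixed point: clocks are
 * converted from kHz to MHz (divide by 1000), scaled by channel/request
 * width and a fixed efficiency factor, then truncated back to an integer.
 */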
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

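/* compute the high- and low-clock latency watermarks for a crtc, program
 * them into the DMIF arbitration registers, and force the display priority
 * high if the mode cannot be sustained at average bandwidth
 */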
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_INFO("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}

void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

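/* poll until the memory controller reports idle or the usec timeout expires */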
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* check the MC busy bits in SRBM_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

/*
 * GART
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read back the flush response */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

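/* set up the VM L2/TLB registers and the context 0 page table, then flush
 * the TLB; the GART table must already be allocated in VRAM
 */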
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
		EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
		(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
		EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}

void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}


void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
		EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}

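/* evergreen_mc_stop()/evergreen_mc_resume() save and restore VGA and CRTC
 * state around MC reprogramming so nothing scans out while the framebuffer
 * location changes
 */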
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
	save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
	save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
	save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);

	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}

static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}

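/* program the MC FB and AGP apertures with the displays stopped, then hand
 * VRAM back to the driver by disabling the VGA renderer
 */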
static void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/*
 * CP.
 */

static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

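/* initialize the micro engine and emit the default clear state so the ring
 * starts from a known context
 */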
static int evergreen_cp_start(struct radeon_device *rdev)
{
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, evergreen_default_size + 15);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(rdev, evergreen_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_unlock_commit(rdev);

	return 0;
}

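/* soft-reset the CP, program the ring buffer and writeback addresses, then
 * start the CP and test the ring
 */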
int evergreen_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

/*
 * Core functions
 */
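/* build the pipe-to-backend mapping: disabled backends are skipped and pipes
 * are optionally swizzled across the remaining ones depending on the ASIC
 */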
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}

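/* per-ASIC gfx configuration: set the shader engine, pipe, SIMD and FIFO
 * limits for each evergreen variant before the gfx block is programmed
 */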
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
1480 | rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; | |
1481 | rdev->config.evergreen.max_gprs = 256; | |
1482 | rdev->config.evergreen.max_threads = 192; | |
1483 | rdev->config.evergreen.max_gs_threads = 16; | |
1484 | rdev->config.evergreen.max_stack_entries = 256; | |
1485 | rdev->config.evergreen.sx_num_of_sets = 4; | |
1486 | rdev->config.evergreen.sx_max_export_size = 128; | |
1487 | rdev->config.evergreen.sx_max_export_pos_size = 32; | |
1488 | rdev->config.evergreen.sx_max_export_smx_size = 96; | |
1489 | rdev->config.evergreen.max_hw_contexts = 4; | |
1490 | rdev->config.evergreen.sq_num_cf_insts = 1; | |
1491 | ||
1492 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | |
1493 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | |
1494 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | |
1495 | break; | |
1496 | } | |
1497 | ||
1498 | /* Initialize HDP */ | |
1499 | for (i = 0, j = 0; i < 32; i++, j += 0x18) { | |
1500 | WREG32((0x2c14 + j), 0x00000000); | |
1501 | WREG32((0x2c18 + j), 0x00000000); | |
1502 | WREG32((0x2c1c + j), 0x00000000); | |
1503 | WREG32((0x2c20 + j), 0x00000000); | |
1504 | WREG32((0x2c24 + j), 0x00000000); | |
1505 | } | |
1506 | ||
1507 | WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); | |
1508 | ||
1509 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; | |
1510 | ||
1511 | cc_gc_shader_pipe_config |= | |
1512 | INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) | |
1513 | & EVERGREEN_MAX_PIPES_MASK); | |
1514 | cc_gc_shader_pipe_config |= | |
1515 | INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) | |
1516 | & EVERGREEN_MAX_SIMDS_MASK); | |
1517 | ||
1518 | cc_rb_backend_disable = | |
1519 | BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) | |
1520 | & EVERGREEN_MAX_BACKENDS_MASK); | |
1521 | ||
1522 | ||
1523 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | |
1524 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | |
1525 | ||
1526 | switch (rdev->config.evergreen.max_tile_pipes) { | |
1527 | case 1: | |
1528 | default: | |
1529 | gb_addr_config |= NUM_PIPES(0); | |
1530 | break; | |
1531 | case 2: | |
1532 | gb_addr_config |= NUM_PIPES(1); | |
1533 | break; | |
1534 | case 4: | |
1535 | gb_addr_config |= NUM_PIPES(2); | |
1536 | break; | |
1537 | case 8: | |
1538 | gb_addr_config |= NUM_PIPES(3); | |
1539 | break; | |
1540 | } | |
1541 | ||
1542 | gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); | |
1543 | gb_addr_config |= BANK_INTERLEAVE_SIZE(0); | |
1544 | gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); | |
1545 | gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); | |
1546 | gb_addr_config |= NUM_GPUS(0); /* Hemlock? */ | |
1547 | gb_addr_config |= MULTI_GPU_TILE_SIZE(2); | |
1548 | ||
1549 | if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) | |
1550 | gb_addr_config |= ROW_SIZE(2); | |
1551 | else | |
1552 | gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); | |
1553 | ||
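/* Note added for clarity (not in the original): ROW_SIZE is taken from the
 * NOOFCOLS field of MC_ARB_RAMCFG but clamped above, i.e. a hypothetical
 * NOOFCOLS value of 3 would still program ROW_SIZE(2); only values of 0, 1
 * or 2 are passed through unchanged.
 */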
1554 | if (rdev->ddev->pdev->device == 0x689e) { | |
1555 | u32 efuse_straps_4; | |
1556 | u32 efuse_straps_3; | |
1557 | u8 efuse_box_bit_131_124; | |
1558 | ||
1559 | WREG32(RCU_IND_INDEX, 0x204); | |
1560 | efuse_straps_4 = RREG32(RCU_IND_DATA); | |
1561 | WREG32(RCU_IND_INDEX, 0x203); | |
1562 | efuse_straps_3 = RREG32(RCU_IND_DATA); | |
1563 | efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); | |
1564 | ||
1565 | switch(efuse_box_bit_131_124) { | |
1566 | case 0x00: | |
1567 | gb_backend_map = 0x76543210; | |
1568 | break; | |
1569 | case 0x55: | |
1570 | gb_backend_map = 0x77553311; | |
1571 | break; | |
1572 | case 0x56: | |
1573 | gb_backend_map = 0x77553300; | |
1574 | break; | |
1575 | case 0x59: | |
1576 | gb_backend_map = 0x77552211; | |
1577 | break; | |
1578 | case 0x66: | |
1579 | gb_backend_map = 0x77443300; | |
1580 | break; | |
1581 | case 0x99: | |
1582 | gb_backend_map = 0x66552211; | |
1583 | break; | |
1584 | case 0x5a: | |
1585 | gb_backend_map = 0x77552200; | |
1586 | break; | |
1587 | case 0xaa: | |
1588 | gb_backend_map = 0x66442200; | |
1589 | break; | |
1590 | case 0x95: | |
1591 | gb_backend_map = 0x66553311; | |
1592 | break; | |
1593 | default: | |
1594 | DRM_ERROR("bad backend map, using default\n"); | |
1595 | gb_backend_map = | |
1596 | evergreen_get_tile_pipe_to_backend_map(rdev, | |
1597 | rdev->config.evergreen.max_tile_pipes, | |
1598 | rdev->config.evergreen.max_backends, | |
1599 | ((EVERGREEN_MAX_BACKENDS_MASK << | |
1600 | rdev->config.evergreen.max_backends) & | |
1601 | EVERGREEN_MAX_BACKENDS_MASK)); | |
1602 | break; | |
1603 | } | |
1604 | } else if (rdev->ddev->pdev->device == 0x68b9) { | |
1605 | u32 efuse_straps_3; | |
1606 | u8 efuse_box_bit_127_124; | |
1607 | ||
1608 | WREG32(RCU_IND_INDEX, 0x203); | |
1609 | efuse_straps_3 = RREG32(RCU_IND_DATA); | |
d31dba58 | 1610 | efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); |
32fcdbf4 AD |
1611 | |
1612 | switch(efuse_box_bit_127_124) { | |
1613 | case 0x0: | |
1614 | gb_backend_map = 0x00003210; | |
1615 | break; | |
1616 | case 0x5: | |
1617 | case 0x6: | |
1618 | case 0x9: | |
1619 | case 0xa: | |
1620 | gb_backend_map = 0x00003311; | |
1621 | break; | |
1622 | default: | |
1623 | DRM_ERROR("bad backend map, using default\n"); | |
1624 | gb_backend_map = | |
1625 | evergreen_get_tile_pipe_to_backend_map(rdev, | |
1626 | rdev->config.evergreen.max_tile_pipes, | |
1627 | rdev->config.evergreen.max_backends, | |
1628 | ((EVERGREEN_MAX_BACKENDS_MASK << | |
1629 | rdev->config.evergreen.max_backends) & | |
1630 | EVERGREEN_MAX_BACKENDS_MASK)); | |
1631 | break; | |
1632 | } | |
b741be82 AD |
1633 | } else { |
1634 | switch (rdev->family) { | |
1635 | case CHIP_CYPRESS: | |
1636 | case CHIP_HEMLOCK: | |
1637 | gb_backend_map = 0x66442200; | |
1638 | break; | |
1639 | case CHIP_JUNIPER: | |
1640 | gb_backend_map = 0x00006420; | |
1641 | break; | |
1642 | default: | |
1643 | gb_backend_map = | |
1644 | evergreen_get_tile_pipe_to_backend_map(rdev, | |
1645 | rdev->config.evergreen.max_tile_pipes, | |
1646 | rdev->config.evergreen.max_backends, | |
1647 | ((EVERGREEN_MAX_BACKENDS_MASK << | |
1648 | rdev->config.evergreen.max_backends) & | |
1649 | EVERGREEN_MAX_BACKENDS_MASK)); | |
1650 | } | |
1651 | } | |
32fcdbf4 | 1652 | |
1aa52bd3 AD |
1653 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
1654 | * not have bank info, so create a custom tiling dword. | |
1655 | * bits 3:0 num_pipes | |
1656 | * bits 7:4 num_banks | |
1657 | * bits 11:8 group_size | |
1658 | * bits 15:12 row_size | |
1659 | */ | |
1660 | rdev->config.evergreen.tile_config = 0; | |
1661 | switch (rdev->config.evergreen.max_tile_pipes) { | |
1662 | case 1: | |
1663 | default: | |
1664 | rdev->config.evergreen.tile_config |= (0 << 0); | |
1665 | break; | |
1666 | case 2: | |
1667 | rdev->config.evergreen.tile_config |= (1 << 0); | |
1668 | break; | |
1669 | case 4: | |
1670 | rdev->config.evergreen.tile_config |= (2 << 0); | |
1671 | break; | |
1672 | case 8: | |
1673 | rdev->config.evergreen.tile_config |= (3 << 0); | |
1674 | break; | |
1675 | } | |
1676 | rdev->config.evergreen.tile_config |= | |
1677 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | |
1678 | rdev->config.evergreen.tile_config |= | |
1679 | ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; | |
1680 | rdev->config.evergreen.tile_config |= | |
1681 | ((gb_addr_config & 0x30000000) >> 28) << 12; | |
1682 | ||
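/* Worked example of the custom tiling dword built above (assumed field
 * values, purely illustrative): a board with 8 tile pipes (field 3), a
 * NOOFBANK field of 2, a BURSTLENGTH field of 1 and GB_ADDR_CONFIG row-size
 * bits of 2 would yield
 *     tile_config = (3 << 0) | (2 << 4) | (1 << 8) | (2 << 12) = 0x2123.
 * Real values come from MC_ARB_RAMCFG and GB_ADDR_CONFIG.
 */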
32fcdbf4 AD |
1683 | WREG32(GB_BACKEND_MAP, gb_backend_map); |
1684 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | |
1685 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | |
1686 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | |
1687 | ||
1688 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | |
1689 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | |
1690 | ||
1691 | for (i = 0; i < rdev->config.evergreen.num_ses; i++) { | |
1692 | u32 rb = cc_rb_backend_disable | (0xf0 << 16); | |
1693 | u32 sp = cc_gc_shader_pipe_config; | |
1694 | u32 gfx = grbm_gfx_index | SE_INDEX(i); | |
1695 | ||
1696 | if (i == num_shader_engines) { | |
1697 | rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); | |
1698 | sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); | |
1699 | } | |
1700 | ||
1701 | WREG32(GRBM_GFX_INDEX, gfx); | |
1702 | WREG32(RLC_GFX_INDEX, gfx); | |
1703 | ||
1704 | WREG32(CC_RB_BACKEND_DISABLE, rb); | |
1705 | WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); | |
1706 | WREG32(GC_USER_RB_BACKEND_DISABLE, rb); | |
1707 | WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); | |
1708 | } | |
1709 | ||
1710 | grbm_gfx_index |= SE_BROADCAST_WRITES; | |
1711 | WREG32(GRBM_GFX_INDEX, grbm_gfx_index); | |
1712 | WREG32(RLC_GFX_INDEX, grbm_gfx_index); | |
1713 | ||
1714 | WREG32(CGTS_SYS_TCC_DISABLE, 0); | |
1715 | WREG32(CGTS_TCC_DISABLE, 0); | |
1716 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); | |
1717 | WREG32(CGTS_USER_TCC_DISABLE, 0); | |
1718 | ||
1719 | /* set HW defaults for 3D engine */ | |
1720 | WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | | |
1721 | ROQ_IB2_START(0x2b))); | |
1722 | ||
1723 | WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); | |
1724 | ||
1725 | WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | | |
1726 | SYNC_GRADIENT | | |
1727 | SYNC_WALKER | | |
1728 | SYNC_ALIGNER)); | |
1729 | ||
1730 | sx_debug_1 = RREG32(SX_DEBUG_1); | |
1731 | sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; | |
1732 | WREG32(SX_DEBUG_1, sx_debug_1); | |
1733 | ||
1734 | ||
1735 | smx_dc_ctl0 = RREG32(SMX_DC_CTL0); | |
1736 | smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); | |
1737 | smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); | |
1738 | WREG32(SMX_DC_CTL0, smx_dc_ctl0); | |
1739 | ||
1740 | WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | | |
1741 | POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | | |
1742 | SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); | |
1743 | ||
1744 | WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | | |
1745 | SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | | |
1746 | SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); | |
1747 | ||
1748 | WREG32(VGT_NUM_INSTANCES, 1); | |
1749 | WREG32(SPI_CONFIG_CNTL, 0); | |
1750 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); | |
1751 | WREG32(CP_PERFMON_CNTL, 0); | |
1752 | ||
1753 | WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | | |
1754 | FETCH_FIFO_HIWATER(0x4) | | |
1755 | DONE_FIFO_HIWATER(0xe0) | | |
1756 | ALU_UPDATE_FIFO_HIWATER(0x8))); | |
1757 | ||
1758 | sq_config = RREG32(SQ_CONFIG); | |
1759 | sq_config &= ~(PS_PRIO(3) | | |
1760 | VS_PRIO(3) | | |
1761 | GS_PRIO(3) | | |
1762 | ES_PRIO(3)); | |
1763 | sq_config |= (VC_ENABLE | | |
1764 | EXPORT_SRC_C | | |
1765 | PS_PRIO(0) | | |
1766 | VS_PRIO(1) | | |
1767 | GS_PRIO(2) | | |
1768 | ES_PRIO(3)); | |
1769 | ||
1770 | if (rdev->family == CHIP_CEDAR) | |
1771 | /* no vertex cache */ | |
1772 | sq_config &= ~VC_ENABLE; | |
1773 | ||
1774 | sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); | |
1775 | ||
1776 | 	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32); | |
1777 | sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32); | |
1778 | sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4); | |
1779 | sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); | |
1780 | sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32); | |
1781 | sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); | |
1782 | sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); | |
1783 | ||
1784 | if (rdev->family == CHIP_CEDAR) | |
1785 | ps_thread_count = 96; | |
1786 | else | |
1787 | ps_thread_count = 128; | |
1788 | ||
1789 | sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); | |
f96b35cd AD |
1790 | sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); |
1791 | sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); | |
1792 | sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); | |
1793 | sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); | |
1794 | sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); | |
32fcdbf4 AD |
1795 | |
1796 | sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | |
1797 | sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | |
1798 | sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | |
1799 | sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | |
1800 | sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | |
1801 | sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6); | |
1802 | ||
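/* Worked example of the resource split above, using Cypress-class limits
 * (max_gprs = 256, max_threads = 248, max_stack_entries = 512,
 * ps_thread_count = 128); added for illustration only:
 *   GPRs:    (256 - 8) * 12 / 32 = 93 PS, * 6 / 32 = 46 VS,
 *            * 4 / 32 = 31 GS/ES, * 3 / 32 = 23 HS/LS,
 *            plus 4 clause-temp GPRs.
 *   Threads: (248 - 128) / 6 = 20, rounded down to a multiple of 8 = 16
 *            each for VS/GS/ES/HS/LS.
 *   Stack:   512 / 6 = 85 entries per stage.
 */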
1803 | WREG32(SQ_CONFIG, sq_config); | |
1804 | WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); | |
1805 | WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); | |
1806 | WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3); | |
1807 | WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); | |
1808 | WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2); | |
1809 | WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); | |
1810 | WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); | |
1811 | WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3); | |
1812 | WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); | |
1813 | WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt); | |
1814 | ||
1815 | WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | | |
1816 | FORCE_EOV_MAX_REZ_CNT(255))); | |
1817 | ||
1818 | if (rdev->family == CHIP_CEDAR) | |
1819 | vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); | |
1820 | else | |
1821 | vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); | |
1822 | vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); | |
1823 | WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); | |
1824 | ||
1825 | WREG32(VGT_GS_VERTEX_REUSE, 16); | |
1826 | WREG32(PA_SC_LINE_STIPPLE_STATE, 0); | |
1827 | ||
60a4a3e0 AD |
1828 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14); |
1829 | WREG32(VGT_OUT_DEALLOC_CNTL, 16); | |
1830 | ||
32fcdbf4 AD |
1831 | WREG32(CB_PERF_CTR0_SEL_0, 0); |
1832 | WREG32(CB_PERF_CTR0_SEL_1, 0); | |
1833 | WREG32(CB_PERF_CTR1_SEL_0, 0); | |
1834 | WREG32(CB_PERF_CTR1_SEL_1, 0); | |
1835 | WREG32(CB_PERF_CTR2_SEL_0, 0); | |
1836 | WREG32(CB_PERF_CTR2_SEL_1, 0); | |
1837 | WREG32(CB_PERF_CTR3_SEL_0, 0); | |
1838 | WREG32(CB_PERF_CTR3_SEL_1, 0); | |
1839 | ||
60a4a3e0 AD |
1840 | /* clear render buffer base addresses */ |
1841 | WREG32(CB_COLOR0_BASE, 0); | |
1842 | WREG32(CB_COLOR1_BASE, 0); | |
1843 | WREG32(CB_COLOR2_BASE, 0); | |
1844 | WREG32(CB_COLOR3_BASE, 0); | |
1845 | WREG32(CB_COLOR4_BASE, 0); | |
1846 | WREG32(CB_COLOR5_BASE, 0); | |
1847 | WREG32(CB_COLOR6_BASE, 0); | |
1848 | WREG32(CB_COLOR7_BASE, 0); | |
1849 | WREG32(CB_COLOR8_BASE, 0); | |
1850 | WREG32(CB_COLOR9_BASE, 0); | |
1851 | WREG32(CB_COLOR10_BASE, 0); | |
1852 | WREG32(CB_COLOR11_BASE, 0); | |
1853 | ||
1854 | /* set the shader const cache sizes to 0 */ | |
1855 | for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4) | |
1856 | WREG32(i, 0); | |
1857 | for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4) | |
1858 | WREG32(i, 0); | |
1859 | ||
32fcdbf4 AD |
1860 | hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL); |
1861 | WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl); | |
1862 | ||
1863 | WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); | |
1864 | ||
1865 | udelay(50); | |
1866 | ||
bcc1c2a1 AD |
1867 | } |
1868 | ||
1869 | int evergreen_mc_init(struct radeon_device *rdev) | |
1870 | { | |
bcc1c2a1 AD |
1871 | u32 tmp; |
1872 | int chansize, numchan; | |
bcc1c2a1 AD |
1873 | |
1874 | 	/* Get VRAM information */ | |
1875 | rdev->mc.vram_is_ddr = true; | |
1876 | tmp = RREG32(MC_ARB_RAMCFG); | |
1877 | if (tmp & CHANSIZE_OVERRIDE) { | |
1878 | chansize = 16; | |
1879 | } else if (tmp & CHANSIZE_MASK) { | |
1880 | chansize = 64; | |
1881 | } else { | |
1882 | chansize = 32; | |
1883 | } | |
1884 | tmp = RREG32(MC_SHARED_CHMAP); | |
1885 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | |
1886 | case 0: | |
1887 | default: | |
1888 | numchan = 1; | |
1889 | break; | |
1890 | case 1: | |
1891 | numchan = 2; | |
1892 | break; | |
1893 | case 2: | |
1894 | numchan = 4; | |
1895 | break; | |
1896 | case 3: | |
1897 | numchan = 8; | |
1898 | break; | |
1899 | } | |
1900 | rdev->mc.vram_width = numchan * chansize; | |
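	/* Example (illustrative only): 8 enabled channels (NOOFCHAN field 3) of
	 * 32 bits each give rdev->mc.vram_width = 8 * 32 = 256 bits.
	 */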
1901 | 	/* Could the aperture size report 0? */ | |
01d73a69 JC |
1902 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
1903 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | |
bcc1c2a1 AD |
1904 | /* Setup GPU memory space */ |
1905 | /* size in MB on evergreen */ | |
1906 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | |
1907 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | |
51e5fcd3 | 1908 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
c919b371 | 1909 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; |
d594e46a | 1910 | r600_vram_gtt_location(rdev, &rdev->mc); |
f47299c5 AD |
1911 | radeon_update_bandwidth_info(rdev); |
1912 | ||
bcc1c2a1 AD |
1913 | return 0; |
1914 | } | |
d594e46a | 1915 | |
225758d8 JG |
1916 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev) |
1917 | { | |
1918 | /* FIXME: implement for evergreen */ | |
1919 | return false; | |
1920 | } | |
1921 | ||
747943ea | 1922 | static int evergreen_gpu_soft_reset(struct radeon_device *rdev) |
bcc1c2a1 | 1923 | { |
747943ea AD |
1924 | struct evergreen_mc_save save; |
1925 | u32 srbm_reset = 0; | |
1926 | u32 grbm_reset = 0; | |
1927 | ||
1928 | 	dev_info(rdev->dev, "GPU softreset\n"); | |
1929 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | |
1930 | RREG32(GRBM_STATUS)); | |
1931 | dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", | |
1932 | RREG32(GRBM_STATUS_SE0)); | |
1933 | dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", | |
1934 | RREG32(GRBM_STATUS_SE1)); | |
1935 | dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", | |
1936 | RREG32(SRBM_STATUS)); | |
1937 | evergreen_mc_stop(rdev, &save); | |
1938 | if (evergreen_mc_wait_for_idle(rdev)) { | |
1939 | 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); | |
1940 | } | |
1941 | /* Disable CP parsing/prefetching */ | |
1942 | WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); | |
1943 | ||
1944 | /* reset all the gfx blocks */ | |
1945 | grbm_reset = (SOFT_RESET_CP | | |
1946 | SOFT_RESET_CB | | |
1947 | SOFT_RESET_DB | | |
1948 | SOFT_RESET_PA | | |
1949 | SOFT_RESET_SC | | |
1950 | SOFT_RESET_SPI | | |
1951 | SOFT_RESET_SH | | |
1952 | SOFT_RESET_SX | | |
1953 | SOFT_RESET_TC | | |
1954 | SOFT_RESET_TA | | |
1955 | SOFT_RESET_VC | | |
1956 | SOFT_RESET_VGT); | |
1957 | ||
1958 | dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset); | |
1959 | WREG32(GRBM_SOFT_RESET, grbm_reset); | |
1960 | (void)RREG32(GRBM_SOFT_RESET); | |
1961 | udelay(50); | |
1962 | WREG32(GRBM_SOFT_RESET, 0); | |
1963 | (void)RREG32(GRBM_SOFT_RESET); | |
1964 | ||
1965 | /* reset all the system blocks */ | |
1966 | srbm_reset = SRBM_SOFT_RESET_ALL_MASK; | |
1967 | ||
1968 | dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); | |
1969 | WREG32(SRBM_SOFT_RESET, srbm_reset); | |
1970 | (void)RREG32(SRBM_SOFT_RESET); | |
1971 | udelay(50); | |
1972 | WREG32(SRBM_SOFT_RESET, 0); | |
1973 | (void)RREG32(SRBM_SOFT_RESET); | |
1974 | /* Wait a little for things to settle down */ | |
1975 | udelay(50); | |
1976 | dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", | |
1977 | RREG32(GRBM_STATUS)); | |
1978 | dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", | |
1979 | RREG32(GRBM_STATUS_SE0)); | |
1980 | dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", | |
1981 | RREG32(GRBM_STATUS_SE1)); | |
1982 | dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", | |
1983 | RREG32(SRBM_STATUS)); | |
1984 | 	/* After reset we need to reinit the asic as the GPU often ends up in an | |
1985 | 	 * incoherent state. | |
1986 | */ | |
1987 | atom_asic_init(rdev->mode_info.atom_context); | |
1988 | evergreen_mc_resume(rdev, &save); | |
bcc1c2a1 AD |
1989 | return 0; |
1990 | } | |
1991 | ||
a2d07b74 | 1992 | int evergreen_asic_reset(struct radeon_device *rdev) |
bcc1c2a1 | 1993 | { |
747943ea AD |
1994 | return evergreen_gpu_soft_reset(rdev); |
1995 | } | |
1996 | ||
45f9a39b AD |
1997 | /* Interrupts */ |
1998 | ||
1999 | u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) | |
2000 | { | |
2001 | switch (crtc) { | |
2002 | case 0: | |
2003 | return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); | |
2004 | case 1: | |
2005 | return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); | |
2006 | case 2: | |
2007 | return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); | |
2008 | case 3: | |
2009 | return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); | |
2010 | case 4: | |
2011 | return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); | |
2012 | case 5: | |
2013 | return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); | |
2014 | default: | |
2015 | return 0; | |
2016 | } | |
2017 | } | |
2018 | ||
2019 | void evergreen_disable_interrupt_state(struct radeon_device *rdev) | |
2020 | { | |
2021 | u32 tmp; | |
2022 | ||
3555e53b | 2023 | WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
45f9a39b AD |
2024 | WREG32(GRBM_INT_CNTL, 0); |
2025 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | |
2026 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | |
2027 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | |
2028 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | |
2029 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | |
2030 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | |
2031 | ||
2032 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | |
2033 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | |
2034 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | |
2035 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | |
2036 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | |
2037 | WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | |
2038 | ||
2039 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | |
2040 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | |
2041 | ||
2042 | tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2043 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
2044 | tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2045 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
2046 | tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2047 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
2048 | tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2049 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
2050 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2051 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
2052 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; | |
2053 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
2054 | ||
2055 | } | |
2056 | ||
2057 | int evergreen_irq_set(struct radeon_device *rdev) | |
2058 | { | |
2059 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | |
2060 | u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; | |
2061 | u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; | |
2031f77c | 2062 | u32 grbm_int_cntl = 0; |
45f9a39b AD |
2063 | |
2064 | if (!rdev->irq.installed) { | |
fce7d61b | 2065 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
45f9a39b AD |
2066 | return -EINVAL; |
2067 | } | |
2068 | /* don't enable anything if the ih is disabled */ | |
2069 | if (!rdev->ih.enabled) { | |
2070 | r600_disable_interrupts(rdev); | |
2071 | /* force the active interrupt state to all disabled */ | |
2072 | evergreen_disable_interrupt_state(rdev); | |
2073 | return 0; | |
2074 | } | |
2075 | ||
2076 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
2077 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
2078 | hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
2079 | hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
2080 | hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
2081 | hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; | |
2082 | ||
2083 | if (rdev->irq.sw_int) { | |
2084 | DRM_DEBUG("evergreen_irq_set: sw int\n"); | |
2085 | cp_int_cntl |= RB_INT_ENABLE; | |
d0f8a854 | 2086 | cp_int_cntl |= TIME_STAMP_INT_ENABLE; |
45f9a39b AD |
2087 | } |
2088 | if (rdev->irq.crtc_vblank_int[0]) { | |
2089 | DRM_DEBUG("evergreen_irq_set: vblank 0\n"); | |
2090 | crtc1 |= VBLANK_INT_MASK; | |
2091 | } | |
2092 | if (rdev->irq.crtc_vblank_int[1]) { | |
2093 | DRM_DEBUG("evergreen_irq_set: vblank 1\n"); | |
2094 | crtc2 |= VBLANK_INT_MASK; | |
2095 | } | |
2096 | if (rdev->irq.crtc_vblank_int[2]) { | |
2097 | DRM_DEBUG("evergreen_irq_set: vblank 2\n"); | |
2098 | crtc3 |= VBLANK_INT_MASK; | |
2099 | } | |
2100 | if (rdev->irq.crtc_vblank_int[3]) { | |
2101 | DRM_DEBUG("evergreen_irq_set: vblank 3\n"); | |
2102 | crtc4 |= VBLANK_INT_MASK; | |
2103 | } | |
2104 | if (rdev->irq.crtc_vblank_int[4]) { | |
2105 | DRM_DEBUG("evergreen_irq_set: vblank 4\n"); | |
2106 | crtc5 |= VBLANK_INT_MASK; | |
2107 | } | |
2108 | if (rdev->irq.crtc_vblank_int[5]) { | |
2109 | DRM_DEBUG("evergreen_irq_set: vblank 5\n"); | |
2110 | crtc6 |= VBLANK_INT_MASK; | |
2111 | } | |
2112 | if (rdev->irq.hpd[0]) { | |
2113 | DRM_DEBUG("evergreen_irq_set: hpd 1\n"); | |
2114 | hpd1 |= DC_HPDx_INT_EN; | |
2115 | } | |
2116 | if (rdev->irq.hpd[1]) { | |
2117 | DRM_DEBUG("evergreen_irq_set: hpd 2\n"); | |
2118 | hpd2 |= DC_HPDx_INT_EN; | |
2119 | } | |
2120 | if (rdev->irq.hpd[2]) { | |
2121 | DRM_DEBUG("evergreen_irq_set: hpd 3\n"); | |
2122 | hpd3 |= DC_HPDx_INT_EN; | |
2123 | } | |
2124 | if (rdev->irq.hpd[3]) { | |
2125 | DRM_DEBUG("evergreen_irq_set: hpd 4\n"); | |
2126 | hpd4 |= DC_HPDx_INT_EN; | |
2127 | } | |
2128 | if (rdev->irq.hpd[4]) { | |
2129 | DRM_DEBUG("evergreen_irq_set: hpd 5\n"); | |
2130 | hpd5 |= DC_HPDx_INT_EN; | |
2131 | } | |
2132 | if (rdev->irq.hpd[5]) { | |
2133 | DRM_DEBUG("evergreen_irq_set: hpd 6\n"); | |
2134 | hpd6 |= DC_HPDx_INT_EN; | |
2135 | } | |
2031f77c AD |
2136 | if (rdev->irq.gui_idle) { |
2137 | DRM_DEBUG("gui idle\n"); | |
2138 | grbm_int_cntl |= GUI_IDLE_INT_ENABLE; | |
2139 | } | |
45f9a39b AD |
2140 | |
2141 | WREG32(CP_INT_CNTL, cp_int_cntl); | |
2031f77c | 2142 | WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
45f9a39b AD |
2143 | |
2144 | WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); | |
2145 | WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); | |
2146 | WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); | |
2147 | WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); | |
2148 | WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); | |
2149 | WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); | |
2150 | ||
2151 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | |
2152 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | |
2153 | WREG32(DC_HPD3_INT_CONTROL, hpd3); | |
2154 | WREG32(DC_HPD4_INT_CONTROL, hpd4); | |
2155 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | |
2156 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | |
2157 | ||
bcc1c2a1 AD |
2158 | return 0; |
2159 | } | |
2160 | ||
45f9a39b AD |
2161 | static inline void evergreen_irq_ack(struct radeon_device *rdev, |
2162 | u32 *disp_int, | |
2163 | u32 *disp_int_cont, | |
2164 | u32 *disp_int_cont2, | |
2165 | u32 *disp_int_cont3, | |
2166 | u32 *disp_int_cont4, | |
2167 | u32 *disp_int_cont5) | |
2168 | { | |
2169 | u32 tmp; | |
2170 | ||
2171 | *disp_int = RREG32(DISP_INTERRUPT_STATUS); | |
2172 | *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | |
2173 | *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); | |
2174 | *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); | |
2175 | *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); | |
2176 | *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); | |
2177 | ||
2178 | if (*disp_int & LB_D1_VBLANK_INTERRUPT) | |
2179 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); | |
2180 | if (*disp_int & LB_D1_VLINE_INTERRUPT) | |
2181 | WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); | |
2182 | ||
2183 | if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) | |
2184 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); | |
2185 | if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) | |
2186 | WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); | |
2187 | ||
2188 | if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) | |
2189 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); | |
2190 | if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) | |
2191 | WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); | |
2192 | ||
2193 | if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) | |
2194 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); | |
2195 | if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) | |
2196 | WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); | |
2197 | ||
2198 | if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) | |
2199 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); | |
2200 | if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) | |
2201 | WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); | |
2202 | ||
2203 | if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) | |
2204 | WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); | |
2205 | if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) | |
2206 | WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); | |
2207 | ||
2208 | if (*disp_int & DC_HPD1_INTERRUPT) { | |
2209 | tmp = RREG32(DC_HPD1_INT_CONTROL); | |
2210 | tmp |= DC_HPDx_INT_ACK; | |
2211 | WREG32(DC_HPD1_INT_CONTROL, tmp); | |
2212 | } | |
2213 | if (*disp_int_cont & DC_HPD2_INTERRUPT) { | |
2214 | tmp = RREG32(DC_HPD2_INT_CONTROL); | |
2215 | tmp |= DC_HPDx_INT_ACK; | |
2216 | WREG32(DC_HPD2_INT_CONTROL, tmp); | |
2217 | } | |
2218 | if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { | |
2219 | tmp = RREG32(DC_HPD3_INT_CONTROL); | |
2220 | tmp |= DC_HPDx_INT_ACK; | |
2221 | WREG32(DC_HPD3_INT_CONTROL, tmp); | |
2222 | } | |
2223 | if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { | |
2224 | tmp = RREG32(DC_HPD4_INT_CONTROL); | |
2225 | tmp |= DC_HPDx_INT_ACK; | |
2226 | WREG32(DC_HPD4_INT_CONTROL, tmp); | |
2227 | } | |
2228 | if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { | |
2229 | tmp = RREG32(DC_HPD5_INT_CONTROL); | |
2230 | tmp |= DC_HPDx_INT_ACK; | |
2231 | WREG32(DC_HPD5_INT_CONTROL, tmp); | |
2232 | } | |
2233 | if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { | |
2234 | 		tmp = RREG32(DC_HPD6_INT_CONTROL); | |
2235 | tmp |= DC_HPDx_INT_ACK; | |
2236 | WREG32(DC_HPD6_INT_CONTROL, tmp); | |
2237 | } | |
2238 | } | |
2239 | ||
2240 | void evergreen_irq_disable(struct radeon_device *rdev) | |
2241 | { | |
2242 | u32 disp_int, disp_int_cont, disp_int_cont2; | |
2243 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | |
2244 | ||
2245 | r600_disable_interrupts(rdev); | |
2246 | /* Wait and acknowledge irq */ | |
2247 | mdelay(1); | |
2248 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | |
2249 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | |
2250 | evergreen_disable_interrupt_state(rdev); | |
2251 | } | |
2252 | ||
2253 | static void evergreen_irq_suspend(struct radeon_device *rdev) | |
2254 | { | |
2255 | evergreen_irq_disable(rdev); | |
2256 | r600_rlc_stop(rdev); | |
2257 | } | |
2258 | ||
2259 | static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |
2260 | { | |
2261 | u32 wptr, tmp; | |
2262 | ||
724c80e1 AD |
2263 | if (rdev->wb.enabled) |
2264 | wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; | |
2265 | else | |
2266 | wptr = RREG32(IH_RB_WPTR); | |
45f9a39b AD |
2267 | |
2268 | if (wptr & RB_OVERFLOW) { | |
2269 | 		/* When a ring buffer overflow happens, start parsing interrupts | |
2270 | 		 * from the last vector that has not been overwritten (wptr + 16). | |
2271 | 		 * Hopefully this should allow us to catch up. | |
2272 | */ | |
2273 | dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", | |
2274 | wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); | |
2275 | rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; | |
2276 | tmp = RREG32(IH_RB_CNTL); | |
2277 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | |
2278 | WREG32(IH_RB_CNTL, tmp); | |
2279 | } | |
2280 | return (wptr & rdev->ih.ptr_mask); | |
2281 | } | |
2282 | ||
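/* Illustrative note on the overflow handling above (not from the original
 * source): each IH vector is 16 bytes, so on overflow the read pointer is
 * resynchronized to the oldest vector that has not been overwritten. For a
 * hypothetical 64KB ring (ptr_mask = 0xffff) with wptr = 0x20, the new rptr
 * becomes (0x20 + 16) & 0xffff = 0x30.
 */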
2283 | int evergreen_irq_process(struct radeon_device *rdev) | |
2284 | { | |
2285 | u32 wptr = evergreen_get_ih_wptr(rdev); | |
2286 | u32 rptr = rdev->ih.rptr; | |
2287 | u32 src_id, src_data; | |
2288 | u32 ring_index; | |
2289 | u32 disp_int, disp_int_cont, disp_int_cont2; | |
2290 | u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; | |
2291 | unsigned long flags; | |
2292 | bool queue_hotplug = false; | |
2293 | ||
2294 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | |
2295 | if (!rdev->ih.enabled) | |
2296 | return IRQ_NONE; | |
2297 | ||
2298 | spin_lock_irqsave(&rdev->ih.lock, flags); | |
2299 | ||
2300 | if (rptr == wptr) { | |
2301 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | |
2302 | return IRQ_NONE; | |
2303 | } | |
2304 | if (rdev->shutdown) { | |
2305 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | |
2306 | return IRQ_NONE; | |
2307 | } | |
2308 | ||
2309 | restart_ih: | |
2310 | /* display interrupts */ | |
2311 | evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, | |
2312 | &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); | |
2313 | ||
2314 | rdev->ih.wptr = wptr; | |
2315 | while (rptr != wptr) { | |
2316 | /* wptr/rptr are in bytes! */ | |
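		/* Descriptive note added here: each IH ring entry is a 16-byte
		 * (four-dword) vector; dword 0 carries the source id in bits 7:0
		 * and dword 1 carries the source data in bits 27:0, which is why
		 * the code below masks with 0xff and 0xfffffff.
		 */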
2317 | ring_index = rptr / 4; | |
2318 | src_id = rdev->ih.ring[ring_index] & 0xff; | |
2319 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | |
2320 | ||
2321 | switch (src_id) { | |
2322 | case 1: /* D1 vblank/vline */ | |
2323 | switch (src_data) { | |
2324 | case 0: /* D1 vblank */ | |
2325 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | |
2326 | drm_handle_vblank(rdev->ddev, 0); | |
f5d8e0eb | 2327 | rdev->pm.vblank_sync = true; |
45f9a39b AD |
2328 | wake_up(&rdev->irq.vblank_queue); |
2329 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | |
2330 | DRM_DEBUG("IH: D1 vblank\n"); | |
2331 | } | |
2332 | break; | |
2333 | case 1: /* D1 vline */ | |
2334 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | |
2335 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | |
2336 | DRM_DEBUG("IH: D1 vline\n"); | |
2337 | } | |
2338 | break; | |
2339 | default: | |
2340 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2341 | break; | |
2342 | } | |
2343 | break; | |
2344 | case 2: /* D2 vblank/vline */ | |
2345 | switch (src_data) { | |
2346 | case 0: /* D2 vblank */ | |
2347 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | |
2348 | drm_handle_vblank(rdev->ddev, 1); | |
f5d8e0eb | 2349 | rdev->pm.vblank_sync = true; |
45f9a39b AD |
2350 | wake_up(&rdev->irq.vblank_queue); |
2351 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | |
2352 | DRM_DEBUG("IH: D2 vblank\n"); | |
2353 | } | |
2354 | break; | |
2355 | case 1: /* D2 vline */ | |
2356 | if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { | |
2357 | disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; | |
2358 | DRM_DEBUG("IH: D2 vline\n"); | |
2359 | } | |
2360 | break; | |
2361 | default: | |
2362 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2363 | break; | |
2364 | } | |
2365 | break; | |
2366 | case 3: /* D3 vblank/vline */ | |
2367 | switch (src_data) { | |
2368 | case 0: /* D3 vblank */ | |
2369 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | |
2370 | drm_handle_vblank(rdev->ddev, 2); | |
f5d8e0eb | 2371 | rdev->pm.vblank_sync = true; |
45f9a39b AD |
2372 | wake_up(&rdev->irq.vblank_queue); |
2373 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | |
2374 | DRM_DEBUG("IH: D3 vblank\n"); | |
2375 | } | |
2376 | break; | |
2377 | case 1: /* D3 vline */ | |
2378 | if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { | |
2379 | disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; | |
2380 | DRM_DEBUG("IH: D3 vline\n"); | |
2381 | } | |
2382 | break; | |
2383 | default: | |
2384 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2385 | break; | |
2386 | } | |
2387 | break; | |
2388 | case 4: /* D4 vblank/vline */ | |
2389 | switch (src_data) { | |
2390 | case 0: /* D4 vblank */ | |
2391 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | |
2392 | drm_handle_vblank(rdev->ddev, 3); | |
f5d8e0eb | 2393 | rdev->pm.vblank_sync = true; |
45f9a39b AD |
2394 | wake_up(&rdev->irq.vblank_queue); |
2395 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | |
2396 | DRM_DEBUG("IH: D4 vblank\n"); | |
2397 | } | |
2398 | break; | |
2399 | case 1: /* D4 vline */ | |
2400 | if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { | |
2401 | disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; | |
2402 | DRM_DEBUG("IH: D4 vline\n"); | |
2403 | } | |
2404 | break; | |
2405 | default: | |
2406 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2407 | break; | |
2408 | } | |
2409 | break; | |
2410 | case 5: /* D5 vblank/vline */ | |
2411 | switch (src_data) { | |
2412 | case 0: /* D5 vblank */ | |
2413 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | |
2414 | drm_handle_vblank(rdev->ddev, 4); | |
f5d8e0eb | 2415 | rdev->pm.vblank_sync = true; |
45f9a39b AD |
2416 | wake_up(&rdev->irq.vblank_queue); |
2417 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | |
2418 | DRM_DEBUG("IH: D5 vblank\n"); | |
2419 | } | |
2420 | break; | |
2421 | case 1: /* D5 vline */ | |
2422 | if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { | |
2423 | disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; | |
2424 | DRM_DEBUG("IH: D5 vline\n"); | |
2425 | } | |
2426 | break; | |
2427 | default: | |
2428 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2429 | break; | |
2430 | } | |
2431 | break; | |
2432 | case 6: /* D6 vblank/vline */ | |
2433 | switch (src_data) { | |
2434 | case 0: /* D6 vblank */ | |
2435 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | |
2436 | drm_handle_vblank(rdev->ddev, 5); | |
f5d8e0eb | 2437 | rdev->pm.vblank_sync = true; |
45f9a39b AD |
2438 | wake_up(&rdev->irq.vblank_queue); |
2439 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | |
2440 | DRM_DEBUG("IH: D6 vblank\n"); | |
2441 | } | |
2442 | break; | |
2443 | case 1: /* D6 vline */ | |
2444 | if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { | |
2445 | disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; | |
2446 | DRM_DEBUG("IH: D6 vline\n"); | |
2447 | } | |
2448 | break; | |
2449 | default: | |
2450 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2451 | break; | |
2452 | } | |
2453 | break; | |
2454 | case 42: /* HPD hotplug */ | |
2455 | switch (src_data) { | |
2456 | case 0: | |
2457 | if (disp_int & DC_HPD1_INTERRUPT) { | |
2458 | disp_int &= ~DC_HPD1_INTERRUPT; | |
2459 | queue_hotplug = true; | |
2460 | DRM_DEBUG("IH: HPD1\n"); | |
2461 | } | |
2462 | break; | |
2463 | case 1: | |
2464 | if (disp_int_cont & DC_HPD2_INTERRUPT) { | |
2465 | disp_int_cont &= ~DC_HPD2_INTERRUPT; | |
2466 | queue_hotplug = true; | |
2467 | DRM_DEBUG("IH: HPD2\n"); | |
2468 | } | |
2469 | break; | |
2470 | case 2: | |
2471 | if (disp_int_cont2 & DC_HPD3_INTERRUPT) { | |
2472 | disp_int_cont2 &= ~DC_HPD3_INTERRUPT; | |
2473 | queue_hotplug = true; | |
2474 | DRM_DEBUG("IH: HPD3\n"); | |
2475 | } | |
2476 | break; | |
2477 | case 3: | |
2478 | if (disp_int_cont3 & DC_HPD4_INTERRUPT) { | |
2479 | disp_int_cont3 &= ~DC_HPD4_INTERRUPT; | |
2480 | queue_hotplug = true; | |
2481 | DRM_DEBUG("IH: HPD4\n"); | |
2482 | } | |
2483 | break; | |
2484 | case 4: | |
2485 | if (disp_int_cont4 & DC_HPD5_INTERRUPT) { | |
2486 | disp_int_cont4 &= ~DC_HPD5_INTERRUPT; | |
2487 | queue_hotplug = true; | |
2488 | DRM_DEBUG("IH: HPD5\n"); | |
2489 | } | |
2490 | break; | |
2491 | case 5: | |
2492 | if (disp_int_cont5 & DC_HPD6_INTERRUPT) { | |
2493 | disp_int_cont5 &= ~DC_HPD6_INTERRUPT; | |
2494 | queue_hotplug = true; | |
2495 | DRM_DEBUG("IH: HPD6\n"); | |
2496 | } | |
2497 | break; | |
2498 | default: | |
2499 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2500 | break; | |
2501 | } | |
2502 | break; | |
2503 | case 176: /* CP_INT in ring buffer */ | |
2504 | case 177: /* CP_INT in IB1 */ | |
2505 | case 178: /* CP_INT in IB2 */ | |
2506 | DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); | |
2507 | radeon_fence_process(rdev); | |
2508 | break; | |
2509 | case 181: /* CP EOP event */ | |
2510 | DRM_DEBUG("IH: CP EOP\n"); | |
d0f8a854 | 2511 | radeon_fence_process(rdev); |
45f9a39b | 2512 | break; |
2031f77c AD |
2513 | case 233: /* GUI IDLE */ |
2514 | DRM_DEBUG("IH: CP EOP\n"); | |
2515 | rdev->pm.gui_idle = true; | |
2516 | wake_up(&rdev->irq.idle_queue); | |
2517 | break; | |
45f9a39b AD |
2518 | default: |
2519 | DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); | |
2520 | break; | |
2521 | } | |
2522 | ||
2523 | /* wptr/rptr are in bytes! */ | |
2524 | rptr += 16; | |
2525 | rptr &= rdev->ih.ptr_mask; | |
2526 | } | |
2527 | /* make sure wptr hasn't changed while processing */ | |
2528 | wptr = evergreen_get_ih_wptr(rdev); | |
2529 | if (wptr != rdev->ih.wptr) | |
2530 | goto restart_ih; | |
2531 | if (queue_hotplug) | |
2532 | queue_work(rdev->wq, &rdev->hotplug_work); | |
2533 | rdev->ih.rptr = rptr; | |
2534 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | |
2535 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | |
2536 | return IRQ_HANDLED; | |
2537 | } | |
2538 | ||
bcc1c2a1 AD |
2539 | static int evergreen_startup(struct radeon_device *rdev) |
2540 | { | |
bcc1c2a1 AD |
2541 | int r; |
2542 | ||
2543 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | |
2544 | r = r600_init_microcode(rdev); | |
2545 | if (r) { | |
2546 | DRM_ERROR("Failed to load firmware!\n"); | |
2547 | return r; | |
2548 | } | |
2549 | } | |
fe251e2f | 2550 | |
bcc1c2a1 | 2551 | evergreen_mc_program(rdev); |
bcc1c2a1 | 2552 | if (rdev->flags & RADEON_IS_AGP) { |
0fcdb61e | 2553 | evergreen_agp_enable(rdev); |
bcc1c2a1 AD |
2554 | } else { |
2555 | r = evergreen_pcie_gart_enable(rdev); | |
2556 | if (r) | |
2557 | return r; | |
2558 | } | |
bcc1c2a1 | 2559 | evergreen_gpu_init(rdev); |
bcc1c2a1 | 2560 | |
d7ccd8fc | 2561 | r = evergreen_blit_init(rdev); |
bcc1c2a1 | 2562 | if (r) { |
d7ccd8fc AD |
2563 | evergreen_blit_fini(rdev); |
2564 | rdev->asic->copy = NULL; | |
2565 | 		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r); | |
bcc1c2a1 AD |
2566 | } |
2567 | ||
724c80e1 AD |
2568 | /* allocate wb buffer */ |
2569 | r = radeon_wb_init(rdev); | |
2570 | if (r) | |
2571 | return r; | |
2572 | ||
bcc1c2a1 AD |
2573 | /* Enable IRQ */ |
2574 | r = r600_irq_init(rdev); | |
2575 | if (r) { | |
2576 | DRM_ERROR("radeon: IH init failed (%d).\n", r); | |
2577 | radeon_irq_kms_fini(rdev); | |
2578 | return r; | |
2579 | } | |
45f9a39b | 2580 | evergreen_irq_set(rdev); |
bcc1c2a1 AD |
2581 | |
2582 | r = radeon_ring_init(rdev, rdev->cp.ring_size); | |
2583 | if (r) | |
2584 | return r; | |
2585 | r = evergreen_cp_load_microcode(rdev); | |
2586 | if (r) | |
2587 | return r; | |
fe251e2f | 2588 | r = evergreen_cp_resume(rdev); |
bcc1c2a1 AD |
2589 | if (r) |
2590 | return r; | |
fe251e2f | 2591 | |
bcc1c2a1 AD |
2592 | return 0; |
2593 | } | |
2594 | ||
2595 | int evergreen_resume(struct radeon_device *rdev) | |
2596 | { | |
2597 | int r; | |
2598 | ||
2599 | 	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw, | |
2600 | 	 * posting will perform the necessary tasks to bring the GPU back into good | |
2601 | 	 * shape. | |
2602 | */ | |
2603 | /* post card */ | |
2604 | atom_asic_init(rdev->mode_info.atom_context); | |
bcc1c2a1 AD |
2605 | |
2606 | r = evergreen_startup(rdev); | |
2607 | if (r) { | |
2608 | DRM_ERROR("r600 startup failed on resume\n"); | |
2609 | return r; | |
2610 | } | |
fe251e2f | 2611 | |
bcc1c2a1 AD |
2612 | r = r600_ib_test(rdev); |
2613 | if (r) { | |
2614 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | |
2615 | return r; | |
2616 | } | |
fe251e2f | 2617 | |
bcc1c2a1 AD |
2618 | return r; |
2619 | ||
2620 | } | |
2621 | ||
2622 | int evergreen_suspend(struct radeon_device *rdev) | |
2623 | { | |
bcc1c2a1 | 2624 | int r; |
d7ccd8fc | 2625 | |
bcc1c2a1 AD |
2626 | /* FIXME: we should wait for ring to be empty */ |
2627 | r700_cp_stop(rdev); | |
2628 | rdev->cp.ready = false; | |
45f9a39b | 2629 | evergreen_irq_suspend(rdev); |
724c80e1 | 2630 | radeon_wb_disable(rdev); |
bcc1c2a1 | 2631 | evergreen_pcie_gart_disable(rdev); |
d7ccd8fc | 2632 | |
bcc1c2a1 AD |
2633 | /* unpin shaders bo */ |
2634 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | |
2635 | if (likely(r == 0)) { | |
2636 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | |
2637 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | |
2638 | } | |
d7ccd8fc AD |
2639 | |
2640 | return 0; | |
2641 | } | |
2642 | ||
2643 | int evergreen_copy_blit(struct radeon_device *rdev, | |
2644 | uint64_t src_offset, uint64_t dst_offset, | |
2645 | unsigned num_pages, struct radeon_fence *fence) | |
2646 | { | |
2647 | int r; | |
2648 | ||
2649 | mutex_lock(&rdev->r600_blit.mutex); | |
2650 | rdev->r600_blit.vb_ib = NULL; | |
2651 | r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE); | |
2652 | if (r) { | |
2653 | if (rdev->r600_blit.vb_ib) | |
2654 | radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); | |
2655 | mutex_unlock(&rdev->r600_blit.mutex); | |
2656 | return r; | |
2657 | } | |
2658 | evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE); | |
2659 | evergreen_blit_done_copy(rdev, fence); | |
2660 | mutex_unlock(&rdev->r600_blit.mutex); | |
bcc1c2a1 AD |
2661 | return 0; |
2662 | } | |
2663 | ||
2664 | static bool evergreen_card_posted(struct radeon_device *rdev) | |
2665 | { | |
2666 | u32 reg; | |
2667 | ||
2668 | /* first check CRTCs */ | |
2669 | reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | | |
2670 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | | |
2671 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | | |
2672 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | | |
2673 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | | |
2674 | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); | |
2675 | if (reg & EVERGREEN_CRTC_MASTER_EN) | |
2676 | return true; | |
2677 | ||
2678 | /* then check MEM_SIZE, in case the crtcs are off */ | |
2679 | if (RREG32(CONFIG_MEMSIZE)) | |
2680 | return true; | |
2681 | ||
2682 | return false; | |
2683 | } | |
2684 | ||
2685 | /* The plan is to move initialization into this function and use | |
2686 |  * helper functions so that radeon_device_init does pretty much | |
2687 |  * nothing more than call asic-specific functions. This should | |
2688 |  * also allow us to remove a bunch of callback functions | |
2689 |  * like vram_info. | |
2690 | */ | |
2691 | int evergreen_init(struct radeon_device *rdev) | |
2692 | { | |
2693 | int r; | |
2694 | ||
2695 | r = radeon_dummy_page_init(rdev); | |
2696 | if (r) | |
2697 | return r; | |
2698 | 	/* This doesn't do much */ | |
2699 | r = radeon_gem_init(rdev); | |
2700 | if (r) | |
2701 | return r; | |
2702 | /* Read BIOS */ | |
2703 | if (!radeon_get_bios(rdev)) { | |
2704 | if (ASIC_IS_AVIVO(rdev)) | |
2705 | return -EINVAL; | |
2706 | } | |
2707 | /* Must be an ATOMBIOS */ | |
2708 | if (!rdev->is_atom_bios) { | |
2709 | 		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n"); | |
2710 | return -EINVAL; | |
2711 | } | |
2712 | r = radeon_atombios_init(rdev); | |
2713 | if (r) | |
2714 | return r; | |
2715 | /* Post card if necessary */ | |
2716 | if (!evergreen_card_posted(rdev)) { | |
2717 | if (!rdev->bios) { | |
2718 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | |
2719 | return -EINVAL; | |
2720 | } | |
2721 | 		DRM_INFO("GPU not posted. Posting now...\n"); | |
2722 | atom_asic_init(rdev->mode_info.atom_context); | |
2723 | } | |
2724 | /* Initialize scratch registers */ | |
2725 | r600_scratch_init(rdev); | |
2726 | /* Initialize surface registers */ | |
2727 | radeon_surface_init(rdev); | |
2728 | /* Initialize clocks */ | |
2729 | radeon_get_clock_info(rdev->ddev); | |
bcc1c2a1 AD |
2730 | /* Fence driver */ |
2731 | r = radeon_fence_driver_init(rdev); | |
2732 | if (r) | |
2733 | return r; | |
d594e46a JG |
2734 | /* initialize AGP */ |
2735 | if (rdev->flags & RADEON_IS_AGP) { | |
2736 | r = radeon_agp_init(rdev); | |
2737 | if (r) | |
2738 | radeon_agp_disable(rdev); | |
2739 | } | |
2740 | /* initialize memory controller */ | |
bcc1c2a1 AD |
2741 | r = evergreen_mc_init(rdev); |
2742 | if (r) | |
2743 | return r; | |
2744 | /* Memory manager */ | |
2745 | r = radeon_bo_init(rdev); | |
2746 | if (r) | |
2747 | return r; | |
45f9a39b | 2748 | |
bcc1c2a1 AD |
2749 | r = radeon_irq_kms_init(rdev); |
2750 | if (r) | |
2751 | return r; | |
2752 | ||
2753 | rdev->cp.ring_obj = NULL; | |
2754 | r600_ring_init(rdev, 1024 * 1024); | |
2755 | ||
2756 | rdev->ih.ring_obj = NULL; | |
2757 | r600_ih_ring_init(rdev, 64 * 1024); | |
2758 | ||
2759 | r = r600_pcie_gart_init(rdev); | |
2760 | if (r) | |
2761 | return r; | |
0fcdb61e | 2762 | |
148a03bc | 2763 | rdev->accel_working = true; |
bcc1c2a1 AD |
2764 | r = evergreen_startup(rdev); |
2765 | if (r) { | |
fe251e2f AD |
2766 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2767 | r700_cp_fini(rdev); | |
fe251e2f | 2768 | r600_irq_fini(rdev); |
724c80e1 | 2769 | radeon_wb_fini(rdev); |
fe251e2f | 2770 | radeon_irq_kms_fini(rdev); |
0fcdb61e | 2771 | evergreen_pcie_gart_fini(rdev); |
bcc1c2a1 AD |
2772 | rdev->accel_working = false; |
2773 | } | |
2774 | if (rdev->accel_working) { | |
2775 | r = radeon_ib_pool_init(rdev); | |
2776 | if (r) { | |
2777 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | |
2778 | rdev->accel_working = false; | |
2779 | } | |
2780 | r = r600_ib_test(rdev); | |
2781 | if (r) { | |
2782 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | |
2783 | rdev->accel_working = false; | |
2784 | } | |
2785 | } | |
2786 | return 0; | |
2787 | } | |
2788 | ||
2789 | void evergreen_fini(struct radeon_device *rdev) | |
2790 | { | |
d7ccd8fc | 2791 | evergreen_blit_fini(rdev); |
45f9a39b | 2792 | r700_cp_fini(rdev); |
bcc1c2a1 | 2793 | r600_irq_fini(rdev); |
724c80e1 | 2794 | radeon_wb_fini(rdev); |
bcc1c2a1 | 2795 | radeon_irq_kms_fini(rdev); |
bcc1c2a1 | 2796 | evergreen_pcie_gart_fini(rdev); |
bcc1c2a1 AD |
2797 | radeon_gem_fini(rdev); |
2798 | radeon_fence_driver_fini(rdev); | |
bcc1c2a1 AD |
2799 | radeon_agp_fini(rdev); |
2800 | radeon_bo_fini(rdev); | |
2801 | radeon_atombios_fini(rdev); | |
2802 | kfree(rdev->bios); | |
2803 | rdev->bios = NULL; | |
2804 | radeon_dummy_page_fini(rdev); | |
2805 | } |