/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"

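/*
 * The MODULE_FIRMWARE() lines below only record the blob names as module
 * metadata (e.g. for initramfs tooling); the images are actually fetched
 * at init time with request_firmware() in cik_init_microcode().
 */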
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");

extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
extern void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
static void cik_fini_pg(struct radeon_device *rdev);
static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable);

/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/* get temperature in millidegrees */
int kv_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

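	/* raw SMC address; 0xC0300E0C has no named define in cikd.h.
	 * Judging by the conversion below it appears to report temperature
	 * in eighths of a degree C with a -49 C offset (assumption, not
	 * taken from documentation). */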
	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/*
 * Indirect register accessors
 */
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	r = RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	WREG32(PCIE_DATA, v);
	(void)RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}

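/*
 * RLC save/restore register lists. Each entry pairs
 * ((SE/instance select) << 16) | (register byte offset >> 2) with a
 * scratch dword for the RLC to fill in; the bare count words (0x3, 0x5)
 * appear to introduce the trailing groups of indirectly accessed
 * registers.
 */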
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x829c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x869c >> 2), 0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2), 0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0xae00 << 16) | (0xc900 >> 2), 0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0xae00 << 16) | (0xc904 >> 2), 0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0xae00 << 16) | (0xc908 >> 2), 0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0xae00 << 16) | (0xc90c >> 2), 0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0xae00 << 16) | (0xc910 >> 2), 0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2), 0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2), 0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2), 0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2), 0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2), 0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc770 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc774 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc778 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc77c >> 2), 0x00000000,
	(0x0400 << 16) | (0xc780 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc784 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc788 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc78c >> 2), 0x00000000,
	(0x0400 << 16) | (0xc798 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc79c >> 2), 0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2), 0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x970c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x971c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0xae00 << 16) | (0x31068 >> 2), 0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2), 0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2), 0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x829c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x869c >> 2), 0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2), 0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2), 0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2), 0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2), 0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2), 0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2), 0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc770 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc774 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc798 >> 2), 0x00000000,
	(0x0400 << 16) | (0xc79c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x970c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x971c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2), 0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2), 0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2), 0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2), 0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};

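/*
 * The "golden" tables below are {reg, and_mask, or_value} triples consumed
 * by radeon_program_register_sequence(): the and_mask bits are cleared in
 * the current register value and or_value is OR'd in (an and_mask of
 * 0xffffffff writes or_value directly).
 */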
static const u32 bonaire_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 bonaire_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 bonaire_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x3350, 0x000c0fc0, 0x00040200,
	0x9a10, 0x00010000, 0x00058208,
	0x3c000, 0xffff1fff, 0x00140000,
	0x3c200, 0xfdfc0fff, 0x00000100,
	0x3c234, 0x40000000, 0x40000200,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x220c, 0x00007fb6, 0x0021a1b1,
	0x2210, 0x00007fb6, 0x002021b1,
	0x2180, 0x00007fb6, 0x00002191,
	0x2218, 0x00007fb6, 0x002121b1,
	0x221c, 0x00007fb6, 0x002021b1,
	0x21dc, 0x00007fb6, 0x00002191,
	0x21e0, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000003f, 0x00000007,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0x9100, 0x03000000, 0x0362c688,
	0x8c00, 0x000000ff, 0x00000001,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac0c, 0xffffffff, 0x00001032
};

static const u32 bonaire_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0xc0000100,
	0x3c2c8, 0xffffffff, 0xc0000100,
	0x3c2c4, 0xffffffff, 0xc0000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xc060000c,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 spectre_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 spectre_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 spectre_golden_registers[] =
{
	0x3c000, 0xffff1fff, 0x96940200,
	0x3c00c, 0xffff0001, 0xff000000,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffc, 0x00020200,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x2f48, 0x73773777, 0x12010001,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x28354, 0x0000003f, 0x00000000,
	0x3e78, 0x00000001, 0x00000002,
	0x913c, 0xffff03df, 0x00000004,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000008ff, 0x00000800,
	0x9508, 0x00010000, 0x00010000,
	0xac0c, 0xffffffff, 0x54763210,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x30934, 0xffffffff, 0x00000001
};

static const u32 spectre_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xc060000c,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 kalindi_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 kalindi_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};

static const u32 kalindi_golden_registers[] =
{
	0x3c000, 0xffffdfff, 0x6e944040,
	0x55e4, 0xff607fff, 0xfc000100,
	0x3c220, 0xff000fff, 0x00000100,
	0x3c224, 0xff000fff, 0x00000100,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x000fffff, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ffcfff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000000ff, 0x00000003,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d4, 0x0000001f, 0x00000010,
	0x30934, 0xffffffff, 0x00000000
};

static const u32 kalindi_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xc060000c,
	0x224, 0xc0000fff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static const u32 hawaii_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 hawaii_golden_common_registers[] =
{
	0x30800, 0xffffffff, 0xe0000000,
	0x28350, 0xffffffff, 0x3a00161a,
	0x28354, 0xffffffff, 0x0000002e,
	0x9a10, 0xffffffff, 0x00018208,
	0x98f8, 0xffffffff, 0x12011003
};

static const u32 hawaii_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x9a10, 0x00010000, 0x00058208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x2120, 0x0000007f, 0x0000001b,
	0x21dc, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0xc770, 0x00000f00, 0x00000800,
	0xc774, 0x00000f00, 0x00000800,
	0xc798, 0x00ffffff, 0x00ff7fbf,
	0xc79c, 0x00ffffff, 0x00ff7faf,
	0x8c00, 0x000000ff, 0x00000800,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xae00, 0x00100000, 0x000ff07c,
	0xac14, 0x000003ff, 0x0000000f,
	0xac10, 0xffffffff, 0x7564fdec,
	0xac0c, 0xffffffff, 0x3120b9a8,
	0xac08, 0x20000000, 0x0f9c0000
};

static const u32 hawaii_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffd,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00200100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c0c0, 0xffffffff, 0x00010000,
	0x3c0c4, 0xffffffff, 0x00030002,
	0x3c0c8, 0xffffffff, 0x00040007,
	0x3c0cc, 0xffffffff, 0x00060005,
	0x3c0d0, 0xffffffff, 0x00090008,
	0x3c0d4, 0xffffffff, 0x00010000,
	0x3c0d8, 0xffffffff, 0x00030002,
	0x3c0dc, 0xffffffff, 0x00040007,
	0x3c0e0, 0xffffffff, 0x00060005,
	0x3c0e4, 0xffffffff, 0x00090008,
	0x3c0e8, 0xffffffff, 0x00010000,
	0x3c0ec, 0xffffffff, 0x00030002,
	0x3c0f0, 0xffffffff, 0x00040007,
	0x3c0f4, 0xffffffff, 0x00060005,
	0x3c0f8, 0xffffffff, 0x00090008,
	0xc318, 0xffffffff, 0x00020200,
	0x3350, 0xffffffff, 0x00000200,
	0x15c0, 0xffffffff, 0x00000400,
	0x55e8, 0xffffffff, 0x00000000,
	0x2f50, 0xffffffff, 0x00000902,
	0x3c000, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xc060000c,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};

static void cik_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_BONAIRE:
		radeon_program_register_sequence(rdev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		radeon_program_register_sequence(rdev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	case CHIP_HAWAII:
		radeon_program_register_sequence(rdev,
						 hawaii_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_common_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_spm_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
		break;
	default:
		break;
	}
}

/**
 * cik_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
u32 cik_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;

	if (rdev->flags & RADEON_IS_IGP) {
		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
			return reference_clock / 4;
	}
	return reference_clock;
}

/**
 * cik_mm_rdoorbell - read a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
{
	if (index < rdev->doorbell.num_doorbells) {
		return readl(rdev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * cik_mm_wdoorbell - write a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
{
	if (index < rdev->doorbell.num_doorbells) {
		writel(v, rdev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

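/*
 * MC init tables: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs that
 * ci_mc_load_microcode() writes out verbatim before uploading the MC ucode.
 */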
#define BONAIRE_IO_MC_REGS_SIZE 36

static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};

#define HAWAII_IO_MC_REGS_SIZE 22

static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
{
	{0x0000007d, 0x40000000},
	{0x0000007e, 0x40180304},
	{0x0000007f, 0x0000ff00},
	{0x00000081, 0x00000000},
	{0x00000083, 0x00000800},
	{0x00000086, 0x00000000},
	{0x00000087, 0x00000100},
	{0x00000088, 0x00020100},
	{0x00000089, 0x00000000},
	{0x0000008b, 0x00040000},
	{0x0000008c, 0x00000100},
	{0x0000008e, 0xff010000},
	{0x00000090, 0xffffefff},
	{0x00000091, 0xfff3efff},
	{0x00000092, 0xfff3efbf},
	{0x00000093, 0xf7ffffff},
	{0x00000094, 0xffffff7f},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x0000009f, 0x00c79000}
};

/**
 * cik_srbm_select - select specific register instances
 *
 * @rdev: radeon_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
static void cik_srbm_select(struct radeon_device *rdev,
			    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
			     MEID(me & 0x3) |
			     VMID(vmid & 0xf) |
			     QUEUEID(queue & 0x7));
	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
}

/* ucode loading */
/**
 * ci_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
int ci_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 running;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BONAIRE:
		io_mc_regs = (u32 *)&bonaire_io_mc_regs;
		ucode_size = CIK_MC_UCODE_SIZE;
		regs_size = BONAIRE_IO_MC_REGS_SIZE;
		break;
	case CHIP_HAWAII:
		io_mc_regs = (u32 *)&hawaii_io_mc_regs;
		ucode_size = HAWAII_MC_UCODE_SIZE;
		regs_size = HAWAII_IO_MC_REGS_SIZE;
		break;
	default:
		return -EINVAL;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}
	}

	return 0;
}
1771
1772 /**
1773 * cik_init_microcode - load ucode images from disk
1774 *
1775 * @rdev: radeon_device pointer
1776 *
1777 * Use the firmware interface to load the ucode images into
1778 * the driver (not loaded into hw).
1779 * Returns 0 on success, error on failure.
1780 */
1781 static int cik_init_microcode(struct radeon_device *rdev)
1782 {
1783 const char *chip_name;
1784 size_t pfp_req_size, me_req_size, ce_req_size,
1785 mec_req_size, rlc_req_size, mc_req_size = 0,
1786 sdma_req_size, smc_req_size = 0;
1787 char fw_name[30];
1788 int err;
1789
1790 DRM_DEBUG("\n");
1791
1792 switch (rdev->family) {
1793 case CHIP_BONAIRE:
1794 chip_name = "BONAIRE";
1795 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1796 me_req_size = CIK_ME_UCODE_SIZE * 4;
1797 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1798 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1799 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1800 mc_req_size = CIK_MC_UCODE_SIZE * 4;
1801 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1802 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1803 break;
1804 case CHIP_HAWAII:
1805 chip_name = "HAWAII";
1806 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1807 me_req_size = CIK_ME_UCODE_SIZE * 4;
1808 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1809 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1810 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1811 mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
1812 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1813 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
1814 break;
1815 case CHIP_KAVERI:
1816 chip_name = "KAVERI";
1817 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1818 me_req_size = CIK_ME_UCODE_SIZE * 4;
1819 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1820 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1821 rlc_req_size = KV_RLC_UCODE_SIZE * 4;
1822 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1823 break;
1824 case CHIP_KABINI:
1825 chip_name = "KABINI";
1826 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1827 me_req_size = CIK_ME_UCODE_SIZE * 4;
1828 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1829 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1830 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
1831 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1832 break;
1833 default: BUG();
1834 }
1835
	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

1850 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1851 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1852 if (err)
1853 goto out;
1854 if (rdev->me_fw->size != me_req_size) {
1855 printk(KERN_ERR
1856 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
1857 rdev->me_fw->size, fw_name);
1858 err = -EINVAL;
goto out;
1859 }
1860
1861 snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1862 err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1863 if (err)
1864 goto out;
1865 if (rdev->ce_fw->size != ce_req_size) {
1866 printk(KERN_ERR
1867 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
1868 rdev->ce_fw->size, fw_name);
1869 err = -EINVAL;
goto out;
1870 }
1871
1872 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
1873 err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
1874 if (err)
1875 goto out;
1876 if (rdev->mec_fw->size != mec_req_size) {
1877 printk(KERN_ERR
1878 "cik_cp: Bogus length %zu in firmware \"%s\"\n",
1879 rdev->mec_fw->size, fw_name);
1880 err = -EINVAL;
goto out;
1881 }
1882
1883 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1884 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1885 if (err)
1886 goto out;
1887 if (rdev->rlc_fw->size != rlc_req_size) {
1888 printk(KERN_ERR
1889 "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
1890 rdev->rlc_fw->size, fw_name);
1891 err = -EINVAL;
goto out;
1892 }
1893
1894 snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
1895 err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
1896 if (err)
1897 goto out;
1898 if (rdev->sdma_fw->size != sdma_req_size) {
1899 printk(KERN_ERR
1900 "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
1901 rdev->sdma_fw->size, fw_name);
1902 err = -EINVAL;
goto out;
1903 }
1904
1905 /* No SMC, MC ucode on APUs */
1906 if (!(rdev->flags & RADEON_IS_IGP)) {
1907 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1908 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1909 if (err)
1910 goto out;
1911 if (rdev->mc_fw->size != mc_req_size) {
1912 printk(KERN_ERR
1913 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
1914 rdev->mc_fw->size, fw_name);
1915 err = -EINVAL;
goto out;
1916 }
1917
1918 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1919 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1920 if (err) {
1921 printk(KERN_ERR
1922 "smc: error loading firmware \"%s\"\n",
1923 fw_name);
1924 release_firmware(rdev->smc_fw);
1925 rdev->smc_fw = NULL;
1926 err = 0;
1927 } else if (rdev->smc_fw->size != smc_req_size) {
1928 printk(KERN_ERR
1929 "cik_smc: Bogus length %zu in firmware \"%s\"\n",
1930 rdev->smc_fw->size, fw_name);
1931 err = -EINVAL;
1932 }
1933 }
1934
1935 out:
1936 if (err) {
1937 if (err != -EINVAL)
1938 printk(KERN_ERR
1939 "cik_cp: Failed to load firmware \"%s\"\n",
1940 fw_name);
1941 release_firmware(rdev->pfp_fw);
1942 rdev->pfp_fw = NULL;
1943 release_firmware(rdev->me_fw);
1944 rdev->me_fw = NULL;
1945 release_firmware(rdev->ce_fw);
1946 rdev->ce_fw = NULL;
1947 release_firmware(rdev->rlc_fw);
1948 rdev->rlc_fw = NULL;
1949 release_firmware(rdev->mc_fw);
1950 rdev->mc_fw = NULL;
1951 release_firmware(rdev->smc_fw);
1952 rdev->smc_fw = NULL;
1953 }
1954 return err;
1955 }
1956
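/*
 * Editor's note: request_firmware() resolves the "radeon/%s_*.bin"
 * names built above against the system firmware search path
 * (typically /lib/firmware), so for CHIP_BONAIRE the PFP image is
 * loaded from "radeon/BONAIRE_pfp.bin", the ME image from
 * "radeon/BONAIRE_me.bin", and so on for each block requested above.
 */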
1957 /*
1958 * Core functions
1959 */
1960 /**
1961 * cik_tiling_mode_table_init - init the hw tiling table
1962 *
1963 * @rdev: radeon_device pointer
1964 *
1965 * Starting with SI, the tiling setup is done globally in a
1966 * set of 32 tiling modes. Rather than selecting each set of
1967 * parameters per surface as on older asics, we just select
1968 * which index in the tiling table we want to use, and the
1969 * surface uses those parameters (CIK).
1970 */
1971 static void cik_tiling_mode_table_init(struct radeon_device *rdev)
1972 {
1973 const u32 num_tile_mode_states = 32;
1974 const u32 num_secondary_tile_mode_states = 16;
1975 u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
1976 u32 num_pipe_configs;
1977 u32 num_rbs = rdev->config.cik.max_backends_per_se *
1978 rdev->config.cik.max_shader_engines;
1979
1980 switch (rdev->config.cik.mem_row_size_in_kb) {
1981 case 1:
1982 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
1983 break;
1984 case 2:
1985 default:
1986 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
1987 break;
1988 case 4:
1989 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
1990 break;
1991 }
1992
1993 num_pipe_configs = rdev->config.cik.max_tile_pipes;
1994 if (num_pipe_configs > 8)
1995 num_pipe_configs = 16;
1996
1997 if (num_pipe_configs == 16) {
1998 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1999 switch (reg_offset) {
2000 case 0:
2001 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2002 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2003 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2004 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2005 break;
2006 case 1:
2007 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2008 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2009 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2010 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2011 break;
2012 case 2:
2013 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2014 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2015 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2016 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2017 break;
2018 case 3:
2019 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2020 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2021 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2022 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2023 break;
2024 case 4:
2025 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2026 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2027 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2028 TILE_SPLIT(split_equal_to_row_size));
2029 break;
2030 case 5:
2031 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2032 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2033 break;
2034 case 6:
2035 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2036 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2037 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2038 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2039 break;
2040 case 7:
2041 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2042 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2043 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2044 TILE_SPLIT(split_equal_to_row_size));
2045 break;
2046 case 8:
2047 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2048 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2049 break;
2050 case 9:
2051 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2052 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2053 break;
2054 case 10:
2055 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2056 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2057 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2058 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2059 break;
2060 case 11:
2061 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2062 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2063 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2064 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2065 break;
2066 case 12:
2067 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2068 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2069 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2070 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2071 break;
2072 case 13:
2073 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2074 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2075 break;
2076 case 14:
2077 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2078 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2079 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2080 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2081 break;
2082 case 16:
2083 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2084 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2085 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2086 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2087 break;
2088 case 17:
2089 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2090 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2091 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2092 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2093 break;
2094 case 27:
2095 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2096 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2097 break;
2098 case 28:
2099 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2100 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2101 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2102 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2103 break;
2104 case 29:
2105 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2106 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2107 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
2108 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2109 break;
2110 case 30:
2111 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2112 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2113 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2114 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2115 break;
2116 default:
2117 gb_tile_moden = 0;
2118 break;
2119 }
2120 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2121 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2122 }
2123 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2124 switch (reg_offset) {
2125 case 0:
2126 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2127 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2128 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2129 NUM_BANKS(ADDR_SURF_16_BANK));
2130 break;
2131 case 1:
2132 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2133 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2134 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2135 NUM_BANKS(ADDR_SURF_16_BANK));
2136 break;
2137 case 2:
2138 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2139 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2140 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2141 NUM_BANKS(ADDR_SURF_16_BANK));
2142 break;
2143 case 3:
2144 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2145 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2146 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2147 NUM_BANKS(ADDR_SURF_16_BANK));
2148 break;
2149 case 4:
2150 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2151 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2152 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2153 NUM_BANKS(ADDR_SURF_8_BANK));
2154 break;
2155 case 5:
2156 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2157 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2158 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2159 NUM_BANKS(ADDR_SURF_4_BANK));
2160 break;
2161 case 6:
2162 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2163 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2164 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2165 NUM_BANKS(ADDR_SURF_2_BANK));
2166 break;
2167 case 8:
2168 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2169 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2170 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2171 NUM_BANKS(ADDR_SURF_16_BANK));
2172 break;
2173 case 9:
2174 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2175 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2176 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2177 NUM_BANKS(ADDR_SURF_16_BANK));
2178 break;
2179 case 10:
2180 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2181 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2182 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2183 NUM_BANKS(ADDR_SURF_16_BANK));
2184 break;
2185 case 11:
2186 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2187 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2188 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2189 NUM_BANKS(ADDR_SURF_8_BANK));
2190 break;
2191 case 12:
2192 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2193 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2194 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2195 NUM_BANKS(ADDR_SURF_4_BANK));
2196 break;
2197 case 13:
2198 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2199 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2200 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2201 NUM_BANKS(ADDR_SURF_2_BANK));
2202 break;
2203 case 14:
2204 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2205 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2206 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2207 NUM_BANKS(ADDR_SURF_2_BANK));
2208 break;
2209 default:
2210 gb_tile_moden = 0;
2211 break;
2212 }
/* cache for userspace queries, matching the other pipe-config branches */
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2213 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2214 }
2215 } else if (num_pipe_configs == 8) {
2216 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2217 switch (reg_offset) {
2218 case 0:
2219 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2220 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2221 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2222 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2223 break;
2224 case 1:
2225 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2226 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2227 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2228 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2229 break;
2230 case 2:
2231 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2232 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2233 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2234 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2235 break;
2236 case 3:
2237 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2238 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2239 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2240 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2241 break;
2242 case 4:
2243 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2244 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2245 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2246 TILE_SPLIT(split_equal_to_row_size));
2247 break;
2248 case 5:
2249 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2250 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2251 break;
2252 case 6:
2253 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2254 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2255 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2256 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2257 break;
2258 case 7:
2259 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2260 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2261 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2262 TILE_SPLIT(split_equal_to_row_size));
2263 break;
2264 case 8:
2265 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2266 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2267 break;
2268 case 9:
2269 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2270 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2271 break;
2272 case 10:
2273 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2274 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2275 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2276 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2277 break;
2278 case 11:
2279 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2280 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2281 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2282 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2283 break;
2284 case 12:
2285 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2286 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2287 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2288 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2289 break;
2290 case 13:
2291 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2292 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2293 break;
2294 case 14:
2295 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2296 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2297 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2298 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2299 break;
2300 case 16:
2301 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2302 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2303 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2304 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2305 break;
2306 case 17:
2307 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2308 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2309 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2310 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2311 break;
2312 case 27:
2313 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2314 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2315 break;
2316 case 28:
2317 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2318 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2319 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2320 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2321 break;
2322 case 29:
2323 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2324 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2325 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2326 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2327 break;
2328 case 30:
2329 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2330 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2331 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2332 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2333 break;
2334 default:
2335 gb_tile_moden = 0;
2336 break;
2337 }
2338 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2339 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2340 }
2341 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2342 switch (reg_offset) {
2343 case 0:
2344 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2345 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2346 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2347 NUM_BANKS(ADDR_SURF_16_BANK));
2348 break;
2349 case 1:
2350 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2351 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2352 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2353 NUM_BANKS(ADDR_SURF_16_BANK));
2354 break;
2355 case 2:
2356 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2357 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2358 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2359 NUM_BANKS(ADDR_SURF_16_BANK));
2360 break;
2361 case 3:
2362 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2363 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2364 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2365 NUM_BANKS(ADDR_SURF_16_BANK));
2366 break;
2367 case 4:
2368 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2369 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2370 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2371 NUM_BANKS(ADDR_SURF_8_BANK));
2372 break;
2373 case 5:
2374 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2375 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2376 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2377 NUM_BANKS(ADDR_SURF_4_BANK));
2378 break;
2379 case 6:
2380 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2381 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2382 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2383 NUM_BANKS(ADDR_SURF_2_BANK));
2384 break;
2385 case 8:
2386 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2387 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2388 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2389 NUM_BANKS(ADDR_SURF_16_BANK));
2390 break;
2391 case 9:
2392 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2393 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2394 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2395 NUM_BANKS(ADDR_SURF_16_BANK));
2396 break;
2397 case 10:
2398 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2399 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2400 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2401 NUM_BANKS(ADDR_SURF_16_BANK));
2402 break;
2403 case 11:
2404 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2405 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2406 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2407 NUM_BANKS(ADDR_SURF_16_BANK));
2408 break;
2409 case 12:
2410 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2411 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2412 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2413 NUM_BANKS(ADDR_SURF_8_BANK));
2414 break;
2415 case 13:
2416 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2417 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2418 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2419 NUM_BANKS(ADDR_SURF_4_BANK));
2420 break;
2421 case 14:
2422 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2423 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2424 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2425 NUM_BANKS(ADDR_SURF_2_BANK));
2426 break;
2427 default:
2428 gb_tile_moden = 0;
2429 break;
2430 }
2431 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2432 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2433 }
2434 } else if (num_pipe_configs == 4) {
2435 if (num_rbs == 4) {
2436 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2437 switch (reg_offset) {
2438 case 0:
2439 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2440 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2441 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2442 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2443 break;
2444 case 1:
2445 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2446 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2447 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2448 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2449 break;
2450 case 2:
2451 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2452 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2453 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2454 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2455 break;
2456 case 3:
2457 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2458 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2459 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2460 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2461 break;
2462 case 4:
2463 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2464 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2465 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2466 TILE_SPLIT(split_equal_to_row_size));
2467 break;
2468 case 5:
2469 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2470 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2471 break;
2472 case 6:
2473 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2474 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2475 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2476 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2477 break;
2478 case 7:
2479 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2480 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2481 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2482 TILE_SPLIT(split_equal_to_row_size));
2483 break;
2484 case 8:
2485 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2486 PIPE_CONFIG(ADDR_SURF_P4_16x16));
2487 break;
2488 case 9:
2489 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2490 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2491 break;
2492 case 10:
2493 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2494 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2495 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2496 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2497 break;
2498 case 11:
2499 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2500 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2501 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2502 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2503 break;
2504 case 12:
2505 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2506 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2507 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2508 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2509 break;
2510 case 13:
2511 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2512 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2513 break;
2514 case 14:
2515 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2516 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2517 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2518 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2519 break;
2520 case 16:
2521 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2522 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2523 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2524 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2525 break;
2526 case 17:
2527 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2528 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2529 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2530 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2531 break;
2532 case 27:
2533 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2534 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2535 break;
2536 case 28:
2537 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2538 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2539 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2540 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2541 break;
2542 case 29:
2543 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2544 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2545 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2546 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2547 break;
2548 case 30:
2549 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2550 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2551 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2552 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2553 break;
2554 default:
2555 gb_tile_moden = 0;
2556 break;
2557 }
2558 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2559 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2560 }
2561 } else if (num_rbs < 4) {
2562 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2563 switch (reg_offset) {
2564 case 0:
2565 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2566 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2567 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2568 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2569 break;
2570 case 1:
2571 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2572 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2573 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2574 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2575 break;
2576 case 2:
2577 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2578 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2579 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2580 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2581 break;
2582 case 3:
2583 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2584 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2585 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2586 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2587 break;
2588 case 4:
2589 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2590 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2591 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2592 TILE_SPLIT(split_equal_to_row_size));
2593 break;
2594 case 5:
2595 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2596 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2597 break;
2598 case 6:
2599 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2600 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2601 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2602 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2603 break;
2604 case 7:
2605 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2606 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2607 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2608 TILE_SPLIT(split_equal_to_row_size));
2609 break;
2610 case 8:
2611 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2612 PIPE_CONFIG(ADDR_SURF_P4_8x16));
2613 break;
2614 case 9:
2615 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2616 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2617 break;
2618 case 10:
2619 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2620 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2621 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2622 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2623 break;
2624 case 11:
2625 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2626 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2627 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2628 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2629 break;
2630 case 12:
2631 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2632 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2633 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2634 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2635 break;
2636 case 13:
2637 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2638 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2639 break;
2640 case 14:
2641 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2642 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2643 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2644 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2645 break;
2646 case 16:
2647 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2648 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2649 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2650 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2651 break;
2652 case 17:
2653 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2654 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2655 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2656 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2657 break;
2658 case 27:
2659 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2660 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2661 break;
2662 case 28:
2663 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2664 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2665 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2666 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2667 break;
2668 case 29:
2669 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2670 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2671 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2672 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2673 break;
2674 case 30:
2675 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2676 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2677 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2678 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2679 break;
2680 default:
2681 gb_tile_moden = 0;
2682 break;
2683 }
2684 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2685 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2686 }
2687 }
2688 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2689 switch (reg_offset) {
2690 case 0:
2691 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2692 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2693 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2694 NUM_BANKS(ADDR_SURF_16_BANK));
2695 break;
2696 case 1:
2697 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2698 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2699 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2700 NUM_BANKS(ADDR_SURF_16_BANK));
2701 break;
2702 case 2:
2703 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2704 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2705 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2706 NUM_BANKS(ADDR_SURF_16_BANK));
2707 break;
2708 case 3:
2709 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2710 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2711 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2712 NUM_BANKS(ADDR_SURF_16_BANK));
2713 break;
2714 case 4:
2715 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2716 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2717 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2718 NUM_BANKS(ADDR_SURF_16_BANK));
2719 break;
2720 case 5:
2721 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2722 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2723 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2724 NUM_BANKS(ADDR_SURF_8_BANK));
2725 break;
2726 case 6:
2727 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2728 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2729 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2730 NUM_BANKS(ADDR_SURF_4_BANK));
2731 break;
2732 case 8:
2733 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2734 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2735 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2736 NUM_BANKS(ADDR_SURF_16_BANK));
2737 break;
2738 case 9:
2739 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2740 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2741 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2742 NUM_BANKS(ADDR_SURF_16_BANK));
2743 break;
2744 case 10:
2745 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2746 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2747 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2748 NUM_BANKS(ADDR_SURF_16_BANK));
2749 break;
2750 case 11:
2751 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2752 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2753 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2754 NUM_BANKS(ADDR_SURF_16_BANK));
2755 break;
2756 case 12:
2757 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2758 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2759 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2760 NUM_BANKS(ADDR_SURF_16_BANK));
2761 break;
2762 case 13:
2763 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2764 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2765 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2766 NUM_BANKS(ADDR_SURF_8_BANK));
2767 break;
2768 case 14:
2769 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2770 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2771 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2772 NUM_BANKS(ADDR_SURF_4_BANK));
2773 break;
2774 default:
2775 gb_tile_moden = 0;
2776 break;
2777 }
2778 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2779 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2780 }
2781 } else if (num_pipe_configs == 2) {
2782 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2783 switch (reg_offset) {
2784 case 0:
2785 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2786 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2787 PIPE_CONFIG(ADDR_SURF_P2) |
2788 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2789 break;
2790 case 1:
2791 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2792 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2793 PIPE_CONFIG(ADDR_SURF_P2) |
2794 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2795 break;
2796 case 2:
2797 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2798 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2799 PIPE_CONFIG(ADDR_SURF_P2) |
2800 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2801 break;
2802 case 3:
2803 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2804 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2805 PIPE_CONFIG(ADDR_SURF_P2) |
2806 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2807 break;
2808 case 4:
2809 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2810 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2811 PIPE_CONFIG(ADDR_SURF_P2) |
2812 TILE_SPLIT(split_equal_to_row_size));
2813 break;
2814 case 5:
2815 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2816 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2817 break;
2818 case 6:
2819 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2820 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2821 PIPE_CONFIG(ADDR_SURF_P2) |
2822 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2823 break;
2824 case 7:
2825 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2826 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2827 PIPE_CONFIG(ADDR_SURF_P2) |
2828 TILE_SPLIT(split_equal_to_row_size));
2829 break;
2830 case 8:
2831 gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
2832 break;
2833 case 9:
2834 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2835 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2836 break;
2837 case 10:
2838 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2839 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2840 PIPE_CONFIG(ADDR_SURF_P2) |
2841 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2842 break;
2843 case 11:
2844 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2845 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2846 PIPE_CONFIG(ADDR_SURF_P2) |
2847 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2848 break;
2849 case 12:
2850 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2851 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2852 PIPE_CONFIG(ADDR_SURF_P2) |
2853 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2854 break;
2855 case 13:
2856 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2857 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2858 break;
2859 case 14:
2860 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2861 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2862 PIPE_CONFIG(ADDR_SURF_P2) |
2863 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2864 break;
2865 case 16:
2866 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2867 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2868 PIPE_CONFIG(ADDR_SURF_P2) |
2869 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2870 break;
2871 case 17:
2872 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2873 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2874 PIPE_CONFIG(ADDR_SURF_P2) |
2875 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2876 break;
2877 case 27:
2878 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2879 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2880 break;
2881 case 28:
2882 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2883 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2884 PIPE_CONFIG(ADDR_SURF_P2) |
2885 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2886 break;
2887 case 29:
2888 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2889 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2890 PIPE_CONFIG(ADDR_SURF_P2) |
2891 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2892 break;
2893 case 30:
2894 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2895 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2896 PIPE_CONFIG(ADDR_SURF_P2) |
2897 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2898 break;
2899 default:
2900 gb_tile_moden = 0;
2901 break;
2902 }
2903 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2904 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2905 }
2906 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2907 switch (reg_offset) {
2908 case 0:
2909 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2910 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2911 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2912 NUM_BANKS(ADDR_SURF_16_BANK));
2913 break;
2914 case 1:
2915 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2916 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2917 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2918 NUM_BANKS(ADDR_SURF_16_BANK));
2919 break;
2920 case 2:
2921 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2922 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2923 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2924 NUM_BANKS(ADDR_SURF_16_BANK));
2925 break;
2926 case 3:
2927 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2928 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2929 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2930 NUM_BANKS(ADDR_SURF_16_BANK));
2931 break;
2932 case 4:
2933 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2934 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2935 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2936 NUM_BANKS(ADDR_SURF_16_BANK));
2937 break;
2938 case 5:
2939 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2940 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2941 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2942 NUM_BANKS(ADDR_SURF_16_BANK));
2943 break;
2944 case 6:
2945 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2946 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2947 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2948 NUM_BANKS(ADDR_SURF_8_BANK));
2949 break;
2950 case 8:
2951 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2952 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2953 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2954 NUM_BANKS(ADDR_SURF_16_BANK));
2955 break;
2956 case 9:
2957 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2958 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2959 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2960 NUM_BANKS(ADDR_SURF_16_BANK));
2961 break;
2962 case 10:
2963 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2964 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2965 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2966 NUM_BANKS(ADDR_SURF_16_BANK));
2967 break;
2968 case 11:
2969 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2970 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2971 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2972 NUM_BANKS(ADDR_SURF_16_BANK));
2973 break;
2974 case 12:
2975 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2976 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2977 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2978 NUM_BANKS(ADDR_SURF_16_BANK));
2979 break;
2980 case 13:
2981 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2982 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2983 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2984 NUM_BANKS(ADDR_SURF_16_BANK));
2985 break;
2986 case 14:
2987 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2988 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2989 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2990 NUM_BANKS(ADDR_SURF_8_BANK));
2991 break;
2992 default:
2993 gb_tile_moden = 0;
2994 break;
2995 }
2996 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2997 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2998 }
2999 } else
3000 DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
3001 }
3002
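/*
 * Editor's note: a consumer never programs tiling parameters
 * directly; a surface just selects an index into the table written
 * above. An illustrative lookup of the cached copy (index is a
 * hypothetical surface tiling index):
 *
 *	u32 mode = rdev->config.cik.tile_mode_array[index];
 *
 * tile_mode_array[] and macrotile_mode_array[] mirror what was
 * written to GB_TILE_MODE0..31 and GB_MACROTILE_MODE0..15, so the
 * table can be queried without register reads.
 */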
3003 /**
3004 * cik_select_se_sh - select which SE, SH to address
3005 *
3006 * @rdev: radeon_device pointer
3007 * @se_num: shader engine to address
3008 * @sh_num: sh block to address
3009 *
3010 * Select which SE, SH combinations to address. Certain
3011 * registers are instanced per SE or SH. 0xffffffff means
3012 * broadcast to all SEs or SHs (CIK).
3013 */
3014 static void cik_select_se_sh(struct radeon_device *rdev,
3015 u32 se_num, u32 sh_num)
3016 {
3017 u32 data = INSTANCE_BROADCAST_WRITES;
3018
3019 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
3020 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
3021 else if (se_num == 0xffffffff)
3022 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
3023 else if (sh_num == 0xffffffff)
3024 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
3025 else
3026 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
3027 WREG32(GRBM_GFX_INDEX, data);
3028 }
3029
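/*
 * Editor's note: the usual pattern built on cik_select_se_sh() (and
 * followed by cik_setup_rb() below) is select -> access -> restore
 * broadcast, e.g.:
 *
 *	cik_select_se_sh(rdev, se, sh);		// narrow to one instance
 *	data = RREG32(CC_RB_BACKEND_DISABLE);	// instanced access
 *	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);	// broadcast again
 *
 * GRBM_GFX_INDEX is persistent state, so skipping the final broadcast
 * restore would make later "global" accesses hit only the last
 * selected SE/SH.
 */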
3030 /**
3031 * cik_create_bitmask - create a bitmask
3032 *
3033 * @bit_width: length of the mask
3034 *
3035 * Create a variable length bit mask (CIK).
3036 * Returns the bitmask.
3037 */
3038 static u32 cik_create_bitmask(u32 bit_width)
3039 {
3040 u32 i, mask = 0;
3041
3042 for (i = 0; i < bit_width; i++) {
3043 mask <<= 1;
3044 mask |= 1;
3045 }
3046 return mask;
3047 }
3048
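/*
 * Editor's note: the loop above is equivalent to the closed form
 * (1U << bit_width) - 1 for bit_width < 32, e.g.
 * cik_create_bitmask(4) == 0xf and cik_create_bitmask(16) == 0xffff.
 */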
3049 /**
3050 * cik_get_rb_disabled - compute the disabled RB bitmask
3051 *
3052 * @rdev: radeon_device pointer
3053 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
3054 * @sh_per_se: number of SH blocks per SE for the asic
3056 *
3057 * Calculates the bitmask of disabled RBs (CIK).
3058 * Returns the disabled RB bitmask.
3059 */
3060 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3061 u32 max_rb_num_per_se,
3062 u32 sh_per_se)
3063 {
3064 u32 data, mask;
3065
3066 data = RREG32(CC_RB_BACKEND_DISABLE);
3067 if (data & 1)
3068 data &= BACKEND_DISABLE_MASK;
3069 else
3070 data = 0;
3071 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
3072
3073 data >>= BACKEND_DISABLE_SHIFT;
3074
3075 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3076
3077 return data & mask;
3078 }
3079
3080 /**
3081 * cik_setup_rb - setup the RBs on the asic
3082 *
3083 * @rdev: radeon_device pointer
3084 * @se_num: number of SEs (shader engines) for the asic
3085 * @sh_per_se: number of SH blocks per SE for the asic
3086 * @max_rb_num_per_se: max RBs (render backends) per SE for the asic
3087 *
3088 * Configures per-SE/SH RB registers (CIK).
3089 */
3090 static void cik_setup_rb(struct radeon_device *rdev,
3091 u32 se_num, u32 sh_per_se,
3092 u32 max_rb_num_per_se)
3093 {
3094 int i, j;
3095 u32 data, mask;
3096 u32 disabled_rbs = 0;
3097 u32 enabled_rbs = 0;
3098
3099 for (i = 0; i < se_num; i++) {
3100 for (j = 0; j < sh_per_se; j++) {
3101 cik_select_se_sh(rdev, i, j);
3102 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3103 if (rdev->family == CHIP_HAWAII)
3104 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3105 else
3106 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
3107 }
3108 }
3109 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3110
3111 mask = 1;
3112 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3113 if (!(disabled_rbs & mask))
3114 enabled_rbs |= mask;
3115 mask <<= 1;
3116 }
3117
3118 rdev->config.cik.backend_enable_mask = enabled_rbs;
3119
3120 for (i = 0; i < se_num; i++) {
3121 cik_select_se_sh(rdev, i, 0xffffffff);
3122 data = 0;
3123 for (j = 0; j < sh_per_se; j++) {
3124 switch (enabled_rbs & 3) {
3125 case 0:
3126 if (j == 0)
3127 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
3128 else
3129 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
3130 break;
3131 case 1:
3132 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
3133 break;
3134 case 2:
3135 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
3136 break;
3137 case 3:
3138 default:
3139 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
3140 break;
3141 }
3142 enabled_rbs >>= 2;
3143 }
3144 WREG32(PA_SC_RASTER_CONFIG, data);
3145 }
3146 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3147 }
3148
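/*
 * Editor's note: backend_enable_mask packs one bit per possible RB
 * across all SEs. As an illustration using the Bonaire limits set in
 * cik_gpu_init() below (2 SEs, 2 RBs per SE): a fully functional part
 * yields enabled_rbs = 0xf, while a part with one RB fused off clears
 * the corresponding bit before the mask is consumed above.
 */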
3149 /**
3150 * cik_gpu_init - setup the 3D engine
3151 *
3152 * @rdev: radeon_device pointer
3153 *
3154 * Configures the 3D engine and tiling configuration
3155 * registers so that the 3D engine is usable.
3156 */
3157 static void cik_gpu_init(struct radeon_device *rdev)
3158 {
3159 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
3160 u32 mc_shared_chmap, mc_arb_ramcfg;
3161 u32 hdp_host_path_cntl;
3162 u32 tmp;
3163 int i, j;
3164
3165 switch (rdev->family) {
3166 case CHIP_BONAIRE:
3167 rdev->config.cik.max_shader_engines = 2;
3168 rdev->config.cik.max_tile_pipes = 4;
3169 rdev->config.cik.max_cu_per_sh = 7;
3170 rdev->config.cik.max_sh_per_se = 1;
3171 rdev->config.cik.max_backends_per_se = 2;
3172 rdev->config.cik.max_texture_channel_caches = 4;
3173 rdev->config.cik.max_gprs = 256;
3174 rdev->config.cik.max_gs_threads = 32;
3175 rdev->config.cik.max_hw_contexts = 8;
3176
3177 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3178 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3179 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3180 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3181 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3182 break;
3183 case CHIP_HAWAII:
3184 rdev->config.cik.max_shader_engines = 4;
3185 rdev->config.cik.max_tile_pipes = 16;
3186 rdev->config.cik.max_cu_per_sh = 11;
3187 rdev->config.cik.max_sh_per_se = 1;
3188 rdev->config.cik.max_backends_per_se = 4;
3189 rdev->config.cik.max_texture_channel_caches = 16;
3190 rdev->config.cik.max_gprs = 256;
3191 rdev->config.cik.max_gs_threads = 32;
3192 rdev->config.cik.max_hw_contexts = 8;
3193
3194 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3195 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3196 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3197 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3198 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
3199 break;
3200 case CHIP_KAVERI:
3201 rdev->config.cik.max_shader_engines = 1;
3202 rdev->config.cik.max_tile_pipes = 4;
3203 if ((rdev->pdev->device == 0x1304) ||
3204 (rdev->pdev->device == 0x1305) ||
3205 (rdev->pdev->device == 0x130C) ||
3206 (rdev->pdev->device == 0x130F) ||
3207 (rdev->pdev->device == 0x1310) ||
3208 (rdev->pdev->device == 0x1311) ||
3209 (rdev->pdev->device == 0x131C)) {
3210 rdev->config.cik.max_cu_per_sh = 8;
3211 rdev->config.cik.max_backends_per_se = 2;
3212 } else if ((rdev->pdev->device == 0x1309) ||
3213 (rdev->pdev->device == 0x130A) ||
3214 (rdev->pdev->device == 0x130D) ||
3215 (rdev->pdev->device == 0x1313) ||
3216 (rdev->pdev->device == 0x131D)) {
3217 rdev->config.cik.max_cu_per_sh = 6;
3218 rdev->config.cik.max_backends_per_se = 2;
3219 } else if ((rdev->pdev->device == 0x1306) ||
3220 (rdev->pdev->device == 0x1307) ||
3221 (rdev->pdev->device == 0x130B) ||
3222 (rdev->pdev->device == 0x130E) ||
3223 (rdev->pdev->device == 0x1315) ||
3224 (rdev->pdev->device == 0x131B)) {
3225 rdev->config.cik.max_cu_per_sh = 4;
3226 rdev->config.cik.max_backends_per_se = 1;
3227 } else {
3228 rdev->config.cik.max_cu_per_sh = 3;
3229 rdev->config.cik.max_backends_per_se = 1;
3230 }
3231 rdev->config.cik.max_sh_per_se = 1;
3232 rdev->config.cik.max_texture_channel_caches = 4;
3233 rdev->config.cik.max_gprs = 256;
3234 rdev->config.cik.max_gs_threads = 16;
3235 rdev->config.cik.max_hw_contexts = 8;
3236
3237 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3238 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3239 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3240 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3241 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3242 break;
3243 case CHIP_KABINI:
3244 default:
3245 rdev->config.cik.max_shader_engines = 1;
3246 rdev->config.cik.max_tile_pipes = 2;
3247 rdev->config.cik.max_cu_per_sh = 2;
3248 rdev->config.cik.max_sh_per_se = 1;
3249 rdev->config.cik.max_backends_per_se = 1;
3250 rdev->config.cik.max_texture_channel_caches = 2;
3251 rdev->config.cik.max_gprs = 256;
3252 rdev->config.cik.max_gs_threads = 16;
3253 rdev->config.cik.max_hw_contexts = 8;
3254
3255 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3256 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3257 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3258 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3259 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3260 break;
3261 }
3262
3263 /* Initialize HDP */
3264 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3265 WREG32((0x2c14 + j), 0x00000000);
3266 WREG32((0x2c18 + j), 0x00000000);
3267 WREG32((0x2c1c + j), 0x00000000);
3268 WREG32((0x2c20 + j), 0x00000000);
3269 WREG32((0x2c24 + j), 0x00000000);
3270 }
3271
3272 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3273
3274 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
3275
3276 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
3277 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
3278
3279 rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
3280 rdev->config.cik.mem_max_burst_length_bytes = 256;
3281 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
3282 rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3283 if (rdev->config.cik.mem_row_size_in_kb > 4)
3284 rdev->config.cik.mem_row_size_in_kb = 4;
3285 /* XXX use MC settings? */
3286 rdev->config.cik.shader_engine_tile_size = 32;
3287 rdev->config.cik.num_gpus = 1;
3288 rdev->config.cik.multi_gpu_tile_size = 64;
3289
3290 /* fix up row size */
3291 gb_addr_config &= ~ROW_SIZE_MASK;
3292 switch (rdev->config.cik.mem_row_size_in_kb) {
3293 case 1:
3294 default:
3295 gb_addr_config |= ROW_SIZE(0);
3296 break;
3297 case 2:
3298 gb_addr_config |= ROW_SIZE(1);
3299 break;
3300 case 4:
3301 gb_addr_config |= ROW_SIZE(2);
3302 break;
3303 }
3304
3305 /* setup tiling info dword. gb_addr_config is not adequate since it does
3306 * not have bank info, so create a custom tiling dword.
3307 * bits 3:0 num_pipes
3308 * bits 7:4 num_banks
3309 * bits 11:8 group_size
3310 * bits 15:12 row_size
3311 */
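/* Editor's note: an illustrative decode of the dword built below,
 * matching the layout above (num_pipes is stored as a log2 code by
 * the switch that follows; the other fields are raw register codes):
 *
 *	num_pipes  = 1 << (tile_config & 0xf);
 *	num_banks  = (tile_config >> 4) & 0xf;	// NOOFBANK code
 *	group_size = (tile_config >> 8) & 0xf;	// pipe interleave code
 *	row_size   = (tile_config >> 12) & 0xf;	// ROW_SIZE code
 */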
3312 rdev->config.cik.tile_config = 0;
3313 switch (rdev->config.cik.num_tile_pipes) {
3314 case 1:
3315 rdev->config.cik.tile_config |= (0 << 0);
3316 break;
3317 case 2:
3318 rdev->config.cik.tile_config |= (1 << 0);
3319 break;
3320 case 4:
3321 rdev->config.cik.tile_config |= (2 << 0);
3322 break;
3323 case 8:
3324 default:
3325 /* XXX what about 12? */
3326 rdev->config.cik.tile_config |= (3 << 0);
3327 break;
3328 }
3329 rdev->config.cik.tile_config |=
3330 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
3331 rdev->config.cik.tile_config |=
3332 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
3333 rdev->config.cik.tile_config |=
3334 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
3335
3336 WREG32(GB_ADDR_CONFIG, gb_addr_config);
3337 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
3338 WREG32(DMIF_ADDR_CALC, gb_addr_config);
3339 WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
3340 WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
3341 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3342 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3343 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
3344
3345 cik_tiling_mode_table_init(rdev);
3346
3347 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
3348 rdev->config.cik.max_sh_per_se,
3349 rdev->config.cik.max_backends_per_se);
3350
3351 /* set HW defaults for 3D engine */
3352 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
3353
3354 WREG32(SX_DEBUG_1, 0x20);
3355
3356 WREG32(TA_CNTL_AUX, 0x00010000);
3357
3358 tmp = RREG32(SPI_CONFIG_CNTL);
3359 tmp |= 0x03000000;
3360 WREG32(SPI_CONFIG_CNTL, tmp);
3361
3362 WREG32(SQ_CONFIG, 1);
3363
3364 WREG32(DB_DEBUG, 0);
3365
3366 tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
3367 tmp |= 0x00000400;
3368 WREG32(DB_DEBUG2, tmp);
3369
3370 tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
3371 tmp |= 0x00020200;
3372 WREG32(DB_DEBUG3, tmp);
3373
3374 tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
3375 tmp |= 0x00018208;
3376 WREG32(CB_HW_CONTROL, tmp);
3377
3378 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
3379
3380 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
3381 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
3382 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
3383 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
3384
3385 WREG32(VGT_NUM_INSTANCES, 1);
3386
3387 WREG32(CP_PERFMON_CNTL, 0);
3388
3389 WREG32(SQ_CONFIG, 0);
3390
3391 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
3392 FORCE_EOV_MAX_REZ_CNT(255)));
3393
3394 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
3395 AUTO_INVLD_EN(ES_AND_GS_AUTO));
3396
3397 WREG32(VGT_GS_VERTEX_REUSE, 16);
3398 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
3399
3400 tmp = RREG32(HDP_MISC_CNTL);
3401 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
3402 WREG32(HDP_MISC_CNTL, tmp);
3403
3404 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
3405 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
3406
3407 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3408 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
3409
3410 udelay(50);
3411 }
3412
3413 /*
3414 * GPU scratch registers helpers function.
3415 */
3416 /**
3417 * cik_scratch_init - setup driver info for CP scratch regs
3418 *
3419 * @rdev: radeon_device pointer
3420 *
3421 * Set up the number and offset of the CP scratch registers.
3422 * NOTE: use of CP scratch registers is a legacy interface and
3423 * is not used by default on newer asics (r6xx+); there, memory
3424 * buffers are used for fences rather than scratch regs.
3425 */
3426 static void cik_scratch_init(struct radeon_device *rdev)
3427 {
3428 int i;
3429
3430 rdev->scratch.num_reg = 7;
3431 rdev->scratch.reg_base = SCRATCH_REG0;
3432 for (i = 0; i < rdev->scratch.num_reg; i++) {
3433 rdev->scratch.free[i] = true;
3434 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3435 }
3436 }
3437
3438 /**
3439 * cik_ring_test - basic gfx ring test
3440 *
3441 * @rdev: radeon_device pointer
3442 * @ring: radeon_ring structure holding ring information
3443 *
3444 * Allocate a scratch register and write to it using the gfx ring (CIK).
3445 * Provides a basic gfx ring test to verify that the ring is working.
3446 * Used by cik_cp_gfx_resume().
3447 * Returns 0 on success, error on failure.
3448 */
3449 int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3450 {
3451 uint32_t scratch;
3452 uint32_t tmp = 0;
3453 unsigned i;
3454 int r;
3455
3456 r = radeon_scratch_get(rdev, &scratch);
3457 if (r) {
3458 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3459 return r;
3460 }
3461 WREG32(scratch, 0xCAFEDEAD);
3462 r = radeon_ring_lock(rdev, ring, 3);
3463 if (r) {
3464 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
3465 radeon_scratch_free(rdev, scratch);
3466 return r;
3467 }
3468 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3469 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
3470 radeon_ring_write(ring, 0xDEADBEEF);
3471 radeon_ring_unlock_commit(rdev, ring);
3472
3473 for (i = 0; i < rdev->usec_timeout; i++) {
3474 tmp = RREG32(scratch);
3475 if (tmp == 0xDEADBEEF)
3476 break;
3477 DRM_UDELAY(1);
3478 }
3479 if (i < rdev->usec_timeout) {
3480 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
3481 } else {
3482 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
3483 ring->idx, scratch, tmp);
3484 r = -EINVAL;
3485 }
3486 radeon_scratch_free(rdev, scratch);
3487 return r;
3488 }
3489
3490 /**
3491 * cik_hdp_flush_cp_ring_emit - emit an hdp flush on the cp
3492 *
3493 * @rdev: radeon_device pointer
3494 * @ridx: radeon ring index
3495 *
3496 * Emits an hdp flush on the cp.
3497 */
3498 static void cik_hdp_flush_cp_ring_emit(struct radeon_device *rdev,
3499 int ridx)
3500 {
3501 struct radeon_ring *ring = &rdev->ring[ridx];
3502 u32 ref_and_mask;
3503
3504 switch (ring->idx) {
3505 case CAYMAN_RING_TYPE_CP1_INDEX:
3506 case CAYMAN_RING_TYPE_CP2_INDEX:
3507 default:
3508 switch (ring->me) {
3509 case 0:
3510 ref_and_mask = CP2 << ring->pipe;
3511 break;
3512 case 1:
3513 ref_and_mask = CP6 << ring->pipe;
3514 break;
3515 default:
3516 return;
3517 }
3518 break;
3519 case RADEON_RING_TYPE_GFX_INDEX:
3520 ref_and_mask = CP0;
3521 break;
3522 }
3523
3524 radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3525 radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
3526 WAIT_REG_MEM_FUNCTION(3) | /* == */
3527 WAIT_REG_MEM_ENGINE(1))); /* pfp */
3528 radeon_ring_write(ring, GPU_HDP_FLUSH_REQ >> 2);
3529 radeon_ring_write(ring, GPU_HDP_FLUSH_DONE >> 2);
3530 radeon_ring_write(ring, ref_and_mask);
3531 radeon_ring_write(ring, ref_and_mask);
3532 radeon_ring_write(ring, 0x20); /* poll interval */
3533 }
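/*
 * Worked example, illustrative only: GPU_HDP_FLUSH_REQ/DONE carry one bit
 * per requester. The gfx ring uses CP0, while the compute rings get a bit
 * per pipe: ME1/pipe2 would use ref_and_mask = CP2 << 2, and ME2/pipe1
 * would use CP6 << 1. The WAIT_REG_MEM packet above writes ref_and_mask
 * to the request register and then polls the done register until the same
 * bits read back, so the flush completes before later packets run.
 */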
3534
3535 /**
3536 * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
3537 *
3538 * @rdev: radeon_device pointer
3539 * @fence: radeon fence object
3540 *
3541 * Emits a fence sequence number on the gfx ring and flushes
3542 * GPU caches.
3543 */
3544 void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
3545 struct radeon_fence *fence)
3546 {
3547 struct radeon_ring *ring = &rdev->ring[fence->ring];
3548 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3549
3550 /* EVENT_WRITE_EOP - flush caches, send int */
3551 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3552 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
3553 EOP_TC_ACTION_EN |
3554 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3555 EVENT_INDEX(5)));
3556 radeon_ring_write(ring, addr & 0xfffffffc);
3557 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
3558 radeon_ring_write(ring, fence->seq);
3559 radeon_ring_write(ring, 0);
3560 /* HDP flush */
3561 cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
3562 }
3563
3564 /**
3565 * cik_fence_compute_ring_emit - emit a fence on the compute ring
3566 *
3567 * @rdev: radeon_device pointer
3568 * @fence: radeon fence object
3569 *
3570 * Emits a fence sequence number on the compute ring and flushes
3571 * GPU caches.
3572 */
3573 void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3574 struct radeon_fence *fence)
3575 {
3576 struct radeon_ring *ring = &rdev->ring[fence->ring];
3577 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3578
3579 /* RELEASE_MEM - flush caches, send int */
3580 radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
3581 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
3582 EOP_TC_ACTION_EN |
3583 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3584 EVENT_INDEX(5)));
3585 radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
3586 radeon_ring_write(ring, addr & 0xfffffffc);
3587 radeon_ring_write(ring, upper_32_bits(addr));
3588 radeon_ring_write(ring, fence->seq);
3589 radeon_ring_write(ring, 0);
3590 /* HDP flush */
3591 cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
3592 }
3593
3594 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3595 struct radeon_ring *ring,
3596 struct radeon_semaphore *semaphore,
3597 bool emit_wait)
3598 {
3599 uint64_t addr = semaphore->gpu_addr;
3600 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
3601
3602 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
3603 radeon_ring_write(ring, addr & 0xffffffff);
3604 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3605
3606 return true;
3607 }
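/*
 * Illustrative sketch, not part of the driver: pairing the semaphore
 * packet across two rings. The producer signals the semaphore address and
 * the consumer blocks on it, ordering work between the rings. Both rings
 * are assumed to be locked by the caller.
 */
static inline void cik_semaphore_usage_sketch(struct radeon_device *rdev,
					      struct radeon_ring *producer,
					      struct radeon_ring *consumer,
					      struct radeon_semaphore *sem)
{
	cik_semaphore_ring_emit(rdev, producer, sem, false);	/* signal */
	cik_semaphore_ring_emit(rdev, consumer, sem, true);	/* wait */
}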
3608
3609 /**
3610 * cik_copy_cpdma - copy pages using the CP DMA engine
3611 *
3612 * @rdev: radeon_device pointer
3613 * @src_offset: src GPU address
3614 * @dst_offset: dst GPU address
3615 * @num_gpu_pages: number of GPU pages to xfer
3616 * @fence: radeon fence object
3617 *
3618 * Copy GPU pages using the CP DMA engine (CIK+).
3619 * Used by the radeon ttm implementation to move pages if
3620 * registered as the asic copy callback.
3621 */
3622 int cik_copy_cpdma(struct radeon_device *rdev,
3623 uint64_t src_offset, uint64_t dst_offset,
3624 unsigned num_gpu_pages,
3625 struct radeon_fence **fence)
3626 {
3627 struct radeon_semaphore *sem = NULL;
3628 int ring_index = rdev->asic->copy.blit_ring_index;
3629 struct radeon_ring *ring = &rdev->ring[ring_index];
3630 u32 size_in_bytes, cur_size_in_bytes, control;
3631 int i, num_loops;
3632 int r = 0;
3633
3634 r = radeon_semaphore_create(rdev, &sem);
3635 if (r) {
3636 DRM_ERROR("radeon: moving bo (%d).\n", r);
3637 return r;
3638 }
3639
3640 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3641 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3642 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
3643 if (r) {
3644 DRM_ERROR("radeon: moving bo (%d).\n", r);
3645 radeon_semaphore_free(rdev, &sem, NULL);
3646 return r;
3647 }
3648
3649 radeon_semaphore_sync_to(sem, *fence);
3650 radeon_semaphore_sync_rings(rdev, sem, ring->idx);
3651
3652 for (i = 0; i < num_loops; i++) {
3653 cur_size_in_bytes = size_in_bytes;
3654 if (cur_size_in_bytes > 0x1fffff)
3655 cur_size_in_bytes = 0x1fffff;
3656 size_in_bytes -= cur_size_in_bytes;
3657 control = 0;
3658 if (size_in_bytes == 0)
3659 control |= PACKET3_DMA_DATA_CP_SYNC;
3660 radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
3661 radeon_ring_write(ring, control);
3662 radeon_ring_write(ring, lower_32_bits(src_offset));
3663 radeon_ring_write(ring, upper_32_bits(src_offset));
3664 radeon_ring_write(ring, lower_32_bits(dst_offset));
3665 radeon_ring_write(ring, upper_32_bits(dst_offset));
3666 radeon_ring_write(ring, cur_size_in_bytes);
3667 src_offset += cur_size_in_bytes;
3668 dst_offset += cur_size_in_bytes;
3669 }
3670
3671 r = radeon_fence_emit(rdev, fence, ring->idx);
3672 if (r) {
3673 radeon_ring_unlock_undo(rdev, ring);
3674 return r;
3675 }
3676
3677 radeon_ring_unlock_commit(rdev, ring);
3678 radeon_semaphore_free(rdev, &sem, *fence);
3679
3680 return r;
3681 }
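/*
 * Worked example, illustrative only: each DMA_DATA packet moves at most
 * 0x1fffff bytes (just under 2 MiB), so a 16 MiB copy needs
 * DIV_ROUND_UP(16 << 20, 0x1fffff) = 9 packets, and the lock above
 * reserves 9 * 7 + 18 = 81 dwords - 7 per packet plus slack for the
 * semaphore sync and the fence emitted around the copy.
 */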
3682
3683 /*
3684 * IB stuff
3685 */
3686 /**
3687 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
3688 *
3689 * @rdev: radeon_device pointer
3690 * @ib: radeon indirect buffer object
3691 *
3692 * Emits a DE (drawing engine) or CE (constant engine) IB
3693 * on the gfx ring. IBs are usually generated by userspace
3694 * acceleration drivers and submitted to the kernel for
3695 * scheduling on the ring. This function schedules the IB
3696 * on the gfx ring for execution by the GPU.
3697 */
3698 void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3699 {
3700 struct radeon_ring *ring = &rdev->ring[ib->ring];
3701 u32 header, control = INDIRECT_BUFFER_VALID;
3702
3703 if (ib->is_const_ib) {
3704 /* set switch buffer packet before const IB */
3705 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3706 radeon_ring_write(ring, 0);
3707
3708 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3709 } else {
3710 u32 next_rptr;
3711 if (ring->rptr_save_reg) {
3712 next_rptr = ring->wptr + 3 + 4;
3713 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3714 radeon_ring_write(ring, ((ring->rptr_save_reg -
3715 PACKET3_SET_UCONFIG_REG_START) >> 2));
3716 radeon_ring_write(ring, next_rptr);
3717 } else if (rdev->wb.enabled) {
3718 next_rptr = ring->wptr + 5 + 4;
3719 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3720 radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
3721 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3722 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3723 radeon_ring_write(ring, next_rptr);
3724 }
3725
3726 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3727 }
3728
3729 control |= ib->length_dw |
3730 (ib->vm ? (ib->vm->id << 24) : 0);
3731
3732 radeon_ring_write(ring, header);
3733 radeon_ring_write(ring,
3734 #ifdef __BIG_ENDIAN
3735 (2 << 0) |
3736 #endif
3737 (ib->gpu_addr & 0xFFFFFFFC));
3738 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3739 radeon_ring_write(ring, control);
3740 }
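/*
 * Worked example, illustrative only: for a 64-dword IB owned by VM id 3,
 * the control word emitted above is
 * INDIRECT_BUFFER_VALID | 64 | (3 << 24) - the low bits carry the IB
 * length in dwords and bits 24 and up select the VMID the CP uses when
 * fetching the buffer (VMID 0, the kernel, when ib->vm is NULL).
 */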
3741
3742 /**
3743 * cik_ib_test - basic gfx ring IB test
3744 *
3745 * @rdev: radeon_device pointer
3746 * @ring: radeon_ring structure holding ring information
3747 *
3748 * Allocate an IB and execute it on the gfx ring (CIK).
3749 * Provides a basic gfx ring test to verify that IBs are working.
3750 * Returns 0 on success, error on failure.
3751 */
3752 int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3753 {
3754 struct radeon_ib ib;
3755 uint32_t scratch;
3756 uint32_t tmp = 0;
3757 unsigned i;
3758 int r;
3759
3760 r = radeon_scratch_get(rdev, &scratch);
3761 if (r) {
3762 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3763 return r;
3764 }
3765 WREG32(scratch, 0xCAFEDEAD);
3766 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3767 if (r) {
3768 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3769 radeon_scratch_free(rdev, scratch);
3770 return r;
3771 }
3772 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
3773 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
3774 ib.ptr[2] = 0xDEADBEEF;
3775 ib.length_dw = 3;
3776 r = radeon_ib_schedule(rdev, &ib, NULL);
3777 if (r) {
3778 radeon_scratch_free(rdev, scratch);
3779 radeon_ib_free(rdev, &ib);
3780 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3781 return r;
3782 }
3783 r = radeon_fence_wait(ib.fence, false);
3784 if (r) {
3785 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3786 radeon_scratch_free(rdev, scratch);
3787 radeon_ib_free(rdev, &ib);
3788 return r;
3789 }
3790 for (i = 0; i < rdev->usec_timeout; i++) {
3791 tmp = RREG32(scratch);
3792 if (tmp == 0xDEADBEEF)
3793 break;
3794 DRM_UDELAY(1);
3795 }
3796 if (i < rdev->usec_timeout) {
3797 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3798 } else {
3799 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3800 scratch, tmp);
3801 r = -EINVAL;
3802 }
3803 radeon_scratch_free(rdev, scratch);
3804 radeon_ib_free(rdev, &ib);
3805 return r;
3806 }
3807
3808 /*
3809 * CP.
3810 * On CIK, gfx and compute now have independent command processors.
3811 *
3812 * GFX
3813 * Gfx consists of a single ring and can process both gfx jobs and
3814 * compute jobs. The gfx CP consists of three microengines (ME):
3815 * PFP - Pre-Fetch Parser
3816 * ME - Micro Engine
3817 * CE - Constant Engine
3818 * The PFP and ME make up what is considered the Drawing Engine (DE).
3819 * The CE is an asynchronous engine used for updating buffer descriptors
3820 * used by the DE so that they can be loaded into cache in parallel
3821 * while the DE is processing state update packets.
3822 *
3823 * Compute
3824 * The compute CP consists of two microengines (ME):
3825 * MEC1 - Compute MicroEngine 1
3826 * MEC2 - Compute MicroEngine 2
3827 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
3828 * The queues are exposed to userspace and are programmed directly
3829 * by the compute runtime.
3830 */
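/*
 * Illustrative sketch, not part of the driver: one way to unfold a flat
 * compute queue index into the (me, pipe, queue) triple implied by the
 * layout above - 8 queues per pipe, 4 pipes per MEC, with MEC1 and MEC2
 * addressed as me 1 and me 2.
 */
static inline void cik_queue_index_sketch(u32 idx, u32 *me, u32 *pipe,
					  u32 *queue)
{
	*queue = idx % 8;	/* 8 queues per pipe */
	*pipe = (idx / 8) % 4;	/* 4 pipes per MEC */
	*me = (idx / 32) + 1;	/* 32 queues per MEC; MEC1 is me 1 */
}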
3831 /**
3832 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
3833 *
3834 * @rdev: radeon_device pointer
3835 * @enable: enable or disable the MEs
3836 *
3837 * Halts or unhalts the gfx MEs.
3838 */
3839 static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
3840 {
3841 if (enable)
3842 WREG32(CP_ME_CNTL, 0);
3843 else {
3844 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3845 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3846 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3847 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3848 }
3849 udelay(50);
3850 }
3851
3852 /**
3853 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
3854 *
3855 * @rdev: radeon_device pointer
3856 *
3857 * Loads the gfx PFP, ME, and CE ucode.
3858 * Returns 0 for success, -EINVAL if the ucode is not available.
3859 */
3860 static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
3861 {
3862 const __be32 *fw_data;
3863 int i;
3864
3865 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3866 return -EINVAL;
3867
3868 cik_cp_gfx_enable(rdev, false);
3869
3870 /* PFP */
3871 fw_data = (const __be32 *)rdev->pfp_fw->data;
3872 WREG32(CP_PFP_UCODE_ADDR, 0);
3873 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
3874 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3875 WREG32(CP_PFP_UCODE_ADDR, 0);
3876
3877 /* CE */
3878 fw_data = (const __be32 *)rdev->ce_fw->data;
3879 WREG32(CP_CE_UCODE_ADDR, 0);
3880 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
3881 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3882 WREG32(CP_CE_UCODE_ADDR, 0);
3883
3884 /* ME */
3885 fw_data = (const __be32 *)rdev->me_fw->data;
3886 WREG32(CP_ME_RAM_WADDR, 0);
3887 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
3888 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3889 WREG32(CP_ME_RAM_WADDR, 0);
3890
3891 WREG32(CP_PFP_UCODE_ADDR, 0);
3892 WREG32(CP_CE_UCODE_ADDR, 0);
3893 WREG32(CP_ME_RAM_WADDR, 0);
3894 WREG32(CP_ME_RAM_RADDR, 0);
3895 return 0;
3896 }
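/*
 * Illustrative sketch, not part of the driver: the PFP, CE, and ME loads
 * above all follow the same pattern and could be factored as below -
 * reset the ucode address, stream the big-endian words, then reset the
 * address again so execution starts from word 0.
 */
static inline void cik_load_ucode_sketch(struct radeon_device *rdev,
					 u32 addr_reg, u32 data_reg,
					 const __be32 *fw_data, u32 fw_size)
{
	u32 i;

	WREG32(addr_reg, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(data_reg, be32_to_cpup(fw_data++));
	WREG32(addr_reg, 0);
}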
3897
3898 /**
3899 * cik_cp_gfx_start - start the gfx ring
3900 *
3901 * @rdev: radeon_device pointer
3902 *
3903 * Enables the ring and loads the clear state context and other
3904 * packets required to init the ring.
3905 * Returns 0 for success, error for failure.
3906 */
3907 static int cik_cp_gfx_start(struct radeon_device *rdev)
3908 {
3909 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3910 int r, i;
3911
3912 /* init the CP */
3913 WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
3914 WREG32(CP_ENDIAN_SWAP, 0);
3915 WREG32(CP_DEVICE_ID, 1);
3916
3917 cik_cp_gfx_enable(rdev, true);
3918
3919 r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
3920 if (r) {
3921 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3922 return r;
3923 }
3924
3925 /* init the CE partitions. CE only used for gfx on CIK */
3926 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3927 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3928 radeon_ring_write(ring, 0xc000);
3929 radeon_ring_write(ring, 0xc000);
3930
3931 /* setup clear context state */
3932 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3933 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3934
3935 radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3936 radeon_ring_write(ring, 0x80000000);
3937 radeon_ring_write(ring, 0x80000000);
3938
3939 for (i = 0; i < cik_default_size; i++)
3940 radeon_ring_write(ring, cik_default_state[i]);
3941
3942 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3943 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3944
3945 /* set clear context state */
3946 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3947 radeon_ring_write(ring, 0);
3948
3949 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3950 radeon_ring_write(ring, 0x00000316);
3951 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3952 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3953
3954 radeon_ring_unlock_commit(rdev, ring);
3955
3956 return 0;
3957 }
3958
3959 /**
3960 * cik_cp_gfx_fini - stop the gfx ring
3961 *
3962 * @rdev: radeon_device pointer
3963 *
3964 * Stop the gfx ring and tear down the driver ring
3965 * info.
3966 */
3967 static void cik_cp_gfx_fini(struct radeon_device *rdev)
3968 {
3969 cik_cp_gfx_enable(rdev, false);
3970 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3971 }
3972
3973 /**
3974 * cik_cp_gfx_resume - setup the gfx ring buffer registers
3975 *
3976 * @rdev: radeon_device pointer
3977 *
3978 * Program the location and size of the gfx ring buffer
3979 * and test it to make sure it's working.
3980 * Returns 0 for success, error for failure.
3981 */
3982 static int cik_cp_gfx_resume(struct radeon_device *rdev)
3983 {
3984 struct radeon_ring *ring;
3985 u32 tmp;
3986 u32 rb_bufsz;
3987 u64 rb_addr;
3988 int r;
3989
3990 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3991 if (rdev->family != CHIP_HAWAII)
3992 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3993
3994 /* Set the write pointer delay */
3995 WREG32(CP_RB_WPTR_DELAY, 0);
3996
3997 /* set the RB to use vmid 0 */
3998 WREG32(CP_RB_VMID, 0);
3999
4000 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
4001
4002 /* ring 0 - compute and gfx */
4003 /* Set ring buffer size */
4004 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
4005 rb_bufsz = order_base_2(ring->ring_size / 8);
4006 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
4007 #ifdef __BIG_ENDIAN
4008 tmp |= BUF_SWAP_32BIT;
4009 #endif
4010 WREG32(CP_RB0_CNTL, tmp);
4011
4012 /* Initialize the ring buffer's read and write pointers */
4013 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
4014 ring->wptr = 0;
4015 WREG32(CP_RB0_WPTR, ring->wptr);
4016
4017 /* set the wb address whether it's enabled or not */
4018 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
4019 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
4020
4021 /* scratch register shadowing is no longer supported */
4022 WREG32(SCRATCH_UMSK, 0);
4023
4024 if (!rdev->wb.enabled)
4025 tmp |= RB_NO_UPDATE;
4026
4027 mdelay(1);
4028 WREG32(CP_RB0_CNTL, tmp);
4029
4030 rb_addr = ring->gpu_addr >> 8;
4031 WREG32(CP_RB0_BASE, rb_addr);
4032 WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
4033
4034 /* start the ring */
4035 cik_cp_gfx_start(rdev);
4036 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
4037 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
4038 if (r) {
4039 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
4040 return r;
4041 }
4042
4043 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
4044 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
4045
4046 return 0;
4047 }
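/*
 * Worked example, illustrative only: for a 1 MiB ring,
 * rb_bufsz = order_base_2(1048576 / 8) = 17, and with a 4 KiB GPU page
 * the rptr update granularity field is order_base_2(4096 / 8) = 9, so
 * CP_RB0_CNTL is programmed with (9 << 8) | 17 = 0x911 (plus
 * BUF_SWAP_32BIT on big-endian builds).
 */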
4048
4049 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
4050 struct radeon_ring *ring)
4051 {
4052 u32 rptr;
4053
4054 if (rdev->wb.enabled)
4055 rptr = rdev->wb.wb[ring->rptr_offs/4];
4056 else
4057 rptr = RREG32(CP_RB0_RPTR);
4058
4059 return rptr;
4060 }
4061
4062 u32 cik_gfx_get_wptr(struct radeon_device *rdev,
4063 struct radeon_ring *ring)
4064 {
4065 u32 wptr;
4066
4067 wptr = RREG32(CP_RB0_WPTR);
4068
4069 return wptr;
4070 }
4071
4072 void cik_gfx_set_wptr(struct radeon_device *rdev,
4073 struct radeon_ring *ring)
4074 {
4075 WREG32(CP_RB0_WPTR, ring->wptr);
4076 (void)RREG32(CP_RB0_WPTR);
4077 }
4078
4079 u32 cik_compute_get_rptr(struct radeon_device *rdev,
4080 struct radeon_ring *ring)
4081 {
4082 u32 rptr;
4083
4084 if (rdev->wb.enabled) {
4085 rptr = rdev->wb.wb[ring->rptr_offs/4];
4086 } else {
4087 mutex_lock(&rdev->srbm_mutex);
4088 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4089 rptr = RREG32(CP_HQD_PQ_RPTR);
4090 cik_srbm_select(rdev, 0, 0, 0, 0);
4091 mutex_unlock(&rdev->srbm_mutex);
4092 }
4093
4094 return rptr;
4095 }
4096
4097 u32 cik_compute_get_wptr(struct radeon_device *rdev,
4098 struct radeon_ring *ring)
4099 {
4100 u32 wptr;
4101
4102 if (rdev->wb.enabled) {
4103 /* XXX check if swapping is necessary on BE */
4104 wptr = rdev->wb.wb[ring->wptr_offs/4];
4105 } else {
4106 mutex_lock(&rdev->srbm_mutex);
4107 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4108 wptr = RREG32(CP_HQD_PQ_WPTR);
4109 cik_srbm_select(rdev, 0, 0, 0, 0);
4110 mutex_unlock(&rdev->srbm_mutex);
4111 }
4112
4113 return wptr;
4114 }
4115
4116 void cik_compute_set_wptr(struct radeon_device *rdev,
4117 struct radeon_ring *ring)
4118 {
4119 /* XXX check if swapping is necessary on BE */
4120 rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
4121 WDOORBELL32(ring->doorbell_index, ring->wptr);
4122 }
4123
4124 /**
4125 * cik_cp_compute_enable - enable/disable the compute CP MEs
4126 *
4127 * @rdev: radeon_device pointer
4128 * @enable: enable or disable the MEs
4129 *
4130 * Halts or unhalts the compute MEs.
4131 */
4132 static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
4133 {
4134 if (enable)
4135 WREG32(CP_MEC_CNTL, 0);
4136 else
4137 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
4138 udelay(50);
4139 }
4140
4141 /**
4142 * cik_cp_compute_load_microcode - load the compute CP ME ucode
4143 *
4144 * @rdev: radeon_device pointer
4145 *
4146 * Loads the compute MEC1 and MEC2 ucode.
4147 * Returns 0 for success, -EINVAL if the ucode is not available.
4148 */
4149 static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
4150 {
4151 const __be32 *fw_data;
4152 int i;
4153
4154 if (!rdev->mec_fw)
4155 return -EINVAL;
4156
4157 cik_cp_compute_enable(rdev, false);
4158
4159 /* MEC1 */
4160 fw_data = (const __be32 *)rdev->mec_fw->data;
4161 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4162 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4163 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
4164 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4165
4166 if (rdev->family == CHIP_KAVERI) {
4167 /* MEC2 */
4168 fw_data = (const __be32 *)rdev->mec_fw->data;
4169 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4170 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4171 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
4172 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4173 }
4174
4175 return 0;
4176 }
4177
4178 /**
4179 * cik_cp_compute_start - start the compute queues
4180 *
4181 * @rdev: radeon_device pointer
4182 *
4183 * Enable the compute queues.
4184 * Returns 0 for success, error for failure.
4185 */
4186 static int cik_cp_compute_start(struct radeon_device *rdev)
4187 {
4188 cik_cp_compute_enable(rdev, true);
4189
4190 return 0;
4191 }
4192
4193 /**
4194 * cik_cp_compute_fini - stop the compute queues
4195 *
4196 * @rdev: radeon_device pointer
4197 *
4198 * Stop the compute queues and tear down the driver queue
4199 * info.
4200 */
4201 static void cik_cp_compute_fini(struct radeon_device *rdev)
4202 {
4203 int i, idx, r;
4204
4205 cik_cp_compute_enable(rdev, false);
4206
4207 for (i = 0; i < 2; i++) {
4208 if (i == 0)
4209 idx = CAYMAN_RING_TYPE_CP1_INDEX;
4210 else
4211 idx = CAYMAN_RING_TYPE_CP2_INDEX;
4212
4213 if (rdev->ring[idx].mqd_obj) {
4214 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
4215 if (unlikely(r != 0))
4216 dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
4217
4218 radeon_bo_unpin(rdev->ring[idx].mqd_obj);
4219 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
4220
4221 radeon_bo_unref(&rdev->ring[idx].mqd_obj);
4222 rdev->ring[idx].mqd_obj = NULL;
4223 }
4224 }
4225 }
4226
4227 static void cik_mec_fini(struct radeon_device *rdev)
4228 {
4229 int r;
4230
4231 if (rdev->mec.hpd_eop_obj) {
4232 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
4233 if (unlikely(r != 0))
4234 dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
4235 radeon_bo_unpin(rdev->mec.hpd_eop_obj);
4236 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
4237
4238 radeon_bo_unref(&rdev->mec.hpd_eop_obj);
4239 rdev->mec.hpd_eop_obj = NULL;
4240 }
4241 }
4242
4243 #define MEC_HPD_SIZE 2048
4244
4245 static int cik_mec_init(struct radeon_device *rdev)
4246 {
4247 int r;
4248 u32 *hpd;
4249
4250 /*
4251 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
4252 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
4253 */
4254 if (rdev->family == CHIP_KAVERI)
4255 rdev->mec.num_mec = 2;
4256 else
4257 rdev->mec.num_mec = 1;
4258 rdev->mec.num_pipe = 4;
4259 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
4260
4261 if (rdev->mec.hpd_eop_obj == NULL) {
4262 r = radeon_bo_create(rdev,
4263 rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
4264 PAGE_SIZE, true,
4265 RADEON_GEM_DOMAIN_GTT, NULL,
4266 &rdev->mec.hpd_eop_obj);
4267 if (r) {
4268 dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
4269 return r;
4270 }
4271 }
4272
4273 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
4274 if (unlikely(r != 0)) {
4275 cik_mec_fini(rdev);
4276 return r;
4277 }
4278 r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
4279 &rdev->mec.hpd_eop_gpu_addr);
4280 if (r) {
4281 dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
4282 cik_mec_fini(rdev);
4283 return r;
4284 }
4285 r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
4286 if (r) {
4287 dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
4288 cik_mec_fini(rdev);
4289 return r;
4290 }
4291
4292 /* clear memory. Not sure if this is required or not */
4293 memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
4294
4295 radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
4296 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
4297
4298 return 0;
4299 }
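/*
 * Worked example, illustrative only: the HPD EOP buffer above is sized as
 * num_mec * num_pipe * MEC_HPD_SIZE * 2, i.e. 2 * 4 * 2048 * 2 = 32 KiB
 * on Kaveri and 1 * 4 * 2048 * 2 = 16 KiB on Bonaire/Kabini - one
 * MEC_HPD_SIZE * 2 slice per pipe, handed out by the resume code below.
 */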
4300
4301 struct hqd_registers
4302 {
4303 u32 cp_mqd_base_addr;
4304 u32 cp_mqd_base_addr_hi;
4305 u32 cp_hqd_active;
4306 u32 cp_hqd_vmid;
4307 u32 cp_hqd_persistent_state;
4308 u32 cp_hqd_pipe_priority;
4309 u32 cp_hqd_queue_priority;
4310 u32 cp_hqd_quantum;
4311 u32 cp_hqd_pq_base;
4312 u32 cp_hqd_pq_base_hi;
4313 u32 cp_hqd_pq_rptr;
4314 u32 cp_hqd_pq_rptr_report_addr;
4315 u32 cp_hqd_pq_rptr_report_addr_hi;
4316 u32 cp_hqd_pq_wptr_poll_addr;
4317 u32 cp_hqd_pq_wptr_poll_addr_hi;
4318 u32 cp_hqd_pq_doorbell_control;
4319 u32 cp_hqd_pq_wptr;
4320 u32 cp_hqd_pq_control;
4321 u32 cp_hqd_ib_base_addr;
4322 u32 cp_hqd_ib_base_addr_hi;
4323 u32 cp_hqd_ib_rptr;
4324 u32 cp_hqd_ib_control;
4325 u32 cp_hqd_iq_timer;
4326 u32 cp_hqd_iq_rptr;
4327 u32 cp_hqd_dequeue_request;
4328 u32 cp_hqd_dma_offload;
4329 u32 cp_hqd_sema_cmd;
4330 u32 cp_hqd_msg_type;
4331 u32 cp_hqd_atomic0_preop_lo;
4332 u32 cp_hqd_atomic0_preop_hi;
4333 u32 cp_hqd_atomic1_preop_lo;
4334 u32 cp_hqd_atomic1_preop_hi;
4335 u32 cp_hqd_hq_scheduler0;
4336 u32 cp_hqd_hq_scheduler1;
4337 u32 cp_mqd_control;
4338 };
4339
4340 struct bonaire_mqd
4341 {
4342 u32 header;
4343 u32 dispatch_initiator;
4344 u32 dimensions[3];
4345 u32 start_idx[3];
4346 u32 num_threads[3];
4347 u32 pipeline_stat_enable;
4348 u32 perf_counter_enable;
4349 u32 pgm[2];
4350 u32 tba[2];
4351 u32 tma[2];
4352 u32 pgm_rsrc[2];
4353 u32 vmid;
4354 u32 resource_limits;
4355 u32 static_thread_mgmt01[2];
4356 u32 tmp_ring_size;
4357 u32 static_thread_mgmt23[2];
4358 u32 restart[3];
4359 u32 thread_trace_enable;
4360 u32 reserved1;
4361 u32 user_data[16];
4362 u32 vgtcs_invoke_count[2];
4363 struct hqd_registers queue_state;
4364 u32 dequeue_cntr;
4365 u32 interrupt_queue[64];
4366 };
4367
4368 /**
4369 * cik_cp_compute_resume - setup the compute queue registers
4370 *
4371 * @rdev: radeon_device pointer
4372 *
4373 * Program the compute queues and test them to make sure they
4374 * are working.
4375 * Returns 0 for success, error for failure.
4376 */
4377 static int cik_cp_compute_resume(struct radeon_device *rdev)
4378 {
4379 int r, i, j, idx;
4380 u32 tmp;
4381 bool use_doorbell = true;
4382 u64 hqd_gpu_addr;
4383 u64 mqd_gpu_addr;
4384 u64 eop_gpu_addr;
4385 u64 wb_gpu_addr;
4386 u32 *buf;
4387 struct bonaire_mqd *mqd;
4388
4389 r = cik_cp_compute_start(rdev);
4390 if (r)
4391 return r;
4392
4393 /* fix up chicken bits */
4394 tmp = RREG32(CP_CPF_DEBUG);
4395 tmp |= (1 << 23);
4396 WREG32(CP_CPF_DEBUG, tmp);
4397
4398 /* init the pipes */
4399 mutex_lock(&rdev->srbm_mutex);
4400 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
4401 int me = (i < 4) ? 1 : 2;
4402 int pipe = (i < 4) ? i : (i - 4);
4403
4404 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
4405
4406 cik_srbm_select(rdev, me, pipe, 0, 0);
4407
4408 /* write the EOP addr */
4409 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
4410 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
4411
4412 /* set the VMID assigned */
4413 WREG32(CP_HPD_EOP_VMID, 0);
4414
4415 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4416 tmp = RREG32(CP_HPD_EOP_CONTROL);
4417 tmp &= ~EOP_SIZE_MASK;
4418 tmp |= order_base_2(MEC_HPD_SIZE / 8);
4419 WREG32(CP_HPD_EOP_CONTROL, tmp);
4420 }
4421 cik_srbm_select(rdev, 0, 0, 0, 0);
4422 mutex_unlock(&rdev->srbm_mutex);
4423
4424 /* init the queues. Just two for now. */
4425 for (i = 0; i < 2; i++) {
4426 if (i == 0)
4427 idx = CAYMAN_RING_TYPE_CP1_INDEX;
4428 else
4429 idx = CAYMAN_RING_TYPE_CP2_INDEX;
4430
4431 if (rdev->ring[idx].mqd_obj == NULL) {
4432 r = radeon_bo_create(rdev,
4433 sizeof(struct bonaire_mqd),
4434 PAGE_SIZE, true,
4435 RADEON_GEM_DOMAIN_GTT, NULL,
4436 &rdev->ring[idx].mqd_obj);
4437 if (r) {
4438 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
4439 return r;
4440 }
4441 }
4442
4443 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
4444 if (unlikely(r != 0)) {
4445 cik_cp_compute_fini(rdev);
4446 return r;
4447 }
4448 r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
4449 &mqd_gpu_addr);
4450 if (r) {
4451 dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
4452 cik_cp_compute_fini(rdev);
4453 return r;
4454 }
4455 r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
4456 if (r) {
4457 dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
4458 cik_cp_compute_fini(rdev);
4459 return r;
4460 }
4461
4462 /* init the mqd struct */
4463 memset(buf, 0, sizeof(struct bonaire_mqd));
4464
4465 mqd = (struct bonaire_mqd *)buf;
4466 mqd->header = 0xC0310800;
4467 mqd->static_thread_mgmt01[0] = 0xffffffff;
4468 mqd->static_thread_mgmt01[1] = 0xffffffff;
4469 mqd->static_thread_mgmt23[0] = 0xffffffff;
4470 mqd->static_thread_mgmt23[1] = 0xffffffff;
4471
4472 mutex_lock(&rdev->srbm_mutex);
4473 cik_srbm_select(rdev, rdev->ring[idx].me,
4474 rdev->ring[idx].pipe,
4475 rdev->ring[idx].queue, 0);
4476
4477 /* disable wptr polling */
4478 tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
4479 tmp &= ~WPTR_POLL_EN;
4480 WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
4481
4482 /* enable doorbell? */
4483 mqd->queue_state.cp_hqd_pq_doorbell_control =
4484 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
4485 if (use_doorbell)
4486 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
4487 else
4488 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
4489 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
4490 mqd->queue_state.cp_hqd_pq_doorbell_control);
4491
4492 /* disable the queue if it's active */
4493 mqd->queue_state.cp_hqd_dequeue_request = 0;
4494 mqd->queue_state.cp_hqd_pq_rptr = 0;
4495 mqd->queue_state.cp_hqd_pq_wptr = 0;
4496 if (RREG32(CP_HQD_ACTIVE) & 1) {
4497 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
4498 for (j = 0; j < rdev->usec_timeout; j++) {
4499 if (!(RREG32(CP_HQD_ACTIVE) & 1))
4500 break;
4501 udelay(1);
4502 }
4503 WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
4504 WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
4505 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
4506 }
4507
4508 /* set the pointer to the MQD */
4509 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
4510 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
4511 WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
4512 WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
4513 /* set MQD vmid to 0 */
4514 mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
4515 mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
4516 WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
4517
4518 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4519 hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
4520 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
4521 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4522 WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
4523 WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
4524
4525 /* set up the HQD, this is similar to CP_RB0_CNTL */
4526 mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
4527 mqd->queue_state.cp_hqd_pq_control &=
4528 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
4529
4530 mqd->queue_state.cp_hqd_pq_control |=
4531 order_base_2(rdev->ring[idx].ring_size / 8);
4532 mqd->queue_state.cp_hqd_pq_control |=
4533 (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
4534 #ifdef __BIG_ENDIAN
4535 mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
4536 #endif
4537 mqd->queue_state.cp_hqd_pq_control &=
4538 ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
4539 mqd->queue_state.cp_hqd_pq_control |=
4540 PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
4541 WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
4542
4543 /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
4544 if (i == 0)
4545 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
4546 else
4547 wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
4548 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
4549 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4550 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
4551 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
4552 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
4553
4554 /* set the wb address whether it's enabled or not */
4555 if (i == 0)
4556 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
4557 else
4558 wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
4559 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
4560 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
4561 upper_32_bits(wb_gpu_addr) & 0xffff;
4562 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
4563 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
4564 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4565 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
4566
4567 /* enable the doorbell if requested */
4568 if (use_doorbell) {
4569 mqd->queue_state.cp_hqd_pq_doorbell_control =
4570 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
4571 mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
4572 mqd->queue_state.cp_hqd_pq_doorbell_control |=
4573 DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
4574 mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
4575 mqd->queue_state.cp_hqd_pq_doorbell_control &=
4576 ~(DOORBELL_SOURCE | DOORBELL_HIT);
4577
4578 } else {
4579 mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
4580 }
4581 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
4582 mqd->queue_state.cp_hqd_pq_doorbell_control);
4583
4584 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4585 rdev->ring[idx].wptr = 0;
4586 mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
4587 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
4588 mqd->queue_state.cp_hqd_pq_rptr = RREG32(CP_HQD_PQ_RPTR);
4589
4590 /* set the vmid for the queue */
4591 mqd->queue_state.cp_hqd_vmid = 0;
4592 WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
4593
4594 /* activate the queue */
4595 mqd->queue_state.cp_hqd_active = 1;
4596 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
4597
4598 cik_srbm_select(rdev, 0, 0, 0, 0);
4599 mutex_unlock(&rdev->srbm_mutex);
4600
4601 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
4602 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
4603
4604 rdev->ring[idx].ready = true;
4605 r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
4606 if (r)
4607 rdev->ring[idx].ready = false;
4608 }
4609
4610 return 0;
4611 }
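/*
 * Worked example, illustrative only: CP_HPD_EOP_CONTROL encodes the EOP
 * size as 2^(EOP_SIZE + 1) dwords, so the value programmed above,
 * order_base_2(MEC_HPD_SIZE / 8) = order_base_2(256) = 8, describes
 * 2^9 = 512 dwords = 2048 bytes - exactly MEC_HPD_SIZE.
 */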
4612
4613 static void cik_cp_enable(struct radeon_device *rdev, bool enable)
4614 {
4615 cik_cp_gfx_enable(rdev, enable);
4616 cik_cp_compute_enable(rdev, enable);
4617 }
4618
4619 static int cik_cp_load_microcode(struct radeon_device *rdev)
4620 {
4621 int r;
4622
4623 r = cik_cp_gfx_load_microcode(rdev);
4624 if (r)
4625 return r;
4626 r = cik_cp_compute_load_microcode(rdev);
4627 if (r)
4628 return r;
4629
4630 return 0;
4631 }
4632
4633 static void cik_cp_fini(struct radeon_device *rdev)
4634 {
4635 cik_cp_gfx_fini(rdev);
4636 cik_cp_compute_fini(rdev);
4637 }
4638
4639 static int cik_cp_resume(struct radeon_device *rdev)
4640 {
4641 int r;
4642
4643 cik_enable_gui_idle_interrupt(rdev, false);
4644
4645 r = cik_cp_load_microcode(rdev);
4646 if (r)
4647 return r;
4648
4649 r = cik_cp_gfx_resume(rdev);
4650 if (r)
4651 return r;
4652 r = cik_cp_compute_resume(rdev);
4653 if (r)
4654 return r;
4655
4656 cik_enable_gui_idle_interrupt(rdev, true);
4657
4658 return 0;
4659 }
4660
4661 static void cik_print_gpu_status_regs(struct radeon_device *rdev)
4662 {
4663 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
4664 RREG32(GRBM_STATUS));
4665 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
4666 RREG32(GRBM_STATUS2));
4667 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
4668 RREG32(GRBM_STATUS_SE0));
4669 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
4670 RREG32(GRBM_STATUS_SE1));
4671 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
4672 RREG32(GRBM_STATUS_SE2));
4673 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
4674 RREG32(GRBM_STATUS_SE3));
4675 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
4676 RREG32(SRBM_STATUS));
4677 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
4678 RREG32(SRBM_STATUS2));
4679 dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
4680 RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
4681 dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
4682 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
4683 dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
4684 dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
4685 RREG32(CP_STALLED_STAT1));
4686 dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
4687 RREG32(CP_STALLED_STAT2));
4688 dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
4689 RREG32(CP_STALLED_STAT3));
4690 dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
4691 RREG32(CP_CPF_BUSY_STAT));
4692 dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
4693 RREG32(CP_CPF_STALLED_STAT1));
4694 dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
4695 dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
4696 dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
4697 RREG32(CP_CPC_STALLED_STAT1));
4698 dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
4699 }
4700
4701 /**
4702 * cik_gpu_check_soft_reset - check which blocks are busy
4703 *
4704 * @rdev: radeon_device pointer
4705 *
4706 * Check which blocks are busy and return the relevant reset
4707 * mask to be used by cik_gpu_soft_reset().
4708 * Returns a mask of the blocks to be reset.
4709 */
4710 u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
4711 {
4712 u32 reset_mask = 0;
4713 u32 tmp;
4714
4715 /* GRBM_STATUS */
4716 tmp = RREG32(GRBM_STATUS);
4717 if (tmp & (PA_BUSY | SC_BUSY |
4718 BCI_BUSY | SX_BUSY |
4719 TA_BUSY | VGT_BUSY |
4720 DB_BUSY | CB_BUSY |
4721 GDS_BUSY | SPI_BUSY |
4722 IA_BUSY | IA_BUSY_NO_DMA))
4723 reset_mask |= RADEON_RESET_GFX;
4724
4725 if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
4726 reset_mask |= RADEON_RESET_CP;
4727
4728 /* GRBM_STATUS2 */
4729 tmp = RREG32(GRBM_STATUS2);
4730 if (tmp & RLC_BUSY)
4731 reset_mask |= RADEON_RESET_RLC;
4732
4733 /* SDMA0_STATUS_REG */
4734 tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
4735 if (!(tmp & SDMA_IDLE))
4736 reset_mask |= RADEON_RESET_DMA;
4737
4738 /* SDMA1_STATUS_REG */
4739 tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
4740 if (!(tmp & SDMA_IDLE))
4741 reset_mask |= RADEON_RESET_DMA1;
4742
4743 /* SRBM_STATUS2 */
4744 tmp = RREG32(SRBM_STATUS2);
4745 if (tmp & SDMA_BUSY)
4746 reset_mask |= RADEON_RESET_DMA;
4747
4748 if (tmp & SDMA1_BUSY)
4749 reset_mask |= RADEON_RESET_DMA1;
4750
4751 /* SRBM_STATUS */
4752 tmp = RREG32(SRBM_STATUS);
4753
4754 if (tmp & IH_BUSY)
4755 reset_mask |= RADEON_RESET_IH;
4756
4757 if (tmp & SEM_BUSY)
4758 reset_mask |= RADEON_RESET_SEM;
4759
4760 if (tmp & GRBM_RQ_PENDING)
4761 reset_mask |= RADEON_RESET_GRBM;
4762
4763 if (tmp & VMC_BUSY)
4764 reset_mask |= RADEON_RESET_VMC;
4765
4766 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
4767 MCC_BUSY | MCD_BUSY))
4768 reset_mask |= RADEON_RESET_MC;
4769
4770 if (evergreen_is_display_hung(rdev))
4771 reset_mask |= RADEON_RESET_DISPLAY;
4772
4773 /* Skip MC reset as it's most likely not hung, just busy */
4774 if (reset_mask & RADEON_RESET_MC) {
4775 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
4776 reset_mask &= ~RADEON_RESET_MC;
4777 }
4778
4779 return reset_mask;
4780 }
4781
4782 /**
4783 * cik_gpu_soft_reset - soft reset GPU
4784 *
4785 * @rdev: radeon_device pointer
4786 * @reset_mask: mask of which blocks to reset
4787 *
4788 * Soft reset the blocks specified in @reset_mask.
4789 */
4790 static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
4791 {
4792 struct evergreen_mc_save save;
4793 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
4794 u32 tmp;
4795
4796 if (reset_mask == 0)
4797 return;
4798
4799 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
4800
4801 cik_print_gpu_status_regs(rdev);
4802 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
4803 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
4804 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4805 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
4806
4807 /* disable CG/PG */
4808 cik_fini_pg(rdev);
4809 cik_fini_cg(rdev);
4810
4811 /* stop the rlc */
4812 cik_rlc_stop(rdev);
4813
4814 /* Disable GFX parsing/prefetching */
4815 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
4816
4817 /* Disable MEC parsing/prefetching */
4818 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
4819
4820 if (reset_mask & RADEON_RESET_DMA) {
4821 /* sdma0 */
4822 tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
4823 tmp |= SDMA_HALT;
4824 WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
4825 }
4826 if (reset_mask & RADEON_RESET_DMA1) {
4827 /* sdma1 */
4828 tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
4829 tmp |= SDMA_HALT;
4830 WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
4831 }
4832
4833 evergreen_mc_stop(rdev, &save);
4834 if (evergreen_mc_wait_for_idle(rdev)) {
4835 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4836 }
4837
4838 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
4839 grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
4840
4841 if (reset_mask & RADEON_RESET_CP) {
4842 grbm_soft_reset |= SOFT_RESET_CP;
4843
4844 srbm_soft_reset |= SOFT_RESET_GRBM;
4845 }
4846
4847 if (reset_mask & RADEON_RESET_DMA)
4848 srbm_soft_reset |= SOFT_RESET_SDMA;
4849
4850 if (reset_mask & RADEON_RESET_DMA1)
4851 srbm_soft_reset |= SOFT_RESET_SDMA1;
4852
4853 if (reset_mask & RADEON_RESET_DISPLAY)
4854 srbm_soft_reset |= SOFT_RESET_DC;
4855
4856 if (reset_mask & RADEON_RESET_RLC)
4857 grbm_soft_reset |= SOFT_RESET_RLC;
4858
4859 if (reset_mask & RADEON_RESET_SEM)
4860 srbm_soft_reset |= SOFT_RESET_SEM;
4861
4862 if (reset_mask & RADEON_RESET_IH)
4863 srbm_soft_reset |= SOFT_RESET_IH;
4864
4865 if (reset_mask & RADEON_RESET_GRBM)
4866 srbm_soft_reset |= SOFT_RESET_GRBM;
4867
4868 if (reset_mask & RADEON_RESET_VMC)
4869 srbm_soft_reset |= SOFT_RESET_VMC;
4870
4871 if (!(rdev->flags & RADEON_IS_IGP)) {
4872 if (reset_mask & RADEON_RESET_MC)
4873 srbm_soft_reset |= SOFT_RESET_MC;
4874 }
4875
4876 if (grbm_soft_reset) {
4877 tmp = RREG32(GRBM_SOFT_RESET);
4878 tmp |= grbm_soft_reset;
4879 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4880 WREG32(GRBM_SOFT_RESET, tmp);
4881 tmp = RREG32(GRBM_SOFT_RESET);
4882
4883 udelay(50);
4884
4885 tmp &= ~grbm_soft_reset;
4886 WREG32(GRBM_SOFT_RESET, tmp);
4887 tmp = RREG32(GRBM_SOFT_RESET);
4888 }
4889
4890 if (srbm_soft_reset) {
4891 tmp = RREG32(SRBM_SOFT_RESET);
4892 tmp |= srbm_soft_reset;
4893 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4894 WREG32(SRBM_SOFT_RESET, tmp);
4895 tmp = RREG32(SRBM_SOFT_RESET);
4896
4897 udelay(50);
4898
4899 tmp &= ~srbm_soft_reset;
4900 WREG32(SRBM_SOFT_RESET, tmp);
4901 tmp = RREG32(SRBM_SOFT_RESET);
4902 }
4903
4904 /* Wait a little for things to settle down */
4905 udelay(50);
4906
4907 evergreen_mc_resume(rdev, &save);
4908 udelay(50);
4909
4910 cik_print_gpu_status_regs(rdev);
4911 }
4912
4913 struct kv_reset_save_regs {
4914 u32 gmcon_reng_execute;
4915 u32 gmcon_misc;
4916 u32 gmcon_misc3;
4917 };
4918
4919 static void kv_save_regs_for_reset(struct radeon_device *rdev,
4920 struct kv_reset_save_regs *save)
4921 {
4922 save->gmcon_reng_execute = RREG32(GMCON_RENG_EXECUTE);
4923 save->gmcon_misc = RREG32(GMCON_MISC);
4924 save->gmcon_misc3 = RREG32(GMCON_MISC3);
4925
4926 WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute & ~RENG_EXECUTE_ON_PWR_UP);
4927 WREG32(GMCON_MISC, save->gmcon_misc & ~(RENG_EXECUTE_ON_REG_UPDATE |
4928 STCTRL_STUTTER_EN));
4929 }
4930
4931 static void kv_restore_regs_for_reset(struct radeon_device *rdev,
4932 struct kv_reset_save_regs *save)
4933 {
4934 int i;
4935
4936 WREG32(GMCON_PGFSM_WRITE, 0);
4937 WREG32(GMCON_PGFSM_CONFIG, 0x200010ff);
4938
4939 for (i = 0; i < 5; i++)
4940 WREG32(GMCON_PGFSM_WRITE, 0);
4941
4942 WREG32(GMCON_PGFSM_WRITE, 0);
4943 WREG32(GMCON_PGFSM_CONFIG, 0x300010ff);
4944
4945 for (i = 0; i < 5; i++)
4946 WREG32(GMCON_PGFSM_WRITE, 0);
4947
4948 WREG32(GMCON_PGFSM_WRITE, 0x210000);
4949 WREG32(GMCON_PGFSM_CONFIG, 0xa00010ff);
4950
4951 for (i = 0; i < 5; i++)
4952 WREG32(GMCON_PGFSM_WRITE, 0);
4953
4954 WREG32(GMCON_PGFSM_WRITE, 0x21003);
4955 WREG32(GMCON_PGFSM_CONFIG, 0xb00010ff);
4956
4957 for (i = 0; i < 5; i++)
4958 WREG32(GMCON_PGFSM_WRITE, 0);
4959
4960 WREG32(GMCON_PGFSM_WRITE, 0x2b00);
4961 WREG32(GMCON_PGFSM_CONFIG, 0xc00010ff);
4962
4963 for (i = 0; i < 5; i++)
4964 WREG32(GMCON_PGFSM_WRITE, 0);
4965
4966 WREG32(GMCON_PGFSM_WRITE, 0);
4967 WREG32(GMCON_PGFSM_CONFIG, 0xd00010ff);
4968
4969 for (i = 0; i < 5; i++)
4970 WREG32(GMCON_PGFSM_WRITE, 0);
4971
4972 WREG32(GMCON_PGFSM_WRITE, 0x420000);
4973 WREG32(GMCON_PGFSM_CONFIG, 0x100010ff);
4974
4975 for (i = 0; i < 5; i++)
4976 WREG32(GMCON_PGFSM_WRITE, 0);
4977
4978 WREG32(GMCON_PGFSM_WRITE, 0x120202);
4979 WREG32(GMCON_PGFSM_CONFIG, 0x500010ff);
4980
4981 for (i = 0; i < 5; i++)
4982 WREG32(GMCON_PGFSM_WRITE, 0);
4983
4984 WREG32(GMCON_PGFSM_WRITE, 0x3e3e36);
4985 WREG32(GMCON_PGFSM_CONFIG, 0x600010ff);
4986
4987 for (i = 0; i < 5; i++)
4988 WREG32(GMCON_PGFSM_WRITE, 0);
4989
4990 WREG32(GMCON_PGFSM_WRITE, 0x373f3e);
4991 WREG32(GMCON_PGFSM_CONFIG, 0x700010ff);
4992
4993 for (i = 0; i < 5; i++)
4994 WREG32(GMCON_PGFSM_WRITE, 0);
4995
4996 WREG32(GMCON_PGFSM_WRITE, 0x3e1332);
4997 WREG32(GMCON_PGFSM_CONFIG, 0xe00010ff);
4998
4999 WREG32(GMCON_MISC3, save->gmcon_misc3);
5000 WREG32(GMCON_MISC, save->gmcon_misc);
5001 WREG32(GMCON_RENG_EXECUTE, save->gmcon_reng_execute);
5002 }
5003
5004 static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
5005 {
5006 struct evergreen_mc_save save;
5007 struct kv_reset_save_regs kv_save = { 0 };
5008 u32 tmp, i;
5009
5010 dev_info(rdev->dev, "GPU pci config reset\n");
5011
5012 /* disable dpm? */
5013
5014 /* disable cg/pg */
5015 cik_fini_pg(rdev);
5016 cik_fini_cg(rdev);
5017
5018 /* Disable GFX parsing/prefetching */
5019 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
5020
5021 /* Disable MEC parsing/prefetching */
5022 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
5023
5024 /* sdma0 */
5025 tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
5026 tmp |= SDMA_HALT;
5027 WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
5028 /* sdma1 */
5029 tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
5030 tmp |= SDMA_HALT;
5031 WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
5032 /* XXX other engines? */
5033
5034 /* halt the rlc, disable cp internal ints */
5035 cik_rlc_stop(rdev);
5036
5037 udelay(50);
5038
5039 /* disable mem access */
5040 evergreen_mc_stop(rdev, &save);
5041 if (evergreen_mc_wait_for_idle(rdev)) {
5042 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5043 }
5044
5045 if (rdev->flags & RADEON_IS_IGP)
5046 kv_save_regs_for_reset(rdev, &kv_save);
5047
5048 /* disable BM */
5049 pci_clear_master(rdev->pdev);
5050 /* reset */
5051 radeon_pci_config_reset(rdev);
5052
5053 udelay(100);
5054
5055 /* wait for asic to come out of reset */
5056 for (i = 0; i < rdev->usec_timeout; i++) {
5057 if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
5058 break;
5059 udelay(1);
5060 }
5061
5062 /* does asic init need to be run first??? */
5063 if (rdev->flags & RADEON_IS_IGP)
5064 kv_restore_regs_for_reset(rdev, &kv_save);
5065 }
5066
5067 /**
5068 * cik_asic_reset - soft reset GPU
5069 *
5070 * @rdev: radeon_device pointer
5071 *
5072 * Look up which blocks are hung and attempt
5073 * to reset them.
5074 * Returns 0 for success.
5075 */
5076 int cik_asic_reset(struct radeon_device *rdev)
5077 {
5078 u32 reset_mask;
5079
5080 reset_mask = cik_gpu_check_soft_reset(rdev);
5081
5082 if (reset_mask)
5083 r600_set_bios_scratch_engine_hung(rdev, true);
5084
5085 /* try soft reset */
5086 cik_gpu_soft_reset(rdev, reset_mask);
5087
5088 reset_mask = cik_gpu_check_soft_reset(rdev);
5089
5090 /* try pci config reset */
5091 if (reset_mask && radeon_hard_reset)
5092 cik_gpu_pci_config_reset(rdev);
5093
5094 reset_mask = cik_gpu_check_soft_reset(rdev);
5095
5096 if (!reset_mask)
5097 r600_set_bios_scratch_engine_hung(rdev, false);
5098
5099 return 0;
5100 }
5101
5102 /**
5103 * cik_gfx_is_lockup - check if the 3D engine is locked up
5104 *
5105 * @rdev: radeon_device pointer
5106 * @ring: radeon_ring structure holding ring information
5107 *
5108 * Check if the 3D engine is locked up (CIK).
5109 * Returns true if the engine is locked, false if not.
5110 */
5111 bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
5112 {
5113 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
5114
5115 if (!(reset_mask & (RADEON_RESET_GFX |
5116 RADEON_RESET_COMPUTE |
5117 RADEON_RESET_CP))) {
5118 radeon_ring_lockup_update(rdev, ring);
5119 return false;
5120 }
5121 return radeon_ring_test_lockup(rdev, ring);
5122 }
5123
5124 /* MC */
5125 /**
5126 * cik_mc_program - program the GPU memory controller
5127 *
5128 * @rdev: radeon_device pointer
5129 *
5130 * Set the location of vram, gart, and AGP in the GPU's
5131 * physical address space (CIK).
5132 */
5133 static void cik_mc_program(struct radeon_device *rdev)
5134 {
5135 struct evergreen_mc_save save;
5136 u32 tmp;
5137 int i, j;
5138
5139 /* Initialize HDP */
5140 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
5141 WREG32((0x2c14 + j), 0x00000000);
5142 WREG32((0x2c18 + j), 0x00000000);
5143 WREG32((0x2c1c + j), 0x00000000);
5144 WREG32((0x2c20 + j), 0x00000000);
5145 WREG32((0x2c24 + j), 0x00000000);
5146 }
5147 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
5148
5149 evergreen_mc_stop(rdev, &save);
5150 if (radeon_mc_wait_for_idle(rdev)) {
5151 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5152 }
5153 /* Lockout access through VGA aperture */
5154 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
5155 /* Update configuration */
5156 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
5157 rdev->mc.vram_start >> 12);
5158 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
5159 rdev->mc.vram_end >> 12);
5160 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
5161 rdev->vram_scratch.gpu_addr >> 12);
5162 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
5163 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
5164 WREG32(MC_VM_FB_LOCATION, tmp);
5165 /* XXX double check these! */
5166 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
5167 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
5168 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
5169 WREG32(MC_VM_AGP_BASE, 0);
5170 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
5171 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
5172 if (radeon_mc_wait_for_idle(rdev)) {
5173 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
5174 }
5175 evergreen_mc_resume(rdev, &save);
5176 /* we need to own VRAM, so turn off the VGA renderer here
5177 * to stop it overwriting our objects */
5178 rv515_vga_render_disable(rdev);
5179 }
5180
5181 /**
5182 * cik_mc_init - initialize the memory controller driver params
5183 *
5184 * @rdev: radeon_device pointer
5185 *
5186 * Look up the amount of vram, vram width, and decide how to place
5187 * vram and gart within the GPU's physical address space (CIK).
5188 * Returns 0 for success.
5189 */
5190 static int cik_mc_init(struct radeon_device *rdev)
5191 {
5192 u32 tmp;
5193 int chansize, numchan;
5194
5195 /* Get VRAM information */
5196 rdev->mc.vram_is_ddr = true;
5197 tmp = RREG32(MC_ARB_RAMCFG);
5198 if (tmp & CHANSIZE_MASK) {
5199 chansize = 64;
5200 } else {
5201 chansize = 32;
5202 }
5203 tmp = RREG32(MC_SHARED_CHMAP);
5204 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
5205 case 0:
5206 default:
5207 numchan = 1;
5208 break;
5209 case 1:
5210 numchan = 2;
5211 break;
5212 case 2:
5213 numchan = 4;
5214 break;
5215 case 3:
5216 numchan = 8;
5217 break;
5218 case 4:
5219 numchan = 3;
5220 break;
5221 case 5:
5222 numchan = 6;
5223 break;
5224 case 6:
5225 numchan = 10;
5226 break;
5227 case 7:
5228 numchan = 12;
5229 break;
5230 case 8:
5231 numchan = 16;
5232 break;
5233 }
5234 rdev->mc.vram_width = numchan * chansize;
5235 /* Could aper size report 0? */
5236 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
5237 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
5238 /* size in MB on si */
5239 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
5240 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
5241 rdev->mc.visible_vram_size = rdev->mc.aper_size;
5242 si_vram_gtt_location(rdev, &rdev->mc);
5243 radeon_update_bandwidth_info(rdev);
5244
5245 return 0;
5246 }
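/*
 * Worked example, illustrative only: vram_width is the product of the
 * channel count and channel size decoded above. A 4-channel part with
 * 64-bit channels (NOOFCHAN = 2, CHANSIZE set) reports a 256-bit bus,
 * while an 8-channel part such as Hawaii reports 8 * 64 = 512 bits.
 */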
5247
5248 /*
5249 * GART
5250 * VMID 0 is the physical GPU addresses as used by the kernel.
5251 * VMIDs 1-15 are used for userspace clients and are handled
5252 * by the radeon vm/hsa code.
5253 */
5254 /**
5255 * cik_pcie_gart_tlb_flush - gart tlb flush callback
5256 *
5257 * @rdev: radeon_device pointer
5258 *
5259 * Flush the TLB for the VMID 0 page table (CIK).
5260 */
5261 void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
5262 {
5263 /* flush hdp cache */
5264 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
5265
5266 /* bits 0-15 are the VM contexts0-15 */
5267 WREG32(VM_INVALIDATE_REQUEST, 0x1);
5268 }
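/*
 * Note, illustrative only: VM_INVALIDATE_REQUEST takes one bit per VM
 * context, so the write of 0x1 above invalidates only VMID 0 (the GART
 * mapping); a write of 0xffff would cover all 16 contexts.
 */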
5269
5270 /**
5271 * cik_pcie_gart_enable - gart enable
5272 *
5273 * @rdev: radeon_device pointer
5274 *
5275 * This sets up the TLBs, programs the page tables for VMID0,
5276 * sets up the hw for VMIDs 1-15 which are allocated on
5277 * demand, and sets up the global locations for the LDS, GDS,
5278 * and GPUVM for FSA64 clients (CIK).
5279 * Returns 0 for success, errors for failure.
5280 */
5281 static int cik_pcie_gart_enable(struct radeon_device *rdev)
5282 {
5283 int r, i;
5284
5285 if (rdev->gart.robj == NULL) {
5286 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
5287 return -EINVAL;
5288 }
5289 r = radeon_gart_table_vram_pin(rdev);
5290 if (r)
5291 return r;
5292 radeon_gart_restore(rdev);
5293 /* Setup TLB control */
5294 WREG32(MC_VM_MX_L1_TLB_CNTL,
5295 (0xA << 7) |
5296 ENABLE_L1_TLB |
5297 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
5298 ENABLE_ADVANCED_DRIVER_MODEL |
5299 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
5300 /* Setup L2 cache */
5301 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
5302 ENABLE_L2_FRAGMENT_PROCESSING |
5303 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
5304 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
5305 EFFECTIVE_L2_QUEUE_SIZE(7) |
5306 CONTEXT1_IDENTITY_ACCESS_MODE(1));
5307 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
5308 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
5309 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
5310 /* setup context0 */
5311 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
5312 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
5313 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
5314 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
5315 (u32)(rdev->dummy_page.addr >> 12));
5316 WREG32(VM_CONTEXT0_CNTL2, 0);
5317 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
5318 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
5319
5320 WREG32(0x15D4, 0);
5321 WREG32(0x15D8, 0);
5322 WREG32(0x15DC, 0);
5323
5324 /* empty context1-15 */
5325 /* FIXME: start with 4G; once a 2-level page table is in use, switch
5326 * to the full vm size space
5327 */
5328 /* set vm size, must be a multiple of 4 */
5329 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
5330 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
5331 for (i = 1; i < 16; i++) {
5332 if (i < 8)
5333 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
5334 rdev->gart.table_addr >> 12);
5335 else
5336 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
5337 rdev->gart.table_addr >> 12);
5338 }
5339
5340 /* enable context1-15 */
5341 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
5342 (u32)(rdev->dummy_page.addr >> 12));
5343 WREG32(VM_CONTEXT1_CNTL2, 4);
5344 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
5345 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5346 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
5347 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5348 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
5349 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
5350 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
5351 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
5352 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
5353 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
5354 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
5355 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5356 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
5357
5358 if (rdev->family == CHIP_KAVERI) {
5359 u32 tmp = RREG32(CHUB_CONTROL);
5360 tmp &= ~BYPASS_VM;
5361 WREG32(CHUB_CONTROL, tmp);
5362 }
5363
5364 /* XXX SH_MEM regs */
5365 /* where to put LDS, scratch, GPUVM in FSA64 space */
5366 mutex_lock(&rdev->srbm_mutex);
5367 for (i = 0; i < 16; i++) {
5368 cik_srbm_select(rdev, 0, 0, 0, i);
5369 /* CP and shaders */
5370 WREG32(SH_MEM_CONFIG, 0);
5371 WREG32(SH_MEM_APE1_BASE, 1);
5372 WREG32(SH_MEM_APE1_LIMIT, 0);
5373 WREG32(SH_MEM_BASES, 0);
5374 /* SDMA GFX */
5375 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
5376 WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
5377 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
5378 WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
5379 /* XXX SDMA RLC - todo */
5380 }
5381 cik_srbm_select(rdev, 0, 0, 0, 0);
5382 mutex_unlock(&rdev->srbm_mutex);
5383
5384 cik_pcie_gart_tlb_flush(rdev);
5385 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
5386 (unsigned)(rdev->mc.gtt_size >> 20),
5387 (unsigned long long)rdev->gart.table_addr);
5388 rdev->gart.ready = true;
5389 return 0;
5390 }
5391
5392 /**
5393 * cik_pcie_gart_disable - gart disable
5394 *
5395 * @rdev: radeon_device pointer
5396 *
5397 * This disables all VM page tables (CIK).
5398 */
5399 static void cik_pcie_gart_disable(struct radeon_device *rdev)
5400 {
5401 /* Disable all tables */
5402 WREG32(VM_CONTEXT0_CNTL, 0);
5403 WREG32(VM_CONTEXT1_CNTL, 0);
5404 /* Setup TLB control */
5405 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
5406 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
5407 /* Setup L2 cache */
5408 WREG32(VM_L2_CNTL,
5409 ENABLE_L2_FRAGMENT_PROCESSING |
5410 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
5411 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
5412 EFFECTIVE_L2_QUEUE_SIZE(7) |
5413 CONTEXT1_IDENTITY_ACCESS_MODE(1));
5414 WREG32(VM_L2_CNTL2, 0);
5415 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
5416 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
5417 radeon_gart_table_vram_unpin(rdev);
5418 }
5419
5420 /**
5421 * cik_pcie_gart_fini - vm fini callback
5422 *
5423 * @rdev: radeon_device pointer
5424 *
5425 * Tears down the driver GART/VM setup (CIK).
5426 */
5427 static void cik_pcie_gart_fini(struct radeon_device *rdev)
5428 {
5429 cik_pcie_gart_disable(rdev);
5430 radeon_gart_table_vram_free(rdev);
5431 radeon_gart_fini(rdev);
5432 }
5433
5434 /* vm parser */
5435 /**
5436 * cik_ib_parse - vm ib_parse callback
5437 *
5438 * @rdev: radeon_device pointer
5439 * @ib: indirect buffer pointer
5440 *
5441 * CIK uses hw IB checking so this is a nop (CIK).
5442 */
5443 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
5444 {
5445 return 0;
5446 }
5447
5448 /*
5449 * vm
5450 * VMID 0 holds the physical GPU addresses used by the kernel.
5451 * VMIDs 1-15 are used for userspace clients and are handled
5452 * by the radeon vm/hsa code.
5453 */
5454 /**
5455 * cik_vm_init - cik vm init callback
5456 *
5457 * @rdev: radeon_device pointer
5458 *
5459 * Inits CIK-specific vm parameters (number of VMs, base of vram for
5460 * VMIDs 1-15) (CIK).
5461 * Returns 0 for success.
5462 */
5463 int cik_vm_init(struct radeon_device *rdev)
5464 {
5465 /* number of VMs */
5466 rdev->vm_manager.nvm = 16;
5467 /* base offset of vram pages */
5468 if (rdev->flags & RADEON_IS_IGP) {
5469 u64 tmp = RREG32(MC_VM_FB_OFFSET);
5470 tmp <<= 22;
5471 rdev->vm_manager.vram_base_offset = tmp;
5472 } else
5473 rdev->vm_manager.vram_base_offset = 0;
5474
5475 return 0;
5476 }
5477
5478 /**
5479 * cik_vm_fini - cik vm fini callback
5480 *
5481 * @rdev: radeon_device pointer
5482 *
5483 * Tear down any asic specific VM setup (CIK).
5484 */
5485 void cik_vm_fini(struct radeon_device *rdev)
5486 {
5487 }
5488
5489 /**
5490 * cik_vm_decode_fault - print human readable fault info
5491 *
5492 * @rdev: radeon_device pointer
5493 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
5494 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
5495 *
5496 * Print human readable fault information (CIK).
5497 */
5498 static void cik_vm_decode_fault(struct radeon_device *rdev,
5499 u32 status, u32 addr, u32 mc_client)
5500 {
5501 u32 mc_id;
5502 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
5503 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
5504 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
5505 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
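/* mc_client is a four-character client tag packed big-endian into a
 * u32, unpacked into a string above; e.g. a (hypothetical) value of
 * 0x43423030 would decode to "CB00".
 */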
5506
5507 if (rdev->family == CHIP_HAWAII)
5508 mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5509 else
5510 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5511
5512 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
5513 protections, vmid, addr,
5514 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
5515 block, mc_client, mc_id);
5516 }
5517
5518 /**
5519 * cik_vm_flush - cik vm flush using the CP
5520 *
5521 * @rdev: radeon_device pointer
5522 *
5523 * Update the page table base and flush the VM TLB
5524 * using the CP (CIK).
5525 */
5526 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5527 {
5528 struct radeon_ring *ring = &rdev->ring[ridx];
5529
5530 if (vm == NULL)
5531 return;
5532
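/* Each WRITE_DATA packet emitted below occupies count + 2 dwords:
 * header, engine/destination control, destination register offset in
 * dwords, the upper address dword (0 for register writes), then the
 * payload - e.g. PACKET3(PACKET3_WRITE_DATA, 3) carries one data
 * word, PACKET3(PACKET3_WRITE_DATA, 6) carries four.
 */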
5533 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5534 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5535 WRITE_DATA_DST_SEL(0)));
5536 if (vm->id < 8) {
5537 radeon_ring_write(ring,
5538 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
5539 } else {
5540 radeon_ring_write(ring,
5541 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
5542 }
5543 radeon_ring_write(ring, 0);
5544 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
5545
5546 /* update SH_MEM_* regs */
5547 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5548 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5549 WRITE_DATA_DST_SEL(0)));
5550 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5551 radeon_ring_write(ring, 0);
5552 radeon_ring_write(ring, VMID(vm->id));
5553
5554 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
5555 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5556 WRITE_DATA_DST_SEL(0)));
5557 radeon_ring_write(ring, SH_MEM_BASES >> 2);
5558 radeon_ring_write(ring, 0);
5559
5560 radeon_ring_write(ring, 0); /* SH_MEM_BASES */
5561 radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
5562 radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
5563 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
5564
5565 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5566 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5567 WRITE_DATA_DST_SEL(0)));
5568 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5569 radeon_ring_write(ring, 0);
5570 radeon_ring_write(ring, VMID(0));
5571
5572 /* HDP flush */
5573 cik_hdp_flush_cp_ring_emit(rdev, ridx);
5574
5575 /* bits 0-15 are the VM contexts 0-15 */
5576 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5577 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5578 WRITE_DATA_DST_SEL(0)));
5579 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5580 radeon_ring_write(ring, 0);
5581 radeon_ring_write(ring, 1 << vm->id);
5582
5583 /* compute doesn't have PFP */
5584 if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
5585 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5586 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5587 radeon_ring_write(ring, 0x0);
5588 }
5589 }
5590
5591 /*
5592 * RLC
5593 * The RLC is a multi-purpose microengine that handles a
5594 * variety of functions, the most important of which is
5595 * the interrupt controller.
5596 */
5597 static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
5598 bool enable)
5599 {
5600 u32 tmp = RREG32(CP_INT_CNTL_RING0);
5601
5602 if (enable)
5603 tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5604 else
5605 tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5606 WREG32(CP_INT_CNTL_RING0, tmp);
5607 }
5608
5609 static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
5610 {
5611 u32 tmp;
5612
5613 tmp = RREG32(RLC_LB_CNTL);
5614 if (enable)
5615 tmp |= LOAD_BALANCE_ENABLE;
5616 else
5617 tmp &= ~LOAD_BALANCE_ENABLE;
5618 WREG32(RLC_LB_CNTL, tmp);
5619 }
5620
5621 static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
5622 {
5623 u32 i, j, k;
5624 u32 mask;
5625
5626 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
5627 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
5628 cik_select_se_sh(rdev, i, j);
5629 for (k = 0; k < rdev->usec_timeout; k++) {
5630 if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
5631 break;
5632 udelay(1);
5633 }
5634 }
5635 }
5636 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5637
5638 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
5639 for (k = 0; k < rdev->usec_timeout; k++) {
5640 if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
5641 break;
5642 udelay(1);
5643 }
5644 }
5645
5646 static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
5647 {
5648 u32 tmp;
5649
5650 tmp = RREG32(RLC_CNTL);
5651 if (tmp != rlc)
5652 WREG32(RLC_CNTL, rlc);
5653 }
5654
5655 static u32 cik_halt_rlc(struct radeon_device *rdev)
5656 {
5657 u32 data, orig;
5658
5659 orig = data = RREG32(RLC_CNTL);
5660
5661 if (data & RLC_ENABLE) {
5662 u32 i;
5663
5664 data &= ~RLC_ENABLE;
5665 WREG32(RLC_CNTL, data);
5666
5667 for (i = 0; i < rdev->usec_timeout; i++) {
5668 if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
5669 break;
5670 udelay(1);
5671 }
5672
5673 cik_wait_for_rlc_serdes(rdev);
5674 }
5675
5676 return orig;
5677 }
5678
5679 void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
5680 {
5681 u32 tmp, i, mask;
5682
5683 tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
5684 WREG32(RLC_GPR_REG2, tmp);
5685
5686 mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
5687 for (i = 0; i < rdev->usec_timeout; i++) {
5688 if ((RREG32(RLC_GPM_STAT) & mask) == mask)
5689 break;
5690 udelay(1);
5691 }
5692
5693 for (i = 0; i < rdev->usec_timeout; i++) {
5694 if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
5695 break;
5696 udelay(1);
5697 }
5698 }
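/* The sequence above is the RLC safe-mode handshake: write REQ plus
 * the enter message to RLC_GPR_REG2, wait for the RLC to report GFX
 * power and clock status in RLC_GPM_STAT, then wait for the RLC to
 * acknowledge by clearing the REQ bit.
 */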
5699
5700 void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
5701 {
5702 u32 tmp;
5703
5704 tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
5705 WREG32(RLC_GPR_REG2, tmp);
5706 }
5707
5708 /**
5709 * cik_rlc_stop - stop the RLC ME
5710 *
5711 * @rdev: radeon_device pointer
5712 *
5713 * Halt the RLC ME (MicroEngine) (CIK).
5714 */
5715 static void cik_rlc_stop(struct radeon_device *rdev)
5716 {
5717 WREG32(RLC_CNTL, 0);
5718
5719 cik_enable_gui_idle_interrupt(rdev, false);
5720
5721 cik_wait_for_rlc_serdes(rdev);
5722 }
5723
5724 /**
5725 * cik_rlc_start - start the RLC ME
5726 *
5727 * @rdev: radeon_device pointer
5728 *
5729 * Unhalt the RLC ME (MicroEngine) (CIK).
5730 */
5731 static void cik_rlc_start(struct radeon_device *rdev)
5732 {
5733 WREG32(RLC_CNTL, RLC_ENABLE);
5734
5735 cik_enable_gui_idle_interrupt(rdev, true);
5736
5737 udelay(50);
5738 }
5739
5740 /**
5741 * cik_rlc_resume - setup the RLC hw
5742 *
5743 * @rdev: radeon_device pointer
5744 *
5745 * Initialize the RLC registers, load the ucode,
5746 * and start the RLC (CIK).
5747 * Returns 0 for success, -EINVAL if the ucode is not available.
5748 */
5749 static int cik_rlc_resume(struct radeon_device *rdev)
5750 {
5751 u32 i, size, tmp;
5752 const __be32 *fw_data;
5753
5754 if (!rdev->rlc_fw)
5755 return -EINVAL;
5756
5757 switch (rdev->family) {
5758 case CHIP_BONAIRE:
5759 case CHIP_HAWAII:
5760 default:
5761 size = BONAIRE_RLC_UCODE_SIZE;
5762 break;
5763 case CHIP_KAVERI:
5764 size = KV_RLC_UCODE_SIZE;
5765 break;
5766 case CHIP_KABINI:
5767 size = KB_RLC_UCODE_SIZE;
5768 break;
5769 }
5770
5771 cik_rlc_stop(rdev);
5772
5773 /* disable CG */
5774 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
5775 WREG32(RLC_CGCG_CGLS_CTRL, tmp);
5776
5777 si_rlc_reset(rdev);
5778
5779 cik_init_pg(rdev);
5780
5781 cik_init_cg(rdev);
5782
5783 WREG32(RLC_LB_CNTR_INIT, 0);
5784 WREG32(RLC_LB_CNTR_MAX, 0x00008000);
5785
5786 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5787 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
5788 WREG32(RLC_LB_PARAMS, 0x00600408);
5789 WREG32(RLC_LB_CNTL, 0x80000004);
5790
5791 WREG32(RLC_MC_CNTL, 0);
5792 WREG32(RLC_UCODE_CNTL, 0);
5793
5794 fw_data = (const __be32 *)rdev->rlc_fw->data;
5795 WREG32(RLC_GPM_UCODE_ADDR, 0);
5796 for (i = 0; i < size; i++)
5797 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
5798 WREG32(RLC_GPM_UCODE_ADDR, 0);
5799
5800 /* XXX - find out what chips support lbpw */
5801 cik_enable_lbpw(rdev, false);
5802
5803 if (rdev->family == CHIP_BONAIRE)
5804 WREG32(RLC_DRIVER_DMA_STATUS, 0);
5805
5806 cik_rlc_start(rdev);
5807
5808 return 0;
5809 }
5810
5811 static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
5812 {
5813 u32 data, orig, tmp, tmp2;
5814
5815 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5816
5817 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5818 cik_enable_gui_idle_interrupt(rdev, true);
5819
5820 tmp = cik_halt_rlc(rdev);
5821
5822 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5823 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5824 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5825 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
5826 WREG32(RLC_SERDES_WR_CTRL, tmp2);
5827
5828 cik_update_rlc(rdev, tmp);
5829
5830 data |= CGCG_EN | CGLS_EN;
5831 } else {
5832 cik_enable_gui_idle_interrupt(rdev, false);
5833
5834 RREG32(CB_CGTT_SCLK_CTRL);
5835 RREG32(CB_CGTT_SCLK_CTRL);
5836 RREG32(CB_CGTT_SCLK_CTRL);
5837 RREG32(CB_CGTT_SCLK_CTRL);
5838
5839 data &= ~(CGCG_EN | CGLS_EN);
5840 }
5841
5842 if (orig != data)
5843 WREG32(RLC_CGCG_CGLS_CTRL, data);
5845 }
5846
5847 static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
5848 {
5849 u32 data, orig, tmp = 0;
5850
5851 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5852 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
5853 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5854 orig = data = RREG32(CP_MEM_SLP_CNTL);
5855 data |= CP_MEM_LS_EN;
5856 if (orig != data)
5857 WREG32(CP_MEM_SLP_CNTL, data);
5858 }
5859 }
5860
5861 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5862 data &= 0xfffffffd;
5863 if (orig != data)
5864 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5865
5866 tmp = cik_halt_rlc(rdev);
5867
5868 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5869 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5870 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5871 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
5872 WREG32(RLC_SERDES_WR_CTRL, data);
5873
5874 cik_update_rlc(rdev, tmp);
5875
5876 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
5877 orig = data = RREG32(CGTS_SM_CTRL_REG);
5878 data &= ~SM_MODE_MASK;
5879 data |= SM_MODE(0x2);
5880 data |= SM_MODE_ENABLE;
5881 data &= ~CGTS_OVERRIDE;
5882 if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
5883 (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
5884 data &= ~CGTS_LS_OVERRIDE;
5885 data &= ~ON_MONITOR_ADD_MASK;
5886 data |= ON_MONITOR_ADD_EN;
5887 data |= ON_MONITOR_ADD(0x96);
5888 if (orig != data)
5889 WREG32(CGTS_SM_CTRL_REG, data);
5890 }
5891 } else {
5892 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5893 data |= 0x00000002;
5894 if (orig != data)
5895 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5896
5897 data = RREG32(RLC_MEM_SLP_CNTL);
5898 if (data & RLC_MEM_LS_EN) {
5899 data &= ~RLC_MEM_LS_EN;
5900 WREG32(RLC_MEM_SLP_CNTL, data);
5901 }
5902
5903 data = RREG32(CP_MEM_SLP_CNTL);
5904 if (data & CP_MEM_LS_EN) {
5905 data &= ~CP_MEM_LS_EN;
5906 WREG32(CP_MEM_SLP_CNTL, data);
5907 }
5908
5909 orig = data = RREG32(CGTS_SM_CTRL_REG);
5910 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
5911 if (orig != data)
5912 WREG32(CGTS_SM_CTRL_REG, data);
5913
5914 tmp = cik_halt_rlc(rdev);
5915
5916 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5917 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5918 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5919 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
5920 WREG32(RLC_SERDES_WR_CTRL, data);
5921
5922 cik_update_rlc(rdev, tmp);
5923 }
5924 }
5925
5926 static const u32 mc_cg_registers[] =
5927 {
5928 MC_HUB_MISC_HUB_CG,
5929 MC_HUB_MISC_SIP_CG,
5930 MC_HUB_MISC_VM_CG,
5931 MC_XPB_CLK_GAT,
5932 ATC_MISC_CG,
5933 MC_CITF_MISC_WR_CG,
5934 MC_CITF_MISC_RD_CG,
5935 MC_CITF_MISC_VM_CG,
5936 VM_L2_CG,
5937 };
5938
5939 static void cik_enable_mc_ls(struct radeon_device *rdev,
5940 bool enable)
5941 {
5942 int i;
5943 u32 orig, data;
5944
5945 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5946 orig = data = RREG32(mc_cg_registers[i]);
5947 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5948 data |= MC_LS_ENABLE;
5949 else
5950 data &= ~MC_LS_ENABLE;
5951 if (data != orig)
5952 WREG32(mc_cg_registers[i], data);
5953 }
5954 }
5955
5956 static void cik_enable_mc_mgcg(struct radeon_device *rdev,
5957 bool enable)
5958 {
5959 int i;
5960 u32 orig, data;
5961
5962 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5963 orig = data = RREG32(mc_cg_registers[i]);
5964 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5965 data |= MC_CG_ENABLE;
5966 else
5967 data &= ~MC_CG_ENABLE;
5968 if (data != orig)
5969 WREG32(mc_cg_registers[i], data);
5970 }
5971 }
5972
5973 static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
5974 bool enable)
5975 {
5976 u32 orig, data;
5977
5978 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5979 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
5980 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
5981 } else {
5982 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
5983 data |= 0xff000000;
5984 if (data != orig)
5985 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
5986
5987 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
5988 data |= 0xff000000;
5989 if (data != orig)
5990 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
5991 }
5992 }
5993
5994 static void cik_enable_sdma_mgls(struct radeon_device *rdev,
5995 bool enable)
5996 {
5997 u32 orig, data;
5998
5999 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
6000 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
6001 data |= 0x100;
6002 if (orig != data)
6003 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
6004
6005 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
6006 data |= 0x100;
6007 if (orig != data)
6008 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
6009 } else {
6010 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
6011 data &= ~0x100;
6012 if (orig != data)
6013 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
6014
6015 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
6016 data &= ~0x100;
6017 if (orig != data)
6018 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
6019 }
6020 }
6021
6022 static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
6023 bool enable)
6024 {
6025 u32 orig, data;
6026
6027 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
6028 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
6029 data = 0xfff;
6030 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
6031
6032 orig = data = RREG32(UVD_CGC_CTRL);
6033 data |= DCM;
6034 if (orig != data)
6035 WREG32(UVD_CGC_CTRL, data);
6036 } else {
6037 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
6038 data &= ~0xfff;
6039 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
6040
6041 orig = data = RREG32(UVD_CGC_CTRL);
6042 data &= ~DCM;
6043 if (orig != data)
6044 WREG32(UVD_CGC_CTRL, data);
6045 }
6046 }
6047
6048 static void cik_enable_bif_mgls(struct radeon_device *rdev,
6049 bool enable)
6050 {
6051 u32 orig, data;
6052
6053 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
6054
6055 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
6056 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
6057 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
6058 else
6059 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
6060 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
6061
6062 if (orig != data)
6063 WREG32_PCIE_PORT(PCIE_CNTL2, data);
6064 }
6065
6066 static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
6067 bool enable)
6068 {
6069 u32 orig, data;
6070
6071 orig = data = RREG32(HDP_HOST_PATH_CNTL);
6072
6073 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
6074 data &= ~CLOCK_GATING_DIS;
6075 else
6076 data |= CLOCK_GATING_DIS;
6077
6078 if (orig != data)
6079 WREG32(HDP_HOST_PATH_CNTL, data);
6080 }
6081
6082 static void cik_enable_hdp_ls(struct radeon_device *rdev,
6083 bool enable)
6084 {
6085 u32 orig, data;
6086
6087 orig = data = RREG32(HDP_MEM_POWER_LS);
6088
6089 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
6090 data |= HDP_LS_ENABLE;
6091 else
6092 data &= ~HDP_LS_ENABLE;
6093
6094 if (orig != data)
6095 WREG32(HDP_MEM_POWER_LS, data);
6096 }
6097
6098 void cik_update_cg(struct radeon_device *rdev,
6099 u32 block, bool enable)
6100 {
6102 if (block & RADEON_CG_BLOCK_GFX) {
6103 cik_enable_gui_idle_interrupt(rdev, false);
6104 /* order matters! */
6105 if (enable) {
6106 cik_enable_mgcg(rdev, true);
6107 cik_enable_cgcg(rdev, true);
6108 } else {
6109 cik_enable_cgcg(rdev, false);
6110 cik_enable_mgcg(rdev, false);
6111 }
6112 cik_enable_gui_idle_interrupt(rdev, true);
6113 }
6114
6115 if (block & RADEON_CG_BLOCK_MC) {
6116 if (!(rdev->flags & RADEON_IS_IGP)) {
6117 cik_enable_mc_mgcg(rdev, enable);
6118 cik_enable_mc_ls(rdev, enable);
6119 }
6120 }
6121
6122 if (block & RADEON_CG_BLOCK_SDMA) {
6123 cik_enable_sdma_mgcg(rdev, enable);
6124 cik_enable_sdma_mgls(rdev, enable);
6125 }
6126
6127 if (block & RADEON_CG_BLOCK_BIF) {
6128 cik_enable_bif_mgls(rdev, enable);
6129 }
6130
6131 if (block & RADEON_CG_BLOCK_UVD) {
6132 if (rdev->has_uvd)
6133 cik_enable_uvd_mgcg(rdev, enable);
6134 }
6135
6136 if (block & RADEON_CG_BLOCK_HDP) {
6137 cik_enable_hdp_mgcg(rdev, enable);
6138 cik_enable_hdp_ls(rdev, enable);
6139 }
6140
6141 if (block & RADEON_CG_BLOCK_VCE) {
6142 vce_v2_0_enable_mgcg(rdev, enable);
6143 }
6144 }
6145
6146 static void cik_init_cg(struct radeon_device *rdev)
6147 {
6149 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
6150
6151 if (rdev->has_uvd)
6152 si_init_uvd_internal_cg(rdev);
6153
6154 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
6155 RADEON_CG_BLOCK_SDMA |
6156 RADEON_CG_BLOCK_BIF |
6157 RADEON_CG_BLOCK_UVD |
6158 RADEON_CG_BLOCK_HDP), true);
6159 }
6160
6161 static void cik_fini_cg(struct radeon_device *rdev)
6162 {
6163 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
6164 RADEON_CG_BLOCK_SDMA |
6165 RADEON_CG_BLOCK_BIF |
6166 RADEON_CG_BLOCK_UVD |
6167 RADEON_CG_BLOCK_HDP), false);
6168
6169 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
6170 }
6171
6172 static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
6173 bool enable)
6174 {
6175 u32 data, orig;
6176
6177 orig = data = RREG32(RLC_PG_CNTL);
6178 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
6179 data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
6180 else
6181 data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
6182 if (orig != data)
6183 WREG32(RLC_PG_CNTL, data);
6184 }
6185
6186 static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
6187 bool enable)
6188 {
6189 u32 data, orig;
6190
6191 orig = data = RREG32(RLC_PG_CNTL);
6192 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
6193 data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
6194 else
6195 data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
6196 if (orig != data)
6197 WREG32(RLC_PG_CNTL, data);
6198 }
6199
6200 static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
6201 {
6202 u32 data, orig;
6203
6204 orig = data = RREG32(RLC_PG_CNTL);
6205 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
6206 data &= ~DISABLE_CP_PG;
6207 else
6208 data |= DISABLE_CP_PG;
6209 if (orig != data)
6210 WREG32(RLC_PG_CNTL, data);
6211 }
6212
6213 static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
6214 {
6215 u32 data, orig;
6216
6217 orig = data = RREG32(RLC_PG_CNTL);
6218 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
6219 data &= ~DISABLE_GDS_PG;
6220 else
6221 data |= DISABLE_GDS_PG;
6222 if (orig != data)
6223 WREG32(RLC_PG_CNTL, data);
6224 }
6225
6226 #define CP_ME_TABLE_SIZE 96
6227 #define CP_ME_TABLE_OFFSET 2048
6228 #define CP_MEC_TABLE_OFFSET 4096
6229
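/* Layout sketch for the PG table copy below: each microengine
 * contributes CP_ME_TABLE_SIZE (96) dwords back to back in the
 * cp_table buffer, i.e. 4 * 96 dwords (5 * 96 on Kaveri, which has a
 * second MEC).  The source data sits CP_ME_TABLE_OFFSET (or
 * CP_MEC_TABLE_OFFSET for the MECs) dwords into the big-endian
 * firmware images and is byte-swapped to little endian on the way in.
 */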
6230 void cik_init_cp_pg_table(struct radeon_device *rdev)
6231 {
6232 const __be32 *fw_data;
6233 volatile u32 *dst_ptr;
6234 int me, i, max_me = 4;
6235 u32 bo_offset = 0;
6236 u32 table_offset;
6237
6238 if (rdev->family == CHIP_KAVERI)
6239 max_me = 5;
6240
6241 if (rdev->rlc.cp_table_ptr == NULL)
6242 return;
6243
6244 /* write the cp table buffer */
6245 dst_ptr = rdev->rlc.cp_table_ptr;
6246 for (me = 0; me < max_me; me++) {
6247 if (me == 0) {
6248 fw_data = (const __be32 *)rdev->ce_fw->data;
6249 table_offset = CP_ME_TABLE_OFFSET;
6250 } else if (me == 1) {
6251 fw_data = (const __be32 *)rdev->pfp_fw->data;
6252 table_offset = CP_ME_TABLE_OFFSET;
6253 } else if (me == 2) {
6254 fw_data = (const __be32 *)rdev->me_fw->data;
6255 table_offset = CP_ME_TABLE_OFFSET;
6256 } else {
6257 fw_data = (const __be32 *)rdev->mec_fw->data;
6258 table_offset = CP_MEC_TABLE_OFFSET;
6259 }
6260
6261 for (i = 0; i < CP_ME_TABLE_SIZE; i++) {
6262 dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
6263 }
6264 bo_offset += CP_ME_TABLE_SIZE;
6265 }
6266 }
6267
6268 static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
6269 bool enable)
6270 {
6271 u32 data, orig;
6272
6273 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
6274 orig = data = RREG32(RLC_PG_CNTL);
6275 data |= GFX_PG_ENABLE;
6276 if (orig != data)
6277 WREG32(RLC_PG_CNTL, data);
6278
6279 orig = data = RREG32(RLC_AUTO_PG_CTRL);
6280 data |= AUTO_PG_EN;
6281 if (orig != data)
6282 WREG32(RLC_AUTO_PG_CTRL, data);
6283 } else {
6284 orig = data = RREG32(RLC_PG_CNTL);
6285 data &= ~GFX_PG_ENABLE;
6286 if (orig != data)
6287 WREG32(RLC_PG_CNTL, data);
6288
6289 orig = data = RREG32(RLC_AUTO_PG_CTRL);
6290 data &= ~AUTO_PG_EN;
6291 if (orig != data)
6292 WREG32(RLC_AUTO_PG_CTRL, data);
6293
6294 data = RREG32(DB_RENDER_CONTROL);
6295 }
6296 }
6297
6298 static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
6299 {
6300 u32 mask = 0, tmp, tmp1;
6301 int i;
6302
6303 cik_select_se_sh(rdev, se, sh);
6304 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
6305 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
6306 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6307
6308 tmp &= 0xffff0000;
6309
6310 tmp |= tmp1;
6311 tmp >>= 16;
6312
6313 for (i = 0; i < rdev->config.cik.max_cu_per_sh; i++) {
6314 mask <<= 1;
6315 mask |= 1;
6316 }
6317
6318 return (~tmp) & mask;
6319 }
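/* Worked example (hypothetical fuse values): with max_cu_per_sh = 8
 * the mask built above is 0xff; if the combined harvest bits in tmp
 * come out as 0x03 (CUs 0 and 1 disabled), the function returns
 * (~0x03) & 0xff = 0xfc, i.e. CUs 2-7 active.
 */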
6320
6321 static void cik_init_ao_cu_mask(struct radeon_device *rdev)
6322 {
6323 u32 i, j, k, active_cu_number = 0;
6324 u32 mask, counter, cu_bitmap;
6325 u32 tmp = 0;
6326
6327 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
6328 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
6329 mask = 1;
6330 cu_bitmap = 0;
6331 counter = 0;
6332 for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
6333 if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
6334 if (counter < 2)
6335 cu_bitmap |= mask;
6336 counter++;
6337 }
6338 mask <<= 1;
6339 }
6340
6341 active_cu_number += counter;
6342 tmp |= (cu_bitmap << (i * 16 + j * 8));
6343 }
6344 }
6345
6346 WREG32(RLC_PG_AO_CU_MASK, tmp);
6347
6348 tmp = RREG32(RLC_MAX_PG_CU);
6349 tmp &= ~MAX_PU_CU_MASK;
6350 tmp |= MAX_PU_CU(active_cu_number);
6351 WREG32(RLC_MAX_PG_CU, tmp);
6352 }
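/* Note on the packing above: each SH gets an 8-bit field in
 * RLC_PG_AO_CU_MASK at bit (se * 16 + sh * 8), e.g. SE0/SH0 in bits
 * 7:0, SE0/SH1 in bits 15:8 and SE1/SH0 in bits 23:16; the "always
 * on" set is capped at two CUs per SH by the counter check.
 */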
6353
6354 static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
6355 bool enable)
6356 {
6357 u32 data, orig;
6358
6359 orig = data = RREG32(RLC_PG_CNTL);
6360 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
6361 data |= STATIC_PER_CU_PG_ENABLE;
6362 else
6363 data &= ~STATIC_PER_CU_PG_ENABLE;
6364 if (orig != data)
6365 WREG32(RLC_PG_CNTL, data);
6366 }
6367
6368 static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
6369 bool enable)
6370 {
6371 u32 data, orig;
6372
6373 orig = data = RREG32(RLC_PG_CNTL);
6374 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
6375 data |= DYN_PER_CU_PG_ENABLE;
6376 else
6377 data &= ~DYN_PER_CU_PG_ENABLE;
6378 if (orig != data)
6379 WREG32(RLC_PG_CNTL, data);
6380 }
6381
6382 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
6383 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
6384
6385 static void cik_init_gfx_cgpg(struct radeon_device *rdev)
6386 {
6387 u32 data, orig;
6388 u32 i;
6389
6390 if (rdev->rlc.cs_data) {
6391 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
6392 WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
6393 WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
6394 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
6395 } else {
6396 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
6397 for (i = 0; i < 3; i++)
6398 WREG32(RLC_GPM_SCRATCH_DATA, 0);
6399 }
6400 if (rdev->rlc.reg_list) {
6401 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
6402 for (i = 0; i < rdev->rlc.reg_list_size; i++)
6403 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
6404 }
6405
6406 orig = data = RREG32(RLC_PG_CNTL);
6407 data |= GFX_PG_SRC;
6408 if (orig != data)
6409 WREG32(RLC_PG_CNTL, data);
6410
6411 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
6412 WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
6413
6414 data = RREG32(CP_RB_WPTR_POLL_CNTL);
6415 data &= ~IDLE_POLL_COUNT_MASK;
6416 data |= IDLE_POLL_COUNT(0x60);
6417 WREG32(CP_RB_WPTR_POLL_CNTL, data);
6418
6419 data = 0x10101010;
6420 WREG32(RLC_PG_DELAY, data);
6421
6422 data = RREG32(RLC_PG_DELAY_2);
6423 data &= ~0xff;
6424 data |= 0x3;
6425 WREG32(RLC_PG_DELAY_2, data);
6426
6427 data = RREG32(RLC_AUTO_PG_CTRL);
6428 data &= ~GRBM_REG_SGIT_MASK;
6429 data |= GRBM_REG_SGIT(0x700);
6430 WREG32(RLC_AUTO_PG_CTRL, data);
6431
6433 }
6434 static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
6435 {
6436 cik_enable_gfx_cgpg(rdev, enable);
6437 cik_enable_gfx_static_mgpg(rdev, enable);
6438 cik_enable_gfx_dynamic_mgpg(rdev, enable);
6439 }
6440
6441 u32 cik_get_csb_size(struct radeon_device *rdev)
6442 {
6443 u32 count = 0;
6444 const struct cs_section_def *sect = NULL;
6445 const struct cs_extent_def *ext = NULL;
6446
6447 if (rdev->rlc.cs_data == NULL)
6448 return 0;
6449
6450 /* begin clear state */
6451 count += 2;
6452 /* context control state */
6453 count += 3;
6454
6455 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
6456 for (ext = sect->section; ext->extent != NULL; ++ext) {
6457 if (sect->id == SECT_CONTEXT)
6458 count += 2 + ext->reg_count;
6459 else
6460 return 0;
6461 }
6462 }
6463 /* pa_sc_raster_config/pa_sc_raster_config1 */
6464 count += 4;
6465 /* end clear state */
6466 count += 2;
6467 /* clear state */
6468 count += 2;
6469
6470 return count;
6471 }
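/* Sizing example (hypothetical cs_data): one SECT_CONTEXT section
 * with a single extent of 10 registers gives
 * 2 + 3 + (2 + 10) + 4 + 2 + 2 = 25 dwords for the clear state buffer.
 */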
6472
6473 void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
6474 {
6475 u32 count = 0, i;
6476 const struct cs_section_def *sect = NULL;
6477 const struct cs_extent_def *ext = NULL;
6478
6479 if (rdev->rlc.cs_data == NULL)
6480 return;
6481 if (buffer == NULL)
6482 return;
6483
6484 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
6485 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
6486
6487 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6488 buffer[count++] = cpu_to_le32(0x80000000);
6489 buffer[count++] = cpu_to_le32(0x80000000);
6490
6491 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
6492 for (ext = sect->section; ext->extent != NULL; ++ext) {
6493 if (sect->id == SECT_CONTEXT) {
6494 buffer[count++] =
6495 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
6496 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
6497 for (i = 0; i < ext->reg_count; i++)
6498 buffer[count++] = cpu_to_le32(ext->extent[i]);
6499 } else {
6500 return;
6501 }
6502 }
6503 }
6504
6505 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
6506 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
6507 switch (rdev->family) {
6508 case CHIP_BONAIRE:
6509 buffer[count++] = cpu_to_le32(0x16000012);
6510 buffer[count++] = cpu_to_le32(0x00000000);
6511 break;
6512 case CHIP_KAVERI:
6513 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
6514 buffer[count++] = cpu_to_le32(0x00000000);
6515 break;
6516 case CHIP_KABINI:
6517 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
6518 buffer[count++] = cpu_to_le32(0x00000000);
6519 break;
6520 case CHIP_HAWAII:
6521 buffer[count++] = 0x3a00161a;
6522 buffer[count++] = 0x0000002e;
6523 break;
6524 default:
6525 buffer[count++] = cpu_to_le32(0x00000000);
6526 buffer[count++] = cpu_to_le32(0x00000000);
6527 break;
6528 }
6529
6530 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
6531 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
6532
6533 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
6534 buffer[count++] = cpu_to_le32(0);
6535 }
6536
6537 static void cik_init_pg(struct radeon_device *rdev)
6538 {
6539 if (rdev->pg_flags) {
6540 cik_enable_sck_slowdown_on_pu(rdev, true);
6541 cik_enable_sck_slowdown_on_pd(rdev, true);
6542 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
6543 cik_init_gfx_cgpg(rdev);
6544 cik_enable_cp_pg(rdev, true);
6545 cik_enable_gds_pg(rdev, true);
6546 }
6547 cik_init_ao_cu_mask(rdev);
6548 cik_update_gfx_pg(rdev, true);
6549 }
6550 }
6551
6552 static void cik_fini_pg(struct radeon_device *rdev)
6553 {
6554 if (rdev->pg_flags) {
6555 cik_update_gfx_pg(rdev, false);
6556 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
6557 cik_enable_cp_pg(rdev, false);
6558 cik_enable_gds_pg(rdev, false);
6559 }
6560 }
6561 }
6562
6563 /*
6564 * Interrupts
6565 * Starting with r6xx, interrupts are handled via a ring buffer.
6566 * Ring buffers are areas of GPU accessible memory that the GPU
6567 * writes interrupt vectors into and the host reads vectors out of.
6568 * There is a rptr (read pointer) that determines where the
6569 * host is currently reading, and a wptr (write pointer)
6570 * which determines where the GPU has written. When the
6571 * pointers are equal, the ring is idle. When the GPU
6572 * writes vectors to the ring buffer, it increments the
6573 * wptr. When there is an interrupt, the host then starts
6574 * fetching commands and processing them until the pointers are
6575 * equal again at which point it updates the rptr.
6576 */
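/* Worked example (hypothetical pointer values): with 16-byte IV ring
 * entries, wptr = 0x40 and rptr = 0x20 mean two vectors are pending;
 * the host parses ring[0x20/4] and ring[0x30/4], then writes
 * IH_RB_RPTR = 0x40 and the ring is idle again.
 */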
6577
6578 /**
6579 * cik_enable_interrupts - Enable the interrupt ring buffer
6580 *
6581 * @rdev: radeon_device pointer
6582 *
6583 * Enable the interrupt ring buffer (CIK).
6584 */
6585 static void cik_enable_interrupts(struct radeon_device *rdev)
6586 {
6587 u32 ih_cntl = RREG32(IH_CNTL);
6588 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
6589
6590 ih_cntl |= ENABLE_INTR;
6591 ih_rb_cntl |= IH_RB_ENABLE;
6592 WREG32(IH_CNTL, ih_cntl);
6593 WREG32(IH_RB_CNTL, ih_rb_cntl);
6594 rdev->ih.enabled = true;
6595 }
6596
6597 /**
6598 * cik_disable_interrupts - Disable the interrupt ring buffer
6599 *
6600 * @rdev: radeon_device pointer
6601 *
6602 * Disable the interrupt ring buffer (CIK).
6603 */
6604 static void cik_disable_interrupts(struct radeon_device *rdev)
6605 {
6606 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
6607 u32 ih_cntl = RREG32(IH_CNTL);
6608
6609 ih_rb_cntl &= ~IH_RB_ENABLE;
6610 ih_cntl &= ~ENABLE_INTR;
6611 WREG32(IH_RB_CNTL, ih_rb_cntl);
6612 WREG32(IH_CNTL, ih_cntl);
6613 /* set rptr, wptr to 0 */
6614 WREG32(IH_RB_RPTR, 0);
6615 WREG32(IH_RB_WPTR, 0);
6616 rdev->ih.enabled = false;
6617 rdev->ih.rptr = 0;
6618 }
6619
6620 /**
6621 * cik_disable_interrupt_state - Disable all interrupt sources
6622 *
6623 * @rdev: radeon_device pointer
6624 *
6625 * Clear all interrupt enable bits used by the driver (CIK).
6626 */
6627 static void cik_disable_interrupt_state(struct radeon_device *rdev)
6628 {
6629 u32 tmp;
6630
6631 /* gfx ring */
6632 tmp = RREG32(CP_INT_CNTL_RING0) &
6633 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6634 WREG32(CP_INT_CNTL_RING0, tmp);
6635 /* sdma */
6636 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6637 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
6638 tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6639 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
6640 /* compute queues */
6641 WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
6642 WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
6643 WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
6644 WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
6645 WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
6646 WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
6647 WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
6648 WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
6649 /* grbm */
6650 WREG32(GRBM_INT_CNTL, 0);
6651 /* vline/vblank, etc. */
6652 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
6653 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
6654 if (rdev->num_crtc >= 4) {
6655 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
6656 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
6657 }
6658 if (rdev->num_crtc >= 6) {
6659 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
6660 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
6661 }
6662
6663 /* dac hotplug */
6664 WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
6665
6666 /* digital hotplug */
6667 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6668 WREG32(DC_HPD1_INT_CONTROL, tmp);
6669 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6670 WREG32(DC_HPD2_INT_CONTROL, tmp);
6671 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6672 WREG32(DC_HPD3_INT_CONTROL, tmp);
6673 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6674 WREG32(DC_HPD4_INT_CONTROL, tmp);
6675 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6676 WREG32(DC_HPD5_INT_CONTROL, tmp);
6677 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6678 WREG32(DC_HPD6_INT_CONTROL, tmp);
6680 }
6681
6682 /**
6683 * cik_irq_init - init and enable the interrupt ring
6684 *
6685 * @rdev: radeon_device pointer
6686 *
6687 * Allocate a ring buffer for the interrupt controller,
6688 * enable the RLC, disable interrupts, enable the IH
6689 * ring buffer and enable it (CIK).
6690 * Called at device load and resume.
6691 * Returns 0 for success, errors for failure.
6692 */
6693 static int cik_irq_init(struct radeon_device *rdev)
6694 {
6695 int ret = 0;
6696 int rb_bufsz;
6697 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
6698
6699 /* allocate ring */
6700 ret = r600_ih_ring_alloc(rdev);
6701 if (ret)
6702 return ret;
6703
6704 /* disable irqs */
6705 cik_disable_interrupts(rdev);
6706
6707 /* init rlc */
6708 ret = cik_rlc_resume(rdev);
6709 if (ret) {
6710 r600_ih_ring_fini(rdev);
6711 return ret;
6712 }
6713
6714 /* setup interrupt control */
6715 /* XXX this should actually be a bus address, not an MC address. same on older asics */
6716 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
6717 interrupt_cntl = RREG32(INTERRUPT_CNTL);
6718 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
6719 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
6720 */
6721 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
6722 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
6723 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
6724 WREG32(INTERRUPT_CNTL, interrupt_cntl);
6725
6726 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
6727 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
6728
6729 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
6730 IH_WPTR_OVERFLOW_CLEAR |
6731 (rb_bufsz << 1));
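/* Sizing example (hypothetical ring): a 64 KiB IH ring holds 16384
 * dwords, so rb_bufsz = order_base_2(16384) = 14, and the shift above
 * places that log2 size in the IH_RB_CNTL ring size field.
 */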
6732
6733 if (rdev->wb.enabled)
6734 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
6735
6736 /* set the writeback address whether it's enabled or not */
6737 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6738 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6739
6740 WREG32(IH_RB_CNTL, ih_rb_cntl);
6741
6742 /* set rptr, wptr to 0 */
6743 WREG32(IH_RB_RPTR, 0);
6744 WREG32(IH_RB_WPTR, 0);
6745
6746 /* Default settings for IH_CNTL (disabled at first) */
6747 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
6748 /* RPTR_REARM only works if msi's are enabled */
6749 if (rdev->msi_enabled)
6750 ih_cntl |= RPTR_REARM;
6751 WREG32(IH_CNTL, ih_cntl);
6752
6753 /* force the active interrupt state to all disabled */
6754 cik_disable_interrupt_state(rdev);
6755
6756 pci_set_master(rdev->pdev);
6757
6758 /* enable irqs */
6759 cik_enable_interrupts(rdev);
6760
6761 return ret;
6762 }
6763
6764 /**
6765 * cik_irq_set - enable/disable interrupt sources
6766 *
6767 * @rdev: radeon_device pointer
6768 *
6769 * Enable interrupt sources on the GPU (vblanks, hpd,
6770 * etc.) (CIK).
6771 * Returns 0 for success, errors for failure.
6772 */
6773 int cik_irq_set(struct radeon_device *rdev)
6774 {
6775 u32 cp_int_cntl;
6776 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
6777 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
6778 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
6779 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
6780 u32 grbm_int_cntl = 0;
6781 u32 dma_cntl, dma_cntl1;
6782 u32 thermal_int;
6783
6784 if (!rdev->irq.installed) {
6785 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
6786 return -EINVAL;
6787 }
6788 /* don't enable anything if the ih is disabled */
6789 if (!rdev->ih.enabled) {
6790 cik_disable_interrupts(rdev);
6791 /* force the active interrupt state to all disabled */
6792 cik_disable_interrupt_state(rdev);
6793 return 0;
6794 }
6795
6796 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6797 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6798 cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
6799
6800 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
6801 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
6802 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
6803 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
6804 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
6805 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
6806
6807 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6808 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6809
6810 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6811 cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6812 cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6813 cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6814 cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6815 cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6816 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6817 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6818
6819 if (rdev->flags & RADEON_IS_IGP)
6820 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
6821 ~(THERM_INTH_MASK | THERM_INTL_MASK);
6822 else
6823 thermal_int = RREG32_SMC(CG_THERMAL_INT) &
6824 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6825
6826 /* enable CP interrupts on all rings */
6827 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
6828 DRM_DEBUG("cik_irq_set: sw int gfx\n");
6829 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
6830 }
6831 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
6832 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6833 DRM_DEBUG("si_irq_set: sw int cp1\n");
6834 if (ring->me == 1) {
6835 switch (ring->pipe) {
6836 case 0:
6837 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
6838 break;
6839 case 1:
6840 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
6841 break;
6842 case 2:
6843 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
6844 break;
6845 case 3:
6846 cp_m1p3 |= TIME_STAMP_INT_ENABLE;
6847 break;
6848 default:
6849 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
6850 break;
6851 }
6852 } else if (ring->me == 2) {
6853 switch (ring->pipe) {
6854 case 0:
6855 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
6856 break;
6857 case 1:
6858 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
6859 break;
6860 case 2:
6861 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
6862 break;
6863 case 3:
6864 cp_m2p3 |= TIME_STAMP_INT_ENABLE;
6865 break;
6866 default:
6867 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
6868 break;
6869 }
6870 } else {
6871 DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me);
6872 }
6873 }
6874 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
6875 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6876 DRM_DEBUG("si_irq_set: sw int cp2\n");
6877 if (ring->me == 1) {
6878 switch (ring->pipe) {
6879 case 0:
6880 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
6881 break;
6882 case 1:
6883 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
6884 break;
6885 case 2:
6886 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
6887 break;
6888 case 3:
6889 cp_m1p3 |= TIME_STAMP_INT_ENABLE;
6890 break;
6891 default:
6892 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
6893 break;
6894 }
6895 } else if (ring->me == 2) {
6896 switch (ring->pipe) {
6897 case 0:
6898 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
6899 break;
6900 case 1:
6901 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
6902 break;
6903 case 2:
6904 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
6905 break;
6906 case 3:
6907 cp_m2p3 |= TIME_STAMP_INT_ENABLE;
6908 break;
6909 default:
6910 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
6911 break;
6912 }
6913 } else {
6914 DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me);
6915 }
6916 }
6917
6918 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6919 DRM_DEBUG("cik_irq_set: sw int dma\n");
6920 dma_cntl |= TRAP_ENABLE;
6921 }
6922
6923 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6924 DRM_DEBUG("cik_irq_set: sw int dma1\n");
6925 dma_cntl1 |= TRAP_ENABLE;
6926 }
6927
6928 if (rdev->irq.crtc_vblank_int[0] ||
6929 atomic_read(&rdev->irq.pflip[0])) {
6930 DRM_DEBUG("cik_irq_set: vblank 0\n");
6931 crtc1 |= VBLANK_INTERRUPT_MASK;
6932 }
6933 if (rdev->irq.crtc_vblank_int[1] ||
6934 atomic_read(&rdev->irq.pflip[1])) {
6935 DRM_DEBUG("cik_irq_set: vblank 1\n");
6936 crtc2 |= VBLANK_INTERRUPT_MASK;
6937 }
6938 if (rdev->irq.crtc_vblank_int[2] ||
6939 atomic_read(&rdev->irq.pflip[2])) {
6940 DRM_DEBUG("cik_irq_set: vblank 2\n");
6941 crtc3 |= VBLANK_INTERRUPT_MASK;
6942 }
6943 if (rdev->irq.crtc_vblank_int[3] ||
6944 atomic_read(&rdev->irq.pflip[3])) {
6945 DRM_DEBUG("cik_irq_set: vblank 3\n");
6946 crtc4 |= VBLANK_INTERRUPT_MASK;
6947 }
6948 if (rdev->irq.crtc_vblank_int[4] ||
6949 atomic_read(&rdev->irq.pflip[4])) {
6950 DRM_DEBUG("cik_irq_set: vblank 4\n");
6951 crtc5 |= VBLANK_INTERRUPT_MASK;
6952 }
6953 if (rdev->irq.crtc_vblank_int[5] ||
6954 atomic_read(&rdev->irq.pflip[5])) {
6955 DRM_DEBUG("cik_irq_set: vblank 5\n");
6956 crtc6 |= VBLANK_INTERRUPT_MASK;
6957 }
6958 if (rdev->irq.hpd[0]) {
6959 DRM_DEBUG("cik_irq_set: hpd 1\n");
6960 hpd1 |= DC_HPDx_INT_EN;
6961 }
6962 if (rdev->irq.hpd[1]) {
6963 DRM_DEBUG("cik_irq_set: hpd 2\n");
6964 hpd2 |= DC_HPDx_INT_EN;
6965 }
6966 if (rdev->irq.hpd[2]) {
6967 DRM_DEBUG("cik_irq_set: hpd 3\n");
6968 hpd3 |= DC_HPDx_INT_EN;
6969 }
6970 if (rdev->irq.hpd[3]) {
6971 DRM_DEBUG("cik_irq_set: hpd 4\n");
6972 hpd4 |= DC_HPDx_INT_EN;
6973 }
6974 if (rdev->irq.hpd[4]) {
6975 DRM_DEBUG("cik_irq_set: hpd 5\n");
6976 hpd5 |= DC_HPDx_INT_EN;
6977 }
6978 if (rdev->irq.hpd[5]) {
6979 DRM_DEBUG("cik_irq_set: hpd 6\n");
6980 hpd6 |= DC_HPDx_INT_EN;
6981 }
6982
6983 if (rdev->irq.dpm_thermal) {
6984 DRM_DEBUG("dpm thermal\n");
6985 if (rdev->flags & RADEON_IS_IGP)
6986 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
6987 else
6988 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6989 }
6990
6991 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
6992
6993 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
6994 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
6995
6996 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
6997 WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
6998 WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
6999 WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
7000 WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
7001 WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
7002 WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
7003 WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
7004
7005 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
7006
7007 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
7008 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
7009 if (rdev->num_crtc >= 4) {
7010 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
7011 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
7012 }
7013 if (rdev->num_crtc >= 6) {
7014 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
7015 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
7016 }
7017
7018 WREG32(DC_HPD1_INT_CONTROL, hpd1);
7019 WREG32(DC_HPD2_INT_CONTROL, hpd2);
7020 WREG32(DC_HPD3_INT_CONTROL, hpd3);
7021 WREG32(DC_HPD4_INT_CONTROL, hpd4);
7022 WREG32(DC_HPD5_INT_CONTROL, hpd5);
7023 WREG32(DC_HPD6_INT_CONTROL, hpd6);
7024
7025 if (rdev->flags & RADEON_IS_IGP)
7026 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
7027 else
7028 WREG32_SMC(CG_THERMAL_INT, thermal_int);
7029
7030 return 0;
7031 }
7032
7033 /**
7034 * cik_irq_ack - ack interrupt sources
7035 *
7036 * @rdev: radeon_device pointer
7037 *
7038 * Ack interrupt sources on the GPU (vblanks, hpd,
7039 * etc.) (CIK). Certain interrupt sources are sw
7040 * generated and do not require an explicit ack.
7041 */
7042 static inline void cik_irq_ack(struct radeon_device *rdev)
7043 {
7044 u32 tmp;
7045
7046 rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
7047 rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
7048 rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
7049 rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
7050 rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
7051 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
7052 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
7053
7054 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
7055 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
7056 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
7057 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
7058 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
7059 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
7060 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
7061 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
7062
7063 if (rdev->num_crtc >= 4) {
7064 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
7065 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
7066 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
7067 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
7068 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
7069 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
7070 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
7071 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
7072 }
7073
7074 if (rdev->num_crtc >= 6) {
7075 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
7076 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
7077 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
7078 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
7079 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
7080 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
7081 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
7082 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
7083 }
7084
7085 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
7086 tmp = RREG32(DC_HPD1_INT_CONTROL);
7087 tmp |= DC_HPDx_INT_ACK;
7088 WREG32(DC_HPD1_INT_CONTROL, tmp);
7089 }
7090 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
7091 tmp = RREG32(DC_HPD2_INT_CONTROL);
7092 tmp |= DC_HPDx_INT_ACK;
7093 WREG32(DC_HPD2_INT_CONTROL, tmp);
7094 }
7095 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
7096 tmp = RREG32(DC_HPD3_INT_CONTROL);
7097 tmp |= DC_HPDx_INT_ACK;
7098 WREG32(DC_HPD3_INT_CONTROL, tmp);
7099 }
7100 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
7101 tmp = RREG32(DC_HPD4_INT_CONTROL);
7102 tmp |= DC_HPDx_INT_ACK;
7103 WREG32(DC_HPD4_INT_CONTROL, tmp);
7104 }
7105 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
7106 tmp = RREG32(DC_HPD5_INT_CONTROL);
7107 tmp |= DC_HPDx_INT_ACK;
7108 WREG32(DC_HPD5_INT_CONTROL, tmp);
7109 }
7110 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
7111 tmp = RREG32(DC_HPD6_INT_CONTROL);
7112 tmp |= DC_HPDx_INT_ACK;
7113 WREG32(DC_HPD6_INT_CONTROL, tmp);
7114 }
7115 }
7116
7117 /**
7118 * cik_irq_disable - disable interrupts
7119 *
7120 * @rdev: radeon_device pointer
7121 *
7122 * Disable interrupts on the hw (CIK).
7123 */
7124 static void cik_irq_disable(struct radeon_device *rdev)
7125 {
7126 cik_disable_interrupts(rdev);
7127 /* Wait and acknowledge irq */
7128 mdelay(1);
7129 cik_irq_ack(rdev);
7130 cik_disable_interrupt_state(rdev);
7131 }
7132
7133 /**
7134 * cik_irq_suspend - disable interrupts for suspend
7135 *
7136 * @rdev: radeon_device pointer
7137 *
7138 * Disable interrupts and stop the RLC (CIK).
7139 * Used for suspend.
7140 */
7141 static void cik_irq_suspend(struct radeon_device *rdev)
7142 {
7143 cik_irq_disable(rdev);
7144 cik_rlc_stop(rdev);
7145 }
7146
7147 /**
7148 * cik_irq_fini - tear down interrupt support
7149 *
7150 * @rdev: radeon_device pointer
7151 *
7152 * Disable interrupts on the hw and free the IH ring
7153 * buffer (CIK).
7154 * Used for driver unload.
7155 */
7156 static void cik_irq_fini(struct radeon_device *rdev)
7157 {
7158 cik_irq_suspend(rdev);
7159 r600_ih_ring_fini(rdev);
7160 }
7161
7162 /**
7163 * cik_get_ih_wptr - get the IH ring buffer wptr
7164 *
7165 * @rdev: radeon_device pointer
7166 *
7167 * Get the IH ring buffer wptr from either the register
7168 * or the writeback memory buffer (CIK). Also check for
7169 * ring buffer overflow and deal with it.
7170 * Used by cik_irq_process().
7171 * Returns the value of the wptr.
7172 */
7173 static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
7174 {
7175 u32 wptr, tmp;
7176
7177 if (rdev->wb.enabled)
7178 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
7179 else
7180 wptr = RREG32(IH_RB_WPTR);
7181
7182 if (wptr & RB_OVERFLOW) {
7183 /* When a ring buffer overflow happens, start parsing interrupts
7184 * from the last not-overwritten vector (wptr + 16). Hopefully
7185 * this should allow us to catch up.
7186 */
7187 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
7188 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
7189 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
7190 tmp = RREG32(IH_RB_CNTL);
7191 tmp |= IH_WPTR_OVERFLOW_CLEAR;
7192 WREG32(IH_RB_CNTL, tmp);
7193 }
7194 return (wptr & rdev->ih.ptr_mask);
7195 }
7196
7197 /* CIK IV Ring
7198 * Each IV ring entry is 128 bits:
7199 * [7:0] - interrupt source id
7200 * [31:8] - reserved
7201 * [59:32] - interrupt source data
7202 * [63:60] - reserved
7203 * [71:64] - RINGID
7204 * CP:
7205 * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
7206 * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
7207 * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
7208 * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
7209 * PIPE_ID - ME0 0=3D
7210 * - ME1&2 compute dispatcher (4 pipes each)
7211 * SDMA:
7212 * INSTANCE_ID [1:0], QUEUE_ID[1:0]
7213 * INSTANCE_ID - 0 = sdma0, 1 = sdma1
7214 * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
7215 * [79:72] - VMID
7216 * [95:80] - PASID
7217 * [127:96] - reserved
7218 */
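/* Illustrative decode of one entry, matching the extraction done in
 * cik_irq_process() below (sketch only; idx is the dword index of the
 * entry in the IH ring):
 *
 *   src_id   = le32_to_cpu(ring[idx + 0]) & 0xff;
 *   src_data = le32_to_cpu(ring[idx + 1]) & 0xfffffff;
 *   ring_id  = le32_to_cpu(ring[idx + 2]) & 0xff;
 *   vmid     = (le32_to_cpu(ring[idx + 2]) >> 8) & 0xff;   (bits 79:72)
 *   pasid    = le32_to_cpu(ring[idx + 2]) >> 16;           (bits 95:80)
 */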
7219 /**
7220 * cik_irq_process - interrupt handler
7221 *
7222 * @rdev: radeon_device pointer
7223 *
7224 * Interrupt handler (CIK). Walk the IH ring,
7225 * ack interrupts and schedule work to handle
7226 * interrupt events.
7227 * Returns irq process return code.
7228 */
7229 int cik_irq_process(struct radeon_device *rdev)
7230 {
7231 struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7232 struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7233 u32 wptr;
7234 u32 rptr;
7235 u32 src_id, src_data, ring_id;
7236 u8 me_id, pipe_id, queue_id;
7237 u32 ring_index;
7238 bool queue_hotplug = false;
7239 bool queue_reset = false;
7240 u32 addr, status, mc_client;
7241 bool queue_thermal = false;
7242
7243 if (!rdev->ih.enabled || rdev->shutdown)
7244 return IRQ_NONE;
7245
7246 wptr = cik_get_ih_wptr(rdev);
7247
7248 restart_ih:
7249 /* is somebody else already processing irqs? */
7250 if (atomic_xchg(&rdev->ih.lock, 1))
7251 return IRQ_NONE;
7252
7253 rptr = rdev->ih.rptr;
7254 DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
7255
7256 /* Order reading of wptr vs. reading of IH ring data */
7257 rmb();
7258
7259 /* display interrupts */
7260 cik_irq_ack(rdev);
7261
7262 while (rptr != wptr) {
7263 /* wptr/rptr are in bytes! */
7264 ring_index = rptr / 4;
7265 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
7266 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
7267 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
7268
7269 switch (src_id) {
7270 case 1: /* D1 vblank/vline */
7271 switch (src_data) {
7272 case 0: /* D1 vblank */
7273 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
7274 if (rdev->irq.crtc_vblank_int[0]) {
7275 drm_handle_vblank(rdev->ddev, 0);
7276 rdev->pm.vblank_sync = true;
7277 wake_up(&rdev->irq.vblank_queue);
7278 }
7279 if (atomic_read(&rdev->irq.pflip[0]))
7280 radeon_crtc_handle_flip(rdev, 0);
7281 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7282 DRM_DEBUG("IH: D1 vblank\n");
7283 }
7284 break;
7285 case 1: /* D1 vline */
7286 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
7287 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
7288 DRM_DEBUG("IH: D1 vline\n");
7289 }
7290 break;
7291 default:
7292 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7293 break;
7294 }
7295 break;
7296 case 2: /* D2 vblank/vline */
7297 switch (src_data) {
7298 case 0: /* D2 vblank */
7299 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
7300 if (rdev->irq.crtc_vblank_int[1]) {
7301 drm_handle_vblank(rdev->ddev, 1);
7302 rdev->pm.vblank_sync = true;
7303 wake_up(&rdev->irq.vblank_queue);
7304 }
7305 if (atomic_read(&rdev->irq.pflip[1]))
7306 radeon_crtc_handle_flip(rdev, 1);
7307 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
7308 DRM_DEBUG("IH: D2 vblank\n");
7309 }
7310 break;
7311 case 1: /* D2 vline */
7312 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
7313 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
7314 DRM_DEBUG("IH: D2 vline\n");
7315 }
7316 break;
7317 default:
7318 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7319 break;
7320 }
7321 break;
7322 case 3: /* D3 vblank/vline */
7323 switch (src_data) {
7324 case 0: /* D3 vblank */
7325 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
7326 if (rdev->irq.crtc_vblank_int[2]) {
7327 drm_handle_vblank(rdev->ddev, 2);
7328 rdev->pm.vblank_sync = true;
7329 wake_up(&rdev->irq.vblank_queue);
7330 }
7331 if (atomic_read(&rdev->irq.pflip[2]))
7332 radeon_crtc_handle_flip(rdev, 2);
7333 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
7334 DRM_DEBUG("IH: D3 vblank\n");
7335 }
7336 break;
7337 case 1: /* D3 vline */
7338 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
7339 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
7340 DRM_DEBUG("IH: D3 vline\n");
7341 }
7342 break;
7343 default:
7344 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7345 break;
7346 }
7347 break;
7348 case 4: /* D4 vblank/vline */
7349 switch (src_data) {
7350 case 0: /* D4 vblank */
7351 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
7352 if (rdev->irq.crtc_vblank_int[3]) {
7353 drm_handle_vblank(rdev->ddev, 3);
7354 rdev->pm.vblank_sync = true;
7355 wake_up(&rdev->irq.vblank_queue);
7356 }
7357 if (atomic_read(&rdev->irq.pflip[3]))
7358 radeon_crtc_handle_flip(rdev, 3);
7359 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
7360 DRM_DEBUG("IH: D4 vblank\n");
7361 }
7362 break;
7363 case 1: /* D4 vline */
7364 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
7365 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
7366 DRM_DEBUG("IH: D4 vline\n");
7367 }
7368 break;
7369 default:
7370 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7371 break;
7372 }
7373 break;
7374 case 5: /* D5 vblank/vline */
7375 switch (src_data) {
7376 case 0: /* D5 vblank */
7377 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
7378 if (rdev->irq.crtc_vblank_int[4]) {
7379 drm_handle_vblank(rdev->ddev, 4);
7380 rdev->pm.vblank_sync = true;
7381 wake_up(&rdev->irq.vblank_queue);
7382 }
7383 if (atomic_read(&rdev->irq.pflip[4]))
7384 radeon_crtc_handle_flip(rdev, 4);
7385 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
7386 DRM_DEBUG("IH: D5 vblank\n");
7387 }
7388 break;
7389 case 1: /* D5 vline */
7390 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
7391 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
7392 DRM_DEBUG("IH: D5 vline\n");
7393 }
7394 break;
7395 default:
7396 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7397 break;
7398 }
7399 break;
7400 case 6: /* D6 vblank/vline */
7401 switch (src_data) {
7402 case 0: /* D6 vblank */
7403 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
7404 if (rdev->irq.crtc_vblank_int[5]) {
7405 drm_handle_vblank(rdev->ddev, 5);
7406 rdev->pm.vblank_sync = true;
7407 wake_up(&rdev->irq.vblank_queue);
7408 }
7409 if (atomic_read(&rdev->irq.pflip[5]))
7410 radeon_crtc_handle_flip(rdev, 5);
7411 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
7412 DRM_DEBUG("IH: D6 vblank\n");
7413 }
7414 break;
7415 case 1: /* D6 vline */
7416 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
7417 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
7418 DRM_DEBUG("IH: D6 vline\n");
7419 }
7420 break;
7421 default:
7422 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7423 break;
7424 }
7425 break;
7426 case 42: /* HPD hotplug */
7427 switch (src_data) {
7428 case 0:
7429 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
7430 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
7431 queue_hotplug = true;
7432 DRM_DEBUG("IH: HPD1\n");
7433 }
7434 break;
7435 case 1:
7436 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
7437 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
7438 queue_hotplug = true;
7439 DRM_DEBUG("IH: HPD2\n");
7440 }
7441 break;
7442 case 2:
7443 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
7444 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
7445 queue_hotplug = true;
7446 DRM_DEBUG("IH: HPD3\n");
7447 }
7448 break;
7449 case 3:
7450 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
7451 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
7452 queue_hotplug = true;
7453 DRM_DEBUG("IH: HPD4\n");
7454 }
7455 break;
7456 case 4:
7457 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
7458 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
7459 queue_hotplug = true;
7460 DRM_DEBUG("IH: HPD5\n");
7461 }
7462 break;
7463 case 5:
7464 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
7465 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
7466 queue_hotplug = true;
7467 DRM_DEBUG("IH: HPD6\n");
7468 }
7469 break;
7470 default:
7471 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7472 break;
7473 }
7474 break;
7475 case 124: /* UVD */
7476 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
7477 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
7478 break;
7479 case 146:
7480 case 147:
7481 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
7482 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
7483 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
7484 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
7485 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
7486 addr);
7487 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
7488 status);
7489 cik_vm_decode_fault(rdev, status, addr, mc_client);
7490 /* reset addr and status */
7491 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
7492 break;
7493 case 167: /* VCE */
7494 DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
7495 switch (src_data) {
7496 case 0:
7497 radeon_fence_process(rdev, TN_RING_TYPE_VCE1_INDEX);
7498 break;
7499 case 1:
7500 radeon_fence_process(rdev, TN_RING_TYPE_VCE2_INDEX);
7501 break;
7502 default:
7503 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
7504 break;
7505 }
7506 break;
7507 case 176: /* GFX RB CP_INT */
7508 case 177: /* GFX IB CP_INT */
7509 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
7510 break;
7511 case 181: /* CP EOP event */
7512 DRM_DEBUG("IH: CP EOP\n");
7513 /* XXX check the bitfield order! */
7514 me_id = (ring_id & 0x60) >> 5;
7515 pipe_id = (ring_id & 0x18) >> 3;
7516 queue_id = (ring_id & 0x7) >> 0;
7517 switch (me_id) {
7518 case 0:
7519 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
7520 break;
7521 case 1:
7522 case 2:
7523 if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
7524 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
7525 if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
7526 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
7527 break;
7528 }
7529 break;
7530 case 184: /* CP Privileged reg access */
7531 DRM_ERROR("Illegal register access in command stream\n");
7532 /* XXX check the bitfield order! */
7533 me_id = (ring_id & 0x60) >> 5;
7534 pipe_id = (ring_id & 0x18) >> 3;
7535 queue_id = (ring_id & 0x7) >> 0;
7536 switch (me_id) {
7537 case 0:
7538 /* This results in a full GPU reset, but all we need to do is soft
7539 * reset the CP for gfx
7540 */
7541 queue_reset = true;
7542 break;
7543 case 1:
7544 /* XXX compute */
7545 queue_reset = true;
7546 break;
7547 case 2:
7548 /* XXX compute */
7549 queue_reset = true;
7550 break;
7551 }
7552 break;
7553 case 185: /* CP Privileged inst */
7554 DRM_ERROR("Illegal instruction in command stream\n");
7555 /* XXX check the bitfield order! */
7556 me_id = (ring_id & 0x60) >> 5;
7557 pipe_id = (ring_id & 0x18) >> 3;
7558 queue_id = (ring_id & 0x7) >> 0;
7559 switch (me_id) {
7560 case 0:
7561 /* This results in a full GPU reset, but all we need to do is soft
7562 * reset the CP for gfx
7563 */
7564 queue_reset = true;
7565 break;
7566 case 1:
7567 /* XXX compute */
7568 queue_reset = true;
7569 break;
7570 case 2:
7571 /* XXX compute */
7572 queue_reset = true;
7573 break;
7574 }
7575 break;
7576 case 224: /* SDMA trap event */
7577 /* XXX check the bitfield order! */
7578 me_id = (ring_id & 0x3) >> 0;
7579 queue_id = (ring_id & 0xc) >> 2;
7580 DRM_DEBUG("IH: SDMA trap\n");
7581 switch (me_id) {
7582 case 0:
7583 switch (queue_id) {
7584 case 0:
7585 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
7586 break;
7587 case 1:
7588 /* XXX compute */
7589 break;
7590 case 2:
7591 /* XXX compute */
7592 break;
7593 }
7594 break;
7595 case 1:
7596 switch (queue_id) {
7597 case 0:
7598 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
7599 break;
7600 case 1:
7601 /* XXX compute */
7602 break;
7603 case 2:
7604 /* XXX compute */
7605 break;
7606 }
7607 break;
7608 }
7609 break;
7610 case 230: /* thermal low to high */
7611 DRM_DEBUG("IH: thermal low to high\n");
7612 rdev->pm.dpm.thermal.high_to_low = false;
7613 queue_thermal = true;
7614 break;
7615 case 231: /* thermal high to low */
7616 DRM_DEBUG("IH: thermal high to low\n");
7617 rdev->pm.dpm.thermal.high_to_low = true;
7618 queue_thermal = true;
7619 break;
7620 case 233: /* GUI IDLE */
7621 DRM_DEBUG("IH: GUI idle\n");
7622 break;
7623 case 241: /* SDMA Privileged inst */
7624 case 247: /* SDMA Privileged inst */
7625 DRM_ERROR("Illegal instruction in SDMA command stream\n");
7626 /* XXX check the bitfield order! */
7627 me_id = (ring_id & 0x3) >> 0;
7628 queue_id = (ring_id & 0xc) >> 2;
7629 switch (me_id) {
7630 case 0:
7631 switch (queue_id) {
7632 case 0:
7633 queue_reset = true;
7634 break;
7635 case 1:
7636 /* XXX compute */
7637 queue_reset = true;
7638 break;
7639 case 2:
7640 /* XXX compute */
7641 queue_reset = true;
7642 break;
7643 }
7644 break;
7645 case 1:
7646 switch (queue_id) {
7647 case 0:
7648 queue_reset = true;
7649 break;
7650 case 1:
7651 /* XXX compute */
7652 queue_reset = true;
7653 break;
7654 case 2:
7655 /* XXX compute */
7656 queue_reset = true;
7657 break;
7658 }
7659 break;
7660 }
7661 break;
7662 default:
7663 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7664 break;
7665 }
7666
7667 /* wptr/rptr are in bytes! */
7668 rptr += 16;
7669 rptr &= rdev->ih.ptr_mask;
7670 }
7671 if (queue_hotplug)
7672 schedule_work(&rdev->hotplug_work);
7673 if (queue_reset)
7674 schedule_work(&rdev->reset_work);
7675 if (queue_thermal)
7676 schedule_work(&rdev->pm.dpm.thermal.work);
7677 rdev->ih.rptr = rptr;
7678 WREG32(IH_RB_RPTR, rdev->ih.rptr);
7679 atomic_set(&rdev->ih.lock, 0);
7680
7681 /* make sure wptr hasn't changed while processing */
7682 wptr = cik_get_ih_wptr(rdev);
7683 if (wptr != rptr)
7684 goto restart_ih;
7685
7686 return IRQ_HANDLED;
7687 }
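/* Editor's note: the re-entrancy guard and restart pattern of
 * cik_irq_process() in miniature, under hypothetical names (a sketch,
 * not driver code). One caller wins the atomic_xchg() and drains the
 * ring; re-reading the write pointer afterwards catches entries that
 * arrived while draining.
 */
static int ih_drain_sketch(atomic_t *lock, u32 *rptr, u32 ptr_mask,
			   u32 (*get_wptr)(void))
{
	u32 wptr = get_wptr();
restart:
	if (atomic_xchg(lock, 1))
		return 0;			/* someone else is draining */
	while (*rptr != wptr) {
		/* ... handle one 16-byte IV entry at *rptr ... */
		*rptr = (*rptr + 16) & ptr_mask;
	}
	atomic_set(lock, 0);
	wptr = get_wptr();			/* anything new meanwhile? */
	if (wptr != *rptr)
		goto restart;
	return 1;
}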
7688
7689 /*
7690 * startup/shutdown callbacks
7691 */
7692 /**
7693 * cik_startup - program the asic to a functional state
7694 *
7695 * @rdev: radeon_device pointer
7696 *
7697 * Programs the asic to a functional state (CIK).
7698 * Called by cik_init() and cik_resume().
7699 * Returns 0 for success, error for failure.
7700 */
7701 static int cik_startup(struct radeon_device *rdev)
7702 {
7703 struct radeon_ring *ring;
7704 int r;
7705
7706 /* enable pcie gen2/3 link */
7707 cik_pcie_gen3_enable(rdev);
7708 /* enable aspm */
7709 cik_program_aspm(rdev);
7710
7711 /* scratch needs to be initialized before MC */
7712 r = r600_vram_scratch_init(rdev);
7713 if (r)
7714 return r;
7715
7716 cik_mc_program(rdev);
7717
7718 if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
7719 r = ci_mc_load_microcode(rdev);
7720 if (r) {
7721 DRM_ERROR("Failed to load MC firmware!\n");
7722 return r;
7723 }
7724 }
7725
7726 r = cik_pcie_gart_enable(rdev);
7727 if (r)
7728 return r;
7729 cik_gpu_init(rdev);
7730
7731 /* allocate rlc buffers */
7732 if (rdev->flags & RADEON_IS_IGP) {
7733 if (rdev->family == CHIP_KAVERI) {
7734 rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
7735 rdev->rlc.reg_list_size =
7736 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
7737 } else {
7738 rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
7739 rdev->rlc.reg_list_size =
7740 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
7741 }
7742 }
7743 rdev->rlc.cs_data = ci_cs_data;
7744 rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
7745 r = sumo_rlc_init(rdev);
7746 if (r) {
7747 DRM_ERROR("Failed to init rlc BOs!\n");
7748 return r;
7749 }
7750
7751 /* allocate wb buffer */
7752 r = radeon_wb_init(rdev);
7753 if (r)
7754 return r;
7755
7756 /* allocate mec buffers */
7757 r = cik_mec_init(rdev);
7758 if (r) {
7759 DRM_ERROR("Failed to init MEC BOs!\n");
7760 return r;
7761 }
7762
7763 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
7764 if (r) {
7765 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7766 return r;
7767 }
7768
7769 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
7770 if (r) {
7771 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7772 return r;
7773 }
7774
7775 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
7776 if (r) {
7777 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7778 return r;
7779 }
7780
7781 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
7782 if (r) {
7783 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7784 return r;
7785 }
7786
7787 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
7788 if (r) {
7789 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7790 return r;
7791 }
7792
7793 r = radeon_uvd_resume(rdev);
7794 if (!r) {
7795 r = uvd_v4_2_resume(rdev);
7796 if (!r) {
7797 r = radeon_fence_driver_start_ring(rdev,
7798 R600_RING_TYPE_UVD_INDEX);
7799 if (r)
7800 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
7801 }
7802 }
7803 if (r)
7804 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
7805
7806 r = radeon_vce_resume(rdev);
7807 if (!r) {
7808 r = vce_v2_0_resume(rdev);
7809 if (!r)
7810 r = radeon_fence_driver_start_ring(rdev,
7811 TN_RING_TYPE_VCE1_INDEX);
7812 if (!r)
7813 r = radeon_fence_driver_start_ring(rdev,
7814 TN_RING_TYPE_VCE2_INDEX);
7815 }
7816 if (r) {
7817 dev_err(rdev->dev, "VCE init error (%d).\n", r);
7818 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
7819 rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
7820 }
7821
7822 /* Enable IRQ */
7823 if (!rdev->irq.installed) {
7824 r = radeon_irq_kms_init(rdev);
7825 if (r)
7826 return r;
7827 }
7828
7829 r = cik_irq_init(rdev);
7830 if (r) {
7831 DRM_ERROR("radeon: IH init failed (%d).\n", r);
7832 radeon_irq_kms_fini(rdev);
7833 return r;
7834 }
7835 cik_irq_set(rdev);
7836
7837 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7838 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
7839 PACKET3(PACKET3_NOP, 0x3FFF));
7840 if (r)
7841 return r;
7842
7843 /* set up the compute queues */
7844 /* type-2 packets are deprecated on MEC, use type-3 instead */
7845 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7846 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
7847 PACKET3(PACKET3_NOP, 0x3FFF));
7848 if (r)
7849 return r;
7850 ring->me = 1; /* first MEC */
7851 ring->pipe = 0; /* first pipe */
7852 ring->queue = 0; /* first queue */
7853 ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
7854
7855 /* type-2 packets are deprecated on MEC, use type-3 instead */
7856 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7857 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
7858 PACKET3(PACKET3_NOP, 0x3FFF));
7859 if (r)
7860 return r;
7861 /* dGPUs only have 1 MEC */
7862 ring->me = 1; /* first MEC */
7863 ring->pipe = 0; /* first pipe */
7864 ring->queue = 1; /* second queue */
7865 ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
7866
7867 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7868 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
7869 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7870 if (r)
7871 return r;
7872
7873 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7874 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
7875 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7876 if (r)
7877 return r;
7878
7879 r = cik_cp_resume(rdev);
7880 if (r)
7881 return r;
7882
7883 r = cik_sdma_resume(rdev);
7884 if (r)
7885 return r;
7886
7887 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
7888 if (ring->ring_size) {
7889 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
7890 RADEON_CP_PACKET2);
7891 if (!r)
7892 r = uvd_v1_0_init(rdev);
7893 if (r)
7894 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
7895 }
7896
7897 r = -ENOENT;
7898
7899 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
7900 if (ring->ring_size)
7901 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
7902 VCE_CMD_NO_OP);
7903
7904 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
7905 if (ring->ring_size)
7906 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
7907 VCE_CMD_NO_OP);
7908
7909 if (!r)
7910 r = vce_v1_0_init(rdev);
7911 else if (r != -ENOENT)
7912 DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
7913
7914 r = radeon_ib_pool_init(rdev);
7915 if (r) {
7916 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
7917 return r;
7918 }
7919
7920 r = radeon_vm_manager_init(rdev);
7921 if (r) {
7922 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
7923 return r;
7924 }
7925
7926 r = dce6_audio_init(rdev);
7927 if (r)
7928 return r;
7929
7930 return 0;
7931 }
7932
7933 /**
7934 * cik_resume - resume the asic to a functional state
7935 *
7936 * @rdev: radeon_device pointer
7937 *
7938 * Programs the asic to a functional state (CIK).
7939 * Called at resume.
7940 * Returns 0 for success, error for failure.
7941 */
7942 int cik_resume(struct radeon_device *rdev)
7943 {
7944 int r;
7945
7946 /* post card */
7947 atom_asic_init(rdev->mode_info.atom_context);
7948
7949 /* init golden registers */
7950 cik_init_golden_registers(rdev);
7951
7952 radeon_pm_resume(rdev);
7953
7954 rdev->accel_working = true;
7955 r = cik_startup(rdev);
7956 if (r) {
7957 DRM_ERROR("cik startup failed on resume\n");
7958 rdev->accel_working = false;
7959 return r;
7960 }
7961
7962 return r;
7964 }
7965
7966 /**
7967 * cik_suspend - suspend the asic
7968 *
7969 * @rdev: radeon_device pointer
7970 *
7971 * Bring the chip into a state suitable for suspend (CIK).
7972 * Called at suspend.
7973 * Returns 0 for success.
7974 */
7975 int cik_suspend(struct radeon_device *rdev)
7976 {
7977 radeon_pm_suspend(rdev);
7978 dce6_audio_fini(rdev);
7979 radeon_vm_manager_fini(rdev);
7980 cik_cp_enable(rdev, false);
7981 cik_sdma_enable(rdev, false);
7982 uvd_v1_0_fini(rdev);
7983 radeon_uvd_suspend(rdev);
7984 radeon_vce_suspend(rdev);
7985 cik_fini_pg(rdev);
7986 cik_fini_cg(rdev);
7987 cik_irq_suspend(rdev);
7988 radeon_wb_disable(rdev);
7989 cik_pcie_gart_disable(rdev);
7990 return 0;
7991 }
7992
7993 /* The plan is to move initialization into this function and use
7994 * helper functions so that radeon_device_init does little more
7995 * than call asic specific functions. This should also
7996 * allow us to remove a bunch of callback functions
7997 * like vram_info.
7998 */
7999 /**
8000 * cik_init - asic specific driver and hw init
8001 *
8002 * @rdev: radeon_device pointer
8003 *
8004 * Setup asic specific driver variables and program the hw
8005 * to a functional state (CIK).
8006 * Called at driver startup.
8007 * Returns 0 for success, errors for failure.
8008 */
8009 int cik_init(struct radeon_device *rdev)
8010 {
8011 struct radeon_ring *ring;
8012 int r;
8013
8014 /* Read BIOS */
8015 if (!radeon_get_bios(rdev)) {
8016 if (ASIC_IS_AVIVO(rdev))
8017 return -EINVAL;
8018 }
8019 /* Must be an ATOMBIOS */
8020 if (!rdev->is_atom_bios) {
8021 dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
8022 return -EINVAL;
8023 }
8024 r = radeon_atombios_init(rdev);
8025 if (r)
8026 return r;
8027
8028 /* Post card if necessary */
8029 if (!radeon_card_posted(rdev)) {
8030 if (!rdev->bios) {
8031 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
8032 return -EINVAL;
8033 }
8034 DRM_INFO("GPU not posted. posting now...\n");
8035 atom_asic_init(rdev->mode_info.atom_context);
8036 }
8037 /* init golden registers */
8038 cik_init_golden_registers(rdev);
8039 /* Initialize scratch registers */
8040 cik_scratch_init(rdev);
8041 /* Initialize surface registers */
8042 radeon_surface_init(rdev);
8043 /* Initialize clocks */
8044 radeon_get_clock_info(rdev->ddev);
8045
8046 /* Fence driver */
8047 r = radeon_fence_driver_init(rdev);
8048 if (r)
8049 return r;
8050
8051 /* initialize memory controller */
8052 r = cik_mc_init(rdev);
8053 if (r)
8054 return r;
8055 /* Memory manager */
8056 r = radeon_bo_init(rdev);
8057 if (r)
8058 return r;
8059
8060 if (rdev->flags & RADEON_IS_IGP) {
8061 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
8062 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
8063 r = cik_init_microcode(rdev);
8064 if (r) {
8065 DRM_ERROR("Failed to load firmware!\n");
8066 return r;
8067 }
8068 }
8069 } else {
8070 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
8071 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
8072 !rdev->mc_fw) {
8073 r = cik_init_microcode(rdev);
8074 if (r) {
8075 DRM_ERROR("Failed to load firmware!\n");
8076 return r;
8077 }
8078 }
8079 }
8080
8081 /* Initialize power management */
8082 radeon_pm_init(rdev);
8083
8084 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
8085 ring->ring_obj = NULL;
8086 r600_ring_init(rdev, ring, 1024 * 1024);
8087
8088 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
8089 ring->ring_obj = NULL;
8090 r600_ring_init(rdev, ring, 1024 * 1024);
8091 r = radeon_doorbell_get(rdev, &ring->doorbell_index);
8092 if (r)
8093 return r;
8094
8095 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
8096 ring->ring_obj = NULL;
8097 r600_ring_init(rdev, ring, 1024 * 1024);
8098 r = radeon_doorbell_get(rdev, &ring->doorbell_index);
8099 if (r)
8100 return r;
8101
8102 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
8103 ring->ring_obj = NULL;
8104 r600_ring_init(rdev, ring, 256 * 1024);
8105
8106 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
8107 ring->ring_obj = NULL;
8108 r600_ring_init(rdev, ring, 256 * 1024);
8109
8110 r = radeon_uvd_init(rdev);
8111 if (!r) {
8112 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
8113 ring->ring_obj = NULL;
8114 r600_ring_init(rdev, ring, 4096);
8115 }
8116
8117 r = radeon_vce_init(rdev);
8118 if (!r) {
8119 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
8120 ring->ring_obj = NULL;
8121 r600_ring_init(rdev, ring, 4096);
8122
8123 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
8124 ring->ring_obj = NULL;
8125 r600_ring_init(rdev, ring, 4096);
8126 }
8127
8128 rdev->ih.ring_obj = NULL;
8129 r600_ih_ring_init(rdev, 64 * 1024);
8130
8131 r = r600_pcie_gart_init(rdev);
8132 if (r)
8133 return r;
8134
8135 rdev->accel_working = true;
8136 r = cik_startup(rdev);
8137 if (r) {
8138 dev_err(rdev->dev, "disabling GPU acceleration\n");
8139 cik_cp_fini(rdev);
8140 cik_sdma_fini(rdev);
8141 cik_irq_fini(rdev);
8142 sumo_rlc_fini(rdev);
8143 cik_mec_fini(rdev);
8144 radeon_wb_fini(rdev);
8145 radeon_ib_pool_fini(rdev);
8146 radeon_vm_manager_fini(rdev);
8147 radeon_irq_kms_fini(rdev);
8148 cik_pcie_gart_fini(rdev);
8149 rdev->accel_working = false;
8150 }
8151
8152 /* Don't start up if the MC ucode is missing.
8153 * The default clocks and voltages before the MC ucode
8154 * is loaded are not sufficient for advanced operations.
8155 */
8156 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
8157 DRM_ERROR("radeon: MC ucode required for NI+.\n");
8158 return -EINVAL;
8159 }
8160
8161 return 0;
8162 }
8163
8164 /**
8165 * cik_fini - asic specific driver and hw fini
8166 *
8167 * @rdev: radeon_device pointer
8168 *
8169 * Tear down the asic specific driver variables and program the hw
8170 * to an idle state (CIK).
8171 * Called at driver unload.
8172 */
8173 void cik_fini(struct radeon_device *rdev)
8174 {
8175 radeon_pm_fini(rdev);
8176 cik_cp_fini(rdev);
8177 cik_sdma_fini(rdev);
8178 cik_fini_pg(rdev);
8179 cik_fini_cg(rdev);
8180 cik_irq_fini(rdev);
8181 sumo_rlc_fini(rdev);
8182 cik_mec_fini(rdev);
8183 radeon_wb_fini(rdev);
8184 radeon_vm_manager_fini(rdev);
8185 radeon_ib_pool_fini(rdev);
8186 radeon_irq_kms_fini(rdev);
8187 uvd_v1_0_fini(rdev);
8188 radeon_uvd_fini(rdev);
8189 radeon_vce_fini(rdev);
8190 cik_pcie_gart_fini(rdev);
8191 r600_vram_scratch_fini(rdev);
8192 radeon_gem_fini(rdev);
8193 radeon_fence_driver_fini(rdev);
8194 radeon_bo_fini(rdev);
8195 radeon_atombios_fini(rdev);
8196 kfree(rdev->bios);
8197 rdev->bios = NULL;
8198 }
8199
8200 void dce8_program_fmt(struct drm_encoder *encoder)
8201 {
8202 struct drm_device *dev = encoder->dev;
8203 struct radeon_device *rdev = dev->dev_private;
8204 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
8205 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
8206 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
8207 int bpc = 0;
8208 u32 tmp = 0;
8209 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
8210
8211 if (connector) {
8212 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
8213 bpc = radeon_get_monitor_bpc(connector);
8214 dither = radeon_connector->dither;
8215 }
8216
8217 /* LVDS/eDP FMT is set up by atom */
8218 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
8219 return;
8220
8221 /* not needed for analog */
8222 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
8223 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
8224 return;
8225
8226 if (bpc == 0)
8227 return;
8228
8229 switch (bpc) {
8230 case 6:
8231 if (dither == RADEON_FMT_DITHER_ENABLE)
8232 /* XXX sort out optimal dither settings */
8233 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
8234 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
8235 else
8236 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
8237 break;
8238 case 8:
8239 if (dither == RADEON_FMT_DITHER_ENABLE)
8240 /* XXX sort out optimal dither settings */
8241 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
8242 FMT_RGB_RANDOM_ENABLE |
8243 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
8244 else
8245 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
8246 break;
8247 case 10:
8248 if (dither == RADEON_FMT_DITHER_ENABLE)
8249 /* XXX sort out optimal dither settings */
8250 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
8251 FMT_RGB_RANDOM_ENABLE |
8252 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
8253 else
8254 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
8255 break;
8256 default:
8257 /* not needed */
8258 break;
8259 }
8260
8261 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
8262 }
8263
8264 /* display watermark setup */
8265 /**
8266 * dce8_line_buffer_adjust - Set up the line buffer
8267 *
8268 * @rdev: radeon_device pointer
8269 * @radeon_crtc: the selected display controller
8270 * @mode: the current display mode on the selected display
8271 * controller
8272 *
8273 * Set up the line buffer allocation for
8274 * the selected display controller (CIK).
8275 * Returns the line buffer size in pixels.
8276 */
8277 static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
8278 struct radeon_crtc *radeon_crtc,
8279 struct drm_display_mode *mode)
8280 {
8281 u32 tmp, buffer_alloc, i;
8282 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
8283 /*
8284 * Line Buffer Setup
8285 * There are 6 line buffers, one for each display controller.
8286 * There are 3 partitions per LB. Select the number of partitions
8287 * to enable based on the display width. For display widths larger
8288 * than 4096, you need to use 2 display controllers and combine
8289 * them using the stereo blender.
8290 */
8291 if (radeon_crtc->base.enabled && mode) {
8292 if (mode->crtc_hdisplay < 1920) {
8293 tmp = 1;
8294 buffer_alloc = 2;
8295 } else if (mode->crtc_hdisplay < 2560) {
8296 tmp = 2;
8297 buffer_alloc = 2;
8298 } else if (mode->crtc_hdisplay < 4096) {
8299 tmp = 0;
8300 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
8301 } else {
8302 DRM_DEBUG_KMS("Mode too big for LB!\n");
8303 tmp = 0;
8304 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
8305 }
8306 } else {
8307 tmp = 1;
8308 buffer_alloc = 0;
8309 }
8310
8311 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
8312 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
8313
8314 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
8315 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
8316 for (i = 0; i < rdev->usec_timeout; i++) {
8317 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
8318 DMIF_BUFFERS_ALLOCATED_COMPLETED)
8319 break;
8320 udelay(1);
8321 }
8322
8323 if (radeon_crtc->base.enabled && mode) {
8324 switch (tmp) {
8325 case 0:
8326 default:
8327 return 4096 * 2;
8328 case 1:
8329 return 1920 * 2;
8330 case 2:
8331 return 2560 * 2;
8332 }
8333 }
8334
8335 /* controller not enabled, so no lb used */
8336 return 0;
8337 }
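/* Editor's note: a worked example of the allocation above (illustrative
 * numbers only). A 1280x720 mode has crtc_hdisplay == 1280 < 1920, so
 * tmp = 1 and the function returns 1920 * 2 = 3840 pixels of line
 * buffer; a 1920x1080 mode falls into the < 2560 branch (tmp = 2) and
 * yields 2560 * 2 = 5120 pixels. The result feeds the lb_size field
 * used by the watermark code below.
 */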
8338
8339 /**
8340 * cik_get_number_of_dram_channels - get the number of dram channels
8341 *
8342 * @rdev: radeon_device pointer
8343 *
8344 * Look up the number of video ram channels (CIK).
8345 * Used for display watermark bandwidth calculations
8346 * Returns the number of dram channels
8347 */
8348 static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
8349 {
8350 u32 tmp = RREG32(MC_SHARED_CHMAP);
8351
8352 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
8353 case 0:
8354 default:
8355 return 1;
8356 case 1:
8357 return 2;
8358 case 2:
8359 return 4;
8360 case 3:
8361 return 8;
8362 case 4:
8363 return 3;
8364 case 5:
8365 return 6;
8366 case 6:
8367 return 10;
8368 case 7:
8369 return 12;
8370 case 8:
8371 return 16;
8372 }
8373 }
8374
8375 struct dce8_wm_params {
8376 u32 dram_channels; /* number of dram channels */
8377 u32 yclk; /* bandwidth per dram data pin in kHz */
8378 u32 sclk; /* engine clock in kHz */
8379 u32 disp_clk; /* display clock in kHz */
8380 u32 src_width; /* viewport width */
8381 u32 active_time; /* active display time in ns */
8382 u32 blank_time; /* blank time in ns */
8383 bool interlaced; /* mode is interlaced */
8384 fixed20_12 vsc; /* vertical scale ratio */
8385 u32 num_heads; /* number of active crtcs */
8386 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
8387 u32 lb_size; /* line buffer allocated to pipe */
8388 u32 vtaps; /* vertical scaler taps */
8389 };
8390
8391 /**
8392 * dce8_dram_bandwidth - get the dram bandwidth
8393 *
8394 * @wm: watermark calculation data
8395 *
8396 * Calculate the raw dram bandwidth (CIK).
8397 * Used for display watermark bandwidth calculations
8398 * Returns the dram bandwidth in MBytes/s
8399 */
8400 static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
8401 {
8402 /* Calculate raw DRAM Bandwidth */
8403 fixed20_12 dram_efficiency; /* 0.7 */
8404 fixed20_12 yclk, dram_channels, bandwidth;
8405 fixed20_12 a;
8406
8407 a.full = dfixed_const(1000);
8408 yclk.full = dfixed_const(wm->yclk);
8409 yclk.full = dfixed_div(yclk, a);
8410 dram_channels.full = dfixed_const(wm->dram_channels * 4);
8411 a.full = dfixed_const(10);
8412 dram_efficiency.full = dfixed_const(7);
8413 dram_efficiency.full = dfixed_div(dram_efficiency, a);
8414 bandwidth.full = dfixed_mul(dram_channels, yclk);
8415 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
8416
8417 return dfixed_trunc(bandwidth);
8418 }
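/* Editor's note: worked numbers for the function above (illustrative,
 * assuming a 500 MHz memory clock and 2 dram channels). yclk = 500000
 * kHz / 1000 = 500; dram_channels = 2 * 4 = 8; so
 * bandwidth = 8 * 500 * 0.7 = 2800 MBytes/s after the 0.7 DRAM
 * efficiency factor.
 */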
8419
8420 /**
8421 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
8422 *
8423 * @wm: watermark calculation data
8424 *
8425 * Calculate the dram bandwidth used for display (CIK).
8426 * Used for display watermark bandwidth calculations
8427 * Returns the dram bandwidth for display in MBytes/s
8428 */
8429 static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
8430 {
8431 /* Calculate DRAM Bandwidth and the part allocated to display. */
8432 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
8433 fixed20_12 yclk, dram_channels, bandwidth;
8434 fixed20_12 a;
8435
8436 a.full = dfixed_const(1000);
8437 yclk.full = dfixed_const(wm->yclk);
8438 yclk.full = dfixed_div(yclk, a);
8439 dram_channels.full = dfixed_const(wm->dram_channels * 4);
8440 a.full = dfixed_const(10);
8441 disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
8442 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
8443 bandwidth.full = dfixed_mul(dram_channels, yclk);
8444 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
8445
8446 return dfixed_trunc(bandwidth);
8447 }
8448
8449 /**
8450 * dce8_data_return_bandwidth - get the data return bandwidth
8451 *
8452 * @wm: watermark calculation data
8453 *
8454 * Calculate the data return bandwidth used for display (CIK).
8455 * Used for display watermark bandwidth calculations
8456 * Returns the data return bandwidth in MBytes/s
8457 */
8458 static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
8459 {
8460 /* Calculate the display Data return Bandwidth */
8461 fixed20_12 return_efficiency; /* 0.8 */
8462 fixed20_12 sclk, bandwidth;
8463 fixed20_12 a;
8464
8465 a.full = dfixed_const(1000);
8466 sclk.full = dfixed_const(wm->sclk);
8467 sclk.full = dfixed_div(sclk, a);
8468 a.full = dfixed_const(10);
8469 return_efficiency.full = dfixed_const(8);
8470 return_efficiency.full = dfixed_div(return_efficiency, a);
8471 a.full = dfixed_const(32);
8472 bandwidth.full = dfixed_mul(a, sclk);
8473 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
8474
8475 return dfixed_trunc(bandwidth);
8476 }
8477
8478 /**
8479 * dce8_dmif_request_bandwidth - get the dmif bandwidth
8480 *
8481 * @wm: watermark calculation data
8482 *
8483 * Calculate the dmif bandwidth used for display (CIK).
8484 * Used for display watermark bandwidth calculations
8485 * Returns the dmif bandwidth in MBytes/s
8486 */
8487 static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
8488 {
8489 /* Calculate the DMIF Request Bandwidth */
8490 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
8491 fixed20_12 disp_clk, bandwidth;
8492 fixed20_12 a, b;
8493
8494 a.full = dfixed_const(1000);
8495 disp_clk.full = dfixed_const(wm->disp_clk);
8496 disp_clk.full = dfixed_div(disp_clk, a);
8497 a.full = dfixed_const(32);
8498 b.full = dfixed_mul(a, disp_clk);
8499
8500 a.full = dfixed_const(10);
8501 disp_clk_request_efficiency.full = dfixed_const(8);
8502 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
8503
8504 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
8505
8506 return dfixed_trunc(bandwidth);
8507 }
8508
8509 /**
8510 * dce8_available_bandwidth - get the min available bandwidth
8511 *
8512 * @wm: watermark calculation data
8513 *
8514 * Calculate the min available bandwidth used for display (CIK).
8515 * Used for display watermark bandwidth calculations
8516 * Returns the min available bandwidth in MBytes/s
8517 */
8518 static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
8519 {
8520 /* Calculate the available bandwidth. Display can use this temporarily but not on average. */
8521 u32 dram_bandwidth = dce8_dram_bandwidth(wm);
8522 u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
8523 u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
8524
8525 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
8526 }
8527
8528 /**
8529 * dce8_average_bandwidth - get the average available bandwidth
8530 *
8531 * @wm: watermark calculation data
8532 *
8533 * Calculate the average available bandwidth used for display (CIK).
8534 * Used for display watermark bandwidth calculations
8535 * Returns the average available bandwidth in MBytes/s
8536 */
8537 static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
8538 {
8539 /* Calculate the display mode Average Bandwidth
8540 * DisplayMode should contain the source and destination dimensions,
8541 * timing, etc.
8542 */
8543 fixed20_12 bpp;
8544 fixed20_12 line_time;
8545 fixed20_12 src_width;
8546 fixed20_12 bandwidth;
8547 fixed20_12 a;
8548
8549 a.full = dfixed_const(1000);
8550 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
8551 line_time.full = dfixed_div(line_time, a);
8552 bpp.full = dfixed_const(wm->bytes_per_pixel);
8553 src_width.full = dfixed_const(wm->src_width);
8554 bandwidth.full = dfixed_mul(src_width, bpp);
8555 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
8556 bandwidth.full = dfixed_div(bandwidth, line_time);
8557
8558 return dfixed_trunc(bandwidth);
8559 }
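/* Editor's note: a units check for the function above (illustrative).
 * line_time converts ns to us, so a 1920-wide source at 4 bytes per
 * pixel with vsc = 1.0 and a 14.8 us line gives
 * bandwidth = 1920 * 4 / 14.8 ~= 519 bytes/us ~= 519 MBytes/s.
 */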
8560
8561 /**
8562 * dce8_latency_watermark - get the latency watermark
8563 *
8564 * @wm: watermark calculation data
8565 *
8566 * Calculate the latency watermark (CIK).
8567 * Used for display watermark bandwidth calculations
8568 * Returns the latency watermark in ns
8569 */
8570 static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
8571 {
8572 /* First calculate the latency in ns */
8573 u32 mc_latency = 2000; /* 2000 ns. */
8574 u32 available_bandwidth = dce8_available_bandwidth(wm);
8575 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
8576 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
8577 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
8578 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
8579 (wm->num_heads * cursor_line_pair_return_time);
8580 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
8581 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
8582 u32 tmp, dmif_size = 12288;
8583 fixed20_12 a, b, c;
8584
8585 if (wm->num_heads == 0)
8586 return 0;
8587
8588 a.full = dfixed_const(2);
8589 b.full = dfixed_const(1);
8590 if ((wm->vsc.full > a.full) ||
8591 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
8592 (wm->vtaps >= 5) ||
8593 ((wm->vsc.full >= a.full) && wm->interlaced))
8594 max_src_lines_per_dst_line = 4;
8595 else
8596 max_src_lines_per_dst_line = 2;
8597
8598 a.full = dfixed_const(available_bandwidth);
8599 b.full = dfixed_const(wm->num_heads);
8600 a.full = dfixed_div(a, b);
8601
8602 b.full = dfixed_const(mc_latency + 512);
8603 c.full = dfixed_const(wm->disp_clk);
8604 b.full = dfixed_div(b, c);
8605
8606 c.full = dfixed_const(dmif_size);
8607 b.full = dfixed_div(c, b);
8608
8609 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
8610
8611 b.full = dfixed_const(1000);
8612 c.full = dfixed_const(wm->disp_clk);
8613 b.full = dfixed_div(c, b);
8614 c.full = dfixed_const(wm->bytes_per_pixel);
8615 b.full = dfixed_mul(b, c);
8616
8617 lb_fill_bw = min(tmp, dfixed_trunc(b));
8618
8619 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
8620 b.full = dfixed_const(1000);
8621 c.full = dfixed_const(lb_fill_bw);
8622 b.full = dfixed_div(c, b);
8623 a.full = dfixed_div(a, b);
8624 line_fill_time = dfixed_trunc(a);
8625
8626 if (line_fill_time < wm->active_time)
8627 return latency;
8628 else
8629 return latency + (line_fill_time - wm->active_time);
8630
8631 }
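/* Editor's note: the arithmetic above restated as a sketch (not an
 * authoritative derivation):
 *   latency = mc_latency (2000 ns)
 *           + (num_heads + 1) * worst_chunk_return_time
 *           + num_heads * cursor_line_pair_return_time
 *           + dc_latency
 * and if filling one destination line's worth of source pixels
 * (line_fill_time) exceeds the active display time, the shortfall is
 * added on top of the latency.
 */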
8632
8633 /**
8634 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
8635 * average and available dram bandwidth
8636 *
8637 * @wm: watermark calculation data
8638 *
8639 * Check if the display average bandwidth fits in the display
8640 * dram bandwidth (CIK).
8641 * Used for display watermark bandwidth calculations
8642 * Returns true if the display fits, false if not.
8643 */
8644 static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
8645 {
8646 if (dce8_average_bandwidth(wm) <=
8647 (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
8648 return true;
8649 else
8650 return false;
8651 }
8652
8653 /**
8654 * dce8_average_bandwidth_vs_available_bandwidth - check
8655 * average and available bandwidth
8656 *
8657 * @wm: watermark calculation data
8658 *
8659 * Check if the display average bandwidth fits in the display
8660 * available bandwidth (CIK).
8661 * Used for display watermark bandwidth calculations
8662 * Returns true if the display fits, false if not.
8663 */
8664 static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
8665 {
8666 if (dce8_average_bandwidth(wm) <=
8667 (dce8_available_bandwidth(wm) / wm->num_heads))
8668 return true;
8669 else
8670 return false;
8671 }
8672
8673 /**
8674 * dce8_check_latency_hiding - check latency hiding
8675 *
8676 * @wm: watermark calculation data
8677 *
8678 * Check latency hiding (CIK).
8679 * Used for display watermark bandwidth calculations
8680 * Returns true if the display fits, false if not.
8681 */
8682 static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
8683 {
8684 u32 lb_partitions = wm->lb_size / wm->src_width;
8685 u32 line_time = wm->active_time + wm->blank_time;
8686 u32 latency_tolerant_lines;
8687 u32 latency_hiding;
8688 fixed20_12 a;
8689
8690 a.full = dfixed_const(1);
8691 if (wm->vsc.full > a.full)
8692 latency_tolerant_lines = 1;
8693 else {
8694 if (lb_partitions <= (wm->vtaps + 1))
8695 latency_tolerant_lines = 1;
8696 else
8697 latency_tolerant_lines = 2;
8698 }
8699
8700 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
8701
8702 if (dce8_latency_watermark(wm) <= latency_hiding)
8703 return true;
8704 else
8705 return false;
8706 }
8707
8708 /**
8709 * dce8_program_watermarks - program display watermarks
8710 *
8711 * @rdev: radeon_device pointer
8712 * @radeon_crtc: the selected display controller
8713 * @lb_size: line buffer size
8714 * @num_heads: number of display controllers in use
8715 *
8716 * Calculate and program the display watermarks for the
8717 * selected display controller (CIK).
8718 */
8719 static void dce8_program_watermarks(struct radeon_device *rdev,
8720 struct radeon_crtc *radeon_crtc,
8721 u32 lb_size, u32 num_heads)
8722 {
8723 struct drm_display_mode *mode = &radeon_crtc->base.mode;
8724 struct dce8_wm_params wm_low, wm_high;
8725 u32 pixel_period;
8726 u32 line_time = 0;
8727 u32 latency_watermark_a = 0, latency_watermark_b = 0;
8728 u32 tmp, wm_mask;
8729
8730 if (radeon_crtc->base.enabled && num_heads && mode) {
8731 pixel_period = 1000000 / (u32)mode->clock;
8732 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
8733
8734 /* watermark for high clocks */
8735 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
8736 rdev->pm.dpm_enabled) {
8737 wm_high.yclk =
8738 radeon_dpm_get_mclk(rdev, false) * 10;
8739 wm_high.sclk =
8740 radeon_dpm_get_sclk(rdev, false) * 10;
8741 } else {
8742 wm_high.yclk = rdev->pm.current_mclk * 10;
8743 wm_high.sclk = rdev->pm.current_sclk * 10;
8744 }
8745
8746 wm_high.disp_clk = mode->clock;
8747 wm_high.src_width = mode->crtc_hdisplay;
8748 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
8749 wm_high.blank_time = line_time - wm_high.active_time;
8750 wm_high.interlaced = false;
8751 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
8752 wm_high.interlaced = true;
8753 wm_high.vsc = radeon_crtc->vsc;
8754 wm_high.vtaps = 1;
8755 if (radeon_crtc->rmx_type != RMX_OFF)
8756 wm_high.vtaps = 2;
8757 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
8758 wm_high.lb_size = lb_size;
8759 wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
8760 wm_high.num_heads = num_heads;
8761
8762 /* set for high clocks */
8763 latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
8764
8765 /* possibly force display priority to high */
8766 /* should really do this at mode validation time... */
8767 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
8768 !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
8769 !dce8_check_latency_hiding(&wm_high) ||
8770 (rdev->disp_priority == 2)) {
8771 DRM_DEBUG_KMS("force priority to high\n");
8772 }
8773
8774 /* watermark for low clocks */
8775 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
8776 rdev->pm.dpm_enabled) {
8777 wm_low.yclk =
8778 radeon_dpm_get_mclk(rdev, true) * 10;
8779 wm_low.sclk =
8780 radeon_dpm_get_sclk(rdev, true) * 10;
8781 } else {
8782 wm_low.yclk = rdev->pm.current_mclk * 10;
8783 wm_low.sclk = rdev->pm.current_sclk * 10;
8784 }
8785
8786 wm_low.disp_clk = mode->clock;
8787 wm_low.src_width = mode->crtc_hdisplay;
8788 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
8789 wm_low.blank_time = line_time - wm_low.active_time;
8790 wm_low.interlaced = false;
8791 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
8792 wm_low.interlaced = true;
8793 wm_low.vsc = radeon_crtc->vsc;
8794 wm_low.vtaps = 1;
8795 if (radeon_crtc->rmx_type != RMX_OFF)
8796 wm_low.vtaps = 2;
8797 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
8798 wm_low.lb_size = lb_size;
8799 wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
8800 wm_low.num_heads = num_heads;
8801
8802 /* set for low clocks */
8803 latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
8804
8805 /* possibly force display priority to high */
8806 /* should really do this at mode validation time... */
8807 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
8808 !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
8809 !dce8_check_latency_hiding(&wm_low) ||
8810 (rdev->disp_priority == 2)) {
8811 DRM_DEBUG_KMS("force priority to high\n");
8812 }
8813 }
8814
8815 /* select wm A */
8816 wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
8817 tmp = wm_mask;
8818 tmp &= ~LATENCY_WATERMARK_MASK(3);
8819 tmp |= LATENCY_WATERMARK_MASK(1);
8820 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
8821 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
8822 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
8823 LATENCY_HIGH_WATERMARK(line_time)));
8824 /* select wm B */
8825 tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
8826 tmp &= ~LATENCY_WATERMARK_MASK(3);
8827 tmp |= LATENCY_WATERMARK_MASK(2);
8828 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
8829 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
8830 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
8831 LATENCY_HIGH_WATERMARK(line_time)));
8832 /* restore original selection */
8833 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
8834
8835 /* save values for DPM */
8836 radeon_crtc->line_time = line_time;
8837 radeon_crtc->wm_high = latency_watermark_a;
8838 radeon_crtc->wm_low = latency_watermark_b;
8839 }
8840
8841 /**
8842 * dce8_bandwidth_update - program display watermarks
8843 *
8844 * @rdev: radeon_device pointer
8845 *
8846 * Calculate and program the display watermarks and line
8847 * buffer allocation (CIK).
8848 */
8849 void dce8_bandwidth_update(struct radeon_device *rdev)
8850 {
8851 struct drm_display_mode *mode = NULL;
8852 u32 num_heads = 0, lb_size;
8853 int i;
8854
8855 radeon_update_display_priority(rdev);
8856
8857 for (i = 0; i < rdev->num_crtc; i++) {
8858 if (rdev->mode_info.crtcs[i]->base.enabled)
8859 num_heads++;
8860 }
8861 for (i = 0; i < rdev->num_crtc; i++) {
8862 mode = &rdev->mode_info.crtcs[i]->base.mode;
8863 lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
8864 dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
8865 }
8866 }
8867
8868 /**
8869 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
8870 *
8871 * @rdev: radeon_device pointer
8872 *
8873 * Fetches a GPU clock counter snapshot (CIK).
8874 * Returns the 64 bit clock counter snapshot.
8875 */
8876 uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
8877 {
8878 uint64_t clock;
8879
8880 mutex_lock(&rdev->gpu_clock_mutex);
8881 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
8882 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
8883 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
8884 mutex_unlock(&rdev->gpu_clock_mutex);
8885 return clock;
8886 }
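/* Editor's note: illustrative use of cik_get_gpu_clock_counter()
 * (hypothetical caller, not part of the driver). Two snapshots bracket
 * a workload and their difference is the elapsed GPU clock count:
 *
 *	uint64_t t0 = cik_get_gpu_clock_counter(rdev);
 *	... submit work and wait for it ...
 *	uint64_t t1 = cik_get_gpu_clock_counter(rdev);
 *	elapsed = t1 - t0;
 *
 * The mutex serializes the latch write (RLC_CAPTURE_GPU_CLOCK_COUNT)
 * against the LSB/MSB reads so the two halves stay consistent.
 */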
8887
8888 static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
8889 u32 cntl_reg, u32 status_reg)
8890 {
8891 int r, i;
8892 struct atom_clock_dividers dividers;
8893 uint32_t tmp;
8894
8895 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
8896 clock, false, &dividers);
8897 if (r)
8898 return r;
8899
8900 tmp = RREG32_SMC(cntl_reg);
8901 tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
8902 tmp |= dividers.post_divider;
8903 WREG32_SMC(cntl_reg, tmp);
8904
8905 for (i = 0; i < 100; i++) {
8906 if (RREG32_SMC(status_reg) & DCLK_STATUS)
8907 break;
8908 mdelay(10);
8909 }
8910 if (i == 100)
8911 return -ETIMEDOUT;
8912
8913 return 0;
8914 }
8915
8916 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
8917 {
8918 int r = 0;
8919
8920 r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
8921 if (r)
8922 return r;
8923
8924 r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
8925 return r;
8926 }
8927
8928 int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
8929 {
8930 int r, i;
8931 struct atom_clock_dividers dividers;
8932 u32 tmp;
8933
8934 r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
8935 ecclk, false, &dividers);
8936 if (r)
8937 return r;
8938
8939 for (i = 0; i < 100; i++) {
8940 if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
8941 break;
8942 mdelay(10);
8943 }
8944 if (i == 100)
8945 return -ETIMEDOUT;
8946
8947 tmp = RREG32_SMC(CG_ECLK_CNTL);
8948 tmp &= ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK);
8949 tmp |= dividers.post_divider;
8950 WREG32_SMC(CG_ECLK_CNTL, tmp);
8951
8952 for (i = 0; i < 100; i++) {
8953 if (RREG32_SMC(CG_ECLK_STATUS) & ECLK_STATUS)
8954 break;
8955 mdelay(10);
8956 }
8957 if (i == 100)
8958 return -ETIMEDOUT;
8959
8960 return 0;
8961 }
8962
8963 static void cik_pcie_gen3_enable(struct radeon_device *rdev)
8964 {
8965 struct pci_dev *root = rdev->pdev->bus->self;
8966 int bridge_pos, gpu_pos;
8967 u32 speed_cntl, mask, current_data_rate;
8968 int ret, i;
8969 u16 tmp16;
8970
8971 if (radeon_pcie_gen2 == 0)
8972 return;
8973
8974 if (rdev->flags & RADEON_IS_IGP)
8975 return;
8976
8977 if (!(rdev->flags & RADEON_IS_PCIE))
8978 return;
8979
8980 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
8981 if (ret != 0)
8982 return;
8983
8984 if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
8985 return;
8986
8987 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
8988 current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
8989 LC_CURRENT_DATA_RATE_SHIFT;
8990 if (mask & DRM_PCIE_SPEED_80) {
8991 if (current_data_rate == 2) {
8992 DRM_INFO("PCIE gen 3 link speeds already enabled\n");
8993 return;
8994 }
8995 DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
8996 } else if (mask & DRM_PCIE_SPEED_50) {
8997 if (current_data_rate == 1) {
8998 DRM_INFO("PCIE gen 2 link speeds already enabled\n");
8999 return;
9000 }
9001 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
9002 }
9003
9004 bridge_pos = pci_pcie_cap(root);
9005 if (!bridge_pos)
9006 return;
9007
9008 gpu_pos = pci_pcie_cap(rdev->pdev);
9009 if (!gpu_pos)
9010 return;
9011
9012 if (mask & DRM_PCIE_SPEED_80) {
9013 /* re-try equalization if gen3 is not already enabled */
9014 if (current_data_rate != 2) {
9015 u16 bridge_cfg, gpu_cfg;
9016 u16 bridge_cfg2, gpu_cfg2;
9017 u32 max_lw, current_lw, tmp;
9018
9019 pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
9020 pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
9021
9022 tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
9023 pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
9024
9025 tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
9026 pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
9027
9028 tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
9029 max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
9030 current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
9031
9032 if (current_lw < max_lw) {
9033 tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
9034 if (tmp & LC_RENEGOTIATION_SUPPORT) {
9035 tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
9036 tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
9037 tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
9038 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
9039 }
9040 }
9041
			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

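	/*
	 * Target link speed is LNKCTL2[3:0]: 1 = 2.5 GT/s (gen1),
	 * 2 = 5.0 GT/s (gen2), 3 = 8.0 GT/s (gen3).
	 */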
	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

9108 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
9109 speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
9110 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
9111
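	/* wait for the speed change to complete; hardware clears the initiate bit */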
	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}

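/**
 * cik_program_aspm - program Active State Power Management (CIK)
 *
 * @rdev: radeon_device pointer
 *
 * Configures PCIE ASPM (L0s/L1) and the related PLL/clock power-down
 * behaviour.  Skipped when ASPM is disabled via the radeon.aspm module
 * parameter, on IGPs, and on non-PCIE parts.
 */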
static void cik_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	/* XXX double check IGPs */
	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

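	/*
	 * Override the advertised N_FTS (number of fast training sequences
	 * required for L0s exit) with 0x24 instead of the hardware default.
	 */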
	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_P_CNTL, data);

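	/*
	 * Arm the L0s/L1 inactivity timers (a non-zero encoded value
	 * enables the corresponding ASPM entry).  PMI-to-L1 stays
	 * disabled unless L1 is enabled below.
	 */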
	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

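			/*
			 * Program the PLL power state used in the OFF and
			 * TXS2 link states on both PIF blocks (PB0/PB1) so
			 * the PHY PLLs can power down while the link is in L1.
			 */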
			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

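			/*
			 * Deeper power-down needs a stoppable reference
			 * clock, so only allow it when the upstream bridge
			 * advertises clock power management (CLKREQ#).
			 */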
			if (!disable_clkreq) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

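			/*
			 * With CLKREQ# available, allow power-down in L1 and
			 * L2/L3 and move the thermal-monitor, deep-sleep and
			 * MPLL bypass clocks off the BIF reference clock,
			 * presumably so that refclk can be gated (assumed
			 * intent, inferred from the register names below).
			 */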
			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				orig = data = RREG32_SMC(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32_SMC(THM_CLK_CNTL, data);

				orig = data = RREG32_SMC(MISC_CLK_CTRL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32_SMC(MISC_CLK_CTRL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_CNTL2, data);

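	/*
	 * Quirk: if the transmitted N_FTS field reads back fully saturated
	 * and the link is lane-reversed in both directions, back out of
	 * L0s by clearing its inactivity timer.
	 */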
	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
			}
		}
	}
}