Merge tag 'for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux...
[deliverable/linux.git] / drivers / gpu / drm / radeon / ni.c
CommitLineData
0af62b01
AD
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
0af62b01 25#include <linux/slab.h>
e0cd3608 26#include <linux/module.h>
760285e7 27#include <drm/drmP.h>
0af62b01
AD
28#include "radeon.h"
29#include "radeon_asic.h"
bfc1f97d 30#include "radeon_audio.h"
760285e7 31#include <drm/radeon_drm.h>
0af62b01
AD
32#include "nid.h"
33#include "atom.h"
34#include "ni_reg.h"
0c88a02e 35#include "cayman_blit_shaders.h"
138e4e16 36#include "radeon_ucode.h"
2948f5e6
AD
37#include "clearstate_cayman.h"
38
1fd11777 39static const u32 tn_rlc_save_restore_register_list[] =
2948f5e6
AD
40{
41 0x98fc,
42 0x98f0,
43 0x9834,
44 0x9838,
45 0x9870,
46 0x9874,
47 0x8a14,
48 0x8b24,
49 0x8bcc,
50 0x8b10,
51 0x8c30,
52 0x8d00,
53 0x8d04,
54 0x8c00,
55 0x8c04,
56 0x8c10,
57 0x8c14,
58 0x8d8c,
59 0x8cf0,
60 0x8e38,
61 0x9508,
62 0x9688,
63 0x9608,
64 0x960c,
65 0x9610,
66 0x9614,
67 0x88c4,
68 0x8978,
69 0x88d4,
70 0x900c,
71 0x9100,
72 0x913c,
73 0x90e8,
74 0x9354,
75 0xa008,
76 0x98f8,
77 0x9148,
78 0x914c,
79 0x3f94,
80 0x98f4,
81 0x9b7c,
82 0x3f8c,
83 0x8950,
84 0x8954,
85 0x8a18,
86 0x8b28,
87 0x9144,
88 0x3f90,
89 0x915c,
90 0x9160,
91 0x9178,
92 0x917c,
93 0x9180,
94 0x918c,
95 0x9190,
96 0x9194,
97 0x9198,
98 0x919c,
99 0x91a8,
100 0x91ac,
101 0x91b0,
102 0x91b4,
103 0x91b8,
104 0x91c4,
105 0x91c8,
106 0x91cc,
107 0x91d0,
108 0x91d4,
109 0x91e0,
110 0x91e4,
111 0x91ec,
112 0x91f0,
113 0x91f4,
114 0x9200,
115 0x9204,
116 0x929c,
117 0x8030,
118 0x9150,
119 0x9a60,
120 0x920c,
121 0x9210,
122 0x9228,
123 0x922c,
124 0x9244,
125 0x9248,
126 0x91e8,
127 0x9294,
128 0x9208,
129 0x9224,
130 0x9240,
131 0x9220,
132 0x923c,
133 0x9258,
134 0x9744,
135 0xa200,
136 0xa204,
137 0xa208,
138 0xa20c,
139 0x8d58,
140 0x9030,
141 0x9034,
142 0x9038,
143 0x903c,
144 0x9040,
145 0x9654,
146 0x897c,
147 0xa210,
148 0xa214,
149 0x9868,
150 0xa02c,
151 0x9664,
152 0x9698,
153 0x949c,
154 0x8e10,
155 0x8e18,
156 0x8c50,
157 0x8c58,
158 0x8c60,
159 0x8c68,
160 0x89b4,
161 0x9830,
162 0x802c,
163};
0af62b01 164
168757ea 165extern bool evergreen_is_display_hung(struct radeon_device *rdev);
187e3593 166extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
b9952a8a
AD
167extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
168extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
169extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
755d819e
AD
170extern void evergreen_mc_program(struct radeon_device *rdev);
171extern void evergreen_irq_suspend(struct radeon_device *rdev);
172extern int evergreen_mc_init(struct radeon_device *rdev);
d054ac16 173extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
b07759bf 174extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
f52382d7 175extern void evergreen_program_aspm(struct radeon_device *rdev);
2948f5e6
AD
176extern void sumo_rlc_fini(struct radeon_device *rdev);
177extern int sumo_rlc_init(struct radeon_device *rdev);
b5470b03 178extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
b9952a8a 179
0af62b01
AD
180/* Firmware Names */
181MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
182MODULE_FIRMWARE("radeon/BARTS_me.bin");
183MODULE_FIRMWARE("radeon/BARTS_mc.bin");
6596afd4 184MODULE_FIRMWARE("radeon/BARTS_smc.bin");
0af62b01
AD
185MODULE_FIRMWARE("radeon/BTC_rlc.bin");
186MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
187MODULE_FIRMWARE("radeon/TURKS_me.bin");
188MODULE_FIRMWARE("radeon/TURKS_mc.bin");
6596afd4 189MODULE_FIRMWARE("radeon/TURKS_smc.bin");
0af62b01
AD
190MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
191MODULE_FIRMWARE("radeon/CAICOS_me.bin");
192MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
6596afd4 193MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
9b8253ce
AD
194MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
195MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
196MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
197MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
69e0b57a 198MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
c420c745
AD
199MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
200MODULE_FIRMWARE("radeon/ARUBA_me.bin");
201MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
0af62b01 202
a2c96a21
AD
203
204static const u32 cayman_golden_registers2[] =
205{
206 0x3e5c, 0xffffffff, 0x00000000,
207 0x3e48, 0xffffffff, 0x00000000,
208 0x3e4c, 0xffffffff, 0x00000000,
209 0x3e64, 0xffffffff, 0x00000000,
210 0x3e50, 0xffffffff, 0x00000000,
211 0x3e60, 0xffffffff, 0x00000000
212};
213
214static const u32 cayman_golden_registers[] =
215{
216 0x5eb4, 0xffffffff, 0x00000002,
217 0x5e78, 0x8f311ff1, 0x001000f0,
218 0x3f90, 0xffff0000, 0xff000000,
219 0x9148, 0xffff0000, 0xff000000,
220 0x3f94, 0xffff0000, 0xff000000,
221 0x914c, 0xffff0000, 0xff000000,
222 0xc78, 0x00000080, 0x00000080,
223 0xbd4, 0x70073777, 0x00011003,
224 0xd02c, 0xbfffff1f, 0x08421000,
225 0xd0b8, 0x73773777, 0x02011003,
226 0x5bc0, 0x00200000, 0x50100000,
227 0x98f8, 0x33773777, 0x02011003,
228 0x98fc, 0xffffffff, 0x76541032,
229 0x7030, 0x31000311, 0x00000011,
230 0x2f48, 0x33773777, 0x42010001,
231 0x6b28, 0x00000010, 0x00000012,
232 0x7728, 0x00000010, 0x00000012,
233 0x10328, 0x00000010, 0x00000012,
234 0x10f28, 0x00000010, 0x00000012,
235 0x11b28, 0x00000010, 0x00000012,
236 0x12728, 0x00000010, 0x00000012,
237 0x240c, 0x000007ff, 0x00000000,
238 0x8a14, 0xf000001f, 0x00000007,
239 0x8b24, 0x3fff3fff, 0x00ff0fff,
240 0x8b10, 0x0000ff0f, 0x00000000,
241 0x28a4c, 0x07ffffff, 0x06000000,
242 0x10c, 0x00000001, 0x00010003,
243 0xa02c, 0xffffffff, 0x0000009b,
244 0x913c, 0x0000010f, 0x01000100,
245 0x8c04, 0xf8ff00ff, 0x40600060,
246 0x28350, 0x00000f01, 0x00000000,
247 0x9508, 0x3700001f, 0x00000002,
248 0x960c, 0xffffffff, 0x54763210,
249 0x88c4, 0x001f3ae3, 0x00000082,
250 0x88d0, 0xffffffff, 0x0f40df40,
251 0x88d4, 0x0000001f, 0x00000010,
252 0x8974, 0xffffffff, 0x00000000
253};
254
255static const u32 dvst_golden_registers2[] =
256{
257 0x8f8, 0xffffffff, 0,
258 0x8fc, 0x00380000, 0,
259 0x8f8, 0xffffffff, 1,
260 0x8fc, 0x0e000000, 0
261};
262
263static const u32 dvst_golden_registers[] =
264{
265 0x690, 0x3fff3fff, 0x20c00033,
266 0x918c, 0x0fff0fff, 0x00010006,
267 0x91a8, 0x0fff0fff, 0x00010006,
268 0x9150, 0xffffdfff, 0x6e944040,
269 0x917c, 0x0fff0fff, 0x00030002,
270 0x9198, 0x0fff0fff, 0x00030002,
271 0x915c, 0x0fff0fff, 0x00010000,
272 0x3f90, 0xffff0001, 0xff000000,
273 0x9178, 0x0fff0fff, 0x00070000,
274 0x9194, 0x0fff0fff, 0x00070000,
275 0x9148, 0xffff0001, 0xff000000,
276 0x9190, 0x0fff0fff, 0x00090008,
277 0x91ac, 0x0fff0fff, 0x00090008,
278 0x3f94, 0xffff0000, 0xff000000,
279 0x914c, 0xffff0000, 0xff000000,
280 0x929c, 0x00000fff, 0x00000001,
281 0x55e4, 0xff607fff, 0xfc000100,
282 0x8a18, 0xff000fff, 0x00000100,
283 0x8b28, 0xff000fff, 0x00000100,
284 0x9144, 0xfffc0fff, 0x00000100,
285 0x6ed8, 0x00010101, 0x00010000,
286 0x9830, 0xffffffff, 0x00000000,
287 0x9834, 0xf00fffff, 0x00000400,
288 0x9838, 0xfffffffe, 0x00000000,
289 0xd0c0, 0xff000fff, 0x00000100,
290 0xd02c, 0xbfffff1f, 0x08421000,
291 0xd0b8, 0x73773777, 0x12010001,
292 0x5bb0, 0x000000f0, 0x00000070,
293 0x98f8, 0x73773777, 0x12010001,
294 0x98fc, 0xffffffff, 0x00000010,
295 0x9b7c, 0x00ff0000, 0x00fc0000,
296 0x8030, 0x00001f0f, 0x0000100a,
297 0x2f48, 0x73773777, 0x12010001,
298 0x2408, 0x00030000, 0x000c007f,
299 0x8a14, 0xf000003f, 0x00000007,
300 0x8b24, 0x3fff3fff, 0x00ff0fff,
301 0x8b10, 0x0000ff0f, 0x00000000,
302 0x28a4c, 0x07ffffff, 0x06000000,
303 0x4d8, 0x00000fff, 0x00000100,
304 0xa008, 0xffffffff, 0x00010000,
305 0x913c, 0xffff03ff, 0x01000100,
306 0x8c00, 0x000000ff, 0x00000003,
307 0x8c04, 0xf8ff00ff, 0x40600060,
308 0x8cf0, 0x1fff1fff, 0x08e00410,
309 0x28350, 0x00000f01, 0x00000000,
310 0x9508, 0xf700071f, 0x00000002,
311 0x960c, 0xffffffff, 0x54763210,
312 0x20ef8, 0x01ff01ff, 0x00000002,
313 0x20e98, 0xfffffbff, 0x00200000,
314 0x2015c, 0xffffffff, 0x00000f40,
315 0x88c4, 0x001f3ae3, 0x00000082,
316 0x8978, 0x3fffffff, 0x04050140,
317 0x88d4, 0x0000001f, 0x00000010,
318 0x8974, 0xffffffff, 0x00000000
319};
320
321static const u32 scrapper_golden_registers[] =
322{
323 0x690, 0x3fff3fff, 0x20c00033,
324 0x918c, 0x0fff0fff, 0x00010006,
325 0x918c, 0x0fff0fff, 0x00010006,
326 0x91a8, 0x0fff0fff, 0x00010006,
327 0x91a8, 0x0fff0fff, 0x00010006,
328 0x9150, 0xffffdfff, 0x6e944040,
329 0x9150, 0xffffdfff, 0x6e944040,
330 0x917c, 0x0fff0fff, 0x00030002,
331 0x917c, 0x0fff0fff, 0x00030002,
332 0x9198, 0x0fff0fff, 0x00030002,
333 0x9198, 0x0fff0fff, 0x00030002,
334 0x915c, 0x0fff0fff, 0x00010000,
335 0x915c, 0x0fff0fff, 0x00010000,
336 0x3f90, 0xffff0001, 0xff000000,
337 0x3f90, 0xffff0001, 0xff000000,
338 0x9178, 0x0fff0fff, 0x00070000,
339 0x9178, 0x0fff0fff, 0x00070000,
340 0x9194, 0x0fff0fff, 0x00070000,
341 0x9194, 0x0fff0fff, 0x00070000,
342 0x9148, 0xffff0001, 0xff000000,
343 0x9148, 0xffff0001, 0xff000000,
344 0x9190, 0x0fff0fff, 0x00090008,
345 0x9190, 0x0fff0fff, 0x00090008,
346 0x91ac, 0x0fff0fff, 0x00090008,
347 0x91ac, 0x0fff0fff, 0x00090008,
348 0x3f94, 0xffff0000, 0xff000000,
349 0x3f94, 0xffff0000, 0xff000000,
350 0x914c, 0xffff0000, 0xff000000,
351 0x914c, 0xffff0000, 0xff000000,
352 0x929c, 0x00000fff, 0x00000001,
353 0x929c, 0x00000fff, 0x00000001,
354 0x55e4, 0xff607fff, 0xfc000100,
355 0x8a18, 0xff000fff, 0x00000100,
356 0x8a18, 0xff000fff, 0x00000100,
357 0x8b28, 0xff000fff, 0x00000100,
358 0x8b28, 0xff000fff, 0x00000100,
359 0x9144, 0xfffc0fff, 0x00000100,
360 0x9144, 0xfffc0fff, 0x00000100,
361 0x6ed8, 0x00010101, 0x00010000,
362 0x9830, 0xffffffff, 0x00000000,
363 0x9830, 0xffffffff, 0x00000000,
364 0x9834, 0xf00fffff, 0x00000400,
365 0x9834, 0xf00fffff, 0x00000400,
366 0x9838, 0xfffffffe, 0x00000000,
367 0x9838, 0xfffffffe, 0x00000000,
368 0xd0c0, 0xff000fff, 0x00000100,
369 0xd02c, 0xbfffff1f, 0x08421000,
370 0xd02c, 0xbfffff1f, 0x08421000,
371 0xd0b8, 0x73773777, 0x12010001,
372 0xd0b8, 0x73773777, 0x12010001,
373 0x5bb0, 0x000000f0, 0x00000070,
374 0x98f8, 0x73773777, 0x12010001,
375 0x98f8, 0x73773777, 0x12010001,
376 0x98fc, 0xffffffff, 0x00000010,
377 0x98fc, 0xffffffff, 0x00000010,
378 0x9b7c, 0x00ff0000, 0x00fc0000,
379 0x9b7c, 0x00ff0000, 0x00fc0000,
380 0x8030, 0x00001f0f, 0x0000100a,
381 0x8030, 0x00001f0f, 0x0000100a,
382 0x2f48, 0x73773777, 0x12010001,
383 0x2f48, 0x73773777, 0x12010001,
384 0x2408, 0x00030000, 0x000c007f,
385 0x8a14, 0xf000003f, 0x00000007,
386 0x8a14, 0xf000003f, 0x00000007,
387 0x8b24, 0x3fff3fff, 0x00ff0fff,
388 0x8b24, 0x3fff3fff, 0x00ff0fff,
389 0x8b10, 0x0000ff0f, 0x00000000,
390 0x8b10, 0x0000ff0f, 0x00000000,
391 0x28a4c, 0x07ffffff, 0x06000000,
392 0x28a4c, 0x07ffffff, 0x06000000,
393 0x4d8, 0x00000fff, 0x00000100,
394 0x4d8, 0x00000fff, 0x00000100,
395 0xa008, 0xffffffff, 0x00010000,
396 0xa008, 0xffffffff, 0x00010000,
397 0x913c, 0xffff03ff, 0x01000100,
398 0x913c, 0xffff03ff, 0x01000100,
399 0x90e8, 0x001fffff, 0x010400c0,
400 0x8c00, 0x000000ff, 0x00000003,
401 0x8c00, 0x000000ff, 0x00000003,
402 0x8c04, 0xf8ff00ff, 0x40600060,
403 0x8c04, 0xf8ff00ff, 0x40600060,
404 0x8c30, 0x0000000f, 0x00040005,
405 0x8cf0, 0x1fff1fff, 0x08e00410,
406 0x8cf0, 0x1fff1fff, 0x08e00410,
407 0x900c, 0x00ffffff, 0x0017071f,
408 0x28350, 0x00000f01, 0x00000000,
409 0x28350, 0x00000f01, 0x00000000,
410 0x9508, 0xf700071f, 0x00000002,
411 0x9508, 0xf700071f, 0x00000002,
412 0x9688, 0x00300000, 0x0017000f,
413 0x960c, 0xffffffff, 0x54763210,
414 0x960c, 0xffffffff, 0x54763210,
415 0x20ef8, 0x01ff01ff, 0x00000002,
416 0x20e98, 0xfffffbff, 0x00200000,
417 0x2015c, 0xffffffff, 0x00000f40,
418 0x88c4, 0x001f3ae3, 0x00000082,
419 0x88c4, 0x001f3ae3, 0x00000082,
420 0x8978, 0x3fffffff, 0x04050140,
421 0x8978, 0x3fffffff, 0x04050140,
422 0x88d4, 0x0000001f, 0x00000010,
423 0x88d4, 0x0000001f, 0x00000010,
424 0x8974, 0xffffffff, 0x00000000,
425 0x8974, 0xffffffff, 0x00000000
426};
427
428static void ni_init_golden_registers(struct radeon_device *rdev)
429{
430 switch (rdev->family) {
431 case CHIP_CAYMAN:
432 radeon_program_register_sequence(rdev,
433 cayman_golden_registers,
434 (const u32)ARRAY_SIZE(cayman_golden_registers));
435 radeon_program_register_sequence(rdev,
436 cayman_golden_registers2,
437 (const u32)ARRAY_SIZE(cayman_golden_registers2));
438 break;
439 case CHIP_ARUBA:
440 if ((rdev->pdev->device == 0x9900) ||
441 (rdev->pdev->device == 0x9901) ||
442 (rdev->pdev->device == 0x9903) ||
443 (rdev->pdev->device == 0x9904) ||
444 (rdev->pdev->device == 0x9905) ||
445 (rdev->pdev->device == 0x9906) ||
446 (rdev->pdev->device == 0x9907) ||
447 (rdev->pdev->device == 0x9908) ||
448 (rdev->pdev->device == 0x9909) ||
449 (rdev->pdev->device == 0x990A) ||
450 (rdev->pdev->device == 0x990B) ||
451 (rdev->pdev->device == 0x990C) ||
452 (rdev->pdev->device == 0x990D) ||
453 (rdev->pdev->device == 0x990E) ||
454 (rdev->pdev->device == 0x990F) ||
455 (rdev->pdev->device == 0x9910) ||
456 (rdev->pdev->device == 0x9913) ||
457 (rdev->pdev->device == 0x9917) ||
458 (rdev->pdev->device == 0x9918)) {
459 radeon_program_register_sequence(rdev,
460 dvst_golden_registers,
461 (const u32)ARRAY_SIZE(dvst_golden_registers));
462 radeon_program_register_sequence(rdev,
463 dvst_golden_registers2,
464 (const u32)ARRAY_SIZE(dvst_golden_registers2));
465 } else {
466 radeon_program_register_sequence(rdev,
467 scrapper_golden_registers,
468 (const u32)ARRAY_SIZE(scrapper_golden_registers));
469 radeon_program_register_sequence(rdev,
470 dvst_golden_registers2,
471 (const u32)ARRAY_SIZE(dvst_golden_registers2));
472 }
473 break;
474 default:
475 break;
476 }
477}
478
0af62b01
AD
479#define BTC_IO_MC_REGS_SIZE 29
480
481static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
482 {0x00000077, 0xff010100},
483 {0x00000078, 0x00000000},
484 {0x00000079, 0x00001434},
485 {0x0000007a, 0xcc08ec08},
486 {0x0000007b, 0x00040000},
487 {0x0000007c, 0x000080c0},
488 {0x0000007d, 0x09000000},
489 {0x0000007e, 0x00210404},
490 {0x00000081, 0x08a8e800},
491 {0x00000082, 0x00030444},
492 {0x00000083, 0x00000000},
493 {0x00000085, 0x00000001},
494 {0x00000086, 0x00000002},
495 {0x00000087, 0x48490000},
496 {0x00000088, 0x20244647},
497 {0x00000089, 0x00000005},
498 {0x0000008b, 0x66030000},
499 {0x0000008c, 0x00006603},
500 {0x0000008d, 0x00000100},
501 {0x0000008f, 0x00001c0a},
502 {0x00000090, 0xff000001},
503 {0x00000094, 0x00101101},
504 {0x00000095, 0x00000fff},
505 {0x00000096, 0x00116fff},
506 {0x00000097, 0x60010000},
507 {0x00000098, 0x10010000},
508 {0x00000099, 0x00006000},
509 {0x0000009a, 0x00001000},
510 {0x0000009f, 0x00946a00}
511};
512
513static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
514 {0x00000077, 0xff010100},
515 {0x00000078, 0x00000000},
516 {0x00000079, 0x00001434},
517 {0x0000007a, 0xcc08ec08},
518 {0x0000007b, 0x00040000},
519 {0x0000007c, 0x000080c0},
520 {0x0000007d, 0x09000000},
521 {0x0000007e, 0x00210404},
522 {0x00000081, 0x08a8e800},
523 {0x00000082, 0x00030444},
524 {0x00000083, 0x00000000},
525 {0x00000085, 0x00000001},
526 {0x00000086, 0x00000002},
527 {0x00000087, 0x48490000},
528 {0x00000088, 0x20244647},
529 {0x00000089, 0x00000005},
530 {0x0000008b, 0x66030000},
531 {0x0000008c, 0x00006603},
532 {0x0000008d, 0x00000100},
533 {0x0000008f, 0x00001c0a},
534 {0x00000090, 0xff000001},
535 {0x00000094, 0x00101101},
536 {0x00000095, 0x00000fff},
537 {0x00000096, 0x00116fff},
538 {0x00000097, 0x60010000},
539 {0x00000098, 0x10010000},
540 {0x00000099, 0x00006000},
541 {0x0000009a, 0x00001000},
542 {0x0000009f, 0x00936a00}
543};
544
545static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
546 {0x00000077, 0xff010100},
547 {0x00000078, 0x00000000},
548 {0x00000079, 0x00001434},
549 {0x0000007a, 0xcc08ec08},
550 {0x0000007b, 0x00040000},
551 {0x0000007c, 0x000080c0},
552 {0x0000007d, 0x09000000},
553 {0x0000007e, 0x00210404},
554 {0x00000081, 0x08a8e800},
555 {0x00000082, 0x00030444},
556 {0x00000083, 0x00000000},
557 {0x00000085, 0x00000001},
558 {0x00000086, 0x00000002},
559 {0x00000087, 0x48490000},
560 {0x00000088, 0x20244647},
561 {0x00000089, 0x00000005},
562 {0x0000008b, 0x66030000},
563 {0x0000008c, 0x00006603},
564 {0x0000008d, 0x00000100},
565 {0x0000008f, 0x00001c0a},
566 {0x00000090, 0xff000001},
567 {0x00000094, 0x00101101},
568 {0x00000095, 0x00000fff},
569 {0x00000096, 0x00116fff},
570 {0x00000097, 0x60010000},
571 {0x00000098, 0x10010000},
572 {0x00000099, 0x00006000},
573 {0x0000009a, 0x00001000},
574 {0x0000009f, 0x00916a00}
575};
576
9b8253ce
AD
577static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
578 {0x00000077, 0xff010100},
579 {0x00000078, 0x00000000},
580 {0x00000079, 0x00001434},
581 {0x0000007a, 0xcc08ec08},
582 {0x0000007b, 0x00040000},
583 {0x0000007c, 0x000080c0},
584 {0x0000007d, 0x09000000},
585 {0x0000007e, 0x00210404},
586 {0x00000081, 0x08a8e800},
587 {0x00000082, 0x00030444},
588 {0x00000083, 0x00000000},
589 {0x00000085, 0x00000001},
590 {0x00000086, 0x00000002},
591 {0x00000087, 0x48490000},
592 {0x00000088, 0x20244647},
593 {0x00000089, 0x00000005},
594 {0x0000008b, 0x66030000},
595 {0x0000008c, 0x00006603},
596 {0x0000008d, 0x00000100},
597 {0x0000008f, 0x00001c0a},
598 {0x00000090, 0xff000001},
599 {0x00000094, 0x00101101},
600 {0x00000095, 0x00000fff},
601 {0x00000096, 0x00116fff},
602 {0x00000097, 0x60010000},
603 {0x00000098, 0x10010000},
604 {0x00000099, 0x00006000},
605 {0x0000009a, 0x00001000},
606 {0x0000009f, 0x00976b00}
607};
608
755d819e 609int ni_mc_load_microcode(struct radeon_device *rdev)
0af62b01
AD
610{
611 const __be32 *fw_data;
612 u32 mem_type, running, blackout = 0;
613 u32 *io_mc_regs;
9b8253ce 614 int i, ucode_size, regs_size;
0af62b01
AD
615
616 if (!rdev->mc_fw)
617 return -EINVAL;
618
619 switch (rdev->family) {
620 case CHIP_BARTS:
621 io_mc_regs = (u32 *)&barts_io_mc_regs;
9b8253ce
AD
622 ucode_size = BTC_MC_UCODE_SIZE;
623 regs_size = BTC_IO_MC_REGS_SIZE;
0af62b01
AD
624 break;
625 case CHIP_TURKS:
626 io_mc_regs = (u32 *)&turks_io_mc_regs;
9b8253ce
AD
627 ucode_size = BTC_MC_UCODE_SIZE;
628 regs_size = BTC_IO_MC_REGS_SIZE;
0af62b01
AD
629 break;
630 case CHIP_CAICOS:
631 default:
632 io_mc_regs = (u32 *)&caicos_io_mc_regs;
9b8253ce
AD
633 ucode_size = BTC_MC_UCODE_SIZE;
634 regs_size = BTC_IO_MC_REGS_SIZE;
635 break;
636 case CHIP_CAYMAN:
637 io_mc_regs = (u32 *)&cayman_io_mc_regs;
638 ucode_size = CAYMAN_MC_UCODE_SIZE;
639 regs_size = BTC_IO_MC_REGS_SIZE;
0af62b01
AD
640 break;
641 }
642
643 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
644 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
645
646 if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
647 if (running) {
648 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
649 WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
650 }
651
652 /* reset the engine and set to writable */
653 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
654 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
655
656 /* load mc io regs */
9b8253ce 657 for (i = 0; i < regs_size; i++) {
0af62b01
AD
658 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
659 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
660 }
661 /* load the MC ucode */
662 fw_data = (const __be32 *)rdev->mc_fw->data;
9b8253ce 663 for (i = 0; i < ucode_size; i++)
0af62b01
AD
664 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
665
666 /* put the engine back into the active state */
667 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
668 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
669 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
670
671 /* wait for training to complete */
0e2c978e
AD
672 for (i = 0; i < rdev->usec_timeout; i++) {
673 if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
674 break;
675 udelay(1);
676 }
0af62b01
AD
677
678 if (running)
679 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
680 }
681
682 return 0;
683}
684
685int ni_init_microcode(struct radeon_device *rdev)
686{
0af62b01
AD
687 const char *chip_name;
688 const char *rlc_chip_name;
689 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
6596afd4 690 size_t smc_req_size = 0;
0af62b01
AD
691 char fw_name[30];
692 int err;
693
694 DRM_DEBUG("\n");
695
0af62b01
AD
696 switch (rdev->family) {
697 case CHIP_BARTS:
698 chip_name = "BARTS";
699 rlc_chip_name = "BTC";
9b8253ce
AD
700 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
701 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
702 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
703 mc_req_size = BTC_MC_UCODE_SIZE * 4;
6596afd4 704 smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
0af62b01
AD
705 break;
706 case CHIP_TURKS:
707 chip_name = "TURKS";
708 rlc_chip_name = "BTC";
9b8253ce
AD
709 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
710 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
711 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
712 mc_req_size = BTC_MC_UCODE_SIZE * 4;
6596afd4 713 smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
0af62b01
AD
714 break;
715 case CHIP_CAICOS:
716 chip_name = "CAICOS";
717 rlc_chip_name = "BTC";
9b8253ce
AD
718 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
719 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
720 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
721 mc_req_size = BTC_MC_UCODE_SIZE * 4;
6596afd4 722 smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
9b8253ce
AD
723 break;
724 case CHIP_CAYMAN:
725 chip_name = "CAYMAN";
726 rlc_chip_name = "CAYMAN";
727 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
728 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
729 rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
730 mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
69e0b57a 731 smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
0af62b01 732 break;
c420c745
AD
733 case CHIP_ARUBA:
734 chip_name = "ARUBA";
735 rlc_chip_name = "ARUBA";
736 /* pfp/me same size as CAYMAN */
737 pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
738 me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
739 rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
740 mc_req_size = 0;
741 break;
0af62b01
AD
742 default: BUG();
743 }
744
0af62b01
AD
745 DRM_INFO("Loading %s Microcode\n", chip_name);
746
747 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
0a168933 748 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
0af62b01
AD
749 if (err)
750 goto out;
751 if (rdev->pfp_fw->size != pfp_req_size) {
752 printk(KERN_ERR
753 "ni_cp: Bogus length %zu in firmware \"%s\"\n",
754 rdev->pfp_fw->size, fw_name);
755 err = -EINVAL;
756 goto out;
757 }
758
759 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
0a168933 760 err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
0af62b01
AD
761 if (err)
762 goto out;
763 if (rdev->me_fw->size != me_req_size) {
764 printk(KERN_ERR
765 "ni_cp: Bogus length %zu in firmware \"%s\"\n",
766 rdev->me_fw->size, fw_name);
767 err = -EINVAL;
768 }
769
770 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
0a168933 771 err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
0af62b01
AD
772 if (err)
773 goto out;
774 if (rdev->rlc_fw->size != rlc_req_size) {
775 printk(KERN_ERR
776 "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
777 rdev->rlc_fw->size, fw_name);
778 err = -EINVAL;
779 }
780
c420c745
AD
781 /* no MC ucode on TN */
782 if (!(rdev->flags & RADEON_IS_IGP)) {
783 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
0a168933 784 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
c420c745
AD
785 if (err)
786 goto out;
787 if (rdev->mc_fw->size != mc_req_size) {
788 printk(KERN_ERR
789 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
790 rdev->mc_fw->size, fw_name);
791 err = -EINVAL;
792 }
0af62b01 793 }
6596afd4 794
69e0b57a 795 if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
6596afd4 796 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
0a168933 797 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
8a53fa23
AD
798 if (err) {
799 printk(KERN_ERR
800 "smc: error loading firmware \"%s\"\n",
801 fw_name);
802 release_firmware(rdev->smc_fw);
803 rdev->smc_fw = NULL;
d8367112 804 err = 0;
8a53fa23 805 } else if (rdev->smc_fw->size != smc_req_size) {
6596afd4
AD
806 printk(KERN_ERR
807 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
808 rdev->mc_fw->size, fw_name);
809 err = -EINVAL;
810 }
811 }
812
0af62b01 813out:
0af62b01
AD
814 if (err) {
815 if (err != -EINVAL)
816 printk(KERN_ERR
817 "ni_cp: Failed to load firmware \"%s\"\n",
818 fw_name);
819 release_firmware(rdev->pfp_fw);
820 rdev->pfp_fw = NULL;
821 release_firmware(rdev->me_fw);
822 rdev->me_fw = NULL;
823 release_firmware(rdev->rlc_fw);
824 rdev->rlc_fw = NULL;
825 release_firmware(rdev->mc_fw);
826 rdev->mc_fw = NULL;
827 }
828 return err;
829}
830
29a15221
AD
831int tn_get_temp(struct radeon_device *rdev)
832{
833 u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
834 int actual_temp = (temp / 8) - 49;
835
836 return actual_temp * 1000;
837}
838
fecf1d07
AD
839/*
840 * Core functions
841 */
fecf1d07
AD
842static void cayman_gpu_init(struct radeon_device *rdev)
843{
fecf1d07
AD
844 u32 gb_addr_config = 0;
845 u32 mc_shared_chmap, mc_arb_ramcfg;
fecf1d07
AD
846 u32 cgts_tcc_disable;
847 u32 sx_debug_1;
848 u32 smx_dc_ctl0;
fecf1d07
AD
849 u32 cgts_sm_ctrl_reg;
850 u32 hdp_host_path_cntl;
851 u32 tmp;
416a2bd2 852 u32 disabled_rb_mask;
fecf1d07
AD
853 int i, j;
854
855 switch (rdev->family) {
856 case CHIP_CAYMAN:
fecf1d07
AD
857 rdev->config.cayman.max_shader_engines = 2;
858 rdev->config.cayman.max_pipes_per_simd = 4;
859 rdev->config.cayman.max_tile_pipes = 8;
860 rdev->config.cayman.max_simds_per_se = 12;
861 rdev->config.cayman.max_backends_per_se = 4;
862 rdev->config.cayman.max_texture_channel_caches = 8;
863 rdev->config.cayman.max_gprs = 256;
864 rdev->config.cayman.max_threads = 256;
865 rdev->config.cayman.max_gs_threads = 32;
866 rdev->config.cayman.max_stack_entries = 512;
867 rdev->config.cayman.sx_num_of_sets = 8;
868 rdev->config.cayman.sx_max_export_size = 256;
869 rdev->config.cayman.sx_max_export_pos_size = 64;
870 rdev->config.cayman.sx_max_export_smx_size = 192;
871 rdev->config.cayman.max_hw_contexts = 8;
872 rdev->config.cayman.sq_num_cf_insts = 2;
873
874 rdev->config.cayman.sc_prim_fifo_size = 0x100;
875 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
876 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
416a2bd2 877 gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
fecf1d07 878 break;
7b76e479
AD
879 case CHIP_ARUBA:
880 default:
881 rdev->config.cayman.max_shader_engines = 1;
882 rdev->config.cayman.max_pipes_per_simd = 4;
883 rdev->config.cayman.max_tile_pipes = 2;
884 if ((rdev->pdev->device == 0x9900) ||
d430f7db
AD
885 (rdev->pdev->device == 0x9901) ||
886 (rdev->pdev->device == 0x9905) ||
887 (rdev->pdev->device == 0x9906) ||
888 (rdev->pdev->device == 0x9907) ||
889 (rdev->pdev->device == 0x9908) ||
890 (rdev->pdev->device == 0x9909) ||
e4d17063
AD
891 (rdev->pdev->device == 0x990B) ||
892 (rdev->pdev->device == 0x990C) ||
893 (rdev->pdev->device == 0x990F) ||
d430f7db 894 (rdev->pdev->device == 0x9910) ||
e4d17063 895 (rdev->pdev->device == 0x9917) ||
62d1f92e
AD
896 (rdev->pdev->device == 0x9999) ||
897 (rdev->pdev->device == 0x999C)) {
7b76e479
AD
898 rdev->config.cayman.max_simds_per_se = 6;
899 rdev->config.cayman.max_backends_per_se = 2;
e2f6c88f
AD
900 rdev->config.cayman.max_hw_contexts = 8;
901 rdev->config.cayman.sx_max_export_size = 256;
902 rdev->config.cayman.sx_max_export_pos_size = 64;
903 rdev->config.cayman.sx_max_export_smx_size = 192;
7b76e479 904 } else if ((rdev->pdev->device == 0x9903) ||
d430f7db
AD
905 (rdev->pdev->device == 0x9904) ||
906 (rdev->pdev->device == 0x990A) ||
e4d17063
AD
907 (rdev->pdev->device == 0x990D) ||
908 (rdev->pdev->device == 0x990E) ||
d430f7db 909 (rdev->pdev->device == 0x9913) ||
62d1f92e
AD
910 (rdev->pdev->device == 0x9918) ||
911 (rdev->pdev->device == 0x999D)) {
7b76e479
AD
912 rdev->config.cayman.max_simds_per_se = 4;
913 rdev->config.cayman.max_backends_per_se = 2;
e2f6c88f
AD
914 rdev->config.cayman.max_hw_contexts = 8;
915 rdev->config.cayman.sx_max_export_size = 256;
916 rdev->config.cayman.sx_max_export_pos_size = 64;
917 rdev->config.cayman.sx_max_export_smx_size = 192;
d430f7db
AD
918 } else if ((rdev->pdev->device == 0x9919) ||
919 (rdev->pdev->device == 0x9990) ||
920 (rdev->pdev->device == 0x9991) ||
921 (rdev->pdev->device == 0x9994) ||
e4d17063
AD
922 (rdev->pdev->device == 0x9995) ||
923 (rdev->pdev->device == 0x9996) ||
924 (rdev->pdev->device == 0x999A) ||
d430f7db 925 (rdev->pdev->device == 0x99A0)) {
7b76e479
AD
926 rdev->config.cayman.max_simds_per_se = 3;
927 rdev->config.cayman.max_backends_per_se = 1;
e2f6c88f
AD
928 rdev->config.cayman.max_hw_contexts = 4;
929 rdev->config.cayman.sx_max_export_size = 128;
930 rdev->config.cayman.sx_max_export_pos_size = 32;
931 rdev->config.cayman.sx_max_export_smx_size = 96;
7b76e479
AD
932 } else {
933 rdev->config.cayman.max_simds_per_se = 2;
934 rdev->config.cayman.max_backends_per_se = 1;
e2f6c88f
AD
935 rdev->config.cayman.max_hw_contexts = 4;
936 rdev->config.cayman.sx_max_export_size = 128;
937 rdev->config.cayman.sx_max_export_pos_size = 32;
938 rdev->config.cayman.sx_max_export_smx_size = 96;
7b76e479
AD
939 }
940 rdev->config.cayman.max_texture_channel_caches = 2;
941 rdev->config.cayman.max_gprs = 256;
942 rdev->config.cayman.max_threads = 256;
943 rdev->config.cayman.max_gs_threads = 32;
944 rdev->config.cayman.max_stack_entries = 512;
945 rdev->config.cayman.sx_num_of_sets = 8;
7b76e479
AD
946 rdev->config.cayman.sq_num_cf_insts = 2;
947
948 rdev->config.cayman.sc_prim_fifo_size = 0x40;
949 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
950 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
416a2bd2 951 gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
7b76e479 952 break;
fecf1d07
AD
953 }
954
955 /* Initialize HDP */
956 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
957 WREG32((0x2c14 + j), 0x00000000);
958 WREG32((0x2c18 + j), 0x00000000);
959 WREG32((0x2c1c + j), 0x00000000);
960 WREG32((0x2c20 + j), 0x00000000);
961 WREG32((0x2c24 + j), 0x00000000);
962 }
963
964 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
acc1522a
CK
965 WREG32(SRBM_INT_CNTL, 0x1);
966 WREG32(SRBM_INT_ACK, 0x1);
fecf1d07 967
d054ac16
AD
968 evergreen_fix_pci_max_read_req_size(rdev);
969
fecf1d07
AD
970 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
971 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
972
fecf1d07
AD
973 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
974 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
975 if (rdev->config.cayman.mem_row_size_in_kb > 4)
976 rdev->config.cayman.mem_row_size_in_kb = 4;
977 /* XXX use MC settings? */
978 rdev->config.cayman.shader_engine_tile_size = 32;
979 rdev->config.cayman.num_gpus = 1;
980 rdev->config.cayman.multi_gpu_tile_size = 64;
981
fecf1d07
AD
982 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
983 rdev->config.cayman.num_tile_pipes = (1 << tmp);
984 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
985 rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
986 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
987 rdev->config.cayman.num_shader_engines = tmp + 1;
988 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
989 rdev->config.cayman.num_gpus = tmp + 1;
990 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
991 rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
992 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
993 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
994
416a2bd2 995
fecf1d07
AD
996 /* setup tiling info dword. gb_addr_config is not adequate since it does
997 * not have bank info, so create a custom tiling dword.
998 * bits 3:0 num_pipes
999 * bits 7:4 num_banks
1000 * bits 11:8 group_size
1001 * bits 15:12 row_size
1002 */
1003 rdev->config.cayman.tile_config = 0;
1004 switch (rdev->config.cayman.num_tile_pipes) {
1005 case 1:
1006 default:
1007 rdev->config.cayman.tile_config |= (0 << 0);
1008 break;
1009 case 2:
1010 rdev->config.cayman.tile_config |= (1 << 0);
1011 break;
1012 case 4:
1013 rdev->config.cayman.tile_config |= (2 << 0);
1014 break;
1015 case 8:
1016 rdev->config.cayman.tile_config |= (3 << 0);
1017 break;
1018 }
7b76e479
AD
1019
1020 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
1021 if (rdev->flags & RADEON_IS_IGP)
1f73cca7 1022 rdev->config.cayman.tile_config |= 1 << 4;
29d65406 1023 else {
5b23c904
AD
1024 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
1025 case 0: /* four banks */
29d65406 1026 rdev->config.cayman.tile_config |= 0 << 4;
5b23c904
AD
1027 break;
1028 case 1: /* eight banks */
1029 rdev->config.cayman.tile_config |= 1 << 4;
1030 break;
1031 case 2: /* sixteen banks */
1032 default:
1033 rdev->config.cayman.tile_config |= 2 << 4;
1034 break;
1035 }
29d65406 1036 }
fecf1d07 1037 rdev->config.cayman.tile_config |=
cde5083b 1038 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
fecf1d07
AD
1039 rdev->config.cayman.tile_config |=
1040 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
1041
416a2bd2
AD
1042 tmp = 0;
1043 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
1044 u32 rb_disable_bitmap;
1045
1046 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1047 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1048 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
1049 tmp <<= 4;
1050 tmp |= rb_disable_bitmap;
1051 }
1052 /* enabled rb are just the one not disabled :) */
1053 disabled_rb_mask = tmp;
cedb655a
AD
1054 tmp = 0;
1055 for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
1056 tmp |= (1 << i);
1057 /* if all the backends are disabled, fix it up here */
1058 if ((disabled_rb_mask & tmp) == tmp) {
1059 for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
1060 disabled_rb_mask &= ~(1 << i);
1061 }
416a2bd2 1062
65fcf668
AD
1063 for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
1064 u32 simd_disable_bitmap;
1065
1066 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1067 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1068 simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
1069 simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
1070 tmp <<= 16;
1071 tmp |= simd_disable_bitmap;
1072 }
1073 rdev->config.cayman.active_simds = hweight32(~tmp);
1074
416a2bd2
AD
1075 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
1076 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
1077
fecf1d07
AD
1078 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1079 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
7c1c7c18
AD
1080 if (ASIC_IS_DCE6(rdev))
1081 WREG32(DMIF_ADDR_CALC, gb_addr_config);
fecf1d07 1082 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
f60cbd11
AD
1083 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1084 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
9a21059d
CK
1085 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
1086 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
1087 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
fecf1d07 1088
8f612b23
AD
1089 if ((rdev->config.cayman.max_backends_per_se == 1) &&
1090 (rdev->flags & RADEON_IS_IGP)) {
dbfb00c3 1091 if ((disabled_rb_mask & 3) == 2) {
8f612b23
AD
1092 /* RB1 disabled, RB0 enabled */
1093 tmp = 0x00000000;
dbfb00c3
AD
1094 } else {
1095 /* RB0 disabled, RB1 enabled */
1096 tmp = 0x11111111;
8f612b23
AD
1097 }
1098 } else {
1099 tmp = gb_addr_config & NUM_PIPES_MASK;
1100 tmp = r6xx_remap_render_backend(rdev, tmp,
1101 rdev->config.cayman.max_backends_per_se *
1102 rdev->config.cayman.max_shader_engines,
1103 CAYMAN_MAX_BACKENDS, disabled_rb_mask);
1104 }
416a2bd2 1105 WREG32(GB_BACKEND_MAP, tmp);
fecf1d07 1106
416a2bd2
AD
1107 cgts_tcc_disable = 0xffff0000;
1108 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
1109 cgts_tcc_disable &= ~(1 << (16 + i));
fecf1d07
AD
1110 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
1111 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
fecf1d07
AD
1112 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
1113 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
1114
1115 /* reprogram the shader complex */
1116 cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
1117 for (i = 0; i < 16; i++)
1118 WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
1119 WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
1120
1121 /* set HW defaults for 3D engine */
1122 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
1123
1124 sx_debug_1 = RREG32(SX_DEBUG_1);
1125 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
1126 WREG32(SX_DEBUG_1, sx_debug_1);
1127
1128 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
1129 smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
285e042d 1130 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
fecf1d07
AD
1131 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
1132
1133 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
1134
1135 /* need to be explicitly zero-ed */
1136 WREG32(VGT_OFFCHIP_LDS_BASE, 0);
1137 WREG32(SQ_LSTMP_RING_BASE, 0);
1138 WREG32(SQ_HSTMP_RING_BASE, 0);
1139 WREG32(SQ_ESTMP_RING_BASE, 0);
1140 WREG32(SQ_GSTMP_RING_BASE, 0);
1141 WREG32(SQ_VSTMP_RING_BASE, 0);
1142 WREG32(SQ_PSTMP_RING_BASE, 0);
1143
1144 WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
1145
285e042d
DA
1146 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
1147 POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
1148 SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
fecf1d07 1149
285e042d
DA
1150 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
1151 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
1152 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
fecf1d07
AD
1153
1154
1155 WREG32(VGT_NUM_INSTANCES, 1);
1156
1157 WREG32(CP_PERFMON_CNTL, 0);
1158
285e042d 1159 WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
fecf1d07
AD
1160 FETCH_FIFO_HIWATER(0x4) |
1161 DONE_FIFO_HIWATER(0xe0) |
1162 ALU_UPDATE_FIFO_HIWATER(0x8)));
1163
1164 WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
1165 WREG32(SQ_CONFIG, (VC_ENABLE |
1166 EXPORT_SRC_C |
1167 GFX_PRIO(0) |
1168 CS1_PRIO(0) |
1169 CS2_PRIO(1)));
1170 WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
1171
1172 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
1173 FORCE_EOV_MAX_REZ_CNT(255)));
1174
1175 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
1176 AUTO_INVLD_EN(ES_AND_GS_AUTO));
1177
1178 WREG32(VGT_GS_VERTEX_REUSE, 16);
1179 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1180
1181 WREG32(CB_PERF_CTR0_SEL_0, 0);
1182 WREG32(CB_PERF_CTR0_SEL_1, 0);
1183 WREG32(CB_PERF_CTR1_SEL_0, 0);
1184 WREG32(CB_PERF_CTR1_SEL_1, 0);
1185 WREG32(CB_PERF_CTR2_SEL_0, 0);
1186 WREG32(CB_PERF_CTR2_SEL_1, 0);
1187 WREG32(CB_PERF_CTR3_SEL_0, 0);
1188 WREG32(CB_PERF_CTR3_SEL_1, 0);
1189
0b65f83f
DA
1190 tmp = RREG32(HDP_MISC_CNTL);
1191 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
1192 WREG32(HDP_MISC_CNTL, tmp);
1193
fecf1d07
AD
1194 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
1195 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
1196
1197 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
1198
1199 udelay(50);
8ba10463
AD
1200
1201 /* set clockgating golden values on TN */
1202 if (rdev->family == CHIP_ARUBA) {
1203 tmp = RREG32_CG(CG_CGTT_LOCAL_0);
1204 tmp &= ~0x00380000;
1205 WREG32_CG(CG_CGTT_LOCAL_0, tmp);
1206 tmp = RREG32_CG(CG_CGTT_LOCAL_1);
1207 tmp &= ~0x0e000000;
1208 WREG32_CG(CG_CGTT_LOCAL_1, tmp);
1209 }
fecf1d07
AD
1210}
1211
fa8198ea
AD
1212/*
1213 * GART
1214 */
1215void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
1216{
1217 /* flush hdp cache */
1218 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1219
1220 /* bits 0-7 are the VM contexts0-7 */
1221 WREG32(VM_INVALIDATE_REQUEST, 1);
1222}
1223
1109ca09 1224static int cayman_pcie_gart_enable(struct radeon_device *rdev)
fa8198ea 1225{
721604a1 1226 int i, r;
fa8198ea 1227
c9a1be96 1228 if (rdev->gart.robj == NULL) {
fa8198ea
AD
1229 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
1230 return -EINVAL;
1231 }
1232 r = radeon_gart_table_vram_pin(rdev);
1233 if (r)
1234 return r;
fa8198ea 1235 /* Setup TLB control */
721604a1
JG
1236 WREG32(MC_VM_MX_L1_TLB_CNTL,
1237 (0xA << 7) |
1238 ENABLE_L1_TLB |
fa8198ea
AD
1239 ENABLE_L1_FRAGMENT_PROCESSING |
1240 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
721604a1 1241 ENABLE_ADVANCED_DRIVER_MODEL |
fa8198ea
AD
1242 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
1243 /* Setup L2 cache */
1244 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
ec3dbbcb 1245 ENABLE_L2_FRAGMENT_PROCESSING |
fa8198ea
AD
1246 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1247 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
1248 EFFECTIVE_L2_QUEUE_SIZE(7) |
1249 CONTEXT1_IDENTITY_ACCESS_MODE(1));
1250 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
1251 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
ec3dbbcb 1252 BANK_SELECT(6) |
fa8198ea
AD
1253 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
1254 /* setup context0 */
1255 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1256 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1257 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1258 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1259 (u32)(rdev->dummy_page.addr >> 12));
1260 WREG32(VM_CONTEXT0_CNTL2, 0);
1261 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1262 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
721604a1
JG
1263
1264 WREG32(0x15D4, 0);
1265 WREG32(0x15D8, 0);
1266 WREG32(0x15DC, 0);
1267
1268 /* empty context1-7 */
23d4f1f2
AD
1269 /* Assign the pt base to something valid for now; the pts used for
1270 * the VMs are determined by the application and setup and assigned
1271 * on the fly in the vm part of radeon_gart.c
1272 */
721604a1
JG
1273 for (i = 1; i < 8; i++) {
1274 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
c1a7ca0d 1275 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
721604a1 1276 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
054e01d6 1277 rdev->vm_manager.saved_table_addr[i]);
721604a1
JG
1278 }
1279
1280 /* enable context1-7 */
1281 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
1282 (u32)(rdev->dummy_page.addr >> 12));
ae133a11 1283 WREG32(VM_CONTEXT1_CNTL2, 4);
fa87e62d 1284 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
4510fb98 1285 PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
ae133a11
CK
1286 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
1287 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
1288 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
1289 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
1290 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
1291 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
1292 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
1293 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
1294 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
1295 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
1296 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
1297 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
fa8198ea
AD
1298
1299 cayman_pcie_gart_tlb_flush(rdev);
fcf4de5a
TV
1300 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1301 (unsigned)(rdev->mc.gtt_size >> 20),
1302 (unsigned long long)rdev->gart.table_addr);
fa8198ea
AD
1303 rdev->gart.ready = true;
1304 return 0;
1305}
1306
1109ca09 1307static void cayman_pcie_gart_disable(struct radeon_device *rdev)
fa8198ea 1308{
054e01d6
CK
1309 unsigned i;
1310
1311 for (i = 1; i < 8; ++i) {
1312 rdev->vm_manager.saved_table_addr[i] = RREG32(
1313 VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
1314 }
1315
fa8198ea
AD
1316 /* Disable all tables */
1317 WREG32(VM_CONTEXT0_CNTL, 0);
1318 WREG32(VM_CONTEXT1_CNTL, 0);
1319 /* Setup TLB control */
1320 WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
1321 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1322 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
1323 /* Setup L2 cache */
1324 WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1325 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
1326 EFFECTIVE_L2_QUEUE_SIZE(7) |
1327 CONTEXT1_IDENTITY_ACCESS_MODE(1));
1328 WREG32(VM_L2_CNTL2, 0);
1329 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
1330 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
c9a1be96 1331 radeon_gart_table_vram_unpin(rdev);
fa8198ea
AD
1332}
1333
1109ca09 1334static void cayman_pcie_gart_fini(struct radeon_device *rdev)
fa8198ea
AD
1335{
1336 cayman_pcie_gart_disable(rdev);
1337 radeon_gart_table_vram_free(rdev);
1338 radeon_gart_fini(rdev);
1339}
1340
1b37078b
AD
1341void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1342 int ring, u32 cp_int_cntl)
1343{
1344 u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1345
1346 WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1347 WREG32(CP_INT_CNTL, cp_int_cntl);
1348}
1349
0c88a02e
AD
1350/*
1351 * CP.
1352 */
b40e7e16
AD
1353void cayman_fence_ring_emit(struct radeon_device *rdev,
1354 struct radeon_fence *fence)
1355{
1356 struct radeon_ring *ring = &rdev->ring[fence->ring];
1357 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
10e9ffae
AD
1358 u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1359 PACKET3_SH_ACTION_ENA;
b40e7e16 1360
721604a1 1361 /* flush read cache over gart for this vmid */
b40e7e16 1362 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
10e9ffae 1363 radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
b40e7e16
AD
1364 radeon_ring_write(ring, 0xFFFFFFFF);
1365 radeon_ring_write(ring, 0);
1366 radeon_ring_write(ring, 10); /* poll interval */
1367 /* EVENT_WRITE_EOP - flush caches, send int */
1368 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
1369 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
5e167cdb 1370 radeon_ring_write(ring, lower_32_bits(addr));
b40e7e16
AD
1371 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
1372 radeon_ring_write(ring, fence->seq);
1373 radeon_ring_write(ring, 0);
1374}
1375
721604a1
JG
1376void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1377{
876dc9f3 1378 struct radeon_ring *ring = &rdev->ring[ib->ring];
7c42bc1a 1379 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
10e9ffae
AD
1380 u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1381 PACKET3_SH_ACTION_ENA;
721604a1
JG
1382
1383 /* set to DX10/11 mode */
1384 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1385 radeon_ring_write(ring, 1);
45df6803
CK
1386
1387 if (ring->rptr_save_reg) {
1388 uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
1389 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1390 radeon_ring_write(ring, ((ring->rptr_save_reg -
1391 PACKET3_SET_CONFIG_REG_START) >> 2));
1392 radeon_ring_write(ring, next_rptr);
1393 }
1394
721604a1
JG
1395 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1396 radeon_ring_write(ring,
1397#ifdef __BIG_ENDIAN
1398 (2 << 0) |
1399#endif
1400 (ib->gpu_addr & 0xFFFFFFFC));
1401 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
7c42bc1a 1402 radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
721604a1
JG
1403
1404 /* flush read cache over gart for this vmid */
721604a1 1405 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
10e9ffae 1406 radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
721604a1
JG
1407 radeon_ring_write(ring, 0xFFFFFFFF);
1408 radeon_ring_write(ring, 0);
7c42bc1a 1409 radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
721604a1
JG
1410}
1411
0c88a02e
AD
1412static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
1413{
1414 if (enable)
1415 WREG32(CP_ME_CNTL, 0);
1416 else {
50efa51a
AD
1417 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1418 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
0c88a02e
AD
1419 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
1420 WREG32(SCRATCH_UMSK, 0);
f60cbd11 1421 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
0c88a02e
AD
1422 }
1423}
1424
ea31bf69
AD
1425u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
1426 struct radeon_ring *ring)
1427{
1428 u32 rptr;
1429
1430 if (rdev->wb.enabled)
1431 rptr = rdev->wb.wb[ring->rptr_offs/4];
1432 else {
1433 if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
1434 rptr = RREG32(CP_RB0_RPTR);
1435 else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
1436 rptr = RREG32(CP_RB1_RPTR);
1437 else
1438 rptr = RREG32(CP_RB2_RPTR);
1439 }
1440
1441 return rptr;
1442}
1443
1444u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
1445 struct radeon_ring *ring)
1446{
1447 u32 wptr;
1448
1449 if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
1450 wptr = RREG32(CP_RB0_WPTR);
1451 else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
1452 wptr = RREG32(CP_RB1_WPTR);
1453 else
1454 wptr = RREG32(CP_RB2_WPTR);
1455
1456 return wptr;
1457}
1458
1459void cayman_gfx_set_wptr(struct radeon_device *rdev,
1460 struct radeon_ring *ring)
1461{
1462 if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
1463 WREG32(CP_RB0_WPTR, ring->wptr);
1464 (void)RREG32(CP_RB0_WPTR);
1465 } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
1466 WREG32(CP_RB1_WPTR, ring->wptr);
1467 (void)RREG32(CP_RB1_WPTR);
1468 } else {
1469 WREG32(CP_RB2_WPTR, ring->wptr);
1470 (void)RREG32(CP_RB2_WPTR);
1471 }
1472}
1473
0c88a02e
AD
1474static int cayman_cp_load_microcode(struct radeon_device *rdev)
1475{
1476 const __be32 *fw_data;
1477 int i;
1478
1479 if (!rdev->me_fw || !rdev->pfp_fw)
1480 return -EINVAL;
1481
1482 cayman_cp_enable(rdev, false);
1483
1484 fw_data = (const __be32 *)rdev->pfp_fw->data;
1485 WREG32(CP_PFP_UCODE_ADDR, 0);
1486 for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
1487 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1488 WREG32(CP_PFP_UCODE_ADDR, 0);
1489
1490 fw_data = (const __be32 *)rdev->me_fw->data;
1491 WREG32(CP_ME_RAM_WADDR, 0);
1492 for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
1493 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1494
1495 WREG32(CP_PFP_UCODE_ADDR, 0);
1496 WREG32(CP_ME_RAM_WADDR, 0);
1497 WREG32(CP_ME_RAM_RADDR, 0);
1498 return 0;
1499}
1500
1501static int cayman_cp_start(struct radeon_device *rdev)
1502{
e32eb50d 1503 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
0c88a02e
AD
1504 int r, i;
1505
e32eb50d 1506 r = radeon_ring_lock(rdev, ring, 7);
0c88a02e
AD
1507 if (r) {
1508 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1509 return r;
1510 }
e32eb50d
CK
1511 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1512 radeon_ring_write(ring, 0x1);
1513 radeon_ring_write(ring, 0x0);
1514 radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
1515 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1516 radeon_ring_write(ring, 0);
1517 radeon_ring_write(ring, 0);
1538a9e0 1518 radeon_ring_unlock_commit(rdev, ring, false);
0c88a02e
AD
1519
1520 cayman_cp_enable(rdev, true);
1521
e32eb50d 1522 r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
0c88a02e
AD
1523 if (r) {
1524 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1525 return r;
1526 }
1527
1528 /* setup clear context state */
e32eb50d
CK
1529 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1530 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
0c88a02e
AD
1531
1532 for (i = 0; i < cayman_default_size; i++)
e32eb50d 1533 radeon_ring_write(ring, cayman_default_state[i]);
0c88a02e 1534
e32eb50d
CK
1535 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1536 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
0c88a02e
AD
1537
1538 /* set clear context state */
e32eb50d
CK
1539 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1540 radeon_ring_write(ring, 0);
0c88a02e
AD
1541
1542 /* SQ_VTX_BASE_VTX_LOC */
e32eb50d
CK
1543 radeon_ring_write(ring, 0xc0026f00);
1544 radeon_ring_write(ring, 0x00000000);
1545 radeon_ring_write(ring, 0x00000000);
1546 radeon_ring_write(ring, 0x00000000);
0c88a02e
AD
1547
1548 /* Clear consts */
e32eb50d
CK
1549 radeon_ring_write(ring, 0xc0036f00);
1550 radeon_ring_write(ring, 0x00000bc4);
1551 radeon_ring_write(ring, 0xffffffff);
1552 radeon_ring_write(ring, 0xffffffff);
1553 radeon_ring_write(ring, 0xffffffff);
0c88a02e 1554
e32eb50d
CK
1555 radeon_ring_write(ring, 0xc0026900);
1556 radeon_ring_write(ring, 0x00000316);
1557 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1558 radeon_ring_write(ring, 0x00000010); /* */
9b91d18d 1559
1538a9e0 1560 radeon_ring_unlock_commit(rdev, ring, false);
0c88a02e
AD
1561
1562 /* XXX init other rings */
1563
1564 return 0;
1565}
1566
755d819e
AD
1567static void cayman_cp_fini(struct radeon_device *rdev)
1568{
45df6803 1569 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
755d819e 1570 cayman_cp_enable(rdev, false);
45df6803
CK
1571 radeon_ring_fini(rdev, ring);
1572 radeon_scratch_free(rdev, ring->rptr_save_reg);
755d819e
AD
1573}
1574
static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	static const unsigned cp_rb_rptr[] = {
		CP_RB0_RPTR,
		CP_RB1_RPTR,
		CP_RB2_RPTR
	};
	static const unsigned cp_rb_wptr[] = {
		CP_RB0_WPTR,
		CP_RB1_WPTR,
		CP_RB2_WPTR
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = order_base_2(ring->ring_size / 8);
		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->wptr = 0;
		WREG32(cp_rb_rptr[i], 0);
		WREG32(cp_rb_wptr[i], ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

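/**
 * cayman_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Read the GRBM, SRBM, DMA and VM status registers and build a mask
 * of the blocks that appear to be hung (cayman/TN).
 * Returns the reset mask of blocks that need a soft reset.
 */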
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(GRBM_STATUS);
	if (tmp & (PA_BUSY | SC_BUSY |
		   SH_BUSY | SX_BUSY |
		   TA_BUSY | VGT_BUSY |
		   DB_BUSY | CB_BUSY |
		   GDS_BUSY | SPI_BUSY |
		   IA_BUSY | IA_BUSY_NO_DMA))
		reset_mask |= RADEON_RESET_GFX;

	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
		   CP_BUSY | CP_COHERENCY_BUSY))
		reset_mask |= RADEON_RESET_CP;

	if (tmp & GRBM_EE_BUSY)
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG 0 */
	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* DMA_STATUS_REG 1 */
	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS2 */
	tmp = RREG32(SRBM_STATUS2);
	if (tmp & DMA_BUSY)
		reset_mask |= RADEON_RESET_DMA;

	if (tmp & DMA1_BUSY)
		reset_mask |= RADEON_RESET_DMA1;

	/* SRBM_STATUS */
	tmp = RREG32(SRBM_STATUS);
	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
		reset_mask |= RADEON_RESET_RLC;

	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;

	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;

	if (tmp & GRBM_RQ_PENDING)
		reset_mask |= RADEON_RESET_GRBM;

	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
		   MCC_BUSY | MCD_BUSY))
		reset_mask |= RADEON_RESET_MC;

	if (evergreen_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* VM_L2_STATUS */
	tmp = RREG32(VM_L2_STATUS);
	if (tmp & L2_BUSY)
		reset_mask |= RADEON_RESET_VMC;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

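/**
 * cayman_gpu_soft_reset - soft reset the requested blocks
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of blocks to reset (RADEON_RESET_*)
 *
 * Halt the CP and DMA engines, stop the MC, then pulse the GRBM and
 * SRBM soft reset bits for the blocks selected in @reset_mask (cayman/TN).
 */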
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}

	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_SPI |
			SOFT_RESET_SH |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= SOFT_RESET_MC;
	}

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}

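/**
 * cayman_asic_reset - attempt to reset the GPU
 *
 * @rdev: radeon_device pointer
 *
 * Soft reset the blocks that appear hung; if some blocks are still
 * busy afterwards, fall back to a PCI config reset (cayman/TN).
 * Returns 0.
 */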
int cayman_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	cayman_gpu_soft_reset(rdev, reset_mask);

	reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (reset_mask)
		evergreen_gpu_pci_config_reset(rdev);

	r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

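/**
 * cayman_startup - program the ASIC into a usable state
 *
 * @rdev: radeon_device pointer
 *
 * Bring the hardware up: load microcode, enable the GART, write-back
 * buffers and interrupts, and start the CP, DMA and UVD rings and the
 * VM manager (cayman/TN).  Called from init and resume.
 * Returns 0 on success, error code on failure.
 */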
static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);
	/* enable aspm */
	evergreen_program_aspm(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);

	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
		rdev->rlc.reg_list_size =
			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
		rdev->rlc.cs_data = cayman_cs_data;
		r = sumo_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = uvd_v2_2_resume(rdev);
	if (!r) {
		r = radeon_fence_driver_start_ring(rdev,
						   R600_RING_TYPE_UVD_INDEX);
		if (r)
			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
	}
	if (r)
		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	if (ring->ring_size) {
		r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
				     RADEON_CP_PACKET2);
		if (!r)
			r = uvd_v1_0_init(rdev);
		if (r)
			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r)
		return r;

	return 0;
}

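/**
 * cayman_resume - resume the ASIC after suspend
 *
 * @rdev: radeon_device pointer
 *
 * Re-post the card via the ATOM BIOS, restore the golden registers,
 * resume power management and run cayman_startup() (cayman/TN).
 * Returns 0 on success, error code on failure.
 */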
int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	ni_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

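/**
 * cayman_suspend - bring the hardware down safely
 *
 * @rdev: radeon_device pointer
 *
 * Suspend power management, stop the CP, DMA and UVD engines, then
 * disable interrupts, write-back and the GART (cayman/TN).
 * Returns 0.
 */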
int cayman_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_suspend(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * call the ASIC-specific functions. This should also allow us to
 * remove a bunch of callback functions like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* init golden registers */
	ni_init_golden_registers(rdev);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	r = radeon_uvd_init(rdev);
	if (!r) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		ring->ring_obj = NULL;
		r600_ring_init(rdev, ring, 4096);
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			sumo_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

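/**
 * cayman_fini - tear down the driver state for the ASIC
 *
 * @rdev: radeon_device pointer
 *
 * Stop the engines and free everything set up in cayman_init():
 * rings, interrupts, VM manager, GART, BIOS copy, etc. (cayman/TN).
 */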
void cayman_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
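/**
 * cayman_vm_init - set up VM manager parameters
 *
 * @rdev: radeon_device pointer
 *
 * Set the number of VM contexts (8) and the VRAM base offset used by
 * the VM page tables (cayman/TN).
 * Returns 0.
 */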
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}

/**
 * cayman_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (cayman/TN).
 */
void cayman_vm_decode_fault(struct radeon_device *rdev,
			    u32 status, u32 addr)
{
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char *block;

	switch (mc_id) {
	case 32:
	case 16:
	case 96:
	case 80:
	case 160:
	case 144:
	case 224:
	case 208:
		block = "CB";
		break;
	case 33:
	case 17:
	case 97:
	case 81:
	case 161:
	case 145:
	case 225:
	case 209:
		block = "CB_FMASK";
		break;
	case 34:
	case 18:
	case 98:
	case 82:
	case 162:
	case 146:
	case 226:
	case 210:
		block = "CB_CMASK";
		break;
	case 35:
	case 19:
	case 99:
	case 83:
	case 163:
	case 147:
	case 227:
	case 211:
		block = "CB_IMMED";
		break;
	case 36:
	case 20:
	case 100:
	case 84:
	case 164:
	case 148:
	case 228:
	case 212:
		block = "DB";
		break;
	case 37:
	case 21:
	case 101:
	case 85:
	case 165:
	case 149:
	case 229:
	case 213:
		block = "DB_HTILE";
		break;
	case 38:
	case 22:
	case 102:
	case 86:
	case 166:
	case 150:
	case 230:
	case 214:
		block = "SX";
		break;
	case 39:
	case 23:
	case 103:
	case 87:
	case 167:
	case 151:
	case 231:
	case 215:
		block = "DB_STEN";
		break;
	case 40:
	case 24:
	case 104:
	case 88:
	case 232:
	case 216:
	case 168:
	case 152:
		block = "TC_TFETCH";
		break;
	case 41:
	case 25:
	case 105:
	case 89:
	case 233:
	case 217:
	case 169:
	case 153:
		block = "TC_VFETCH";
		break;
	case 42:
	case 26:
	case 106:
	case 90:
	case 234:
	case 218:
	case 170:
	case 154:
		block = "VC";
		break;
	case 112:
		block = "CP";
		break;
	case 113:
	case 114:
		block = "SH";
		break;
	case 115:
		block = "VGT";
		break;
	case 178:
		block = "IH";
		break;
	case 51:
		block = "RLC";
		break;
	case 55:
		block = "DMA";
		break;
	case 56:
		block = "HDP";
		break;
	default:
		block = "unknown";
		break;
	}

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_id);
}

/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: vm id to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}