drm/radeon/dpm: add debugfs support for cayman
drivers/gpu/drm/radeon/ni_dpm.c
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "nid.h"
#include "r600_dpm.h"
#include "ni_dpm.h"
#include "atom.h"
#include <linux/math64.h>

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END 0xC000

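/*
 * Per-SKU CAC (capacitance * activity) weight tables for the Cayman
 * variants (XT/Pro/LE).  The values are positional initializers, so the
 * field meanings follow struct ni_cac_weights in ni_dpm.h; they are
 * characterization data and not meant to be edited by hand.
 */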
static const struct ni_cac_weights cac_weights_cayman_xt =
{
	0x15,
	0x2,
	0x19,
	0x2,
	0x8,
	0x14,
	0x2,
	0x16,
	0xE,
	0x17,
	0x13,
	0x2B,
	0x10,
	0x7,
	0x5,
	0x5,
	0x5,
	0x2,
	0x3,
	0x9,
	0x10,
	0x10,
	0x2B,
	0xA,
	0x9,
	0x4,
	0xD,
	0xD,
	0x3E,
	0x18,
	0x14,
	0,
	0x3,
	0x3,
	0x5,
	0,
	0x2,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0x1CC,
	0,
	0x164,
	1,
	1,
	1,
	1,
	12,
	12,
	12,
	0x12,
	0x1F,
	132,
	5,
	7,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	true
};

static const struct ni_cac_weights cac_weights_cayman_pro =
{
	0x16,
	0x4,
	0x10,
	0x2,
	0xA,
	0x16,
	0x2,
	0x18,
	0x10,
	0x1A,
	0x16,
	0x2D,
	0x12,
	0xA,
	0x6,
	0x6,
	0x6,
	0x2,
	0x4,
	0xB,
	0x11,
	0x11,
	0x2D,
	0xC,
	0xC,
	0x7,
	0x10,
	0x10,
	0x3F,
	0x1A,
	0x16,
	0,
	0x7,
	0x4,
	0x6,
	1,
	0x2,
	0x1,
	0,
	0,
	0,
	0,
	0,
	0,
	0x30,
	0,
	0x1CF,
	0,
	0x166,
	1,
	1,
	1,
	1,
	12,
	12,
	12,
	0x15,
	0x1F,
	132,
	6,
	6,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	true
};

static const struct ni_cac_weights cac_weights_cayman_le =
{
	0x7,
	0xE,
	0x1,
	0xA,
	0x1,
	0x3F,
	0x2,
	0x18,
	0x10,
	0x1A,
	0x1,
	0x3F,
	0x1,
	0xE,
	0x6,
	0x6,
	0x6,
	0x2,
	0x4,
	0x9,
	0x1A,
	0x1A,
	0x2C,
	0xA,
	0x11,
	0x8,
	0x19,
	0x19,
	0x1,
	0x1,
	0x1A,
	0,
	0x8,
	0x5,
	0x8,
	0x1,
	0x3,
	0x1,
	0,
	0,
	0,
	0,
	0,
	0,
	0x38,
	0x38,
	0x239,
	0x3,
	0x18A,
	1,
	1,
	1,
	1,
	12,
	12,
	12,
	0x15,
	0x22,
	132,
	6,
	6,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
	true
};

#define NISLANDS_MGCG_SEQUENCE  300

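/*
 * Clock-gating register sequences.  Each entry is a
 * { register offset, value, mask } triple, applied in order by
 * btc_program_mgcg_hw_sequence(), hence the *_LENGTH macros dividing
 * by three u32s per entry.
 */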
static const u32 cayman_cgcg_cgls_default[] =
{
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff
};
#define CAYMAN_CGCG_CGLS_DEFAULT_LENGTH	(sizeof(cayman_cgcg_cgls_default) / (3 * sizeof(u32)))

static const u32 cayman_cgcg_cgls_disable[] =
{
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00000644, 0x000f7902, 0x001f4180,
	0x00000644, 0x000f3802, 0x001f4180
};
#define CAYMAN_CGCG_CGLS_DISABLE_LENGTH	(sizeof(cayman_cgcg_cgls_disable) / (3 * sizeof(u32)))

static const u32 cayman_cgcg_cgls_enable[] =
{
	0x00000644, 0x000f7882, 0x001f4080,
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000020, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000021, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000022, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000023, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000024, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000025, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000026, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000027, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000028, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000029, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000002a, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x0000002b, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff
};
#define CAYMAN_CGCG_CGLS_ENABLE_LENGTH	(sizeof(cayman_cgcg_cgls_enable) / (3 * sizeof(u32)))

static const u32 cayman_mgcg_default[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00003fc4, 0xc0000000, 0xffffffff,
	0x00005448, 0x00000100, 0xffffffff,
	0x000055e4, 0x00000100, 0xffffffff,
	0x0000160c, 0x00000100, 0xffffffff,
	0x00008984, 0x06000100, 0xffffffff,
	0x0000c164, 0x00000100, 0xffffffff,
	0x00008a18, 0x00000100, 0xffffffff,
	0x0000897c, 0x06000100, 0xffffffff,
	0x00008b28, 0x00000100, 0xffffffff,
	0x00009144, 0x00800200, 0xffffffff,
	0x00009a60, 0x00000100, 0xffffffff,
	0x00009868, 0x00000100, 0xffffffff,
	0x00008d58, 0x00000100, 0xffffffff,
	0x00009510, 0x00000100, 0xffffffff,
	0x0000949c, 0x00000100, 0xffffffff,
	0x00009654, 0x00000100, 0xffffffff,
	0x00009030, 0x00000100, 0xffffffff,
	0x00009034, 0x00000100, 0xffffffff,
	0x00009038, 0x00000100, 0xffffffff,
	0x0000903c, 0x00000100, 0xffffffff,
	0x00009040, 0x00000100, 0xffffffff,
	0x0000a200, 0x00000100, 0xffffffff,
	0x0000a204, 0x00000100, 0xffffffff,
	0x0000a208, 0x00000100, 0xffffffff,
	0x0000a20c, 0x00000100, 0xffffffff,
	0x00009744, 0x00000100, 0xffffffff,
	0x00003f80, 0x00000100, 0xffffffff,
	0x0000a210, 0x00000100, 0xffffffff,
	0x0000a214, 0x00000100, 0xffffffff,
	0x000004d8, 0x00000100, 0xffffffff,
	0x00009664, 0x00000100, 0xffffffff,
	0x00009698, 0x00000100, 0xffffffff,
	0x000004d4, 0x00000200, 0xffffffff,
	0x000004d0, 0x00000000, 0xffffffff,
	0x000030cc, 0x00000104, 0xffffffff,
	0x0000d0c0, 0x00000100, 0xffffffff,
	0x0000d8c0, 0x00000100, 0xffffffff,
	0x0000802c, 0x40000000, 0xffffffff,
	0x00003fc4, 0x40000000, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009224, 0x00070000, 0xffffffff,
	0x00009228, 0x00030002, 0xffffffff,
	0x0000922c, 0x00050004, 0xffffffff,
	0x00009238, 0x00010006, 0xffffffff,
	0x0000923c, 0x00090008, 0xffffffff,
	0x00009240, 0x00070000, 0xffffffff,
	0x00009244, 0x00030002, 0xffffffff,
	0x00009248, 0x00050004, 0xffffffff,
	0x00009254, 0x00010006, 0xffffffff,
	0x00009258, 0x00090008, 0xffffffff,
	0x0000925c, 0x00070000, 0xffffffff,
	0x00009260, 0x00030002, 0xffffffff,
	0x00009264, 0x00050004, 0xffffffff,
	0x00009270, 0x00010006, 0xffffffff,
	0x00009274, 0x00090008, 0xffffffff,
	0x00009278, 0x00070000, 0xffffffff,
	0x0000927c, 0x00030002, 0xffffffff,
	0x00009280, 0x00050004, 0xffffffff,
	0x0000928c, 0x00010006, 0xffffffff,
	0x00009290, 0x00090008, 0xffffffff,
	0x000092a8, 0x00070000, 0xffffffff,
	0x000092ac, 0x00030002, 0xffffffff,
	0x000092b0, 0x00050004, 0xffffffff,
	0x000092bc, 0x00010006, 0xffffffff,
	0x000092c0, 0x00090008, 0xffffffff,
	0x000092c4, 0x00070000, 0xffffffff,
	0x000092c8, 0x00030002, 0xffffffff,
	0x000092cc, 0x00050004, 0xffffffff,
	0x000092d8, 0x00010006, 0xffffffff,
	0x000092dc, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	0x0000802c, 0x40010000, 0xffffffff,
	0x00003fc4, 0x40010000, 0xffffffff,
	0x0000915c, 0x00010000, 0xffffffff,
	0x00009160, 0x00030002, 0xffffffff,
	0x00009164, 0x00050004, 0xffffffff,
	0x00009168, 0x00070006, 0xffffffff,
	0x00009178, 0x00070000, 0xffffffff,
	0x0000917c, 0x00030002, 0xffffffff,
	0x00009180, 0x00050004, 0xffffffff,
	0x0000918c, 0x00010006, 0xffffffff,
	0x00009190, 0x00090008, 0xffffffff,
	0x00009194, 0x00070000, 0xffffffff,
	0x00009198, 0x00030002, 0xffffffff,
	0x0000919c, 0x00050004, 0xffffffff,
	0x000091a8, 0x00010006, 0xffffffff,
	0x000091ac, 0x00090008, 0xffffffff,
	0x000091b0, 0x00070000, 0xffffffff,
	0x000091b4, 0x00030002, 0xffffffff,
	0x000091b8, 0x00050004, 0xffffffff,
	0x000091c4, 0x00010006, 0xffffffff,
	0x000091c8, 0x00090008, 0xffffffff,
	0x000091cc, 0x00070000, 0xffffffff,
	0x000091d0, 0x00030002, 0xffffffff,
	0x000091d4, 0x00050004, 0xffffffff,
	0x000091e0, 0x00010006, 0xffffffff,
	0x000091e4, 0x00090008, 0xffffffff,
	0x000091e8, 0x00000000, 0xffffffff,
	0x000091ec, 0x00070000, 0xffffffff,
	0x000091f0, 0x00030002, 0xffffffff,
	0x000091f4, 0x00050004, 0xffffffff,
	0x00009200, 0x00010006, 0xffffffff,
	0x00009204, 0x00090008, 0xffffffff,
	0x00009208, 0x00070000, 0xffffffff,
	0x0000920c, 0x00030002, 0xffffffff,
	0x00009210, 0x00050004, 0xffffffff,
	0x0000921c, 0x00010006, 0xffffffff,
	0x00009220, 0x00090008, 0xffffffff,
	0x00009224, 0x00070000, 0xffffffff,
	0x00009228, 0x00030002, 0xffffffff,
	0x0000922c, 0x00050004, 0xffffffff,
	0x00009238, 0x00010006, 0xffffffff,
	0x0000923c, 0x00090008, 0xffffffff,
	0x00009240, 0x00070000, 0xffffffff,
	0x00009244, 0x00030002, 0xffffffff,
	0x00009248, 0x00050004, 0xffffffff,
	0x00009254, 0x00010006, 0xffffffff,
	0x00009258, 0x00090008, 0xffffffff,
	0x0000925c, 0x00070000, 0xffffffff,
	0x00009260, 0x00030002, 0xffffffff,
	0x00009264, 0x00050004, 0xffffffff,
	0x00009270, 0x00010006, 0xffffffff,
	0x00009274, 0x00090008, 0xffffffff,
	0x00009278, 0x00070000, 0xffffffff,
	0x0000927c, 0x00030002, 0xffffffff,
	0x00009280, 0x00050004, 0xffffffff,
	0x0000928c, 0x00010006, 0xffffffff,
	0x00009290, 0x00090008, 0xffffffff,
	0x000092a8, 0x00070000, 0xffffffff,
	0x000092ac, 0x00030002, 0xffffffff,
	0x000092b0, 0x00050004, 0xffffffff,
	0x000092bc, 0x00010006, 0xffffffff,
	0x000092c0, 0x00090008, 0xffffffff,
	0x000092c4, 0x00070000, 0xffffffff,
	0x000092c8, 0x00030002, 0xffffffff,
	0x000092cc, 0x00050004, 0xffffffff,
	0x000092d8, 0x00010006, 0xffffffff,
	0x000092dc, 0x00090008, 0xffffffff,
	0x00009294, 0x00000000, 0xffffffff,
	0x0000802c, 0xc0000000, 0xffffffff,
	0x00003fc4, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000010, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000011, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000012, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000013, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000014, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000015, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000016, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000017, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000018, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000019, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001a, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x0000001b, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff
};
#define CAYMAN_MGCG_DEFAULT_LENGTH	(sizeof(cayman_mgcg_default) / (3 * sizeof(u32)))

static const u32 cayman_mgcg_disable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0xffffffff, 0xffffffff,
	0x00009150, 0x00600000, 0xffffffff
};
#define CAYMAN_MGCG_DISABLE_LENGTH	(sizeof(cayman_mgcg_disable) / (3 * sizeof(u32)))

static const u32 cayman_mgcg_enable[] =
{
	0x0000802c, 0xc0000000, 0xffffffff,
	0x000008f8, 0x00000000, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000001, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x000008f8, 0x00000002, 0xffffffff,
	0x000008fc, 0x00600000, 0xffffffff,
	0x000008f8, 0x00000003, 0xffffffff,
	0x000008fc, 0x00000000, 0xffffffff,
	0x00009150, 0x96944200, 0xffffffff
};
#define CAYMAN_MGCG_ENABLE_LENGTH	(sizeof(cayman_mgcg_enable) / (3 * sizeof(u32)))

#define NISLANDS_SYSLS_SEQUENCE  100

static const u32 cayman_sysls_default[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
#define CAYMAN_SYSLS_DEFAULT_LENGTH	(sizeof(cayman_sysls_default) / (3 * sizeof(u32)))

static const u32 cayman_sysls_disable[] =
{
	/* Register, Value, Mask bits */
	0x0000d0c0, 0x00000000, 0xffffffff,
	0x0000d8c0, 0x00000000, 0xffffffff,
	0x000055e8, 0x00000000, 0xffffffff,
	0x0000d0bc, 0x00000000, 0xffffffff,
	0x0000d8bc, 0x00000000, 0xffffffff,
	0x000015c0, 0x00041401, 0xffffffff,
	0x0000264c, 0x00040400, 0xffffffff,
	0x00002648, 0x00040400, 0xffffffff,
	0x00002650, 0x00040400, 0xffffffff,
	0x000020b8, 0x00040400, 0xffffffff,
	0x000020bc, 0x00040400, 0xffffffff,
	0x000020c0, 0x00040c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680000, 0xffffffff,
	0x00002f50, 0x00000404, 0xffffffff,
	0x000004c8, 0x00000001, 0xffffffff,
	0x000064ec, 0x00007ffd, 0xffffffff,
	0x00000c7c, 0x0000ff00, 0xffffffff,
	0x00008dfc, 0x0000007f, 0xffffffff
};
#define CAYMAN_SYSLS_DISABLE_LENGTH	(sizeof(cayman_sysls_disable) / (3 * sizeof(u32)))

static const u32 cayman_sysls_enable[] =
{
	/* Register, Value, Mask bits */
	0x000055e8, 0x00000001, 0xffffffff,
	0x0000d0bc, 0x00000100, 0xffffffff,
	0x0000d8bc, 0x00000100, 0xffffffff,
	0x000015c0, 0x000c1401, 0xffffffff,
	0x0000264c, 0x000c0400, 0xffffffff,
	0x00002648, 0x000c0400, 0xffffffff,
	0x00002650, 0x000c0400, 0xffffffff,
	0x000020b8, 0x000c0400, 0xffffffff,
	0x000020bc, 0x000c0400, 0xffffffff,
	0x000020c0, 0x000c0c80, 0xffffffff,
	0x0000f4a0, 0x000000c0, 0xffffffff,
	0x0000f4a4, 0x00680fff, 0xffffffff,
	0x00002f50, 0x00000903, 0xffffffff,
	0x000004c8, 0x00000000, 0xffffffff,
	0x000064ec, 0x00000000, 0xffffffff,
	0x00000c7c, 0x00000000, 0xffffffff,
	0x00008dfc, 0x00000000, 0xffffffff
};
#define CAYMAN_SYSLS_ENABLE_LENGTH	(sizeof(cayman_sysls_enable) / (3 * sizeof(u32)))

struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev);
struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);

struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
	struct ni_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

struct ni_ps *ni_get_ps(struct radeon_ps *rps)
{
	struct ni_ps *ps = rps->ps_priv;

	return ps;
}

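/*
 * Leakage is modeled as i_leakage * kt * kv * vddc, where
 * kt = (at/1000) * exp((bt/1000) * T) and kv = (av/1000) * exp((bv/1000) * V).
 * The inputs and coefficients are stored scaled by 1000, so everything is
 * converted into drm fixed point, evaluated, and scaled back up on the
 * way out.
 */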
static void ni_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						     u16 v, s32 t,
						     u32 ileakage,
						     u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc, temperature;

	i_leakage = div64_s64(drm_int2fixp(ileakage), 1000);
	vddc = div64_s64(drm_int2fixp(v), 1000);
	temperature = div64_s64(drm_int2fixp(t), 1000);

	kt = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->at), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bt), 1000), temperature)));
	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 1000), vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	*leakage = drm_fixp2int(leakage_w * 1000);
}

static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}

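/*
 * Clamp the requested state to the current (AC or DC) limits, force the
 * per-level clocks and voltages to be monotonically non-decreasing, and
 * pin mclk/vddci to a single value when more than one CRTC is active,
 * since mclk switching with multiple active displays is generally unsafe.
 */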
static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ni_ps *ps = ni_get_ps(rps);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 mclk, sclk;
	u16 vddc, vddci;
	int i;

	if (rdev->pm.dpm.new_active_crtc_count > 1)
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		sclk = ps->performance_levels[0].sclk;
		mclk = ps->performance_levels[0].mclk;
		vddc = ps->performance_levels[0].vddc;
		vddci = ps->performance_levels[0].vddci;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
				  &ps->performance_levels[0].sclk,
				  &ps->performance_levels[0].mclk);

	for (i = 1; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
	}

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 1; i < ps->performance_level_count; i++)
		btc_skip_blacklist_clocks(rdev, max_limits->sclk, max_limits->mclk,
					  &ps->performance_levels[i].sclk,
					  &ps->performance_levels[i].mclk);

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(rdev, max_limits,
					      &ps->performance_levels[i]);

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   rdev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(rdev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;

		if (ps->performance_levels[i].vddc < rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2)
			ps->performance_levels[i].flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
	}
}

static void ni_cg_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_cgcg_cgls_default;
	count = CAYMAN_CGCG_CGLS_DEFAULT_LENGTH;

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_gfx_clockgating_enable(struct radeon_device *rdev,
				      bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_cgcg_cgls_enable;
		count = CAYMAN_CGCG_CGLS_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_cgcg_cgls_disable;
		count = CAYMAN_CGCG_CGLS_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_mg_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_mgcg_default;
	count = CAYMAN_MGCG_DEFAULT_LENGTH;

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_mg_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_mgcg_enable;
		count = CAYMAN_MGCG_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_mgcg_disable;
		count = CAYMAN_MGCG_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_ls_clockgating_default(struct radeon_device *rdev)
{
	u32 count;
	const u32 *ps = NULL;

	ps = (const u32 *)&cayman_sysls_default;
	count = CAYMAN_SYSLS_DEFAULT_LENGTH;

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

static void ni_ls_clockgating_enable(struct radeon_device *rdev,
				     bool enable)
{
	u32 count;
	const u32 *ps = NULL;

	if (enable) {
		ps = (const u32 *)&cayman_sysls_enable;
		count = CAYMAN_SYSLS_ENABLE_LENGTH;
	} else {
		ps = (const u32 *)&cayman_sysls_disable;
		count = CAYMAN_SYSLS_DISABLE_LENGTH;
	}

	btc_program_mgcg_hw_sequence(rdev, ps, count);
}

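/*
 * A voltage entry of 0xff01 is a flag rather than an actual voltage:
 * it marks a leakage-adjusted value the vbios left for the driver to
 * resolve.  With no better source, replace it with the board's max VDDC.
 */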
static int ni_patch_single_dependency_table_based_on_leakage(struct radeon_device *rdev,
							     struct radeon_clock_voltage_dependency_table *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++) {
			if (0xff01 == table->entries[i].v) {
				if (pi->max_vddc == 0)
					return -EINVAL;
				table->entries[i].v = pi->max_vddc;
			}
		}
	}
	return 0;
}

static int ni_patch_dependency_tables_based_on_leakage(struct radeon_device *rdev)
{
	int ret;

	/* don't let a failure in the sclk table be masked by the mclk patch */
	ret = ni_patch_single_dependency_table_based_on_leakage(rdev,
								&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	if (ret)
		return ret;

	return ni_patch_single_dependency_table_based_on_leakage(rdev,
								 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
}

static void ni_stop_dpm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

#if 0
static int ni_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	if (ac_power)
		return (rv770_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
#endif

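/* message parameters are passed to the SMC through the scratch register */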
static PPSMC_Result ni_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_SCRATCH0, parameter);
	return rv770_send_msg_to_smc(rdev, msg);
}

static int ni_restrict_performance_levels_before_switch(struct radeon_device *rdev)
{
	if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ni_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
{
	if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
		return -EINVAL;

	return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static void ni_stop_smc(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(LB_SYNC_RESET_SEL) & LB_SYNC_RESET_SEL_MASK;
		if (tmp != 1)
			break;
		udelay(1);
	}

	udelay(100);

	r7xx_stop_smc(rdev);
}

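/*
 * The SMC firmware image carries a header with the SRAM offsets of the
 * various runtime tables (state, soft registers, MC registers, fan, MC
 * arb, CAC, SPLL); cache them so later uploads know where to write.
 */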
static int ni_process_firmware_header(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_stateTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->state_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	eg_pi->mc_reg_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_fanTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->fan_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->arb_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_cacTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->cac_table_start = (u16)tmp;

	ret = rv770_read_smc_sram_dword(rdev,
					NISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					NISLANDS_SMC_FIRMWARE_HEADER_spllTable,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	ni_pi->spll_table_start = (u16)tmp;

	return ret;
}

static void ni_read_clock_registers(struct radeon_device *rdev)
{
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	ni_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	ni_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	ni_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	ni_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	ni_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	ni_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	ni_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	ni_pi->clock_registers.mpll_ad_func_cntl_2 = RREG32(MPLL_AD_FUNC_CNTL_2);
	ni_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	ni_pi->clock_registers.mpll_dq_func_cntl_2 = RREG32(MPLL_DQ_FUNC_CNTL_2);
	ni_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	ni_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	ni_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	ni_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

#if 0
static int ni_enter_ulp_state(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (pi->gfx_clock_gating) {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_ADDR_CONFIG);
	}

	WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
		 ~HOST_SMC_MSG_MASK);

	udelay(25000);

	return 0;
}
#endif

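/*
 * Convert the voltage/backbias response times and the ACPI/VBI timeouts
 * into delay counts in the SMC soft registers.  The divide by 1600
 * converts from the reference clock (which radeon_get_xclk() reports in
 * 10 kHz units) into the delay units the SMC expects; the exact scaling
 * is inherited from the earlier rv770/Evergreen code.
 */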
static void ni_program_response_times(struct radeon_device *rdev)
{
	u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
	u32 vddc_dly, bb_dly, acpi_dly, vbi_dly, mclk_switch_limit;
	u32 reference_clock;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
	backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;

	if (voltage_response_time == 0)
		voltage_response_time = 1000;

	if (backbias_response_time == 0)
		backbias_response_time = 1000;

	acpi_delay_time = 15000;
	vbi_time_out = 100000;

	reference_clock = radeon_get_xclk(rdev);

	vddc_dly = (voltage_response_time * reference_clock) / 1600;
	bb_dly = (backbias_response_time * reference_clock) / 1600;
	acpi_dly = (acpi_delay_time * reference_clock) / 1600;
	vbi_dly = (vbi_time_out * reference_clock) / 1600;

	mclk_switch_limit = (460 * reference_clock) / 100;

	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
	rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_mclk_switch_lim, mclk_switch_limit);
}

static void ni_populate_smc_voltage_table(struct radeon_device *rdev,
					  struct atom_voltage_table *voltage_table,
					  NISLANDS_SMC_STATETABLE *table)
{
	unsigned int i;

	for (i = 0; i < voltage_table->count; i++) {
		table->highSMIO[i] = 0;
		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
	}
}

static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
					   NISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	unsigned char i;

	if (eg_pi->vddc_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] = 0;
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDC] =
			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);

		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
				table->maxVDDCIndexInPPTable = i;
				break;
			}
		}
	}

	if (eg_pi->vddci_voltage_table.count) {
		ni_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);

		table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
		/* use the VDDCI table's own SMIO mask, not the VDDC one */
		table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
	}
}

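/* pick the first table entry at or above the requested voltage */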
static int ni_populate_voltage_value(struct radeon_device *rdev,
				     struct atom_voltage_table *table,
				     u16 value,
				     NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	unsigned int i;

	for (i = 0; i < table->count; i++) {
		if (value <= table->entries[i].value) {
			voltage->index = (u8)i;
			voltage->value = cpu_to_be16(table->entries[i].value);
			break;
		}
	}

	if (i >= table->count)
		return -EINVAL;

	return 0;
}

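/*
 * Without mvdd control the SMC always runs memory at the high MVDD
 * level; otherwise pick low vs. high based on the mclk split frequency.
 */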
static void ni_populate_mvdd_value(struct radeon_device *rdev,
				   u32 mclk,
				   NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	if (!pi->mvdd_control) {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
		return;
	}

	if (mclk <= pi->mvdd_split_frequency) {
		voltage->index = eg_pi->mvdd_low_index;
		voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
	} else {
		voltage->index = eg_pi->mvdd_high_index;
		voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
	}
}

static int ni_get_std_voltage_value(struct radeon_device *rdev,
				    NISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries &&
	    ((u32)voltage->index < rdev->pm.dpm.dyn_state.cac_leakage_table.count))
		*std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
	else
		*std_voltage = be16_to_cpu(voltage->value);

	return 0;
}

static void ni_populate_std_voltage_value(struct radeon_device *rdev,
					  u16 value, u8 index,
					  NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	voltage->index = index;
	voltage->value = cpu_to_be16(value);
}

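/*
 * Power scaling factor: the CAC TID count multiplied by the reference
 * clock period in ns (radeon_get_xclk() reports the clock in 10 kHz
 * units, hence the extra divide by 10000).
 */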
static u32 ni_get_smc_power_scaling_factor(struct radeon_device *rdev)
{
	u32 xclk_period;
	u32 xclk = radeon_get_xclk(rdev);
	u32 tmp = RREG32(CG_CAC_CTRL) & TID_CNT_MASK;

	xclk_period = (1000000000UL / xclk);
	xclk_period /= 10000UL;

	return tmp * xclk_period;
}

static u32 ni_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return (power_in_watts * scaling_factor) << 2;
}

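/*
 * The boost limit is 90% of the near-TDP limit scaled by the square of
 * the ratio of the medium to the high standardized VDDC, i.e.
 * near_tdp * 0.9 * (Vmed / Vhigh)^2, since dynamic power goes as V^2.
 */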
static u32 ni_calculate_power_boost_limit(struct radeon_device *rdev,
					  struct radeon_ps *radeon_state,
					  u32 near_tdp_limit)
{
	struct ni_ps *state = ni_get_ps(radeon_state);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 power_boost_limit = 0;
	int ret;

	if (ni_pi->enable_power_containment &&
	    ni_pi->use_power_boost_limit) {
		NISLANDS_SMC_VOLTAGE_VALUE vddc;
		u16 std_vddc_med;
		u16 std_vddc_high;
		u64 tmp, n, d;

		if (state->performance_level_count < 3)
			return 0;

		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[state->performance_level_count - 2].vddc,
						&vddc);
		if (ret)
			return 0;

		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_med);
		if (ret)
			return 0;

		ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
						state->performance_levels[state->performance_level_count - 1].vddc,
						&vddc);
		if (ret)
			return 0;

		ret = ni_get_std_voltage_value(rdev, &vddc, &std_vddc_high);
		if (ret)
			return 0;

		n = ((u64)near_tdp_limit * ((u64)std_vddc_med * (u64)std_vddc_med) * 90);
		d = ((u64)std_vddc_high * (u64)std_vddc_high * 100);
		tmp = div64_u64(n, d);

		if (tmp >> 32)
			return 0;
		power_boost_limit = (u32)tmp;
	}

	return power_boost_limit;
}

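/*
 * Apply the user's TDP overdrive percentage to both the hard TDP limit
 * and the near-TDP limit, in whichever direction the caller requests.
 */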
static int ni_calculate_adjusted_tdp_limits(struct radeon_device *rdev,
					    bool adjust_polarity,
					    u32 tdp_adjustment,
					    u32 *tdp_limit,
					    u32 *near_tdp_limit)
{
	if (tdp_adjustment > (u32)rdev->pm.dpm.tdp_od_limit)
		return -EINVAL;

	if (adjust_polarity) {
		*tdp_limit = ((100 + tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit + (*tdp_limit - rdev->pm.dpm.tdp_limit);
	} else {
		*tdp_limit = ((100 - tdp_adjustment) * rdev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = rdev->pm.dpm.near_tdp_limit - (rdev->pm.dpm.tdp_limit - *tdp_limit);
	}

	return 0;
}

static int ni_populate_smc_tdp_limits(struct radeon_device *rdev,
				      struct radeon_ps *radeon_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);

	if (ni_pi->enable_power_containment) {
		NISLANDS_SMC_STATETABLE *smc_table = &ni_pi->smc_statetable;
		u32 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		u32 power_boost_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		memset(smc_table, 0, sizeof(NISLANDS_SMC_STATETABLE));

		ret = ni_calculate_adjusted_tdp_limits(rdev,
						       false, /* ??? */
						       rdev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state,
								   near_tdp_limit);

		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(tdp_limit, scaling_factor));
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(ni_scale_power_for_smc(near_tdp_limit, scaling_factor));
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(ni_scale_power_for_smc((near_tdp_limit * NISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100,
							   scaling_factor));
		smc_table->dpm2Params.PowerBoostLimit =
			cpu_to_be32(ni_scale_power_for_smc(power_boost_limit, scaling_factor));

		ret = rv770_copy_bytes_to_smc(rdev,
					      (u16)(pi->state_table_start + offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
						    offsetof(PP_NIslands_DPM2Parameters, TDPLimit)),
					      (u8 *)(&smc_table->dpm2Params.TDPLimit),
					      sizeof(u32) * 4, pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

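/*
 * The MC arbiter keeps four sets (F0-F3) of DRAM timing/burst registers.
 * Copy the timings from the source set to the destination set and then
 * ask the MC to switch to the destination set.
 */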
int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F3:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
		break;
	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
		break;
	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
	WREG32(MC_CG_CONFIG, mc_cg_config);
	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);

	return 0;
}

static int ni_init_arb_table_index(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= ((u32)MC_CG_ARB_FREQ_F1) << 24;

	return rv770_write_smc_sram_dword(rdev, ni_pi->arb_table_start,
					  tmp, pi->sram_end);
}

static int ni_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
{
	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int ni_force_switch_to_arb_f0(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = rv770_read_smc_sram_dword(rdev, ni_pi->arb_table_start,
					&tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp = (tmp >> 24) & 0xff;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}

static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
						struct rv7xx_pl *pl,
						SMC_NIslands_MCArbDramTimingRegisterSet *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;

	arb_regs->mc_arb_rfsh_rate =
		(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);

	radeon_atom_set_engine_dram_timings(rdev,
					    pl->sclk,
					    pl->mclk);

	dram_timing = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);

	arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);

	return 0;
}

static int ni_do_program_memory_timing_parameters(struct radeon_device *rdev,
						  struct radeon_ps *radeon_state,
						  unsigned int first_arb_set)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	struct ni_ps *state = ni_get_ps(radeon_state);
	SMC_NIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int i, ret = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = ni_populate_memory_timing_parameters(rdev, &state->performance_levels[i], &arb_regs);
		if (ret)
			break;

		ret = rv770_copy_bytes_to_smc(rdev,
					      (u16)(ni_pi->arb_table_start +
						    offsetof(SMC_NIslands_MCArbDramTimingRegisters, data) +
						    sizeof(SMC_NIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i)),
					      (u8 *)&arb_regs,
					      (u16)sizeof(SMC_NIslands_MCArbDramTimingRegisterSet),
					      pi->sram_end);
		if (ret)
			break;
	}
	return ret;
}

static int ni_program_memory_timing_parameters(struct radeon_device *rdev,
					       struct radeon_ps *radeon_new_state)
{
	return ni_do_program_memory_timing_parameters(rdev, radeon_new_state,
						      NISLANDS_DRIVER_STATE_ARB_INDEX);
}

static void ni_populate_initial_mvdd_value(struct radeon_device *rdev,
					   struct NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

	voltage->index = eg_pi->mvdd_high_index;
	voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
}

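/*
 * Build the SMC "initial" state from the boot-up performance level:
 * the PLL/DLL register values cached at init time plus the boot clocks
 * and voltages.
 */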
static int ni_populate_smc_initial_state(struct radeon_device *rdev,
					 struct radeon_ps *radeon_initial_state,
					 NISLANDS_SMC_STATETABLE *table)
{
	struct ni_ps *initial_state = ni_get_ps(radeon_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
	struct ni_power_info *ni_pi = ni_get_pi(rdev);
	u32 reg;
	int ret;

	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(ni_pi->clock_registers.dll_cntl);
	table->initialState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
	table->initialState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
	table->initialState.levels[0].mclk.mclk_value =
		cpu_to_be32(initial_state->performance_levels[0].mclk);

	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->performance_levels[0].sclk);
	table->initialState.levels[0].arbRefreshState =
		NISLANDS_INITIAL_STATE_ARB_INDEX;

	table->initialState.levels[0].ACIndex = 0;

	ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
					initial_state->performance_levels[0].vddc,
					&table->initialState.levels[0].vddc);
	if (!ret) {
		u16 std_vddc;

		ret = ni_get_std_voltage_value(rdev,
					       &table->initialState.levels[0].vddc,
					       &std_vddc);
		if (!ret)
			ni_populate_std_voltage_value(rdev, std_vddc,
						      table->initialState.levels[0].vddc.index,
						      &table->initialState.levels[0].std_vddc);
	}

	if (eg_pi->vddci_control)
		ni_populate_voltage_value(rdev,
					  &eg_pi->vddci_voltage_table,
					  initial_state->performance_levels[0].vddci,
					  &table->initialState.levels[0].vddci);

	ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);

	reg = CG_R(0xffff) | CG_L(0);
	table->initialState.levels[0].aT = cpu_to_be32(reg);

	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);

	if (pi->boot_in_gen2)
		table->initialState.levels[0].gen2PCIE = 1;
	else
		table->initialState.levels[0].gen2PCIE = 0;

	if (pi->mem_gddr5) {
		table->initialState.levels[0].strobeMode =
			cypress_get_strobe_mode_settings(rdev,
							 initial_state->performance_levels[0].mclk);

		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
			table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.levels[0].mcFlags = 0;
	}

	table->initialState.levelCount = 1;

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	table->initialState.levels[0].dpm2.MaxPS = 0;
	table->initialState.levels[0].dpm2.NearTDPDec = 0;
	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
	table->initialState.levels[0].dpm2.BelowSafeInc = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}

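/*
 * The ACPI state starts from the initial state but forces the memory
 * PLLs/DLLs into reset/bypass and moves sclk onto a different mux
 * source, i.e. roughly the lowest-power configuration the SMC can
 * switch into.
 */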
1760 static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
1761 NISLANDS_SMC_STATETABLE *table)
1762 {
1763 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1764 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
1765 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1766 u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
1767 u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
1768 u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
1769 u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
1770 u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
1771 u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
1772 u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
1773 u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
1774 u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
1775 u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
1776 u32 reg;
1777 int ret;
1778
1779 table->ACPIState = table->initialState;
1780
1781 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
1782
1783 if (pi->acpi_vddc) {
1784 ret = ni_populate_voltage_value(rdev,
1785 &eg_pi->vddc_voltage_table,
1786 pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
1787 if (!ret) {
1788 u16 std_vddc;
1789
1790 ret = ni_get_std_voltage_value(rdev,
1791 &table->ACPIState.levels[0].vddc, &std_vddc);
1792 if (!ret)
1793 ni_populate_std_voltage_value(rdev, std_vddc,
1794 table->ACPIState.levels[0].vddc.index,
1795 &table->ACPIState.levels[0].std_vddc);
1796 }
1797
1798 if (pi->pcie_gen2) {
1799 if (pi->acpi_pcie_gen2)
1800 table->ACPIState.levels[0].gen2PCIE = 1;
1801 else
1802 table->ACPIState.levels[0].gen2PCIE = 0;
1803 } else {
1804 table->ACPIState.levels[0].gen2PCIE = 0;
1805 }
1806 } else {
1807 ret = ni_populate_voltage_value(rdev,
1808 &eg_pi->vddc_voltage_table,
1809 pi->min_vddc_in_table,
1810 &table->ACPIState.levels[0].vddc);
1811 if (!ret) {
1812 u16 std_vddc;
1813
1814 ret = ni_get_std_voltage_value(rdev,
1815 &table->ACPIState.levels[0].vddc,
1816 &std_vddc);
1817 if (!ret)
1818 ni_populate_std_voltage_value(rdev, std_vddc,
1819 table->ACPIState.levels[0].vddc.index,
1820 &table->ACPIState.levels[0].std_vddc);
1821 }
1822 table->ACPIState.levels[0].gen2PCIE = 0;
1823 }
1824
1825 if (eg_pi->acpi_vddci) {
1826 if (eg_pi->vddci_control)
1827 ni_populate_voltage_value(rdev,
1828 &eg_pi->vddci_voltage_table,
1829 eg_pi->acpi_vddci,
1830 &table->ACPIState.levels[0].vddci);
1831 }
1832
1834 mpll_ad_func_cntl &= ~PDNB;
1835
1836 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
1837
1838 if (pi->mem_gddr5)
1839 mpll_dq_func_cntl &= ~PDNB;
1840 mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
1841
1843 mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
1844 MRDCKA1_RESET |
1845 MRDCKB0_RESET |
1846 MRDCKB1_RESET |
1847 MRDCKC0_RESET |
1848 MRDCKC1_RESET |
1849 MRDCKD0_RESET |
1850 MRDCKD1_RESET);
1851
1852 mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
1853 MRDCKA1_PDNB |
1854 MRDCKB0_PDNB |
1855 MRDCKB1_PDNB |
1856 MRDCKC0_PDNB |
1857 MRDCKC1_PDNB |
1858 MRDCKD0_PDNB |
1859 MRDCKD1_PDNB);
1860
1861 dll_cntl |= (MRDCKA0_BYPASS |
1862 MRDCKA1_BYPASS |
1863 MRDCKB0_BYPASS |
1864 MRDCKB1_BYPASS |
1865 MRDCKC0_BYPASS |
1866 MRDCKC1_BYPASS |
1867 MRDCKD0_BYPASS |
1868 MRDCKD1_BYPASS);
1869
1870 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
1871 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
1872
1873 table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
1874 table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
1875 table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
1876 table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
1877 table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
1878 table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
1879
1880 table->ACPIState.levels[0].mclk.mclk_value = 0;
1881
1882 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
1883 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
1884 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
1885 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
1886
1887 table->ACPIState.levels[0].sclk.sclk_value = 0;
1888
1889 ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
1890
1891 if (eg_pi->dynamic_ac_timing)
1892 table->ACPIState.levels[0].ACIndex = 1;
1893
1894 table->ACPIState.levels[0].dpm2.MaxPS = 0;
1895 table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
1896 table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
1897 table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
1898
1899 reg = MIN_POWER_MASK | MAX_POWER_MASK;
1900 table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
1901
1902 reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
1903 table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
1904
1905 return 0;
1906 }
1907
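/* Build the complete SMC state table (voltage tables, thermal protection
 * type, platform flags, initial/ACPI/driver/ULV states and the initial
 * arbiter registers) and upload it to SMC SRAM.
 */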
1908 static int ni_init_smc_table(struct radeon_device *rdev)
1909 {
1910 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1911 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1912 int ret;
1913 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
1914 NISLANDS_SMC_STATETABLE *table = &ni_pi->smc_statetable;
1915
1916 memset(table, 0, sizeof(NISLANDS_SMC_STATETABLE));
1917
1918 ni_populate_smc_voltage_tables(rdev, table);
1919
1920 switch (rdev->pm.int_thermal_type) {
1921 case THERMAL_TYPE_NI:
1922 case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
1923 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1924 break;
1925 case THERMAL_TYPE_NONE:
1926 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1927 break;
1928 default:
1929 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1930 break;
1931 }
1932
1933 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
1934 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1935
1936 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1937 table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
1938
1939 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1940 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1941
1942 if (pi->mem_gddr5)
1943 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1944
1945 ret = ni_populate_smc_initial_state(rdev, radeon_boot_state, table);
1946 if (ret)
1947 return ret;
1948
1949 ret = ni_populate_smc_acpi_state(rdev, table);
1950 if (ret)
1951 return ret;
1952
1953 table->driverState = table->initialState;
1954
1955 table->ULVState = table->initialState;
1956
1957 ret = ni_do_program_memory_timing_parameters(rdev, radeon_boot_state,
1958 NISLANDS_INITIAL_STATE_ARB_INDEX);
1959 if (ret)
1960 return ret;
1961
1962 return rv770_copy_bytes_to_smc(rdev, pi->state_table_start, (u8 *)table,
1963 sizeof(NISLANDS_SMC_STATETABLE), pi->sram_end);
1964 }
1965
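/* Compute the SPLL register settings (reference/post/feedback dividers
 * and optional spread spectrum) for the requested engine clock.
 */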
1966 static int ni_calculate_sclk_params(struct radeon_device *rdev,
1967 u32 engine_clock,
1968 NISLANDS_SMC_SCLK_VALUE *sclk)
1969 {
1970 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1971 struct ni_power_info *ni_pi = ni_get_pi(rdev);
1972 struct atom_clock_dividers dividers;
1973 u32 spll_func_cntl = ni_pi->clock_registers.cg_spll_func_cntl;
1974 u32 spll_func_cntl_2 = ni_pi->clock_registers.cg_spll_func_cntl_2;
1975 u32 spll_func_cntl_3 = ni_pi->clock_registers.cg_spll_func_cntl_3;
1976 u32 spll_func_cntl_4 = ni_pi->clock_registers.cg_spll_func_cntl_4;
1977 u32 cg_spll_spread_spectrum = ni_pi->clock_registers.cg_spll_spread_spectrum;
1978 u32 cg_spll_spread_spectrum_2 = ni_pi->clock_registers.cg_spll_spread_spectrum_2;
1979 u64 tmp;
1980 u32 reference_clock = rdev->clock.spll.reference_freq;
1981 u32 reference_divider;
1982 u32 fbdiv;
1983 int ret;
1984
1985 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
1986 engine_clock, false, &dividers);
1987 if (ret)
1988 return ret;
1989
1990 reference_divider = 1 + dividers.ref_div;
1991 
/* fbdiv is the PLL feedback multiplier in 1/16384ths (2^14 fixed point) */
1993 tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
1994 do_div(tmp, reference_clock);
1995 fbdiv = (u32) tmp;
1996
1997 spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
1998 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
1999 spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
2000
2001 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2002 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
2003
2004 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2005 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2006 spll_func_cntl_3 |= SPLL_DITHEN;
2007
2008 if (pi->sclk_ss) {
2009 struct radeon_atom_ss ss;
2010 u32 vco_freq = engine_clock * dividers.post_div;
2011
2012 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2013 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2014 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2015 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2016
2017 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2018 cg_spll_spread_spectrum |= CLK_S(clk_s);
2019 cg_spll_spread_spectrum |= SSEN;
2020
2021 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2022 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2023 }
2024 }
2025
2026 sclk->sclk_value = engine_clock;
2027 sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
2028 sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
2029 sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
2030 sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
2031 sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
2032 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
2033
2034 return 0;
2035 }
2036
2037 static int ni_populate_sclk_value(struct radeon_device *rdev,
2038 u32 engine_clock,
2039 NISLANDS_SMC_SCLK_VALUE *sclk)
2040 {
2041 NISLANDS_SMC_SCLK_VALUE sclk_tmp;
2042 int ret;
2043
2044 ret = ni_calculate_sclk_params(rdev, engine_clock, &sclk_tmp);
2045 if (!ret) {
2046 sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
2047 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
2048 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
2049 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
2050 sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
2051 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
2052 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
2053 }
2054
2055 return ret;
2056 }
2057
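/* Build the 256-entry SPLL divider lookup table, one entry per 512 units
 * of engine clock, and upload it to SMC SRAM.
 */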
2058 static int ni_init_smc_spll_table(struct radeon_device *rdev)
2059 {
2060 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2061 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2062 SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
2063 NISLANDS_SMC_SCLK_VALUE sclk_params;
2064 u32 fb_div;
2065 u32 p_div;
2066 u32 clk_s;
2067 u32 clk_v;
2068 u32 sclk = 0;
2069 int i, ret;
2070 u32 tmp;
2071
2072 if (ni_pi->spll_table_start == 0)
2073 return -EINVAL;
2074
2075 spll_table = kzalloc(sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
2076 if (spll_table == NULL)
2077 return -ENOMEM;
2078
2079 for (i = 0; i < 256; i++) {
2080 ret = ni_calculate_sclk_params(rdev, sclk, &sclk_params);
2081 if (ret)
2082 break;
2083
2084 p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
2085 fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
2086 clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
2087 clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
2088
2089 fb_div &= ~0x00001FFF;
2090 fb_div >>= 1;
2091 clk_v >>= 6;
2092
2093 if (p_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
2094 ret = -EINVAL;
2095
2096 if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2097 ret = -EINVAL;
2101
2102 if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2103 ret = -EINVAL;
2104
2105 if (ret)
2106 break;
2107
2108 tmp = ((fb_div << SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
2109 ((p_div << SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
2110 spll_table->freq[i] = cpu_to_be32(tmp);
2111
2112 tmp = ((clk_v << SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
2113 ((clk_s << SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
2114 spll_table->ss[i] = cpu_to_be32(tmp);
2115
2116 sclk += 512;
2117 }
2118
2119 if (!ret)
2120 ret = rv770_copy_bytes_to_smc(rdev, ni_pi->spll_table_start, (u8 *)spll_table,
2121 sizeof(SMC_NISLANDS_SPLL_DIV_TABLE), pi->sram_end);
2122
2123 kfree(spll_table);
2124
2125 return ret;
2126 }
2127
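/* Compute the MPLL register settings (AD/DQ dividers, DLL speed and
 * optional spread spectrum) for the requested memory clock, byte-swapped
 * for the SMC.
 */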
2128 static int ni_populate_mclk_value(struct radeon_device *rdev,
2129 u32 engine_clock,
2130 u32 memory_clock,
2131 NISLANDS_SMC_MCLK_VALUE *mclk,
2132 bool strobe_mode,
2133 bool dll_state_on)
2134 {
2135 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2136 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2137 u32 mpll_ad_func_cntl = ni_pi->clock_registers.mpll_ad_func_cntl;
2138 u32 mpll_ad_func_cntl_2 = ni_pi->clock_registers.mpll_ad_func_cntl_2;
2139 u32 mpll_dq_func_cntl = ni_pi->clock_registers.mpll_dq_func_cntl;
2140 u32 mpll_dq_func_cntl_2 = ni_pi->clock_registers.mpll_dq_func_cntl_2;
2141 u32 mclk_pwrmgt_cntl = ni_pi->clock_registers.mclk_pwrmgt_cntl;
2142 u32 dll_cntl = ni_pi->clock_registers.dll_cntl;
2143 u32 mpll_ss1 = ni_pi->clock_registers.mpll_ss1;
2144 u32 mpll_ss2 = ni_pi->clock_registers.mpll_ss2;
2145 struct atom_clock_dividers dividers;
2146 u32 ibias;
2147 u32 dll_speed;
2148 int ret;
2149 u32 mc_seq_misc7;
2150
2151 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
2152 memory_clock, strobe_mode, &dividers);
2153 if (ret)
2154 return ret;
2155
2156 if (!strobe_mode) {
2157 mc_seq_misc7 = RREG32(MC_SEQ_MISC7);
2158
2159 if (mc_seq_misc7 & 0x8000000)
2160 dividers.post_div = 1;
2161 }
2162
2163 ibias = cypress_map_clkf_to_ibias(rdev, dividers.whole_fb_div);
2164
2165 mpll_ad_func_cntl &= ~(CLKR_MASK |
2166 YCLK_POST_DIV_MASK |
2167 CLKF_MASK |
2168 CLKFRAC_MASK |
2169 IBIAS_MASK);
2170 mpll_ad_func_cntl |= CLKR(dividers.ref_div);
2171 mpll_ad_func_cntl |= YCLK_POST_DIV(dividers.post_div);
2172 mpll_ad_func_cntl |= CLKF(dividers.whole_fb_div);
2173 mpll_ad_func_cntl |= CLKFRAC(dividers.frac_fb_div);
2174 mpll_ad_func_cntl |= IBIAS(ibias);
2175
2176 if (dividers.vco_mode)
2177 mpll_ad_func_cntl_2 |= VCO_MODE;
2178 else
2179 mpll_ad_func_cntl_2 &= ~VCO_MODE;
2180
2181 if (pi->mem_gddr5) {
2182 mpll_dq_func_cntl &= ~(CLKR_MASK |
2183 YCLK_POST_DIV_MASK |
2184 CLKF_MASK |
2185 CLKFRAC_MASK |
2186 IBIAS_MASK);
2187 mpll_dq_func_cntl |= CLKR(dividers.ref_div);
2188 mpll_dq_func_cntl |= YCLK_POST_DIV(dividers.post_div);
2189 mpll_dq_func_cntl |= CLKF(dividers.whole_fb_div);
2190 mpll_dq_func_cntl |= CLKFRAC(dividers.frac_fb_div);
2191 mpll_dq_func_cntl |= IBIAS(ibias);
2192
2193 if (strobe_mode)
2194 mpll_dq_func_cntl &= ~PDNB;
2195 else
2196 mpll_dq_func_cntl |= PDNB;
2197
2198 if (dividers.vco_mode)
2199 mpll_dq_func_cntl_2 |= VCO_MODE;
2200 else
2201 mpll_dq_func_cntl_2 &= ~VCO_MODE;
2202 }
2203
2204 if (pi->mclk_ss) {
2205 struct radeon_atom_ss ss;
2206 u32 vco_freq = memory_clock * dividers.post_div;
2207
2208 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2209 ASIC_INTERNAL_MEMORY_SS, vco_freq)) {
2210 u32 reference_clock = rdev->clock.mpll.reference_freq;
2211 u32 decoded_ref = rv740_get_decoded_reference_divider(dividers.ref_div);
2212 u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate);
2213 u32 clk_v = ss.percentage *
2214 (0x4000 * dividers.whole_fb_div + 0x800 * dividers.frac_fb_div) / (clk_s * 625);
2215
2216 mpll_ss1 &= ~CLKV_MASK;
2217 mpll_ss1 |= CLKV(clk_v);
2218
2219 mpll_ss2 &= ~CLKS_MASK;
2220 mpll_ss2 |= CLKS(clk_s);
2221 }
2222 }
2223
2224 dll_speed = rv740_get_dll_speed(pi->mem_gddr5,
2225 memory_clock);
2226
2227 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2228 mclk_pwrmgt_cntl |= DLL_SPEED(dll_speed);
2229 if (dll_state_on)
2230 mclk_pwrmgt_cntl |= (MRDCKA0_PDNB |
2231 MRDCKA1_PDNB |
2232 MRDCKB0_PDNB |
2233 MRDCKB1_PDNB |
2234 MRDCKC0_PDNB |
2235 MRDCKC1_PDNB |
2236 MRDCKD0_PDNB |
2237 MRDCKD1_PDNB);
2238 else
2239 mclk_pwrmgt_cntl &= ~(MRDCKA0_PDNB |
2240 MRDCKA1_PDNB |
2241 MRDCKB0_PDNB |
2242 MRDCKB1_PDNB |
2243 MRDCKC0_PDNB |
2244 MRDCKC1_PDNB |
2245 MRDCKD0_PDNB |
2246 MRDCKD1_PDNB);
2247
2249 mclk->mclk_value = cpu_to_be32(memory_clock);
2250 mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
2251 mclk->vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
2252 mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
2253 mclk->vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
2254 mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
2255 mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
2256 mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
2257 mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
2258
2259 return 0;
2260 }
2261
2262 static void ni_populate_smc_sp(struct radeon_device *rdev,
2263 struct radeon_ps *radeon_state,
2264 NISLANDS_SMC_SWSTATE *smc_state)
2265 {
2266 struct ni_ps *ps = ni_get_ps(radeon_state);
2267 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2268 int i;
2269
2270 for (i = 0; i < ps->performance_level_count - 1; i++)
2271 smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
2272
2273 smc_state->levels[ps->performance_level_count - 1].bSP =
2274 cpu_to_be32(pi->psp);
2275 }
2276
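/* Convert one driver performance level into SMC form: PCIe gen, sclk and
 * mclk PLL settings, memory flags (stutter/EDC/RTT/strobe) and voltages.
 */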
2277 static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
2278 struct rv7xx_pl *pl,
2279 NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
2280 {
2281 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2282 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2283 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2284 int ret;
2285 bool dll_state_on;
2286 u16 std_vddc;
2287 u32 tmp = RREG32(DC_STUTTER_CNTL);
2288
2289 level->gen2PCIE = pi->pcie_gen2 ?
2290 ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
2291
2292 ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
2293 if (ret)
2294 return ret;
2295
2296 level->mcFlags = 0;
2297 if (pi->mclk_stutter_mode_threshold &&
2298 (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
2299 !eg_pi->uvd_enabled &&
2300 (tmp & DC_STUTTER_ENABLE_A) &&
2301 (tmp & DC_STUTTER_ENABLE_B))
2302 level->mcFlags |= NISLANDS_SMC_MC_STUTTER_EN;
2303
2304 if (pi->mem_gddr5) {
2305 if (pl->mclk > pi->mclk_edc_enable_threshold)
2306 level->mcFlags |= NISLANDS_SMC_MC_EDC_RD_FLAG;
2307 if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
2308 level->mcFlags |= NISLANDS_SMC_MC_EDC_WR_FLAG;
2309
2310 level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
2311
2312 if (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) {
2313 if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
2314 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2315 dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2316 else
2317 dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2318 } else {
2319 dll_state_on = false;
2320 if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
2321 level->mcFlags |= NISLANDS_SMC_MC_RTT_ENABLE;
2322 }
2323
2324 ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk,
2325 &level->mclk,
2326 (level->strobeMode & NISLANDS_SMC_STROBE_ENABLE) != 0,
2327 dll_state_on);
2328 } else
2329 ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, true, true);
2330
2331 if (ret)
2332 return ret;
2333
2334 ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
2335 pl->vddc, &level->vddc);
2336 if (ret)
2337 return ret;
2338
2339 ret = ni_get_std_voltage_value(rdev, &level->vddc, &std_vddc);
2340 if (ret)
2341 return ret;
2342
2343 ni_populate_std_voltage_value(rdev, std_vddc,
2344 level->vddc.index, &level->std_vddc);
2345
2346 if (eg_pi->vddci_control) {
2347 ret = ni_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
2348 pl->vddci, &level->vddci);
2349 if (ret)
2350 return ret;
2351 }
2352
2353 ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
2354
2355 return ret;
2356 }
2357
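/* Compute the aT activity thresholds that trigger transitions between
 * adjacent performance levels, scaled by the bSP switching periods.
 */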
2358 static int ni_populate_smc_t(struct radeon_device *rdev,
2359 struct radeon_ps *radeon_state,
2360 NISLANDS_SMC_SWSTATE *smc_state)
2361 {
2362 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2363 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2364 struct ni_ps *state = ni_get_ps(radeon_state);
2365 u32 a_t;
2366 u32 t_l, t_h;
2367 u32 high_bsp;
2368 int i, ret;
2369
2370 if (state->performance_level_count >= 9)
2371 return -EINVAL;
2372
2373 if (state->performance_level_count < 2) {
2374 a_t = CG_R(0xffff) | CG_L(0);
2375 smc_state->levels[0].aT = cpu_to_be32(a_t);
2376 return 0;
2377 }
2378
2379 smc_state->levels[0].aT = cpu_to_be32(0);
2380
2381 for (i = 0; i <= state->performance_level_count - 2; i++) {
2382 if (eg_pi->uvd_enabled)
2383 ret = r600_calculate_at(
2384 1000 * (i * (eg_pi->smu_uvd_hs ? 2 : 8) + 2),
2385 100 * R600_AH_DFLT,
2386 state->performance_levels[i + 1].sclk,
2387 state->performance_levels[i].sclk,
2388 &t_l,
2389 &t_h);
2390 else
2391 ret = r600_calculate_at(
2392 1000 * (i + 1),
2393 100 * R600_AH_DFLT,
2394 state->performance_levels[i + 1].sclk,
2395 state->performance_levels[i].sclk,
2396 &t_l,
2397 &t_h);
2398
2399 if (ret) {
2400 t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
2401 t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
2402 }
2403
2404 a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
2405 a_t |= CG_R(t_l * pi->bsp / 20000);
2406 smc_state->levels[i].aT = cpu_to_be32(a_t);
2407
2408 high_bsp = (i == state->performance_level_count - 2) ?
2409 pi->pbsp : pi->bsp;
2410
2411 a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
2412 smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
2413 }
2414
2415 return 0;
2416 }
2417
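/* Fill in the per-level DPM2 power containment (TDP limiting) parameters
 * and write the power boost limit to SMC SRAM.
 */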
2418 static int ni_populate_power_containment_values(struct radeon_device *rdev,
2419 struct radeon_ps *radeon_state,
2420 NISLANDS_SMC_SWSTATE *smc_state)
2421 {
2422 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2423 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2424 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2425 struct ni_ps *state = ni_get_ps(radeon_state);
2426 u32 prev_sclk;
2427 u32 max_sclk;
2428 u32 min_sclk;
2429 int i, ret;
2430 u32 tdp_limit;
2431 u32 near_tdp_limit;
2432 u32 power_boost_limit;
2433 u8 max_ps_percent;
2434
2435 if (!ni_pi->enable_power_containment)
2436 return 0;
2437
2438 if (state->performance_level_count == 0)
2439 return -EINVAL;
2440
2441 if (smc_state->levelCount != state->performance_level_count)
2442 return -EINVAL;
2443
2444 ret = ni_calculate_adjusted_tdp_limits(rdev,
2445 false, /* ??? */
2446 rdev->pm.dpm.tdp_adjustment,
2447 &tdp_limit,
2448 &near_tdp_limit);
2449 if (ret)
2450 return ret;
2451
2452 power_boost_limit = ni_calculate_power_boost_limit(rdev, radeon_state, near_tdp_limit);
2453
2454 ret = rv770_write_smc_sram_dword(rdev,
2455 pi->state_table_start +
2456 offsetof(NISLANDS_SMC_STATETABLE, dpm2Params) +
2457 offsetof(PP_NIslands_DPM2Parameters, PowerBoostLimit),
2458 ni_scale_power_for_smc(power_boost_limit, ni_get_smc_power_scaling_factor(rdev)),
2459 pi->sram_end);
2460 if (ret)
2461 power_boost_limit = 0;
2462
2463 smc_state->levels[0].dpm2.MaxPS = 0;
2464 smc_state->levels[0].dpm2.NearTDPDec = 0;
2465 smc_state->levels[0].dpm2.AboveSafeInc = 0;
2466 smc_state->levels[0].dpm2.BelowSafeInc = 0;
2467 smc_state->levels[0].stateFlags |= power_boost_limit ? PPSMC_STATEFLAG_POWERBOOST : 0;
2468
2469 for (i = 1; i < state->performance_level_count; i++) {
2470 prev_sclk = state->performance_levels[i-1].sclk;
2471 max_sclk = state->performance_levels[i].sclk;
2472 max_ps_percent = (i != (state->performance_level_count - 1)) ?
2473 NISLANDS_DPM2_MAXPS_PERCENT_M : NISLANDS_DPM2_MAXPS_PERCENT_H;
2474
2475 if (max_sclk < prev_sclk)
2476 return -EINVAL;
2477
2478 if ((max_ps_percent == 0) || (prev_sclk == max_sclk) || eg_pi->uvd_enabled)
2479 min_sclk = max_sclk;
2480 else if (i == 1)
2481 min_sclk = prev_sclk;
2482 else
2483 min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
2484
2485 if (min_sclk < state->performance_levels[0].sclk)
2486 min_sclk = state->performance_levels[0].sclk;
2487
2488 if (min_sclk == 0)
2489 return -EINVAL;
2490
2491 smc_state->levels[i].dpm2.MaxPS =
2492 (u8)((NISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
2493 smc_state->levels[i].dpm2.NearTDPDec = NISLANDS_DPM2_NEAR_TDP_DEC;
2494 smc_state->levels[i].dpm2.AboveSafeInc = NISLANDS_DPM2_ABOVE_SAFE_INC;
2495 smc_state->levels[i].dpm2.BelowSafeInc = NISLANDS_DPM2_BELOW_SAFE_INC;
2496 smc_state->levels[i].stateFlags |=
2497 ((i != (state->performance_level_count - 1)) && power_boost_limit) ?
2498 PPSMC_STATEFLAG_POWERBOOST : 0;
2499 }
2500
2501 return 0;
2502 }
2503
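/* Program the per-level SQ power throttle ramping values; levels below
 * the ramping threshold, or defaults that do not fit the register fields,
 * fall back to the disabled (all-ones mask) values.
 */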
2504 static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
2505 struct radeon_ps *radeon_state,
2506 NISLANDS_SMC_SWSTATE *smc_state)
2507 {
2508 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2509 struct ni_ps *state = ni_get_ps(radeon_state);
2510 u32 sq_power_throttle;
2511 u32 sq_power_throttle2;
2512 bool enable_sq_ramping = ni_pi->enable_sq_ramping;
2513 int i;
2514
2515 if (state->performance_level_count == 0)
2516 return -EINVAL;
2517
2518 if (smc_state->levelCount != state->performance_level_count)
2519 return -EINVAL;
2520
2521 if (rdev->pm.dpm.sq_ramping_threshold == 0)
2522 return -EINVAL;
2523
2524 if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
2525 enable_sq_ramping = false;
2526
2527 if (NISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
2528 enable_sq_ramping = false;
2529
2530 if (NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
2531 enable_sq_ramping = false;
2532
2533 if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2534 enable_sq_ramping = false;
2535
2536 if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2537 enable_sq_ramping = false;
2538
2539 for (i = 0; i < state->performance_level_count; i++) {
2540 sq_power_throttle = 0;
2541 sq_power_throttle2 = 0;
2542
2543 if ((state->performance_levels[i].sclk >= rdev->pm.dpm.sq_ramping_threshold) &&
2544 enable_sq_ramping) {
2545 sq_power_throttle |= MAX_POWER(NISLANDS_DPM2_SQ_RAMP_MAX_POWER);
2546 sq_power_throttle |= MIN_POWER(NISLANDS_DPM2_SQ_RAMP_MIN_POWER);
2547 sq_power_throttle2 |= MAX_POWER_DELTA(NISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
2548 sq_power_throttle2 |= STI_SIZE(NISLANDS_DPM2_SQ_RAMP_STI_SIZE);
2549 sq_power_throttle2 |= LTI_RATIO(NISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
2550 } else {
2551 sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
2552 sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
2553 }
2554
2555 smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
2556 smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
2557 }
2558
2559 return 0;
2560 }
2561
2562 static int ni_enable_power_containment(struct radeon_device *rdev,
2563 struct radeon_ps *radeon_new_state,
2564 bool enable)
2565 {
2566 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2567 PPSMC_Result smc_result;
2568 int ret = 0;
2569
2570 if (ni_pi->enable_power_containment) {
2571 if (enable) {
2572 if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
2573 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingActive);
2574 if (smc_result != PPSMC_Result_OK) {
2575 ret = -EINVAL;
2576 ni_pi->pc_enabled = false;
2577 } else {
2578 ni_pi->pc_enabled = true;
2579 }
2580 }
2581 } else {
2582 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_TDPClampingInactive);
2583 if (smc_result != PPSMC_Result_OK)
2584 ret = -EINVAL;
2585 ni_pi->pc_enabled = false;
2586 }
2587 }
2588
2589 return ret;
2590 }
2591
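/* Convert a full driver power state into an SMC swstate: per-level
 * conversion plus display watermarks, AC timing indices, switching
 * periods, power containment, SQ ramping and transition thresholds.
 */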
2592 static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
2593 struct radeon_ps *radeon_state,
2594 NISLANDS_SMC_SWSTATE *smc_state)
2595 {
2596 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2597 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2598 struct ni_ps *state = ni_get_ps(radeon_state);
2599 int i, ret;
2600 u32 threshold = state->performance_levels[state->performance_level_count - 1].sclk * 100 / 100; /* 100% of the top sclk */
2601
2602 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
2603 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
2604
2605 smc_state->levelCount = 0;
2606
2607 if (state->performance_level_count > NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE)
2608 return -EINVAL;
2609
2610 for (i = 0; i < state->performance_level_count; i++) {
2611 ret = ni_convert_power_level_to_smc(rdev, &state->performance_levels[i],
2612 &smc_state->levels[i]);
2613 smc_state->levels[i].arbRefreshState =
2614 (u8)(NISLANDS_DRIVER_STATE_ARB_INDEX + i);
2615
2616 if (ret)
2617 return ret;
2618
2619 if (ni_pi->enable_power_containment)
2620 smc_state->levels[i].displayWatermark =
2621 (state->performance_levels[i].sclk < threshold) ?
2622 PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
2623 else
2624 smc_state->levels[i].displayWatermark = (i < 2) ?
2625 PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
2626
2627 if (eg_pi->dynamic_ac_timing)
2628 smc_state->levels[i].ACIndex = NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
2629 else
2630 smc_state->levels[i].ACIndex = 0;
2631
2632 smc_state->levelCount++;
2633 }
2634
2635 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_watermark_threshold,
2636 cpu_to_be32(threshold / 512));
2637
2638 ni_populate_smc_sp(rdev, radeon_state, smc_state);
2639
2640 ret = ni_populate_power_containment_values(rdev, radeon_state, smc_state);
2641 if (ret)
2642 ni_pi->enable_power_containment = false;
2643
2644 ret = ni_populate_sq_ramping_values(rdev, radeon_state, smc_state);
2645 if (ret)
2646 ni_pi->enable_sq_ramping = false;
2647
2648 return ni_populate_smc_t(rdev, radeon_state, smc_state);
2649 }
2650
2651 static int ni_upload_sw_state(struct radeon_device *rdev,
2652 struct radeon_ps *radeon_new_state)
2653 {
2654 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2655 u16 address = pi->state_table_start +
2656 offsetof(NISLANDS_SMC_STATETABLE, driverState);
2657 u16 state_size = sizeof(NISLANDS_SMC_SWSTATE) +
2658 ((NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1) * sizeof(NISLANDS_SMC_HW_PERFORMANCE_LEVEL));
2659 int ret;
2660 NISLANDS_SMC_SWSTATE *smc_state = kzalloc(state_size, GFP_KERNEL);
2661
2662 if (smc_state == NULL)
2663 return -ENOMEM;
2664
2665 ret = ni_convert_power_state_to_smc(rdev, radeon_new_state, smc_state);
2666 if (ret)
2667 goto done;
2668
2669 ret = rv770_copy_bytes_to_smc(rdev, address, (u8 *)smc_state, state_size, pi->sram_end);
2670
2671 done:
2672 kfree(smc_state);
2673
2674 return ret;
2675 }
2676
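/* Append derived EMRS/MRS/MRS1 entries to the MC register table,
 * combining the live register contents with the per-entry table data.
 */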
2677 static int ni_set_mc_special_registers(struct radeon_device *rdev,
2678 struct ni_mc_reg_table *table)
2679 {
2680 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2681 u8 i, j, k;
2682 u32 temp_reg;
2683
2684 for (i = 0, j = table->last; i < table->last; i++) {
2685 switch (table->mc_reg_address[i].s1) {
2686 case MC_SEQ_MISC1 >> 2:
2687 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2688 return -EINVAL;
2689 temp_reg = RREG32(MC_PMG_CMD_EMRS);
2690 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
2691 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
2692 for (k = 0; k < table->num_entries; k++)
2693 table->mc_reg_table_entry[k].mc_data[j] =
2694 ((temp_reg & 0xffff0000)) |
2695 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2696 j++;
2697 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2698 return -EINVAL;
2699
2700 temp_reg = RREG32(MC_PMG_CMD_MRS);
2701 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
2702 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
2703 for(k = 0; k < table->num_entries; k++) {
2704 table->mc_reg_table_entry[k].mc_data[j] =
2705 (temp_reg & 0xffff0000) |
2706 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2707 if (!pi->mem_gddr5)
2708 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2709 }
2710 j++;
2711 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2712 return -EINVAL;
2713 break;
2714 case MC_SEQ_RESERVE_M >> 2:
if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
2715 temp_reg = RREG32(MC_PMG_CMD_MRS1);
2716 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
2717 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
2718 for (k = 0; k < table->num_entries; k++)
2719 table->mc_reg_table_entry[k].mc_data[j] =
2720 (temp_reg & 0xffff0000) |
2721 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2722 j++;
2723 if (j >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2724 return -EINVAL;
2725 break;
2726 default:
2727 break;
2728 }
2729 }
2730
2731 table->last = j;
2732
2733 return 0;
2734 }
2735
2736 static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
2737 {
2738 bool result = true;
2739
2740 switch (in_reg) {
2741 case MC_SEQ_RAS_TIMING >> 2:
2742 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
2743 break;
2744 case MC_SEQ_CAS_TIMING >> 2:
2745 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
2746 break;
2747 case MC_SEQ_MISC_TIMING >> 2:
2748 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
2749 break;
2750 case MC_SEQ_MISC_TIMING2 >> 2:
2751 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
2752 break;
2753 case MC_SEQ_RD_CTL_D0 >> 2:
2754 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
2755 break;
2756 case MC_SEQ_RD_CTL_D1 >> 2:
2757 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
2758 break;
2759 case MC_SEQ_WR_CTL_D0 >> 2:
2760 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
2761 break;
2762 case MC_SEQ_WR_CTL_D1 >> 2:
2763 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
2764 break;
2765 case MC_PMG_CMD_EMRS >> 2:
2766 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
2767 break;
2768 case MC_PMG_CMD_MRS >> 2:
2769 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
2770 break;
2771 case MC_PMG_CMD_MRS1 >> 2:
2772 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
2773 break;
2774 case MC_SEQ_PMG_TIMING >> 2:
2775 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
2776 break;
2777 case MC_PMG_CMD_MRS2 >> 2:
2778 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
2779 break;
2780 default:
2781 result = false;
2782 break;
2783 }
2784
2785 return result;
2786 }
2787
2788 static void ni_set_valid_flag(struct ni_mc_reg_table *table)
2789 {
2790 u8 i, j;
2791
2792 for (i = 0; i < table->last; i++) {
2793 for (j = 1; j < table->num_entries; j++) {
2794 if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
2795 table->valid_flag |= 1 << i;
2796 break;
2797 }
2798 }
2799 }
2800 }
2801
2802 static void ni_set_s0_mc_reg_index(struct ni_mc_reg_table *table)
2803 {
2804 u32 i;
2805 u16 address;
2806
2807 for (i = 0; i < table->last; i++)
2808 table->mc_reg_address[i].s0 =
2809 ni_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
2810 address : table->mc_reg_address[i].s1;
2811 }
2812
2813 static int ni_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
2814 struct ni_mc_reg_table *ni_table)
2815 {
2816 u8 i, j;
2817
2818 if (table->last > SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2819 return -EINVAL;
2820 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
2821 return -EINVAL;
2822
2823 for (i = 0; i < table->last; i++)
2824 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2825 ni_table->last = table->last;
2826
2827 for (i = 0; i < table->num_entries; i++) {
2828 ni_table->mc_reg_table_entry[i].mclk_max =
2829 table->mc_reg_table_entry[i].mclk_max;
2830 for (j = 0; j < table->last; j++)
2831 ni_table->mc_reg_table_entry[i].mc_data[j] =
2832 table->mc_reg_table_entry[i].mc_data[j];
2833 }
2834 ni_table->num_entries = table->num_entries;
2835
2836 return 0;
2837 }
2838
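/* Snapshot the current MC timing registers into their _LP shadows, read
 * the AC timing table from the VBIOS and post-process it for the SMC.
 */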
2839 static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
2840 {
2841 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2842 int ret;
2843 struct atom_mc_reg_table *table;
2844 struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
2845 u8 module_index = rv770_get_memory_module_index(rdev);
2846
2847 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
2848 if (!table)
2849 return -ENOMEM;
2850
2851 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
2852 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
2853 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
2854 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
2855 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
2856 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
2857 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
2858 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
2859 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
2860 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
2861 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
2862 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
2863 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
2864
2865 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
2866
2867 if (ret)
2868 goto init_mc_done;
2869
2870 ret = ni_copy_vbios_mc_reg_table(table, ni_table);
2871
2872 if (ret)
2873 goto init_mc_done;
2874
2875 ni_set_s0_mc_reg_index(ni_table);
2876
2877 ret = ni_set_mc_special_registers(rdev, ni_table);
2878
2879 if (ret)
2880 goto init_mc_done;
2881
2882 ni_set_valid_flag(ni_table);
2883
2884 init_mc_done:
2885 kfree(table);
2886
2887 return ret;
2888 }
2889
2890 static void ni_populate_mc_reg_addresses(struct radeon_device *rdev,
2891 SMC_NIslands_MCRegisters *mc_reg_table)
2892 {
2893 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2894 u32 i, j;
2895
2896 for (i = 0, j = 0; j < ni_pi->mc_reg_table.last; j++) {
2897 if (ni_pi->mc_reg_table.valid_flag & (1 << j)) {
2898 if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE)
2899 break;
2900 mc_reg_table->address[i].s0 =
2901 cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s0);
2902 mc_reg_table->address[i].s1 =
2903 cpu_to_be16(ni_pi->mc_reg_table.mc_reg_address[j].s1);
2904 i++;
2905 }
2906 }
2907 mc_reg_table->last = (u8)i;
2908 }
2909
2911 static void ni_convert_mc_registers(struct ni_mc_reg_entry *entry,
2912 SMC_NIslands_MCRegisterSet *data,
2913 u32 num_entries, u32 valid_flag)
2914 {
2915 u32 i, j;
2916
2917 for (i = 0, j = 0; j < num_entries; j++) {
2918 if (valid_flag & (1 << j)) {
2919 data->value[i] = cpu_to_be32(entry->mc_data[j]);
2920 i++;
2921 }
2922 }
2923 }
2924
2925 static void ni_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
2926 struct rv7xx_pl *pl,
2927 SMC_NIslands_MCRegisterSet *mc_reg_table_data)
2928 {
2929 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2930 u32 i = 0;
2931
2932 for (i = 0; i < ni_pi->mc_reg_table.num_entries; i++) {
2933 if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
2934 break;
2935 }
2936
2937 if ((i == ni_pi->mc_reg_table.num_entries) && (i > 0))
2938 --i;
2939
2940 ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[i],
2941 mc_reg_table_data,
2942 ni_pi->mc_reg_table.last,
2943 ni_pi->mc_reg_table.valid_flag);
2944 }
2945
2946 static void ni_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
2947 struct radeon_ps *radeon_state,
2948 SMC_NIslands_MCRegisters *mc_reg_table)
2949 {
2950 struct ni_ps *state = ni_get_ps(radeon_state);
2951 int i;
2952
2953 for (i = 0; i < state->performance_level_count; i++) {
2954 ni_convert_mc_reg_table_entry_to_smc(rdev,
2955 &state->performance_levels[i],
2956 &mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
2957 }
2958 }
2959
2960 static int ni_populate_mc_reg_table(struct radeon_device *rdev,
2961 struct radeon_ps *radeon_boot_state)
2962 {
2963 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2964 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2965 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2966 struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
2967 SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
2968
2969 memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));
2970
2971 rv770_write_smc_soft_register(rdev, NI_SMC_SOFT_REGISTER_seq_index, 1);
2972
2973 ni_populate_mc_reg_addresses(rdev, mc_reg_table);
2974
2975 ni_convert_mc_reg_table_entry_to_smc(rdev, &boot_state->performance_levels[0],
2976 &mc_reg_table->data[0]);
2977
2978 ni_convert_mc_registers(&ni_pi->mc_reg_table.mc_reg_table_entry[0],
2979 &mc_reg_table->data[1],
2980 ni_pi->mc_reg_table.last,
2981 ni_pi->mc_reg_table.valid_flag);
2982
2983 ni_convert_mc_reg_table_to_smc(rdev, radeon_boot_state, mc_reg_table);
2984
2985 return rv770_copy_bytes_to_smc(rdev, eg_pi->mc_reg_table_start,
2986 (u8 *)mc_reg_table,
2987 sizeof(SMC_NIslands_MCRegisters),
2988 pi->sram_end);
2989 }
2990
2991 static int ni_upload_mc_reg_table(struct radeon_device *rdev,
2992 struct radeon_ps *radeon_new_state)
2993 {
2994 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2995 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2996 struct ni_power_info *ni_pi = ni_get_pi(rdev);
2997 struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
2998 SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
2999 u16 address;
3000
3001 memset(mc_reg_table, 0, sizeof(SMC_NIslands_MCRegisters));
3002
3003 ni_convert_mc_reg_table_to_smc(rdev, radeon_new_state, mc_reg_table);
3004
3005 address = eg_pi->mc_reg_table_start +
3006 (u16)offsetof(SMC_NIslands_MCRegisters, data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
3007
3008 return rv770_copy_bytes_to_smc(rdev, address,
3009 (u8 *)&mc_reg_table->data[NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
3010 sizeof(SMC_NIslands_MCRegisterSet) * ni_new_state->performance_level_count,
3011 pi->sram_end);
3012 }
3013
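/* Fill the CAC leakage LUT by evaluating the leakage model across the
 * supported voltages and an 8 degree C stepped temperature range; unused
 * voltage columns are padded with the maximum leakage seen.
 */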
3014 static int ni_init_driver_calculated_leakage_table(struct radeon_device *rdev,
3015 PP_NIslands_CACTABLES *cac_tables)
3016 {
3017 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3018 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3019 u32 leakage = 0;
3020 unsigned int i, j, table_size;
3021 s32 t;
3022 u32 smc_leakage, max_leakage = 0;
3023 u32 scaling_factor;
3024
3025 table_size = eg_pi->vddc_voltage_table.count;
3026
3027 if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
3028 table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
3029
3030 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
3031
3032 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++) {
3033 for (j = 0; j < table_size; j++) {
3034 t = (1000 * ((i + 1) * 8));
3035
3036 if (t < ni_pi->cac_data.leakage_minimum_temperature)
3037 t = ni_pi->cac_data.leakage_minimum_temperature;
3038
3039 ni_calculate_leakage_for_v_and_t(rdev,
3040 &ni_pi->cac_data.leakage_coefficients,
3041 eg_pi->vddc_voltage_table.entries[j].value,
3042 t,
3043 ni_pi->cac_data.i_leakage,
3044 &leakage);
3045
3046 smc_leakage = ni_scale_power_for_smc(leakage, scaling_factor) / 1000;
3047 if (smc_leakage > max_leakage)
3048 max_leakage = smc_leakage;
3049
3050 cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(smc_leakage);
3051 }
3052 }
3053
3054 for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
3055 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3056 cac_tables->cac_lkge_lut[i][j] = cpu_to_be32(max_leakage);
3057 }
3058 return 0;
3059 }
3060
3061 static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
3062 PP_NIslands_CACTABLES *cac_tables)
3063 {
3064 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3065 struct radeon_cac_leakage_table *leakage_table =
3066 &rdev->pm.dpm.dyn_state.cac_leakage_table;
3067 u32 i, j, table_size;
3068 u32 smc_leakage, max_leakage = 0;
3069 u32 scaling_factor;
3070
3071 if (!leakage_table)
3072 return -EINVAL;
3073
3074 table_size = leakage_table->count;
3075
3076 if (eg_pi->vddc_voltage_table.count != table_size)
3077 table_size = (eg_pi->vddc_voltage_table.count < leakage_table->count) ?
3078 eg_pi->vddc_voltage_table.count : leakage_table->count;
3079
3080 if (SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES < table_size)
3081 table_size = SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
3082
3083 if (table_size == 0)
3084 return -EINVAL;
3085
3086 scaling_factor = ni_get_smc_power_scaling_factor(rdev);
3087
3088 for (j = 0; j < table_size; j++) {
3089 smc_leakage = leakage_table->entries[j].leakage;
3090
3091 if (smc_leakage > max_leakage)
3092 max_leakage = smc_leakage;
3093
3094 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3095 cac_tables->cac_lkge_lut[i][j] =
3096 cpu_to_be32(ni_scale_power_for_smc(smc_leakage, scaling_factor));
3097 }
3098
3099 for (j = table_size; j < SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
3100 for (i = 0; i < SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES; i++)
3101 cac_tables->cac_lkge_lut[i][j] =
3102 cpu_to_be32(ni_scale_power_for_smc(max_leakage, scaling_factor));
3103 }
3104 return 0;
3105 }
3106
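/* Build the CAC tables (TID weighting, BIF LUT and leakage LUT) and
 * upload them to SMC SRAM.
 */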
3107 static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
3108 {
3109 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3110 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3111 PP_NIslands_CACTABLES *cac_tables = NULL;
3112 int i, ret;
3113 u32 reg;
3114
3115 if (!ni_pi->enable_cac)
3116 return 0;
3117
3118 cac_tables = kzalloc(sizeof(PP_NIslands_CACTABLES), GFP_KERNEL);
3119 if (!cac_tables)
3120 return -ENOMEM;
3121
3122 reg = RREG32(CG_CAC_CTRL) & ~(TID_CNT_MASK | TID_UNIT_MASK);
3123 reg |= (TID_CNT(ni_pi->cac_weights->tid_cnt) |
3124 TID_UNIT(ni_pi->cac_weights->tid_unit));
3125 WREG32(CG_CAC_CTRL, reg);
3126
3127 for (i = 0; i < NISLANDS_DCCAC_MAX_LEVELS; i++)
3128 ni_pi->dc_cac_table[i] = ni_pi->cac_weights->dc_cac[i];
3129
3130 for (i = 0; i < SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES; i++)
3131 cac_tables->cac_bif_lut[i] = ni_pi->cac_weights->pcie_cac[i];
3132
3133 ni_pi->cac_data.i_leakage = rdev->pm.dpm.cac_leakage;
3134 ni_pi->cac_data.pwr_const = 0;
3135 ni_pi->cac_data.dc_cac_value = ni_pi->dc_cac_table[NISLANDS_DCCAC_LEVEL_0];
3136 ni_pi->cac_data.bif_cac_value = 0;
3137 ni_pi->cac_data.mc_wr_weight = ni_pi->cac_weights->mc_write_weight;
3138 ni_pi->cac_data.mc_rd_weight = ni_pi->cac_weights->mc_read_weight;
3139 ni_pi->cac_data.allow_ovrflw = 0;
3140 ni_pi->cac_data.l2num_win_tdp = ni_pi->lta_window_size;
3141 ni_pi->cac_data.num_win_tdp = 0;
3142 ni_pi->cac_data.lts_truncate_n = ni_pi->lts_truncate;
3143
3144 if (ni_pi->driver_calculate_cac_leakage)
3145 ret = ni_init_driver_calculated_leakage_table(rdev, cac_tables);
3146 else
3147 ret = ni_init_simplified_leakage_table(rdev, cac_tables);
3148
3149 if (ret)
3150 goto done_free;
3151
3152 cac_tables->pwr_const = cpu_to_be32(ni_pi->cac_data.pwr_const);
3153 cac_tables->dc_cacValue = cpu_to_be32(ni_pi->cac_data.dc_cac_value);
3154 cac_tables->bif_cacValue = cpu_to_be32(ni_pi->cac_data.bif_cac_value);
3155 cac_tables->AllowOvrflw = ni_pi->cac_data.allow_ovrflw;
3156 cac_tables->MCWrWeight = ni_pi->cac_data.mc_wr_weight;
3157 cac_tables->MCRdWeight = ni_pi->cac_data.mc_rd_weight;
3158 cac_tables->numWin_TDP = ni_pi->cac_data.num_win_tdp;
3159 cac_tables->l2numWin_TDP = ni_pi->cac_data.l2num_win_tdp;
3160 cac_tables->lts_truncate_n = ni_pi->cac_data.lts_truncate_n;
3161
3162 ret = rv770_copy_bytes_to_smc(rdev, ni_pi->cac_table_start, (u8 *)cac_tables,
3163 sizeof(PP_NIslands_CACTABLES), pi->sram_end);
3164
3165 done_free:
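/* CAC setup failure is non-fatal: disable CAC and power containment
 * and report success anyway.
 */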
3166 if (ret) {
3167 ni_pi->enable_cac = false;
3168 ni_pi->enable_power_containment = false;
3169 }
3170
3171 kfree(cac_tables);
3172
3173 return 0;
3174 }
3175
3176 static int ni_initialize_hardware_cac_manager(struct radeon_device *rdev)
3177 {
3178 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3179 u32 reg;
3180
3181 if (!ni_pi->enable_cac ||
3182 !ni_pi->cac_configuration_required)
3183 return 0;
3184
3185 if (ni_pi->cac_weights == NULL)
3186 return -EINVAL;
3187
3188 reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_0) & ~(WEIGHT_TCP_SIG0_MASK |
3189 WEIGHT_TCP_SIG1_MASK |
3190 WEIGHT_TA_SIG_MASK);
3191 reg |= (WEIGHT_TCP_SIG0(ni_pi->cac_weights->weight_tcp_sig0) |
3192 WEIGHT_TCP_SIG1(ni_pi->cac_weights->weight_tcp_sig1) |
3193 WEIGHT_TA_SIG(ni_pi->cac_weights->weight_ta_sig));
3194 WREG32_CG(CG_CAC_REGION_1_WEIGHT_0, reg);
3195
3196 reg = RREG32_CG(CG_CAC_REGION_1_WEIGHT_1) & ~(WEIGHT_TCC_EN0_MASK |
3197 WEIGHT_TCC_EN1_MASK |
3198 WEIGHT_TCC_EN2_MASK);
3199 reg |= (WEIGHT_TCC_EN0(ni_pi->cac_weights->weight_tcc_en0) |
3200 WEIGHT_TCC_EN1(ni_pi->cac_weights->weight_tcc_en1) |
3201 WEIGHT_TCC_EN2(ni_pi->cac_weights->weight_tcc_en2));
3202 WREG32_CG(CG_CAC_REGION_1_WEIGHT_1, reg);
3203
3204 reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_0) & ~(WEIGHT_CB_EN0_MASK |
3205 WEIGHT_CB_EN1_MASK |
3206 WEIGHT_CB_EN2_MASK |
3207 WEIGHT_CB_EN3_MASK);
3208 reg |= (WEIGHT_CB_EN0(ni_pi->cac_weights->weight_cb_en0) |
3209 WEIGHT_CB_EN1(ni_pi->cac_weights->weight_cb_en1) |
3210 WEIGHT_CB_EN2(ni_pi->cac_weights->weight_cb_en2) |
3211 WEIGHT_CB_EN3(ni_pi->cac_weights->weight_cb_en3));
3212 WREG32_CG(CG_CAC_REGION_2_WEIGHT_0, reg);
3213
3214 reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_1) & ~(WEIGHT_DB_SIG0_MASK |
3215 WEIGHT_DB_SIG1_MASK |
3216 WEIGHT_DB_SIG2_MASK |
3217 WEIGHT_DB_SIG3_MASK);
3218 reg |= (WEIGHT_DB_SIG0(ni_pi->cac_weights->weight_db_sig0) |
3219 WEIGHT_DB_SIG1(ni_pi->cac_weights->weight_db_sig1) |
3220 WEIGHT_DB_SIG2(ni_pi->cac_weights->weight_db_sig2) |
3221 WEIGHT_DB_SIG3(ni_pi->cac_weights->weight_db_sig3));
3222 WREG32_CG(CG_CAC_REGION_2_WEIGHT_1, reg);
3223
3224 reg = RREG32_CG(CG_CAC_REGION_2_WEIGHT_2) & ~(WEIGHT_SXM_SIG0_MASK |
3225 WEIGHT_SXM_SIG1_MASK |
3226 WEIGHT_SXM_SIG2_MASK |
3227 WEIGHT_SXS_SIG0_MASK |
3228 WEIGHT_SXS_SIG1_MASK);
3229 reg |= (WEIGHT_SXM_SIG0(ni_pi->cac_weights->weight_sxm_sig0) |
3230 WEIGHT_SXM_SIG1(ni_pi->cac_weights->weight_sxm_sig1) |
3231 WEIGHT_SXM_SIG2(ni_pi->cac_weights->weight_sxm_sig2) |
3232 WEIGHT_SXS_SIG0(ni_pi->cac_weights->weight_sxs_sig0) |
3233 WEIGHT_SXS_SIG1(ni_pi->cac_weights->weight_sxs_sig1));
3234 WREG32_CG(CG_CAC_REGION_2_WEIGHT_2, reg);
3235
3236 reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_0) & ~(WEIGHT_XBR_0_MASK |
3237 WEIGHT_XBR_1_MASK |
3238 WEIGHT_XBR_2_MASK |
3239 WEIGHT_SPI_SIG0_MASK);
3240 reg |= (WEIGHT_XBR_0(ni_pi->cac_weights->weight_xbr_0) |
3241 WEIGHT_XBR_1(ni_pi->cac_weights->weight_xbr_1) |
3242 WEIGHT_XBR_2(ni_pi->cac_weights->weight_xbr_2) |
3243 WEIGHT_SPI_SIG0(ni_pi->cac_weights->weight_spi_sig0));
3244 WREG32_CG(CG_CAC_REGION_3_WEIGHT_0, reg);
3245
3246 reg = RREG32_CG(CG_CAC_REGION_3_WEIGHT_1) & ~(WEIGHT_SPI_SIG1_MASK |
3247 WEIGHT_SPI_SIG2_MASK |
3248 WEIGHT_SPI_SIG3_MASK |
3249 WEIGHT_SPI_SIG4_MASK |
3250 WEIGHT_SPI_SIG5_MASK);
3251 reg |= (WEIGHT_SPI_SIG1(ni_pi->cac_weights->weight_spi_sig1) |
3252 WEIGHT_SPI_SIG2(ni_pi->cac_weights->weight_spi_sig2) |
3253 WEIGHT_SPI_SIG3(ni_pi->cac_weights->weight_spi_sig3) |
3254 WEIGHT_SPI_SIG4(ni_pi->cac_weights->weight_spi_sig4) |
3255 WEIGHT_SPI_SIG5(ni_pi->cac_weights->weight_spi_sig5));
3256 WREG32_CG(CG_CAC_REGION_3_WEIGHT_1, reg);
3257
3258 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_0) & ~(WEIGHT_LDS_SIG0_MASK |
3259 WEIGHT_LDS_SIG1_MASK |
3260 WEIGHT_SC_MASK);
3261 reg |= (WEIGHT_LDS_SIG0(ni_pi->cac_weights->weight_lds_sig0) |
3262 WEIGHT_LDS_SIG1(ni_pi->cac_weights->weight_lds_sig1) |
3263 WEIGHT_SC(ni_pi->cac_weights->weight_sc));
3264 WREG32_CG(CG_CAC_REGION_4_WEIGHT_0, reg);
3265
3266 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_1) & ~(WEIGHT_BIF_MASK |
3267 WEIGHT_CP_MASK |
3268 WEIGHT_PA_SIG0_MASK |
3269 WEIGHT_PA_SIG1_MASK |
3270 WEIGHT_VGT_SIG0_MASK);
3271 reg |= (WEIGHT_BIF(ni_pi->cac_weights->weight_bif) |
3272 WEIGHT_CP(ni_pi->cac_weights->weight_cp) |
3273 WEIGHT_PA_SIG0(ni_pi->cac_weights->weight_pa_sig0) |
3274 WEIGHT_PA_SIG1(ni_pi->cac_weights->weight_pa_sig1) |
3275 WEIGHT_VGT_SIG0(ni_pi->cac_weights->weight_vgt_sig0));
3276 WREG32_CG(CG_CAC_REGION_4_WEIGHT_1, reg);
3277
3278 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_2) & ~(WEIGHT_VGT_SIG1_MASK |
3279 WEIGHT_VGT_SIG2_MASK |
3280 WEIGHT_DC_SIG0_MASK |
3281 WEIGHT_DC_SIG1_MASK |
3282 WEIGHT_DC_SIG2_MASK);
3283 reg |= (WEIGHT_VGT_SIG1(ni_pi->cac_weights->weight_vgt_sig1) |
3284 WEIGHT_VGT_SIG2(ni_pi->cac_weights->weight_vgt_sig2) |
3285 WEIGHT_DC_SIG0(ni_pi->cac_weights->weight_dc_sig0) |
3286 WEIGHT_DC_SIG1(ni_pi->cac_weights->weight_dc_sig1) |
3287 WEIGHT_DC_SIG2(ni_pi->cac_weights->weight_dc_sig2));
3288 WREG32_CG(CG_CAC_REGION_4_WEIGHT_2, reg);
3289
3290 reg = RREG32_CG(CG_CAC_REGION_4_WEIGHT_3) & ~(WEIGHT_DC_SIG3_MASK |
3291 WEIGHT_UVD_SIG0_MASK |
3292 WEIGHT_UVD_SIG1_MASK |
3293 WEIGHT_SPARE0_MASK |
3294 WEIGHT_SPARE1_MASK);
3295 reg |= (WEIGHT_DC_SIG3(ni_pi->cac_weights->weight_dc_sig3) |
3296 WEIGHT_UVD_SIG0(ni_pi->cac_weights->weight_uvd_sig0) |
3297 WEIGHT_UVD_SIG1(ni_pi->cac_weights->weight_uvd_sig1) |
3298 WEIGHT_SPARE0(ni_pi->cac_weights->weight_spare0) |
3299 WEIGHT_SPARE1(ni_pi->cac_weights->weight_spare1));
3300 WREG32_CG(CG_CAC_REGION_4_WEIGHT_3, reg);
3301
3302 reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_0) & ~(WEIGHT_SQ_VSP_MASK |
3303 WEIGHT_SQ_VSP0_MASK);
3304 reg |= (WEIGHT_SQ_VSP(ni_pi->cac_weights->weight_sq_vsp) |
3305 WEIGHT_SQ_VSP0(ni_pi->cac_weights->weight_sq_vsp0));
3306 WREG32_CG(CG_CAC_REGION_5_WEIGHT_0, reg);
3307
3308 reg = RREG32_CG(CG_CAC_REGION_5_WEIGHT_1) & ~(WEIGHT_SQ_GPR_MASK);
3309 reg |= WEIGHT_SQ_GPR(ni_pi->cac_weights->weight_sq_gpr);
3310 WREG32_CG(CG_CAC_REGION_5_WEIGHT_1, reg);
3311
3312 reg = RREG32_CG(CG_CAC_REGION_4_OVERRIDE_4) & ~(OVR_MODE_SPARE_0_MASK |
3313 OVR_VAL_SPARE_0_MASK |
3314 OVR_MODE_SPARE_1_MASK |
3315 OVR_VAL_SPARE_1_MASK);
3316 reg |= (OVR_MODE_SPARE_0(ni_pi->cac_weights->ovr_mode_spare_0) |
3317 OVR_VAL_SPARE_0(ni_pi->cac_weights->ovr_val_spare_0) |
3318 OVR_MODE_SPARE_1(ni_pi->cac_weights->ovr_mode_spare_1) |
3319 OVR_VAL_SPARE_1(ni_pi->cac_weights->ovr_val_spare_1));
3320 WREG32_CG(CG_CAC_REGION_4_OVERRIDE_4, reg);
3321
3322 reg = RREG32(SQ_CAC_THRESHOLD) & ~(VSP_MASK |
3323 VSP0_MASK |
3324 GPR_MASK);
3325 reg |= (VSP(ni_pi->cac_weights->vsp) |
3326 VSP0(ni_pi->cac_weights->vsp0) |
3327 GPR(ni_pi->cac_weights->gpr));
3328 WREG32(SQ_CAC_THRESHOLD, reg);
3329
3330 reg = (MCDW_WR_ENABLE |
3331 MCDX_WR_ENABLE |
3332 MCDY_WR_ENABLE |
3333 MCDZ_WR_ENABLE |
3334 INDEX(0x09D4));
3335 WREG32(MC_CG_CONFIG, reg);
3336
3337 reg = (READ_WEIGHT(ni_pi->cac_weights->mc_read_weight) |
3338 WRITE_WEIGHT(ni_pi->cac_weights->mc_write_weight) |
3339 ALLOW_OVERFLOW);
3340 WREG32(MC_CG_DATAPORT, reg);
3341
3342 return 0;
3343 }
3344
3345 static int ni_enable_smc_cac(struct radeon_device *rdev,
3346 struct radeon_ps *radeon_new_state,
3347 bool enable)
3348 {
3349 struct ni_power_info *ni_pi = ni_get_pi(rdev);
3350 int ret = 0;
3351 PPSMC_Result smc_result;
3352
3353 if (ni_pi->enable_cac) {
3354 if (enable) {
3355 if (!r600_is_uvd_state(radeon_new_state->class, radeon_new_state->class2)) {
3356 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_CollectCAC_PowerCorreln);
3357
3358 if (ni_pi->support_cac_long_term_average) {
3359 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgEnable);
3360 if (PPSMC_Result_OK != smc_result)
3361 ni_pi->support_cac_long_term_average = false;
3362 }
3363
3364 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
3365 if (PPSMC_Result_OK != smc_result)
3366 ret = -EINVAL;
3367
3368 ni_pi->cac_enabled = (smc_result == PPSMC_Result_OK);
3369 }
3370 } else if (ni_pi->cac_enabled) {
3371 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
3372
3373 ni_pi->cac_enabled = false;
3374
3375 if (ni_pi->support_cac_long_term_average) {
3376 smc_result = rv770_send_msg_to_smc(rdev, PPSMC_CACLongTermAvgDisable);
3377 if (PPSMC_Result_OK != smc_result)
3378 ni_pi->support_cac_long_term_average = false;
3379 }
3380 }
3381 }
3382
3383 return ret;
3384 }
3385
3386 static int ni_pcie_performance_request(struct radeon_device *rdev,
3387 u8 perf_req, bool advertise)
3388 {
3389 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
3390
3391 #if defined(CONFIG_ACPI)
3392 if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
3393 (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
3394 if (!eg_pi->pcie_performance_request_registered)
3395 radeon_acpi_pcie_notify_device_ready(rdev);
3396 eg_pi->pcie_performance_request_registered = true;
3397 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
3398 } else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
3399 eg_pi->pcie_performance_request_registered) {
3400 eg_pi->pcie_performance_request_registered = false;
3401 return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
3402 }
3403 #endif
3404 return 0;
3405 }
3406
3407 static int ni_advertise_gen2_capability(struct radeon_device *rdev)
3408 {
3409 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
3410 u32 tmp;
3411
3412 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
3413
3414 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3415 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
3416 pi->pcie_gen2 = true;
3417 else
3418 pi->pcie_gen2 = false;
3419
3420 if (!pi->pcie_gen2)
3421 ni_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, true);
3422
3423 return 0;
3424 }
3425
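/* Configure the BIF link control so the SMC can dynamically switch the
 * PCIe link between gen1 and gen2 speeds.
 */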
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
                                            bool enable)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        u32 tmp, bif;

        tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);

        if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
            (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
                if (enable) {
                        if (!pi->boot_in_gen2) {
                                bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
                                bif |= CG_CLIENT_REQ(0xd);
                                WREG32(CG_BIF_REQ_AND_RSP, bif);
                        }
                        tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
                        tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
                        tmp |= LC_GEN2_EN_STRAP;

                        tmp |= LC_CLR_FAILED_SPD_CHANGE_CNT;
                        WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
                        udelay(10);
                        tmp &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
                        WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
                } else {
                        if (!pi->boot_in_gen2) {
                                bif = RREG32(CG_BIF_REQ_AND_RSP) & ~CG_CLIENT_REQ_MASK;
                                bif |= CG_CLIENT_REQ(0xd);
                                WREG32(CG_BIF_REQ_AND_RSP, bif);

                                tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
                                tmp &= ~LC_GEN2_EN_STRAP;
                        }
                        WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
                }
        }
}

static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
                                        bool enable)
{
        ni_enable_bif_dynamic_pcie_gen2(rdev, enable);

        if (enable)
                WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
        else
                WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

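/*
 * UVD clocks are reprogrammed either before or after the engine clock
 * switch: before the switch when sclk is dropping, after it when sclk
 * is rising, so the UVD clock change happens while the higher engine
 * clock is in effect.
 */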
void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
                                           struct radeon_ps *new_ps,
                                           struct radeon_ps *old_ps)
{
        struct ni_ps *new_state = ni_get_ps(new_ps);
        struct ni_ps *current_state = ni_get_ps(old_ps);

        if ((new_ps->vclk == old_ps->vclk) &&
            (new_ps->dclk == old_ps->dclk))
                return;

        if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
            current_state->performance_levels[current_state->performance_level_count - 1].sclk)
                return;

        radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}

void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
                                          struct radeon_ps *new_ps,
                                          struct radeon_ps *old_ps)
{
        struct ni_ps *new_state = ni_get_ps(new_ps);
        struct ni_ps *current_state = ni_get_ps(old_ps);

        if ((new_ps->vclk == old_ps->vclk) &&
            (new_ps->dclk == old_ps->dclk))
                return;

        if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
            current_state->performance_levels[current_state->performance_level_count - 1].sclk)
                return;

        radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
}

void ni_dpm_setup_asic(struct radeon_device *rdev)
{
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);

        ni_read_clock_registers(rdev);
        btc_read_arb_registers(rdev);
        rv770_get_memory_type(rdev);
        if (eg_pi->pcie_performance_request)
                ni_advertise_gen2_capability(rdev);
        rv770_get_pcie_gen2_status(rdev);
        rv770_enable_acpi_pm(rdev);
}

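/*
 * Cache the current (or requested) power state in driver private
 * storage so the generic radeon_ps points at a stable ni_ps copy
 * via ps_priv.
 */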
void ni_update_current_ps(struct radeon_device *rdev,
                          struct radeon_ps *rps)
{
        struct ni_ps *new_ps = ni_get_ps(rps);
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct ni_power_info *ni_pi = ni_get_pi(rdev);

        eg_pi->current_rps = *rps;
        ni_pi->current_ps = *new_ps;
        eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
}

void ni_update_requested_ps(struct radeon_device *rdev,
                            struct radeon_ps *rps)
{
        struct ni_ps *new_ps = ni_get_ps(rps);
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct ni_power_info *ni_pi = ni_get_pi(rdev);

        eg_pi->requested_rps = *rps;
        ni_pi->requested_ps = *new_ps;
        eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
}

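/*
 * One-shot DPM bring-up: program clock gating and voltage tables,
 * upload the SMC firmware and state tables, start the SMC, then arm
 * the thermal interrupt and throttling sources. Fails with -EINVAL
 * if DPM is already running.
 */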
int ni_dpm_enable(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
        int ret;

        if (pi->gfx_clock_gating)
                ni_cg_clockgating_default(rdev);
        if (btc_dpm_enabled(rdev))
                return -EINVAL;
        if (pi->mg_clock_gating)
                ni_mg_clockgating_default(rdev);
        if (eg_pi->ls_clock_gating)
                ni_ls_clockgating_default(rdev);
        if (pi->voltage_control) {
                rv770_enable_voltage_control(rdev, true);
                ret = cypress_construct_voltage_tables(rdev);
                if (ret) {
                        DRM_ERROR("cypress_construct_voltage_tables failed\n");
                        return ret;
                }
        }
        if (eg_pi->dynamic_ac_timing) {
                ret = ni_initialize_mc_reg_table(rdev);
                if (ret)
                        eg_pi->dynamic_ac_timing = false;
        }
        if (pi->dynamic_ss)
                cypress_enable_spread_spectrum(rdev, true);
        if (pi->thermal_protection)
                rv770_enable_thermal_protection(rdev, true);
        rv770_setup_bsp(rdev);
        rv770_program_git(rdev);
        rv770_program_tp(rdev);
        rv770_program_tpp(rdev);
        rv770_program_sstp(rdev);
        cypress_enable_display_gap(rdev);
        rv770_program_vc(rdev);
        if (pi->dynamic_pcie_gen2)
                ni_enable_dynamic_pcie_gen2(rdev, true);
        ret = rv770_upload_firmware(rdev);
        if (ret) {
                DRM_ERROR("rv770_upload_firmware failed\n");
                return ret;
        }
        ret = ni_process_firmware_header(rdev);
        if (ret) {
                DRM_ERROR("ni_process_firmware_header failed\n");
                return ret;
        }
        ret = ni_initial_switch_from_arb_f0_to_f1(rdev);
        if (ret) {
                DRM_ERROR("ni_initial_switch_from_arb_f0_to_f1 failed\n");
                return ret;
        }
        ret = ni_init_smc_table(rdev);
        if (ret) {
                DRM_ERROR("ni_init_smc_table failed\n");
                return ret;
        }
        ret = ni_init_smc_spll_table(rdev);
        if (ret) {
                DRM_ERROR("ni_init_smc_spll_table failed\n");
                return ret;
        }
        ret = ni_init_arb_table_index(rdev);
        if (ret) {
                DRM_ERROR("ni_init_arb_table_index failed\n");
                return ret;
        }
        if (eg_pi->dynamic_ac_timing) {
                ret = ni_populate_mc_reg_table(rdev, boot_ps);
                if (ret) {
                        DRM_ERROR("ni_populate_mc_reg_table failed\n");
                        return ret;
                }
        }
        ret = ni_initialize_smc_cac_tables(rdev);
        if (ret) {
                DRM_ERROR("ni_initialize_smc_cac_tables failed\n");
                return ret;
        }
        ret = ni_initialize_hardware_cac_manager(rdev);
        if (ret) {
                DRM_ERROR("ni_initialize_hardware_cac_manager failed\n");
                return ret;
        }
        ret = ni_populate_smc_tdp_limits(rdev, boot_ps);
        if (ret) {
                DRM_ERROR("ni_populate_smc_tdp_limits failed\n");
                return ret;
        }
        ni_program_response_times(rdev);
        r7xx_start_smc(rdev);
        ret = cypress_notify_smc_display_change(rdev, false);
        if (ret) {
                DRM_ERROR("cypress_notify_smc_display_change failed\n");
                return ret;
        }
        cypress_enable_sclk_control(rdev, true);
        if (eg_pi->memory_transition)
                cypress_enable_mclk_control(rdev, true);
        cypress_start_dpm(rdev);
        if (pi->gfx_clock_gating)
                ni_gfx_clockgating_enable(rdev, true);
        if (pi->mg_clock_gating)
                ni_mg_clockgating_enable(rdev, true);
        if (eg_pi->ls_clock_gating)
                ni_ls_clockgating_enable(rdev, true);

        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                PPSMC_Result result;

                ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, 0xff * 1000);
                if (ret)
                        return ret;
                rdev->irq.dpm_thermal = true;
                radeon_irq_set(rdev);
                result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);

                if (result != PPSMC_Result_OK)
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
        }

        rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

        ni_update_current_ps(rdev, boot_ps);

        return 0;
}

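/*
 * Tear DPM back down in roughly the reverse order of ni_dpm_enable()
 * and leave the hardware on the boot state.
 */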
void ni_dpm_disable(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

        if (!btc_dpm_enabled(rdev))
                return;
        rv770_clear_vc(rdev);
        if (pi->thermal_protection)
                rv770_enable_thermal_protection(rdev, false);
        ni_enable_power_containment(rdev, boot_ps, false);
        ni_enable_smc_cac(rdev, boot_ps, false);
        cypress_enable_spread_spectrum(rdev, false);
        rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
        if (pi->dynamic_pcie_gen2)
                ni_enable_dynamic_pcie_gen2(rdev, false);

        if (rdev->irq.installed &&
            r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
                rdev->irq.dpm_thermal = false;
                radeon_irq_set(rdev);
        }

        if (pi->gfx_clock_gating)
                ni_gfx_clockgating_enable(rdev, false);
        if (pi->mg_clock_gating)
                ni_mg_clockgating_enable(rdev, false);
        if (eg_pi->ls_clock_gating)
                ni_ls_clockgating_enable(rdev, false);
        ni_stop_dpm(rdev);
        btc_reset_to_default(rdev);
        ni_stop_smc(rdev);
        ni_force_switch_to_arb_f0(rdev);

        ni_update_current_ps(rdev, boot_ps);
}

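/*
 * Reprogram the SMC TDP limits for the requested state. The SMC is
 * halted around the table update and the state is re-committed
 * afterwards.
 */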
static int ni_power_control_set_level(struct radeon_device *rdev)
{
        struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
        int ret;

        ret = ni_restrict_performance_levels_before_switch(rdev);
        if (ret)
                return ret;
        ret = rv770_halt_smc(rdev);
        if (ret)
                return ret;
        ret = ni_populate_smc_tdp_limits(rdev, new_ps);
        if (ret)
                return ret;
        ret = rv770_resume_smc(rdev);
        if (ret)
                return ret;
        ret = rv770_set_sw_state(rdev);
        if (ret)
                return ret;

        return 0;
}

int ni_dpm_pre_set_power_state(struct radeon_device *rdev)
{
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
        struct radeon_ps *new_ps = &requested_ps;

        ni_update_requested_ps(rdev, new_ps);

        ni_apply_state_adjust_rules(rdev, &eg_pi->requested_rps);

        return 0;
}

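/*
 * Perform the actual state switch: CAC and power containment are
 * disabled across the transition, the new state is uploaded to the
 * SMC, and UVD clocks are reordered around the engine clock change.
 */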
int ni_dpm_set_power_state(struct radeon_device *rdev)
{
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct radeon_ps *new_ps = &eg_pi->requested_rps;
        struct radeon_ps *old_ps = &eg_pi->current_rps;
        int ret;

        ret = ni_restrict_performance_levels_before_switch(rdev);
        if (ret) {
                DRM_ERROR("ni_restrict_performance_levels_before_switch failed\n");
                return ret;
        }
        ni_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
        ret = ni_enable_power_containment(rdev, new_ps, false);
        if (ret) {
                DRM_ERROR("ni_enable_power_containment failed\n");
                return ret;
        }
        ret = ni_enable_smc_cac(rdev, new_ps, false);
        if (ret) {
                DRM_ERROR("ni_enable_smc_cac failed\n");
                return ret;
        }
        ret = rv770_halt_smc(rdev);
        if (ret) {
                DRM_ERROR("rv770_halt_smc failed\n");
                return ret;
        }
        if (eg_pi->smu_uvd_hs)
                btc_notify_uvd_to_smc(rdev, new_ps);
        ret = ni_upload_sw_state(rdev, new_ps);
        if (ret) {
                DRM_ERROR("ni_upload_sw_state failed\n");
                return ret;
        }
        if (eg_pi->dynamic_ac_timing) {
                ret = ni_upload_mc_reg_table(rdev, new_ps);
                if (ret) {
                        DRM_ERROR("ni_upload_mc_reg_table failed\n");
                        return ret;
                }
        }
        ret = ni_program_memory_timing_parameters(rdev, new_ps);
        if (ret) {
                DRM_ERROR("ni_program_memory_timing_parameters failed\n");
                return ret;
        }
        ret = rv770_resume_smc(rdev);
        if (ret) {
                DRM_ERROR("rv770_resume_smc failed\n");
                return ret;
        }
        ret = rv770_set_sw_state(rdev);
        if (ret) {
                DRM_ERROR("rv770_set_sw_state failed\n");
                return ret;
        }
        ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
        ret = ni_enable_smc_cac(rdev, new_ps, true);
        if (ret) {
                DRM_ERROR("ni_enable_smc_cac failed\n");
                return ret;
        }
        ret = ni_enable_power_containment(rdev, new_ps, true);
        if (ret) {
                DRM_ERROR("ni_enable_power_containment failed\n");
                return ret;
        }

        /* update tdp */
        ret = ni_power_control_set_level(rdev);
        if (ret) {
                DRM_ERROR("ni_power_control_set_level failed\n");
                return ret;
        }

        ret = ni_unrestrict_performance_levels_after_switch(rdev);
        if (ret) {
                DRM_ERROR("ni_unrestrict_performance_levels_after_switch failed\n");
                return ret;
        }

        return 0;
}

void ni_dpm_post_set_power_state(struct radeon_device *rdev)
{
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct radeon_ps *new_ps = &eg_pi->requested_rps;

        ni_update_current_ps(rdev, new_ps);
}

void ni_dpm_reset_asic(struct radeon_device *rdev)
{
        ni_restrict_performance_levels_before_switch(rdev);
        rv770_set_boot_state(rdev);
}

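/* Overlay unions for the various ATOM PowerPlay table revisions. */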
union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
        struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
        struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
        struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
        struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
        struct _ATOM_PPLIB_STATE v1;
        struct _ATOM_PPLIB_STATE_V2 v2;
};

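/*
 * Pull the classification flags and UVD clocks out of a non-clock
 * info entry and remember the boot and UVD states.
 */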
static void ni_parse_pplib_non_clock_info(struct radeon_device *rdev,
                                          struct radeon_ps *rps,
                                          struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
                                          u8 table_rev)
{
        rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
        rps->class = le16_to_cpu(non_clock_info->usClassification);
        rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

        if (table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1) {
                rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
                rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
        } else if (r600_is_uvd_state(rps->class, rps->class2)) {
                rps->vclk = RV770_DEFAULT_VCLK_FREQ;
                rps->dclk = RV770_DEFAULT_DCLK_FREQ;
        } else {
                rps->vclk = 0;
                rps->dclk = 0;
        }

        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                rdev->pm.dpm.boot_ps = rps;
        if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                rdev->pm.dpm.uvd_ps = rps;
}

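/*
 * Fill one performance level from an ATOM clock info entry, patching
 * leakage vddc values (0xff01) and the boot state clocks/voltages
 * with the real defaults.
 */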
static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
                                      struct radeon_ps *rps, int index,
                                      union pplib_clock_info *clock_info)
{
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct ni_ps *ps = ni_get_ps(rps);
        struct rv7xx_pl *pl = &ps->performance_levels[index];
        u16 vddc;

        ps->performance_level_count = index + 1;

        pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
        pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
        pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
        pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;

        pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
        pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
        pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);

        /* patch up vddc if necessary */
        if (pl->vddc == 0xff01) {
                if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
                        pl->vddc = vddc;
        }

        if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
                pi->acpi_vddc = pl->vddc;
                eg_pi->acpi_vddci = pl->vddci;
                pi->acpi_pcie_gen2 = !!(ps->performance_levels[0].flags &
                                        ATOM_PPLIB_R600_FLAGS_PCIEGEN2);
        }

        if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
                eg_pi->ulv.supported = true;
                eg_pi->ulv.pl = pl;
        }

        if (pi->min_vddc_in_table > pl->vddc)
                pi->min_vddc_in_table = pl->vddc;

        if (pi->max_vddc_in_table < pl->vddc)
                pi->max_vddc_in_table = pl->vddc;

        /* patch up boot state */
        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
                u16 vddc, vddci, mvdd;
                radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
                pl->mclk = rdev->clock.default_mclk;
                pl->sclk = rdev->clock.default_sclk;
                pl->vddc = vddc;
                pl->vddci = vddci;
        }

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
            ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
                rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
                rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
                rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
                rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
        }
}

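/*
 * Walk the ATOM PowerPlay state array and build the driver's power
 * state list.
 */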
static int ni_parse_power_table(struct radeon_device *rdev)
{
        struct radeon_mode_info *mode_info = &rdev->mode_info;
        struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
        union pplib_power_state *power_state;
        int i, j;
        union pplib_clock_info *clock_info;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        struct ni_ps *ps;

        if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
                                    &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
                                  power_info->pplib.ucNumStates, GFP_KERNEL);
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
        rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
        rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

        for (i = 0; i < power_info->pplib.ucNumStates; i++) {
                power_state = (union pplib_power_state *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib.usStateArrayOffset) +
                         i * power_info->pplib.ucStateEntrySize);
                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
                         (power_state->v1.ucNonClockStateIndex *
                          power_info->pplib.ucNonClockSize));
                if (power_info->pplib.ucStateEntrySize - 1) {
                        ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
                        if (ps == NULL) {
                                kfree(rdev->pm.dpm.ps);
                                return -ENOMEM;
                        }
                        rdev->pm.dpm.ps[i].ps_priv = ps;
                        ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
                                                      non_clock_info,
                                                      power_info->pplib.ucNonClockSize);
                        for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
                                clock_info = (union pplib_clock_info *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
                                         (power_state->v1.ucClockStateIndices[j] *
                                          power_info->pplib.ucClockInfoSize));
                                ni_parse_pplib_clock_info(rdev,
                                                          &rdev->pm.dpm.ps[i], j,
                                                          clock_info);
                        }
                }
        }
        rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
        return 0;
}

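/*
 * Allocate and initialize the power info structures, parse the BIOS
 * power tables, and pick per-device defaults, including the CAC
 * weight table keyed off the PCI device id.
 */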
int ni_dpm_init(struct radeon_device *rdev)
{
        struct rv7xx_power_info *pi;
        struct evergreen_power_info *eg_pi;
        struct ni_power_info *ni_pi;
        int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
        u16 data_offset, size;
        u8 frev, crev;
        struct atom_clock_dividers dividers;
        int ret;

        ni_pi = kzalloc(sizeof(struct ni_power_info), GFP_KERNEL);
        if (ni_pi == NULL)
                return -ENOMEM;
        rdev->pm.dpm.priv = ni_pi;
        eg_pi = &ni_pi->eg;
        pi = &eg_pi->rv7xx;

        rv770_get_max_vddc(rdev);

        eg_pi->ulv.supported = false;
        pi->acpi_vddc = 0;
        eg_pi->acpi_vddci = 0;
        pi->min_vddc_in_table = 0;
        pi->max_vddc_in_table = 0;

        ret = ni_parse_power_table(rdev);
        if (ret)
                return ret;
        ret = r600_parse_extended_power_table(rdev);
        if (ret)
                return ret;

        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
                kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
        if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
                r600_free_extended_power_table(rdev);
                return -ENOMEM;
        }
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
        rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

        ni_patch_dependency_tables_based_on_leakage(rdev);

        if (rdev->pm.dpm.voltage_response_time == 0)
                rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
        if (rdev->pm.dpm.backbias_response_time == 0)
                rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

        ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
                                             0, false, &dividers);
        /* dividers is only valid when the ATOM call succeeds; fall back
         * to the default reference divider otherwise.
         */
        if (ret == 0)
                pi->ref_div = dividers.ref_div + 1;
        else
                pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

        pi->rlp = RV770_RLP_DFLT;
        pi->rmp = RV770_RMP_DFLT;
        pi->lhp = RV770_LHP_DFLT;
        pi->lmp = RV770_LMP_DFLT;

        eg_pi->ats[0].rlp = RV770_RLP_DFLT;
        eg_pi->ats[0].rmp = RV770_RMP_DFLT;
        eg_pi->ats[0].lhp = RV770_LHP_DFLT;
        eg_pi->ats[0].lmp = RV770_LMP_DFLT;

        eg_pi->ats[1].rlp = BTC_RLP_UVD_DFLT;
        eg_pi->ats[1].rmp = BTC_RMP_UVD_DFLT;
        eg_pi->ats[1].lhp = BTC_LHP_UVD_DFLT;
        eg_pi->ats[1].lmp = BTC_LMP_UVD_DFLT;

        eg_pi->smu_uvd_hs = true;

        if (rdev->pdev->device == 0x6707) {
                pi->mclk_strobe_mode_threshold = 55000;
                pi->mclk_edc_enable_threshold = 55000;
                eg_pi->mclk_edc_wr_enable_threshold = 55000;
        } else {
                pi->mclk_strobe_mode_threshold = 40000;
                pi->mclk_edc_enable_threshold = 40000;
                eg_pi->mclk_edc_wr_enable_threshold = 40000;
        }
        ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;

        pi->voltage_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);

        pi->mvdd_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);

        eg_pi->vddci_control =
                radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);

        if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
                                   &frev, &crev, &data_offset)) {
                pi->sclk_ss = true;
                pi->mclk_ss = true;
                pi->dynamic_ss = true;
        } else {
                pi->sclk_ss = false;
                pi->mclk_ss = false;
                pi->dynamic_ss = true;
        }

        pi->asi = RV770_ASI_DFLT;
        pi->pasi = CYPRESS_HASI_DFLT;
        pi->vrc = CYPRESS_VRC_DFLT;

        pi->power_gating = false;

        pi->gfx_clock_gating = true;

        pi->mg_clock_gating = true;
        pi->mgcgtssm = true;
        eg_pi->ls_clock_gating = false;
        eg_pi->sclk_deep_sleep = false;

        pi->dynamic_pcie_gen2 = true;

        if (pi->gfx_clock_gating &&
            (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
                pi->thermal_protection = true;
        else
                pi->thermal_protection = false;

        pi->display_gap = true;

        pi->dcodt = true;

        pi->ulps = true;

        eg_pi->dynamic_ac_timing = true;
        eg_pi->abm = true;
        eg_pi->mcls = true;
        eg_pi->light_sleep = true;
        eg_pi->memory_transition = true;
#if defined(CONFIG_ACPI)
        eg_pi->pcie_performance_request =
                radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
        eg_pi->pcie_performance_request = false;
#endif

        eg_pi->dll_default_on = false;

        eg_pi->sclk_deep_sleep = false;

        pi->mclk_stutter_mode_threshold = 0;

        pi->sram_end = SMC_RAM_END;

        rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 3;
        rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
        rdev->pm.dpm.dyn_state.min_vddc_for_pcie_gen2 = 900;
        rdev->pm.dpm.dyn_state.valid_sclk_values.count = ARRAY_SIZE(btc_valid_sclk);
        rdev->pm.dpm.dyn_state.valid_sclk_values.values = btc_valid_sclk;
        rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
        rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
        rdev->pm.dpm.dyn_state.sclk_mclk_delta = 12500;

        ni_pi->cac_data.leakage_coefficients.at = 516;
        ni_pi->cac_data.leakage_coefficients.bt = 18;
        ni_pi->cac_data.leakage_coefficients.av = 51;
        ni_pi->cac_data.leakage_coefficients.bv = 2957;

        switch (rdev->pdev->device) {
        case 0x6700:
        case 0x6701:
        case 0x6702:
        case 0x6703:
        case 0x6718:
                ni_pi->cac_weights = &cac_weights_cayman_xt;
                break;
        case 0x6704:
        case 0x6706:
        case 0x6707:
        case 0x6708:
        case 0x6709:
                ni_pi->cac_weights = &cac_weights_cayman_le;
                break;
        case 0x6705:
        case 0x6719:
        case 0x671C:
        case 0x671D:
        default:
                ni_pi->cac_weights = &cac_weights_cayman_pro;
                break;
        }

        if (ni_pi->cac_weights->enable_power_containment_by_default) {
                ni_pi->enable_power_containment = true;
                ni_pi->enable_cac = true;
                ni_pi->enable_sq_ramping = true;
        } else {
                ni_pi->enable_power_containment = false;
                ni_pi->enable_cac = false;
                ni_pi->enable_sq_ramping = false;
        }

        ni_pi->driver_calculate_cac_leakage = false;
        ni_pi->cac_configuration_required = true;

        if (ni_pi->cac_configuration_required) {
                ni_pi->support_cac_long_term_average = true;
                ni_pi->lta_window_size = ni_pi->cac_weights->l2_lta_window_size;
                ni_pi->lts_truncate = ni_pi->cac_weights->lts_truncate;
        } else {
                ni_pi->support_cac_long_term_average = false;
                ni_pi->lta_window_size = 0;
                ni_pi->lts_truncate = 0;
        }

        ni_pi->use_power_boost_limit = true;

        return 0;
}

void ni_dpm_fini(struct radeon_device *rdev)
{
        int i;

        for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
                kfree(rdev->pm.dpm.ps[i].ps_priv);
        }
        kfree(rdev->pm.dpm.ps);
        kfree(rdev->pm.dpm.priv);
        kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
        r600_free_extended_power_table(rdev);
}

void ni_dpm_print_power_state(struct radeon_device *rdev,
                              struct radeon_ps *rps)
{
        struct ni_ps *ps = ni_get_ps(rps);
        struct rv7xx_pl *pl;
        int i;

        r600_dpm_print_class_info(rps->class, rps->class2);
        r600_dpm_print_cap_info(rps->caps);
        printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
        for (i = 0; i < ps->performance_level_count; i++) {
                pl = &ps->performance_levels[i];
                if (rdev->family >= CHIP_TAHITI)
                        printk("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
                               i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
                else
                        printk("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
                               i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
        }
        r600_dpm_print_ps_status(rdev, rps);
}

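/*
 * debugfs support: report the currently selected performance level,
 * read back from TARGET_AND_CURRENT_PROFILE_INDEX.
 */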
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
                                                    struct seq_file *m)
{
        struct radeon_ps *rps = rdev->pm.dpm.current_ps;
        struct ni_ps *ps = ni_get_ps(rps);
        struct rv7xx_pl *pl;
        u32 current_index =
                (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
                CURRENT_STATE_INDEX_SHIFT;

        if (current_index >= ps->performance_level_count) {
                seq_printf(m, "invalid dpm profile %d\n", current_index);
        } else {
                pl = &ps->performance_levels[current_index];
                seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
                seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
                           current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
        }
}

u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);

        if (low)
                return requested_state->performance_levels[0].sclk;
        else
                return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
        struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
        struct ni_ps *requested_state = ni_get_ps(&eg_pi->requested_rps);

        if (low)
                return requested_state->performance_levels[0].mclk;
        else
                return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
