/* drivers/gpu/drm/amd/amdgpu/cz_smc.c */
1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include <linux/firmware.h>
24 #include "drmP.h"
25 #include "amdgpu.h"
26 #include "smu8.h"
27 #include "smu8_fusion.h"
28 #include "cz_ppsmc.h"
29 #include "cz_smumgr.h"
30 #include "smu_ucode_xfer_cz.h"
31 #include "amdgpu_ucode.h"
32
33 #include "smu/smu_8_0_d.h"
34 #include "smu/smu_8_0_sh_mask.h"
35 #include "gca/gfx_8_0_d.h"
36 #include "gca/gfx_8_0_sh_mask.h"
37
/* Read back the value the SMU left in the SRBM2P argument register. */
uint32_t cz_get_argument(struct amdgpu_device *adev)
{
	return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
}
42
43 static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
44 {
45 struct cz_smu_private_data *priv =
46 (struct cz_smu_private_data *)(adev->smu.priv);
47
48 return priv;
49 }
50
/*
 * Post a message to the SMU without waiting for its result.
 *
 * First polls (up to adev->usec_timeout microseconds) for the response
 * register CONTENT field to differ from zero, i.e. for any previously
 * posted message to have completed, then clears the response register
 * and writes the new message id.
 *
 * Returns 0 on success, -EINVAL if the SMU never became ready.
 */
int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp;

	/* wait until the previous message's response shows up (non-zero) */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means wrong logic*/
	if (i == adev->usec_timeout)
		return -EINVAL;

	/* clear the response, then hand the new message to the SMU */
	WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
	WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);

	return 0;
}
73
/*
 * Send a message to the SMU and wait for its response.
 *
 * Returns 0 when the SMU answers PPSMC_Result_OK; -EINVAL when the
 * message could not be posted, the response timed out, or the SMU
 * reported a non-OK result.
 */
int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
{
	int i;
	u32 content = 0, tmp = 0;

	if (cz_send_msg_to_smc_async(adev, msg))
		return -EINVAL;

	/* poll for a non-zero response CONTENT for this message */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
				SMU_MP1_SRBM2P_RESP_0, CONTENT);
		if (content != tmp)
			break;
		udelay(1);
	}

	/* timeout means wrong logic*/
	if (i == adev->usec_timeout)
		return -EINVAL;

	if (PPSMC_Result_OK != tmp) {
		dev_err(adev->dev, "SMC Failed to send Message.\n");
		return -EINVAL;
	}

	return 0;
}
101
/*
 * Stage a parameter in the argument register, then post the message
 * asynchronously (no wait for completion).
 */
int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc_async(adev, msg);
}
108
/*
 * Stage a parameter in the argument register, then send the message and
 * wait for the SMU's response.
 */
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						u16 msg, u32 parameter)
{
	WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
	return cz_send_msg_to_smc(adev, msg);
}
115
116 static int cz_set_smc_sram_address(struct amdgpu_device *adev,
117 u32 smc_address, u32 limit)
118 {
119 if (smc_address & 3)
120 return -EINVAL;
121 if ((smc_address + 3) > limit)
122 return -EINVAL;
123
124 WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
125
126 return 0;
127 }
128
129 int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
130 u32 *value, u32 limit)
131 {
132 int ret;
133
134 ret = cz_set_smc_sram_address(adev, smc_address, limit);
135 if (ret)
136 return ret;
137
138 *value = RREG32(mmMP0PUB_IND_DATA_0);
139
140 return 0;
141 }
142
143 int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
144 u32 value, u32 limit)
145 {
146 int ret;
147
148 ret = cz_set_smc_sram_address(adev, smc_address, limit);
149 if (ret)
150 return ret;
151
152 WREG32(mmMP0PUB_IND_DATA_0, value);
153
154 return 0;
155 }
156
/*
 * Ask the SMU to load all firmwares described in the TOC buffer.
 *
 * Clears the UcodeLoadStatus word in the SMU firmware header, hands the
 * SMU the TOC buffer's GPU address, then executes the ARAM-save,
 * power-profiling and bootup job chains in that order. The individual
 * message results are not checked here; completion is verified later by
 * cz_smu_check_fw_load_finish().
 */
static int cz_smu_request_load_fw(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	/* reset the load-status word before kicking off a new load */
	cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);

	/*prepare toc buffers*/
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrHi,
				priv->toc_buffer.mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DriverDramAddrLo,
				priv->toc_buffer.mc_addr_low);
	cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);

	/*execute jobs*/
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_aram);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_power_profiling_index);

	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_initialize_index);

	return 0;
}
190
/*
 * Check if the FW has been loaded; the SMU will not return if loading
 * has not finished.
 *
 * Polls the UcodeLoadStatus word in the SMU firmware header until every
 * bit in fw_mask is set, or adev->usec_timeout microseconds elapse.
 * Returns 0 when all requested firmwares report loaded, -EINVAL on
 * timeout.
 */
static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
				uint32_t fw_mask)
{
	int i;
	/* indirect-access address of UcodeLoadStatus inside SMU SRAM */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			SMU8_FIRMWARE_HEADER_LOCATION +
			offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	WREG32(mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev,
		"SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
		fw_mask, RREG32(mmMP0PUB_IND_DATA));
		return -EINVAL;
	}

	return 0;
}
220
221 /*
222 * interfaces for different ip blocks to check firmware loading status
223 * 0 for success otherwise failed
224 */
225 static int cz_smu_check_finished(struct amdgpu_device *adev,
226 enum AMDGPU_UCODE_ID id)
227 {
228 switch (id) {
229 case AMDGPU_UCODE_ID_SDMA0:
230 if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
231 return 0;
232 break;
233 case AMDGPU_UCODE_ID_SDMA1:
234 if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
235 return 0;
236 break;
237 case AMDGPU_UCODE_ID_CP_CE:
238 if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
239 return 0;
240 break;
241 case AMDGPU_UCODE_ID_CP_PFP:
242 if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
243 return 0;
244 case AMDGPU_UCODE_ID_CP_ME:
245 if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
246 return 0;
247 break;
248 case AMDGPU_UCODE_ID_CP_MEC1:
249 if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
250 return 0;
251 break;
252 case AMDGPU_UCODE_ID_CP_MEC2:
253 if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
254 return 0;
255 break;
256 case AMDGPU_UCODE_ID_RLC_G:
257 if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
258 return 0;
259 break;
260 case AMDGPU_UCODE_ID_MAXIMUM:
261 default:
262 break;
263 }
264
265 return 1;
266 }
267
/*
 * Hand-load the MEC (compute micro engine) firmware: halt both MEC
 * pipes, program the instruction-cache base control, then point the
 * cache base lo/hi registers at the firmware's GPU address.
 *
 * Returns -EINVAL if the MEC firmware was never fetched.
 */
static int cz_load_mec_firmware(struct amdgpu_device *adev)
{
	struct amdgpu_firmware_info *ucode =
				&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	uint32_t reg_data;
	uint32_t tmp;

	if (ucode->fw == NULL)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* instruction cache: VMID 0, ATC off, cache policy 0, MTYPE 1 */
	tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);

	/* mask keeps only the bits the hardware defines for the base */
	reg_data = lower_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	WREG32(mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(ucode->mc_addr) &
			REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	WREG32(mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
301
/*
 * Kick off SMU-managed firmware loading and wait for completion.
 *
 * Requests the load, verifies the expected set of firmwares reported
 * loaded (Stoney has no SDMA1/MEC2), manually loads MEC firmware for
 * Carrizo/Stoney, then records the loaded-firmware flags consumed by
 * cz_smu_check_finished().
 *
 * Returns 0 on success or a negative error code.
 */
int cz_smu_start(struct amdgpu_device *adev)
{
	int ret = 0;

	uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
				UCODE_ID_SDMA0_MASK |
				UCODE_ID_SDMA1_MASK |
				UCODE_ID_CP_CE_MASK |
				UCODE_ID_CP_ME_MASK |
				UCODE_ID_CP_PFP_MASK |
				UCODE_ID_CP_MEC_JT1_MASK |
				UCODE_ID_CP_MEC_JT2_MASK;

	/* Stoney has a single SDMA engine and a single MEC jump table */
	if (adev->asic_type == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	cz_smu_request_load_fw(adev);
	ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
	if (ret)
		return ret;

	/* manually load MEC firmware for CZ */
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		ret = cz_load_mec_firmware(adev);
		if (ret) {
			dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
			return ret;
		}
	}

	/* setup fw load flag */
	adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
				AMDGPU_SDMA1_UCODE_LOADED |
				AMDGPU_CPCE_UCODE_LOADED |
				AMDGPU_CPPFP_UCODE_LOADED |
				AMDGPU_CPME_UCODE_LOADED |
				AMDGPU_CPMEC1_UCODE_LOADED |
				AMDGPU_CPMEC2_UCODE_LOADED |
				AMDGPU_CPRLC_UCODE_LOADED;

	if (adev->asic_type == CHIP_STONEY)
		adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);

	return ret;
}
347
348 static uint32_t cz_convert_fw_type(uint32_t fw_type)
349 {
350 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
351
352 switch (fw_type) {
353 case UCODE_ID_SDMA0:
354 result = AMDGPU_UCODE_ID_SDMA0;
355 break;
356 case UCODE_ID_SDMA1:
357 result = AMDGPU_UCODE_ID_SDMA1;
358 break;
359 case UCODE_ID_CP_CE:
360 result = AMDGPU_UCODE_ID_CP_CE;
361 break;
362 case UCODE_ID_CP_PFP:
363 result = AMDGPU_UCODE_ID_CP_PFP;
364 break;
365 case UCODE_ID_CP_ME:
366 result = AMDGPU_UCODE_ID_CP_ME;
367 break;
368 case UCODE_ID_CP_MEC_JT1:
369 case UCODE_ID_CP_MEC_JT2:
370 result = AMDGPU_UCODE_ID_CP_MEC1;
371 break;
372 case UCODE_ID_RLC_G:
373 result = AMDGPU_UCODE_ID_RLC_G;
374 break;
375 default:
376 DRM_ERROR("UCode type is out of range!");
377 }
378
379 return result;
380 }
381
382 static uint8_t cz_smu_translate_firmware_enum_to_arg(
383 enum cz_scratch_entry firmware_enum)
384 {
385 uint8_t ret = 0;
386
387 switch (firmware_enum) {
388 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
389 ret = UCODE_ID_SDMA0;
390 break;
391 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
392 ret = UCODE_ID_SDMA1;
393 break;
394 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
395 ret = UCODE_ID_CP_CE;
396 break;
397 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
398 ret = UCODE_ID_CP_PFP;
399 break;
400 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
401 ret = UCODE_ID_CP_ME;
402 break;
403 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
404 ret = UCODE_ID_CP_MEC_JT1;
405 break;
406 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
407 ret = UCODE_ID_CP_MEC_JT2;
408 break;
409 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
410 ret = UCODE_ID_GMCON_RENG;
411 break;
412 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
413 ret = UCODE_ID_RLC_G;
414 break;
415 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
416 ret = UCODE_ID_RLC_SCRATCH;
417 break;
418 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
419 ret = UCODE_ID_RLC_SRM_ARAM;
420 break;
421 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
422 ret = UCODE_ID_RLC_SRM_DRAM;
423 break;
424 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
425 ret = UCODE_ID_DMCU_ERAM;
426 break;
427 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
428 ret = UCODE_ID_DMCU_IRAM;
429 break;
430 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
431 ret = TASK_ARG_INIT_MM_PWR_LOG;
432 break;
433 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
434 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
435 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
436 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
437 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
438 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
439 ret = TASK_ARG_REG_MMIO;
440 break;
441 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
442 ret = TASK_ARG_INIT_CLK_TABLE;
443 break;
444 }
445
446 return ret;
447 }
448
/*
 * Fill one driver-buffer entry describing a firmware image: its GPU
 * address, byte size and SMU-side firmware id.
 *
 * For the MEC jump tables the entry points at the jump-table region
 * inside the MEC image (jt_offset/jt_size from the firmware header)
 * instead of the whole image.
 *
 * Returns -EINVAL if the firmware was never fetched.
 */
static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
				enum cz_scratch_entry firmware_enum,
				struct cz_buffer_entry *entry)
{
	uint64_t gpu_addr;
	uint32_t data_size;
	uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
	const struct gfx_firmware_header_v1_0 *header;

	if (ucode->fw == NULL)
		return -EINVAL;

	gpu_addr = ucode->mc_addr;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	data_size = le32_to_cpu(header->header.ucode_size_bytes);

	/* jump tables live inside the MEC image; narrow to that region */
	if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
	    (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
		data_size = le32_to_cpu(header->jt_size) << 2;
	}

	entry->mc_addr_low = lower_32_bits(gpu_addr);
	entry->mc_addr_high = upper_32_bits(gpu_addr);
	entry->data_size = data_size;
	entry->firmware_ID = firmware_enum;

	return 0;
}
480
481 static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
482 enum cz_scratch_entry scratch_type,
483 uint32_t size_in_byte,
484 struct cz_buffer_entry *entry)
485 {
486 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
487 uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
488 priv->smu_buffer.mc_addr_low;
489 mc_addr += size_in_byte;
490
491 priv->smu_buffer_used_bytes += size_in_byte;
492 entry->data_size = size_in_byte;
493 entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
494 entry->mc_addr_low = lower_32_bits(mc_addr);
495 entry->mc_addr_high = upper_32_bits(mc_addr);
496 entry->firmware_ID = scratch_type;
497
498 return 0;
499 }
500
/*
 * Append a UCODE_LOAD task to the TOC, pointing it at the firmware
 * described by firmware_enum in the driver-buffer table. is_last marks
 * the end of the current job chain; otherwise the task links to the
 * next slot.
 *
 * Returns -EINVAL if no driver-buffer entry matches firmware_enum.
 */
static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
						enum cz_scratch_entry firmware_enum,
						bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	/* claim the next free task slot; the counter is bumped here */
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	/* look up the firmware's address/size recorded at init time */
	for (i = 0; i < priv->driver_buffer_length; i++)
		if (priv->driver_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->driver_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->driver_buffer[i].mc_addr_low;
	task->addr.high = priv->driver_buffer[i].mc_addr_high;
	task->size_bytes = priv->driver_buffer[i].data_size;

	return 0;
}
529
/*
 * Append a task of the given type (save/load/initialize) to the TOC,
 * pointing it at the scratch-buffer entry matching firmware_enum.
 * is_last terminates the current job chain.
 *
 * For the IH-registers entry the scratch area's command word is also
 * initialized so the SMU restores those registers on load.
 *
 * Returns -EINVAL if no scratch-buffer entry matches firmware_enum.
 */
static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
						enum cz_scratch_entry firmware_enum,
						uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
	/* claim the next free task slot; the counter is bumped here */
	struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
	task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = priv->scratch_buffer[i].mc_addr_low;
	task->addr.high = priv->scratch_buffer[i].mc_addr_high;
	task->size_bytes = priv->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
		/* tell the SMU to replay these registers when loading */
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
565
566 static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
567 {
568 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
569 priv->toc_entry_aram = priv->toc_entry_used_count;
570 cz_smu_populate_single_scratch_task(adev,
571 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
572 TASK_TYPE_UCODE_SAVE, true);
573
574 return 0;
575 }
576
577 static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
578 {
579 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
580 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
581
582 toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
583 cz_smu_populate_single_scratch_task(adev,
584 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
585 TASK_TYPE_UCODE_SAVE, false);
586 cz_smu_populate_single_scratch_task(adev,
587 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
588 TASK_TYPE_UCODE_SAVE, true);
589
590 return 0;
591 }
592
/*
 * Build the GFX-restore job executed when leaving the VDDGFX
 * power-gated state: reload the GFX microcode (CE/PFP/ME/MEC/RLC),
 * then restore the saved RLC scratch, ARAM and DRAM contents.
 */
static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
	struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;

	/* populate ucode */
	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		/* Stoney has one MEC jump table, so JT1 is loaded twice */
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
	}

	/* populate scratch */
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);
	cz_smu_populate_single_scratch_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
634
635 static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
636 {
637 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
638
639 priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
640
641 cz_smu_populate_single_scratch_task(adev,
642 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
643 TASK_TYPE_INITIALIZE, true);
644 return 0;
645 }
646
/*
 * Build the bootup job: load every firmware the SMU manages (SDMA,
 * CE/PFP/ME, MEC jump tables, RLC). Stoney lacks SDMA1 and a second
 * MEC jump table, so SDMA0/JT1 are queued twice in its place.
 */
static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
{
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	priv->toc_entry_initialize_index = priv->toc_entry_used_count;

	if (adev->firmware.smu_load) {
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		if (adev->asic_type == CHIP_STONEY) {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
		} else {
			cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
		}
		cz_smu_populate_single_ucode_load_task(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
	}

	return 0;
}
684
685 static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
686 {
687 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
688
689 priv->toc_entry_clock_table = priv->toc_entry_used_count;
690
691 cz_smu_populate_single_scratch_task(adev,
692 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
693 TASK_TYPE_INITIALIZE, true);
694
695 return 0;
696 }
697
698 static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
699 {
700 int i;
701 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
702 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
703
704 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
705 toc->JobList[i] = (uint8_t)IGNORE_JOB;
706
707 return 0;
708 }
709
/*
 * cz smu uninitialization: drop the TOC and internal SMU buffer objects,
 * free the private data, and tear down the common firmware BO when the
 * SMU was doing the loading.
 */
int cz_smu_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_unref(&adev->smu.toc_buf);
	amdgpu_bo_unref(&adev->smu.smu_buf);
	kfree(adev->smu.priv);
	adev->smu.priv = NULL;
	if (adev->firmware.smu_load)
		amdgpu_ucode_fini_bo(adev);

	return 0;
}
724
/*
 * Have the SMU copy its clock table into the driver's scratch buffer
 * and hand back a pointer to it via *table.
 *
 * The caller receives the kernel address of the CLKTABLE scratch entry;
 * ownership stays with the SMU private data (do not free).
 *
 * Returns -EINVAL if no CLKTABLE scratch entry exists.
 */
int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	*table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;

	/* prepare buffer for pptable */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual downloading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);

	return 0;
}
758
/*
 * Push the (driver-modified) clock table from the CLKTABLE scratch
 * entry back into the SMU.
 *
 * Returns -EINVAL if no CLKTABLE scratch entry exists.
 */
int cz_smu_upload_pptable(struct amdgpu_device *adev)
{
	uint8_t i;
	struct cz_smu_private_data *priv = cz_smu_get_priv(adev);

	for (i = 0; i < priv->scratch_buffer_length; i++)
		if (priv->scratch_buffer[i].firmware_ID ==
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
			break;

	if (i >= priv->scratch_buffer_length) {
		dev_err(adev->dev, "Invalid Scratch Type\n");
		return -EINVAL;
	}

	/* prepare SMU */
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrHi,
				priv->scratch_buffer[i].mc_addr_high);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetClkTableAddrLo,
				priv->scratch_buffer[i].mc_addr_low);
	cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_ExecuteJob,
				priv->toc_entry_clock_table);

	/* actual uploading */
	cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);

	return 0;
}
790
/*
 * cz smumgr functions initialization
 */
static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
	.check_fw_load_finish = cz_smu_check_finished,
	.request_smu_load_fw = NULL,		/* not implemented for CZ */
	.request_smu_specific_fw = NULL,	/* not implemented for CZ */
};
799
/*
 * cz smu initialization
 *
 * Allocates/pins/maps the TOC buffer and the internal SMU scratch
 * buffer, records the firmware images the SMU must load, carves the
 * scratch buffer into typed entries, then builds every TOC job chain
 * and installs the smumgr callbacks.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): the early-return error paths after the first
 * amdgpu_bo_create() do not free priv or the already-created toc_buf,
 * and smu_init_failed unrefs buffers that may still be pinned —
 * presumably cz_smu_fini() or device teardown cleans these up; verify
 * against the callers.
 */
int cz_smu_init(struct amdgpu_device *adev)
{
	int ret = -EINVAL;
	uint64_t mc_addr = 0;
	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
	void *toc_buf_ptr = NULL;
	void *smu_buf_ptr = NULL;

	struct cz_smu_private_data *priv =
		kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	/* allocate firmware buffers */
	if (adev->firmware.smu_load)
		amdgpu_ucode_init_bo(adev);

	adev->smu.priv = priv;
	adev->smu.fw_flags = 0;
	priv->toc_buffer.data_size = 4096;

	/* scratch buffer must hold every 32-byte-aligned scratch entry */
	priv->smu_buffer.data_size =
		ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
		ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
		ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
		ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);

	/* prepare toc buffer and smu buffer:
	* 1. create amdgpu_bo for toc buffer and smu buffer
	* 2. pin mc address
	* 3. map kernel virtual address
	*/
	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				toc_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
				true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				smu_buf);

	if (ret) {
		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
		return ret;
	}

	/* toc buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.toc_buf);
		amdgpu_bo_unref(&adev->smu.toc_buf);
		dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.toc_buf);

	priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->toc_buffer.kaddr = toc_buf_ptr;

	/* smu buffer reserve/pin/map */
	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
	if (ret) {
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
	if (ret) {
		amdgpu_bo_unreserve(adev->smu.smu_buf);
		amdgpu_bo_unref(&adev->smu.smu_buf);
		dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
		return ret;
	}

	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
	if (ret)
		goto smu_init_failed;

	amdgpu_bo_unreserve(adev->smu.smu_buf);

	priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
	priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
	priv->smu_buffer.kaddr = smu_buf_ptr;

	/* record every firmware image the SMU is expected to load;
	 * Stoney substitutes SDMA0/MEC_JT1 for the missing SDMA1/JT2 */
	if (adev->firmware.smu_load) {
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;

		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
		if (adev->asic_type == CHIP_STONEY) {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		} else {
			if (cz_smu_populate_single_firmware_entry(adev,
					CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
					&priv->driver_buffer[priv->driver_buffer_length++]))
				goto smu_init_failed;
		}
		if (cz_smu_populate_single_firmware_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
				&priv->driver_buffer[priv->driver_buffer_length++]))
			goto smu_init_failed;
	}

	/* carve the scratch buffer into the five typed entries */
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				sizeof(struct SMU8_MultimediaPowerLogData),
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;
	if (cz_smu_populate_single_scratch_entry(adev,
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				sizeof(struct SMU8_Fusion_ClkTable),
				&priv->scratch_buffer[priv->scratch_buffer_length++]))
		goto smu_init_failed;

	/* build every TOC job chain */
	cz_smu_initialize_toc_empty_job_list(adev);
	cz_smu_construct_toc_for_rlc_aram_save(adev);
	cz_smu_construct_toc_for_vddgfx_enter(adev);
	cz_smu_construct_toc_for_vddgfx_exit(adev);
	cz_smu_construct_toc_for_power_profiling(adev);
	cz_smu_construct_toc_for_bootup(adev);
	cz_smu_construct_toc_for_clock_table(adev);
	/* init the smumgr functions */
	adev->smu.smumgr_funcs = &cz_smumgr_funcs;

	return 0;

smu_init_failed:
	amdgpu_bo_unref(toc_buf);
	amdgpu_bo_unref(smu_buf);

	return ret;
}