/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/delay.h>

#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smu8.h"
#include "smu8_fusion.h"
#include "cz_smumgr.h"
#include "cz_ppsmc.h"
#include "smu_ucode_xfer_cz.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
40 #define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
42 static enum cz_scratch_entry firmware_list
[] = {
43 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
,
44 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1
,
45 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
,
46 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
,
47 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
,
48 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
,
49 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
,
50 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
,
53 static int cz_smum_get_argument(struct pp_smumgr
*smumgr
)
55 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
58 return cgs_read_register(smumgr
->device
,
59 mmSMU_MP1_SRBM2P_ARG_0
);
62 static int cz_send_msg_to_smc_async(struct pp_smumgr
*smumgr
,
67 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
70 result
= SMUM_WAIT_FIELD_UNEQUAL(smumgr
,
71 SMU_MP1_SRBM2P_RESP_0
, CONTENT
, 0);
73 printk(KERN_ERR
"[ powerplay ] cz_send_msg_to_smc_async failed\n");
77 cgs_write_register(smumgr
->device
, mmSMU_MP1_SRBM2P_RESP_0
, 0);
78 cgs_write_register(smumgr
->device
, mmSMU_MP1_SRBM2P_MSG_0
, msg
);
83 /* Send a message to the SMC, and wait for its response.*/
84 static int cz_send_msg_to_smc(struct pp_smumgr
*smumgr
, uint16_t msg
)
88 result
= cz_send_msg_to_smc_async(smumgr
, msg
);
92 result
= SMUM_WAIT_FIELD_UNEQUAL(smumgr
,
93 SMU_MP1_SRBM2P_RESP_0
, CONTENT
, 0);
101 static int cz_set_smc_sram_address(struct pp_smumgr
*smumgr
,
102 uint32_t smc_address
, uint32_t limit
)
104 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
107 if (0 != (3 & smc_address
)) {
108 printk(KERN_ERR
"[ powerplay ] SMC address must be 4 byte aligned\n");
112 if (limit
<= (smc_address
+ 3)) {
113 printk(KERN_ERR
"[ powerplay ] SMC address beyond the SMC RAM area\n");
117 cgs_write_register(smumgr
->device
, mmMP0PUB_IND_INDEX_0
,
118 SMN_MP1_SRAM_START_ADDR
+ smc_address
);
123 static int cz_write_smc_sram_dword(struct pp_smumgr
*smumgr
,
124 uint32_t smc_address
, uint32_t value
, uint32_t limit
)
128 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
131 result
= cz_set_smc_sram_address(smumgr
, smc_address
, limit
);
132 cgs_write_register(smumgr
->device
, mmMP0PUB_IND_DATA_0
, value
);
137 static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr
*smumgr
,
138 uint16_t msg
, uint32_t parameter
)
140 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
143 cgs_write_register(smumgr
->device
, mmSMU_MP1_SRBM2P_ARG_0
, parameter
);
145 return cz_send_msg_to_smc(smumgr
, msg
);
148 static int cz_request_smu_load_fw(struct pp_smumgr
*smumgr
)
150 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)(smumgr
->backend
);
152 uint32_t smc_address
;
154 if (!smumgr
->reload_fw
) {
155 printk(KERN_INFO
"[ powerplay ] skip reloading...\n");
159 smc_address
= SMU8_FIRMWARE_HEADER_LOCATION
+
160 offsetof(struct SMU8_Firmware_Header
, UcodeLoadStatus
);
162 cz_write_smc_sram_dword(smumgr
, smc_address
, 0, smc_address
+4);
164 cz_send_msg_to_smc_with_parameter(smumgr
,
165 PPSMC_MSG_DriverDramAddrHi
,
166 cz_smu
->toc_buffer
.mc_addr_high
);
168 cz_send_msg_to_smc_with_parameter(smumgr
,
169 PPSMC_MSG_DriverDramAddrLo
,
170 cz_smu
->toc_buffer
.mc_addr_low
);
172 cz_send_msg_to_smc(smumgr
, PPSMC_MSG_InitJobs
);
174 cz_send_msg_to_smc_with_parameter(smumgr
,
175 PPSMC_MSG_ExecuteJob
,
176 cz_smu
->toc_entry_aram
);
177 cz_send_msg_to_smc_with_parameter(smumgr
, PPSMC_MSG_ExecuteJob
,
178 cz_smu
->toc_entry_power_profiling_index
);
180 result
= cz_send_msg_to_smc_with_parameter(smumgr
,
181 PPSMC_MSG_ExecuteJob
,
182 cz_smu
->toc_entry_initialize_index
);
187 static int cz_check_fw_load_finish(struct pp_smumgr
*smumgr
,
191 uint32_t index
= SMN_MP1_SRAM_START_ADDR
+
192 SMU8_FIRMWARE_HEADER_LOCATION
+
193 offsetof(struct SMU8_Firmware_Header
, UcodeLoadStatus
);
195 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
198 return cgs_read_register(smumgr
->device
,
199 mmSMU_MP1_SRBM2P_ARG_0
);
201 cgs_write_register(smumgr
->device
, mmMP0PUB_IND_INDEX
, index
);
203 for (i
= 0; i
< smumgr
->usec_timeout
; i
++) {
205 (cgs_read_register(smumgr
->device
, mmMP0PUB_IND_DATA
) & firmware
))
210 if (i
>= smumgr
->usec_timeout
) {
211 printk(KERN_ERR
"[ powerplay ] SMU check loaded firmware failed.\n");
218 static int cz_load_mec_firmware(struct pp_smumgr
*smumgr
)
223 struct cgs_firmware_info info
= {0};
224 struct cz_smumgr
*cz_smu
;
226 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
229 cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
230 ret
= cgs_get_firmware_info(smumgr
->device
,
231 CGS_UCODE_ID_CP_MEC
, &info
);
236 /* Disable MEC parsing/prefetching */
237 tmp
= cgs_read_register(smumgr
->device
,
239 tmp
= SMUM_SET_FIELD(tmp
, CP_MEC_CNTL
, MEC_ME1_HALT
, 1);
240 tmp
= SMUM_SET_FIELD(tmp
, CP_MEC_CNTL
, MEC_ME2_HALT
, 1);
241 cgs_write_register(smumgr
->device
, mmCP_MEC_CNTL
, tmp
);
243 tmp
= cgs_read_register(smumgr
->device
,
244 mmCP_CPC_IC_BASE_CNTL
);
246 tmp
= SMUM_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, VMID
, 0);
247 tmp
= SMUM_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, ATC
, 0);
248 tmp
= SMUM_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, CACHE_POLICY
, 0);
249 tmp
= SMUM_SET_FIELD(tmp
, CP_CPC_IC_BASE_CNTL
, MTYPE
, 1);
250 cgs_write_register(smumgr
->device
, mmCP_CPC_IC_BASE_CNTL
, tmp
);
252 reg_data
= smu_lower_32_bits(info
.mc_addr
) &
253 SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO
, IC_BASE_LO
);
254 cgs_write_register(smumgr
->device
, mmCP_CPC_IC_BASE_LO
, reg_data
);
256 reg_data
= smu_upper_32_bits(info
.mc_addr
) &
257 SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI
, IC_BASE_HI
);
258 cgs_write_register(smumgr
->device
, mmCP_CPC_IC_BASE_HI
, reg_data
);
263 static int cz_start_smu(struct pp_smumgr
*smumgr
)
266 uint32_t fw_to_check
= UCODE_ID_RLC_G_MASK
|
267 UCODE_ID_SDMA0_MASK
|
268 UCODE_ID_SDMA1_MASK
|
269 UCODE_ID_CP_CE_MASK
|
270 UCODE_ID_CP_ME_MASK
|
271 UCODE_ID_CP_PFP_MASK
|
272 UCODE_ID_CP_MEC_JT1_MASK
|
273 UCODE_ID_CP_MEC_JT2_MASK
;
275 if (smumgr
->chip_id
== CHIP_STONEY
)
276 fw_to_check
&= ~(UCODE_ID_SDMA1_MASK
| UCODE_ID_CP_MEC_JT2_MASK
);
278 cz_request_smu_load_fw(smumgr
);
279 cz_check_fw_load_finish(smumgr
, fw_to_check
);
281 ret
= cz_load_mec_firmware(smumgr
);
283 printk(KERN_ERR
"[ powerplay ] Mec Firmware load failed\n");
288 static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr
*smumgr
,
289 enum cz_scratch_entry firmware_enum
)
293 switch (firmware_enum
) {
294 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
:
295 ret
= UCODE_ID_SDMA0
;
297 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1
:
298 if (smumgr
->chip_id
== CHIP_STONEY
)
299 ret
= UCODE_ID_SDMA0
;
301 ret
= UCODE_ID_SDMA1
;
303 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
:
304 ret
= UCODE_ID_CP_CE
;
306 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
:
307 ret
= UCODE_ID_CP_PFP
;
309 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
:
310 ret
= UCODE_ID_CP_ME
;
312 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
:
313 ret
= UCODE_ID_CP_MEC_JT1
;
315 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
:
316 if (smumgr
->chip_id
== CHIP_STONEY
)
317 ret
= UCODE_ID_CP_MEC_JT1
;
319 ret
= UCODE_ID_CP_MEC_JT2
;
321 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG
:
322 ret
= UCODE_ID_GMCON_RENG
;
324 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
:
325 ret
= UCODE_ID_RLC_G
;
327 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
:
328 ret
= UCODE_ID_RLC_SCRATCH
;
330 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
:
331 ret
= UCODE_ID_RLC_SRM_ARAM
;
333 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
:
334 ret
= UCODE_ID_RLC_SRM_DRAM
;
336 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM
:
337 ret
= UCODE_ID_DMCU_ERAM
;
339 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM
:
340 ret
= UCODE_ID_DMCU_IRAM
;
342 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING
:
343 ret
= TASK_ARG_INIT_MM_PWR_LOG
;
345 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT
:
346 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING
:
347 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS
:
348 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT
:
349 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START
:
350 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS
:
351 ret
= TASK_ARG_REG_MMIO
;
353 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
:
354 ret
= TASK_ARG_INIT_CLK_TABLE
;
361 static enum cgs_ucode_id
cz_convert_fw_type_to_cgs(uint32_t fw_type
)
363 enum cgs_ucode_id result
= CGS_UCODE_ID_MAXIMUM
;
367 result
= CGS_UCODE_ID_SDMA0
;
370 result
= CGS_UCODE_ID_SDMA1
;
373 result
= CGS_UCODE_ID_CP_CE
;
375 case UCODE_ID_CP_PFP
:
376 result
= CGS_UCODE_ID_CP_PFP
;
379 result
= CGS_UCODE_ID_CP_ME
;
381 case UCODE_ID_CP_MEC_JT1
:
382 result
= CGS_UCODE_ID_CP_MEC_JT1
;
384 case UCODE_ID_CP_MEC_JT2
:
385 result
= CGS_UCODE_ID_CP_MEC_JT2
;
388 result
= CGS_UCODE_ID_RLC_G
;
397 static int cz_smu_populate_single_scratch_task(
398 struct pp_smumgr
*smumgr
,
399 enum cz_scratch_entry fw_enum
,
400 uint8_t type
, bool is_last
)
403 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
404 struct TOC
*toc
= (struct TOC
*)cz_smu
->toc_buffer
.kaddr
;
405 struct SMU_Task
*task
= &toc
->tasks
[cz_smu
->toc_entry_used_count
++];
408 task
->arg
= cz_translate_firmware_enum_to_arg(smumgr
, fw_enum
);
409 task
->next
= is_last
? END_OF_TASK_LIST
: cz_smu
->toc_entry_used_count
;
411 for (i
= 0; i
< cz_smu
->scratch_buffer_length
; i
++)
412 if (cz_smu
->scratch_buffer
[i
].firmware_ID
== fw_enum
)
415 if (i
>= cz_smu
->scratch_buffer_length
) {
416 printk(KERN_ERR
"[ powerplay ] Invalid Firmware Type\n");
420 task
->addr
.low
= cz_smu
->scratch_buffer
[i
].mc_addr_low
;
421 task
->addr
.high
= cz_smu
->scratch_buffer
[i
].mc_addr_high
;
422 task
->size_bytes
= cz_smu
->scratch_buffer
[i
].data_size
;
424 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS
== fw_enum
) {
425 struct cz_ih_meta_data
*pIHReg_restore
=
426 (struct cz_ih_meta_data
*)cz_smu
->scratch_buffer
[i
].kaddr
;
427 pIHReg_restore
->command
=
428 METADATA_CMD_MODE0
| METADATA_PERFORM_ON_LOAD
;
434 static int cz_smu_populate_single_ucode_load_task(
435 struct pp_smumgr
*smumgr
,
436 enum cz_scratch_entry fw_enum
,
440 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
441 struct TOC
*toc
= (struct TOC
*)cz_smu
->toc_buffer
.kaddr
;
442 struct SMU_Task
*task
= &toc
->tasks
[cz_smu
->toc_entry_used_count
++];
444 task
->type
= TASK_TYPE_UCODE_LOAD
;
445 task
->arg
= cz_translate_firmware_enum_to_arg(smumgr
, fw_enum
);
446 task
->next
= is_last
? END_OF_TASK_LIST
: cz_smu
->toc_entry_used_count
;
448 for (i
= 0; i
< cz_smu
->driver_buffer_length
; i
++)
449 if (cz_smu
->driver_buffer
[i
].firmware_ID
== fw_enum
)
452 if (i
>= cz_smu
->driver_buffer_length
) {
453 printk(KERN_ERR
"[ powerplay ] Invalid Firmware Type\n");
457 task
->addr
.low
= cz_smu
->driver_buffer
[i
].mc_addr_low
;
458 task
->addr
.high
= cz_smu
->driver_buffer
[i
].mc_addr_high
;
459 task
->size_bytes
= cz_smu
->driver_buffer
[i
].data_size
;
464 static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr
*smumgr
)
466 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
468 cz_smu
->toc_entry_aram
= cz_smu
->toc_entry_used_count
;
469 cz_smu_populate_single_scratch_task(smumgr
,
470 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
,
471 TASK_TYPE_UCODE_SAVE
, true);
476 static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr
*smumgr
)
479 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
480 struct TOC
*toc
= (struct TOC
*)cz_smu
->toc_buffer
.kaddr
;
482 for (i
= 0; i
< NUM_JOBLIST_ENTRIES
; i
++)
483 toc
->JobList
[i
] = (uint8_t)IGNORE_JOB
;
488 static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr
*smumgr
)
490 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
491 struct TOC
*toc
= (struct TOC
*)cz_smu
->toc_buffer
.kaddr
;
493 toc
->JobList
[JOB_GFX_SAVE
] = (uint8_t)cz_smu
->toc_entry_used_count
;
494 cz_smu_populate_single_scratch_task(smumgr
,
495 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
,
496 TASK_TYPE_UCODE_SAVE
, false);
498 cz_smu_populate_single_scratch_task(smumgr
,
499 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
,
500 TASK_TYPE_UCODE_SAVE
, true);
506 static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr
*smumgr
)
508 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
509 struct TOC
*toc
= (struct TOC
*)cz_smu
->toc_buffer
.kaddr
;
511 toc
->JobList
[JOB_GFX_RESTORE
] = (uint8_t)cz_smu
->toc_entry_used_count
;
513 cz_smu_populate_single_ucode_load_task(smumgr
,
514 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
, false);
515 cz_smu_populate_single_ucode_load_task(smumgr
,
516 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
, false);
517 cz_smu_populate_single_ucode_load_task(smumgr
,
518 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
, false);
519 cz_smu_populate_single_ucode_load_task(smumgr
,
520 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
522 if (smumgr
->chip_id
== CHIP_STONEY
)
523 cz_smu_populate_single_ucode_load_task(smumgr
,
524 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
526 cz_smu_populate_single_ucode_load_task(smumgr
,
527 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
, false);
529 cz_smu_populate_single_ucode_load_task(smumgr
,
530 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
, false);
532 /* populate scratch */
533 cz_smu_populate_single_scratch_task(smumgr
,
534 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
,
535 TASK_TYPE_UCODE_LOAD
, false);
537 cz_smu_populate_single_scratch_task(smumgr
,
538 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
,
539 TASK_TYPE_UCODE_LOAD
, false);
541 cz_smu_populate_single_scratch_task(smumgr
,
542 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
,
543 TASK_TYPE_UCODE_LOAD
, true);
548 static int cz_smu_construct_toc_for_power_profiling(
549 struct pp_smumgr
*smumgr
)
551 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
553 cz_smu
->toc_entry_power_profiling_index
= cz_smu
->toc_entry_used_count
;
555 cz_smu_populate_single_scratch_task(smumgr
,
556 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING
,
557 TASK_TYPE_INITIALIZE
, true);
561 static int cz_smu_construct_toc_for_bootup(struct pp_smumgr
*smumgr
)
563 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
565 cz_smu
->toc_entry_initialize_index
= cz_smu
->toc_entry_used_count
;
567 cz_smu_populate_single_ucode_load_task(smumgr
,
568 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
, false);
569 if (smumgr
->chip_id
== CHIP_STONEY
)
570 cz_smu_populate_single_ucode_load_task(smumgr
,
571 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0
, false);
573 cz_smu_populate_single_ucode_load_task(smumgr
,
574 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1
, false);
575 cz_smu_populate_single_ucode_load_task(smumgr
,
576 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE
, false);
577 cz_smu_populate_single_ucode_load_task(smumgr
,
578 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP
, false);
579 cz_smu_populate_single_ucode_load_task(smumgr
,
580 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME
, false);
581 cz_smu_populate_single_ucode_load_task(smumgr
,
582 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
583 if (smumgr
->chip_id
== CHIP_STONEY
)
584 cz_smu_populate_single_ucode_load_task(smumgr
,
585 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1
, false);
587 cz_smu_populate_single_ucode_load_task(smumgr
,
588 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2
, false);
589 cz_smu_populate_single_ucode_load_task(smumgr
,
590 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G
, true);
595 static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr
*smumgr
)
597 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
599 cz_smu
->toc_entry_clock_table
= cz_smu
->toc_entry_used_count
;
601 cz_smu_populate_single_scratch_task(smumgr
,
602 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
,
603 TASK_TYPE_INITIALIZE
, true);
608 static int cz_smu_construct_toc(struct pp_smumgr
*smumgr
)
610 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
612 cz_smu
->toc_entry_used_count
= 0;
614 cz_smu_initialize_toc_empty_job_list(smumgr
);
616 cz_smu_construct_toc_for_rlc_aram_save(smumgr
);
618 cz_smu_construct_toc_for_vddgfx_enter(smumgr
);
620 cz_smu_construct_toc_for_vddgfx_exit(smumgr
);
622 cz_smu_construct_toc_for_power_profiling(smumgr
);
624 cz_smu_construct_toc_for_bootup(smumgr
);
626 cz_smu_construct_toc_for_clock_table(smumgr
);
631 static int cz_smu_populate_firmware_entries(struct pp_smumgr
*smumgr
)
633 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
634 uint32_t firmware_type
;
637 enum cgs_ucode_id ucode_id
;
638 struct cgs_firmware_info info
= {0};
640 cz_smu
->driver_buffer_length
= 0;
642 for (i
= 0; i
< sizeof(firmware_list
)/sizeof(*firmware_list
); i
++) {
644 firmware_type
= cz_translate_firmware_enum_to_arg(smumgr
,
647 ucode_id
= cz_convert_fw_type_to_cgs(firmware_type
);
649 ret
= cgs_get_firmware_info(smumgr
->device
,
653 cz_smu
->driver_buffer
[i
].mc_addr_high
=
654 smu_upper_32_bits(info
.mc_addr
);
656 cz_smu
->driver_buffer
[i
].mc_addr_low
=
657 smu_lower_32_bits(info
.mc_addr
);
659 cz_smu
->driver_buffer
[i
].data_size
= info
.image_size
;
661 cz_smu
->driver_buffer
[i
].firmware_ID
= firmware_list
[i
];
662 cz_smu
->driver_buffer_length
++;
669 static int cz_smu_populate_single_scratch_entry(
670 struct pp_smumgr
*smumgr
,
671 enum cz_scratch_entry scratch_type
,
672 uint32_t ulsize_byte
,
673 struct cz_buffer_entry
*entry
)
675 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
677 ((long long)(cz_smu
->smu_buffer
.mc_addr_high
) << 32)
678 | cz_smu
->smu_buffer
.mc_addr_low
;
680 uint32_t ulsize_aligned
= SIZE_ALIGN_32(ulsize_byte
);
682 mc_addr
+= cz_smu
->smu_buffer_used_bytes
;
684 entry
->data_size
= ulsize_byte
;
685 entry
->kaddr
= (char *) cz_smu
->smu_buffer
.kaddr
+
686 cz_smu
->smu_buffer_used_bytes
;
687 entry
->mc_addr_low
= smu_lower_32_bits(mc_addr
);
688 entry
->mc_addr_high
= smu_upper_32_bits(mc_addr
);
689 entry
->firmware_ID
= scratch_type
;
691 cz_smu
->smu_buffer_used_bytes
+= ulsize_aligned
;
696 static int cz_download_pptable_settings(struct pp_smumgr
*smumgr
, void **table
)
698 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
701 for (i
= 0; i
< cz_smu
->scratch_buffer_length
; i
++) {
702 if (cz_smu
->scratch_buffer
[i
].firmware_ID
703 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
)
707 *table
= (struct SMU8_Fusion_ClkTable
*)cz_smu
->scratch_buffer
[i
].kaddr
;
709 cz_send_msg_to_smc_with_parameter(smumgr
,
710 PPSMC_MSG_SetClkTableAddrHi
,
711 cz_smu
->scratch_buffer
[i
].mc_addr_high
);
713 cz_send_msg_to_smc_with_parameter(smumgr
,
714 PPSMC_MSG_SetClkTableAddrLo
,
715 cz_smu
->scratch_buffer
[i
].mc_addr_low
);
717 cz_send_msg_to_smc_with_parameter(smumgr
, PPSMC_MSG_ExecuteJob
,
718 cz_smu
->toc_entry_clock_table
);
720 cz_send_msg_to_smc(smumgr
, PPSMC_MSG_ClkTableXferToDram
);
725 static int cz_upload_pptable_settings(struct pp_smumgr
*smumgr
)
727 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
730 for (i
= 0; i
< cz_smu
->scratch_buffer_length
; i
++) {
731 if (cz_smu
->scratch_buffer
[i
].firmware_ID
732 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
)
736 cz_send_msg_to_smc_with_parameter(smumgr
,
737 PPSMC_MSG_SetClkTableAddrHi
,
738 cz_smu
->scratch_buffer
[i
].mc_addr_high
);
740 cz_send_msg_to_smc_with_parameter(smumgr
,
741 PPSMC_MSG_SetClkTableAddrLo
,
742 cz_smu
->scratch_buffer
[i
].mc_addr_low
);
744 cz_send_msg_to_smc_with_parameter(smumgr
, PPSMC_MSG_ExecuteJob
,
745 cz_smu
->toc_entry_clock_table
);
747 cz_send_msg_to_smc(smumgr
, PPSMC_MSG_ClkTableXferToSmu
);
752 static int cz_smu_init(struct pp_smumgr
*smumgr
)
754 struct cz_smumgr
*cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
755 uint64_t mc_addr
= 0;
758 cz_smu
->toc_buffer
.data_size
= 4096;
759 cz_smu
->smu_buffer
.data_size
=
760 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE
, 32) +
761 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE
, 32) +
762 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE
, 32) +
763 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData
), 32) +
764 ALIGN(sizeof(struct SMU8_Fusion_ClkTable
), 32);
766 ret
= smu_allocate_memory(smumgr
->device
,
767 cz_smu
->toc_buffer
.data_size
,
768 CGS_GPU_MEM_TYPE__GART_CACHEABLE
,
771 &cz_smu
->toc_buffer
.kaddr
,
772 &cz_smu
->toc_buffer
.handle
);
776 cz_smu
->toc_buffer
.mc_addr_high
= smu_upper_32_bits(mc_addr
);
777 cz_smu
->toc_buffer
.mc_addr_low
= smu_lower_32_bits(mc_addr
);
779 ret
= smu_allocate_memory(smumgr
->device
,
780 cz_smu
->smu_buffer
.data_size
,
781 CGS_GPU_MEM_TYPE__GART_CACHEABLE
,
784 &cz_smu
->smu_buffer
.kaddr
,
785 &cz_smu
->smu_buffer
.handle
);
789 cz_smu
->smu_buffer
.mc_addr_high
= smu_upper_32_bits(mc_addr
);
790 cz_smu
->smu_buffer
.mc_addr_low
= smu_lower_32_bits(mc_addr
);
792 cz_smu_populate_firmware_entries(smumgr
);
793 if (0 != cz_smu_populate_single_scratch_entry(smumgr
,
794 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH
,
795 UCODE_ID_RLC_SCRATCH_SIZE_BYTE
,
796 &cz_smu
->scratch_buffer
[cz_smu
->scratch_buffer_length
++])) {
797 printk(KERN_ERR
"[ powerplay ] Error when Populate Firmware Entry.\n");
801 if (0 != cz_smu_populate_single_scratch_entry(smumgr
,
802 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM
,
803 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE
,
804 &cz_smu
->scratch_buffer
[cz_smu
->scratch_buffer_length
++])) {
805 printk(KERN_ERR
"[ powerplay ] Error when Populate Firmware Entry.\n");
808 if (0 != cz_smu_populate_single_scratch_entry(smumgr
,
809 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM
,
810 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE
,
811 &cz_smu
->scratch_buffer
[cz_smu
->scratch_buffer_length
++])) {
812 printk(KERN_ERR
"[ powerplay ] Error when Populate Firmware Entry.\n");
816 if (0 != cz_smu_populate_single_scratch_entry(smumgr
,
817 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING
,
818 sizeof(struct SMU8_MultimediaPowerLogData
),
819 &cz_smu
->scratch_buffer
[cz_smu
->scratch_buffer_length
++])) {
820 printk(KERN_ERR
"[ powerplay ] Error when Populate Firmware Entry.\n");
824 if (0 != cz_smu_populate_single_scratch_entry(smumgr
,
825 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE
,
826 sizeof(struct SMU8_Fusion_ClkTable
),
827 &cz_smu
->scratch_buffer
[cz_smu
->scratch_buffer_length
++])) {
828 printk(KERN_ERR
"[ powerplay ] Error when Populate Firmware Entry.\n");
831 cz_smu_construct_toc(smumgr
);
836 static int cz_smu_fini(struct pp_smumgr
*smumgr
)
838 struct cz_smumgr
*cz_smu
;
840 if (smumgr
== NULL
|| smumgr
->device
== NULL
)
843 cz_smu
= (struct cz_smumgr
*)smumgr
->backend
;
845 cgs_free_gpu_mem(smumgr
->device
,
846 cz_smu
->toc_buffer
.handle
);
847 cgs_free_gpu_mem(smumgr
->device
,
848 cz_smu
->smu_buffer
.handle
);
856 static const struct pp_smumgr_func cz_smu_funcs
= {
857 .smu_init
= cz_smu_init
,
858 .smu_fini
= cz_smu_fini
,
859 .start_smu
= cz_start_smu
,
860 .check_fw_load_finish
= cz_check_fw_load_finish
,
861 .request_smu_load_fw
= NULL
,
862 .request_smu_load_specific_fw
= NULL
,
863 .get_argument
= cz_smum_get_argument
,
864 .send_msg_to_smc
= cz_send_msg_to_smc
,
865 .send_msg_to_smc_with_parameter
= cz_send_msg_to_smc_with_parameter
,
866 .download_pptable_settings
= cz_download_pptable_settings
,
867 .upload_pptable_settings
= cz_upload_pptable_settings
,
870 int cz_smum_init(struct pp_smumgr
*smumgr
)
872 struct cz_smumgr
*cz_smu
;
874 cz_smu
= kzalloc(sizeof(struct cz_smumgr
), GFP_KERNEL
);
878 smumgr
->backend
= cz_smu
;
879 smumgr
->smumgr_funcs
= &cz_smu_funcs
;