/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include "smu_ucode_xfer_vi.h"
28 #include "ppatomctrl.h"
29 #include "cgs_common.h"
30 #include "smu7_ppsmc.h"
31 #include "smu7_smumgr.h"
32 #include "smu7_common.h"
34 #include "polaris10_pwrvirus.h"
36 #define SMU7_SMC_SIZE 0x20000
38 static int smu7_set_smc_sram_address(struct pp_hwmgr
*hwmgr
, uint32_t smc_addr
, uint32_t limit
)
40 PP_ASSERT_WITH_CODE((0 == (3 & smc_addr
)), "SMC address must be 4 byte aligned.", return -EINVAL
);
41 PP_ASSERT_WITH_CODE((limit
> (smc_addr
+ 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL
);
43 cgs_write_register(hwmgr
->device
, mmSMC_IND_INDEX_11
, smc_addr
);
44 PHM_WRITE_FIELD(hwmgr
->device
, SMC_IND_ACCESS_CNTL
, AUTO_INCREMENT_IND_11
, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
49 int smu7_copy_bytes_from_smc(struct pp_hwmgr
*hwmgr
, uint32_t smc_start_address
, uint32_t *dest
, uint32_t byte_count
, uint32_t limit
)
54 uint8_t i
, data_byte
[4] = {0};
55 uint32_t *pdata
= (uint32_t *)&data_byte
;
57 PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address
)), "SMC address must be 4 byte aligned.", return -EINVAL
);
58 PP_ASSERT_WITH_CODE((limit
> (smc_start_address
+ byte_count
)), "SMC address is beyond the SMC RAM area.", return -EINVAL
);
60 addr
= smc_start_address
;
62 while (byte_count
>= 4) {
63 smu7_read_smc_sram_dword(hwmgr
, addr
, &data
, limit
);
65 *dest
= PP_SMC_TO_HOST_UL(data
);
73 smu7_read_smc_sram_dword(hwmgr
, addr
, &data
, limit
);
74 *pdata
= PP_SMC_TO_HOST_UL(data
);
75 /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
76 dest_byte
= (uint8_t *)dest
;
77 for (i
= 0; i
< byte_count
; i
++)
78 dest_byte
[i
] = data_byte
[i
];
85 int smu7_copy_bytes_to_smc(struct pp_hwmgr
*hwmgr
, uint32_t smc_start_address
,
86 const uint8_t *src
, uint32_t byte_count
, uint32_t limit
)
90 uint32_t original_data
;
94 PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address
)), "SMC address must be 4 byte aligned.", return -EINVAL
);
95 PP_ASSERT_WITH_CODE((limit
> (smc_start_address
+ byte_count
)), "SMC address is beyond the SMC RAM area.", return -EINVAL
);
97 addr
= smc_start_address
;
99 while (byte_count
>= 4) {
100 /* Bytes are written into the SMC addres space with the MSB first. */
101 data
= src
[0] * 0x1000000 + src
[1] * 0x10000 + src
[2] * 0x100 + src
[3];
103 result
= smu7_set_smc_sram_address(hwmgr
, addr
, limit
);
108 cgs_write_register(hwmgr
->device
, mmSMC_IND_DATA_11
, data
);
115 if (0 != byte_count
) {
119 result
= smu7_set_smc_sram_address(hwmgr
, addr
, limit
);
125 original_data
= cgs_read_register(hwmgr
->device
, mmSMC_IND_DATA_11
);
127 extra_shift
= 8 * (4 - byte_count
);
129 while (byte_count
> 0) {
130 /* Bytes are written into the SMC addres space with the MSB first. */
131 data
= (0x100 * data
) + *src
++;
135 data
<<= extra_shift
;
137 data
|= (original_data
& ~((~0UL) << extra_shift
));
139 result
= smu7_set_smc_sram_address(hwmgr
, addr
, limit
);
144 cgs_write_register(hwmgr
->device
, mmSMC_IND_DATA_11
, data
);
/* Write a 4-byte jump instruction at SMC address 0 so the SMC firmware
 * branches to its entry point when started.  The limit is sizeof(data)+1
 * because smu7_copy_bytes_to_smc requires limit > addr + count.
 */
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}
160 bool smu7_is_smc_ram_running(struct pp_hwmgr
*hwmgr
)
162 return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr
->device
, CGS_IND_REG__SMC
, SMC_SYSCON_CLOCK_CNTL_0
, ck_disable
))
163 && (0x20100 <= cgs_read_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
, ixSMC_PC_C
)));
166 int smu7_send_msg_to_smc(struct pp_hwmgr
*hwmgr
, uint16_t msg
)
170 if (!smu7_is_smc_ram_running(hwmgr
))
174 PHM_WAIT_FIELD_UNEQUAL(hwmgr
, SMC_RESP_0
, SMC_RESP
, 0);
176 ret
= PHM_READ_FIELD(hwmgr
->device
, SMC_RESP_0
, SMC_RESP
);
179 pr_info("\n failed to send pre message %x ret is %d \n", msg
, ret
);
181 cgs_write_register(hwmgr
->device
, mmSMC_MESSAGE_0
, msg
);
183 PHM_WAIT_FIELD_UNEQUAL(hwmgr
, SMC_RESP_0
, SMC_RESP
, 0);
185 ret
= PHM_READ_FIELD(hwmgr
->device
, SMC_RESP_0
, SMC_RESP
);
188 pr_info("\n failed to send message %x ret is %d \n", msg
, ret
);
193 int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr
*hwmgr
, uint16_t msg
)
195 cgs_write_register(hwmgr
->device
, mmSMC_MESSAGE_0
, msg
);
200 int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr
*hwmgr
, uint16_t msg
, uint32_t parameter
)
202 if (!smu7_is_smc_ram_running(hwmgr
)) {
206 PHM_WAIT_FIELD_UNEQUAL(hwmgr
, SMC_RESP_0
, SMC_RESP
, 0);
208 cgs_write_register(hwmgr
->device
, mmSMC_MSG_ARG_0
, parameter
);
210 return smu7_send_msg_to_smc(hwmgr
, msg
);
213 int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr
*hwmgr
, uint16_t msg
, uint32_t parameter
)
215 cgs_write_register(hwmgr
->device
, mmSMC_MSG_ARG_0
, parameter
);
217 return smu7_send_msg_to_smc_without_waiting(hwmgr
, msg
);
220 int smu7_send_msg_to_smc_offset(struct pp_hwmgr
*hwmgr
)
222 cgs_write_register(hwmgr
->device
, mmSMC_MSG_ARG_0
, 0x20000);
224 cgs_write_register(hwmgr
->device
, mmSMC_MESSAGE_0
, PPSMC_MSG_Test
);
226 PHM_WAIT_FIELD_UNEQUAL(hwmgr
, SMC_RESP_0
, SMC_RESP
, 0);
228 if (1 != PHM_READ_FIELD(hwmgr
->device
, SMC_RESP_0
, SMC_RESP
))
229 pr_info("Failed to send Message.\n");
234 int smu7_wait_for_smc_inactive(struct pp_hwmgr
*hwmgr
)
236 if (!smu7_is_smc_ram_running(hwmgr
))
239 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr
, SMC_IND
, SMC_SYSCON_CLOCK_CNTL_0
, cken
, 0);
244 enum cgs_ucode_id
smu7_convert_fw_type_to_cgs(uint32_t fw_type
)
246 enum cgs_ucode_id result
= CGS_UCODE_ID_MAXIMUM
;
250 result
= CGS_UCODE_ID_SMU
;
252 case UCODE_ID_SMU_SK
:
253 result
= CGS_UCODE_ID_SMU_SK
;
256 result
= CGS_UCODE_ID_SDMA0
;
259 result
= CGS_UCODE_ID_SDMA1
;
262 result
= CGS_UCODE_ID_CP_CE
;
264 case UCODE_ID_CP_PFP
:
265 result
= CGS_UCODE_ID_CP_PFP
;
268 result
= CGS_UCODE_ID_CP_ME
;
270 case UCODE_ID_CP_MEC
:
271 result
= CGS_UCODE_ID_CP_MEC
;
273 case UCODE_ID_CP_MEC_JT1
:
274 result
= CGS_UCODE_ID_CP_MEC_JT1
;
276 case UCODE_ID_CP_MEC_JT2
:
277 result
= CGS_UCODE_ID_CP_MEC_JT2
;
280 result
= CGS_UCODE_ID_RLC_G
;
282 case UCODE_ID_MEC_STORAGE
:
283 result
= CGS_UCODE_ID_STORAGE
;
293 int smu7_read_smc_sram_dword(struct pp_hwmgr
*hwmgr
, uint32_t smc_addr
, uint32_t *value
, uint32_t limit
)
297 result
= smu7_set_smc_sram_address(hwmgr
, smc_addr
, limit
);
302 *value
= cgs_read_register(hwmgr
->device
, mmSMC_IND_DATA_11
);
306 int smu7_write_smc_sram_dword(struct pp_hwmgr
*hwmgr
, uint32_t smc_addr
, uint32_t value
, uint32_t limit
)
310 result
= smu7_set_smc_sram_address(hwmgr
, smc_addr
, limit
);
315 cgs_write_register(hwmgr
->device
, mmSMC_IND_DATA_11
, value
);
320 /* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */
322 static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type
)
328 result
= UCODE_ID_SDMA0_MASK
;
331 result
= UCODE_ID_SDMA1_MASK
;
334 result
= UCODE_ID_CP_CE_MASK
;
336 case UCODE_ID_CP_PFP
:
337 result
= UCODE_ID_CP_PFP_MASK
;
340 result
= UCODE_ID_CP_ME_MASK
;
342 case UCODE_ID_CP_MEC
:
343 case UCODE_ID_CP_MEC_JT1
:
344 case UCODE_ID_CP_MEC_JT2
:
345 result
= UCODE_ID_CP_MEC_MASK
;
348 result
= UCODE_ID_RLC_G_MASK
;
351 pr_info("UCode type is out of range! \n");
358 static int smu7_populate_single_firmware_entry(struct pp_hwmgr
*hwmgr
,
360 struct SMU_Entry
*entry
)
363 struct cgs_firmware_info info
= {0};
365 result
= cgs_get_firmware_info(hwmgr
->device
,
366 smu7_convert_fw_type_to_cgs(fw_type
),
370 entry
->version
= info
.fw_version
;
371 entry
->id
= (uint16_t)fw_type
;
372 entry
->image_addr_high
= smu_upper_32_bits(info
.mc_addr
);
373 entry
->image_addr_low
= smu_lower_32_bits(info
.mc_addr
);
374 entry
->meta_data_addr_high
= 0;
375 entry
->meta_data_addr_low
= 0;
377 /* digest need be excluded out */
378 if (cgs_is_virtualization_enabled(hwmgr
->device
))
379 info
.image_size
-= 20;
380 entry
->data_size_byte
= info
.image_size
;
381 entry
->num_register_entries
= 0;
384 if ((fw_type
== UCODE_ID_RLC_G
)
385 || (fw_type
== UCODE_ID_CP_MEC
))
393 int smu7_request_smu_load_fw(struct pp_hwmgr
*hwmgr
)
395 struct smu7_smumgr
*smu_data
= (struct smu7_smumgr
*)(hwmgr
->smu_backend
);
398 struct SMU_DRAMData_TOC
*toc
;
400 if (!hwmgr
->reload_fw
) {
401 pr_info("skip reloading...\n");
405 if (smu_data
->soft_regs_start
)
406 cgs_write_ind_register(hwmgr
->device
, CGS_IND_REG__SMC
,
407 smu_data
->soft_regs_start
+ smum_get_offsetof(hwmgr
,
408 SMU_SoftRegisters
, UcodeLoadStatus
),
411 if (hwmgr
->chip_id
> CHIP_TOPAZ
) { /* add support for Topaz */
412 if (!cgs_is_virtualization_enabled(hwmgr
->device
)) {
413 smu7_send_msg_to_smc_with_parameter(hwmgr
,
414 PPSMC_MSG_SMU_DRAM_ADDR_HI
,
415 smu_data
->smu_buffer
.mc_addr_high
);
416 smu7_send_msg_to_smc_with_parameter(hwmgr
,
417 PPSMC_MSG_SMU_DRAM_ADDR_LO
,
418 smu_data
->smu_buffer
.mc_addr_low
);
420 fw_to_load
= UCODE_ID_RLC_G_MASK
421 + UCODE_ID_SDMA0_MASK
422 + UCODE_ID_SDMA1_MASK
423 + UCODE_ID_CP_CE_MASK
424 + UCODE_ID_CP_ME_MASK
425 + UCODE_ID_CP_PFP_MASK
426 + UCODE_ID_CP_MEC_MASK
;
428 fw_to_load
= UCODE_ID_RLC_G_MASK
429 + UCODE_ID_SDMA0_MASK
430 + UCODE_ID_SDMA1_MASK
431 + UCODE_ID_CP_CE_MASK
432 + UCODE_ID_CP_ME_MASK
433 + UCODE_ID_CP_PFP_MASK
434 + UCODE_ID_CP_MEC_MASK
435 + UCODE_ID_CP_MEC_JT1_MASK
436 + UCODE_ID_CP_MEC_JT2_MASK
;
439 toc
= (struct SMU_DRAMData_TOC
*)smu_data
->header
;
440 toc
->num_entries
= 0;
441 toc
->structure_version
= 1;
443 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
444 UCODE_ID_RLC_G
, &toc
->entry
[toc
->num_entries
++]),
445 "Failed to Get Firmware Entry.", return -EINVAL
);
446 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
447 UCODE_ID_CP_CE
, &toc
->entry
[toc
->num_entries
++]),
448 "Failed to Get Firmware Entry.", return -EINVAL
);
449 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
450 UCODE_ID_CP_PFP
, &toc
->entry
[toc
->num_entries
++]),
451 "Failed to Get Firmware Entry.", return -EINVAL
);
452 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
453 UCODE_ID_CP_ME
, &toc
->entry
[toc
->num_entries
++]),
454 "Failed to Get Firmware Entry.", return -EINVAL
);
455 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
456 UCODE_ID_CP_MEC
, &toc
->entry
[toc
->num_entries
++]),
457 "Failed to Get Firmware Entry.", return -EINVAL
);
458 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
459 UCODE_ID_CP_MEC_JT1
, &toc
->entry
[toc
->num_entries
++]),
460 "Failed to Get Firmware Entry.", return -EINVAL
);
461 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
462 UCODE_ID_CP_MEC_JT2
, &toc
->entry
[toc
->num_entries
++]),
463 "Failed to Get Firmware Entry.", return -EINVAL
);
464 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
465 UCODE_ID_SDMA0
, &toc
->entry
[toc
->num_entries
++]),
466 "Failed to Get Firmware Entry.", return -EINVAL
);
467 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
468 UCODE_ID_SDMA1
, &toc
->entry
[toc
->num_entries
++]),
469 "Failed to Get Firmware Entry.", return -EINVAL
);
470 if (cgs_is_virtualization_enabled(hwmgr
->device
))
471 PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr
,
472 UCODE_ID_MEC_STORAGE
, &toc
->entry
[toc
->num_entries
++]),
473 "Failed to Get Firmware Entry.", return -EINVAL
);
475 smu7_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_DRV_DRAM_ADDR_HI
, smu_data
->header_buffer
.mc_addr_high
);
476 smu7_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_DRV_DRAM_ADDR_LO
, smu_data
->header_buffer
.mc_addr_low
);
478 if (smu7_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_LoadUcodes
, fw_to_load
))
479 pr_err("Fail to Request SMU Load uCode");
484 /* Check if the FW has been loaded, SMU will not return if loading has not finished. */
485 int smu7_check_fw_load_finish(struct pp_hwmgr
*hwmgr
, uint32_t fw_type
)
487 struct smu7_smumgr
*smu_data
= (struct smu7_smumgr
*)(hwmgr
->smu_backend
);
488 uint32_t fw_mask
= smu7_get_mask_for_firmware_type(fw_type
);
491 ret
= phm_wait_on_indirect_register(hwmgr
, mmSMC_IND_INDEX_11
,
492 smu_data
->soft_regs_start
+ smum_get_offsetof(hwmgr
,
493 SMU_SoftRegisters
, UcodeLoadStatus
),
498 int smu7_reload_firmware(struct pp_hwmgr
*hwmgr
)
500 return hwmgr
->smumgr_funcs
->start_smu(hwmgr
);
503 static int smu7_upload_smc_firmware_data(struct pp_hwmgr
*hwmgr
, uint32_t length
, uint32_t *src
, uint32_t limit
)
505 uint32_t byte_count
= length
;
507 PP_ASSERT_WITH_CODE((limit
>= byte_count
), "SMC address is beyond the SMC RAM area.", return -EINVAL
);
509 cgs_write_register(hwmgr
->device
, mmSMC_IND_INDEX_11
, 0x20000);
510 PHM_WRITE_FIELD(hwmgr
->device
, SMC_IND_ACCESS_CNTL
, AUTO_INCREMENT_IND_11
, 1);
512 for (; byte_count
>= 4; byte_count
-= 4)
513 cgs_write_register(hwmgr
->device
, mmSMC_IND_DATA_11
, *src
++);
515 PHM_WRITE_FIELD(hwmgr
->device
, SMC_IND_ACCESS_CNTL
, AUTO_INCREMENT_IND_11
, 0);
517 PP_ASSERT_WITH_CODE((0 == byte_count
), "SMC size must be divisible by 4.", return -EINVAL
);
523 int smu7_upload_smu_firmware_image(struct pp_hwmgr
*hwmgr
)
526 struct smu7_smumgr
*smu_data
= (struct smu7_smumgr
*)(hwmgr
->smu_backend
);
528 struct cgs_firmware_info info
= {0};
530 if (smu_data
->security_hard_key
== 1)
531 cgs_get_firmware_info(hwmgr
->device
,
532 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU
), &info
);
534 cgs_get_firmware_info(hwmgr
->device
,
535 smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK
), &info
);
537 hwmgr
->is_kicker
= info
.is_kicker
;
539 result
= smu7_upload_smc_firmware_data(hwmgr
, info
.image_size
, (uint32_t *)info
.kptr
, SMU7_SMC_SIZE
);
544 static void execute_pwr_table(struct pp_hwmgr
*hwmgr
, const PWR_Command_Table
*pvirus
, int size
)
549 for (i
= 0; i
< size
; i
++) {
552 if (reg
!= 0xffffffff)
553 cgs_write_register(hwmgr
->device
, reg
, data
);
560 static void execute_pwr_dfy_table(struct pp_hwmgr
*hwmgr
, const PWR_DFY_Section
*section
)
564 cgs_write_register(hwmgr
->device
, mmCP_DFY_CNTL
, section
->dfy_cntl
);
565 cgs_write_register(hwmgr
->device
, mmCP_DFY_ADDR_HI
, section
->dfy_addr_hi
);
566 cgs_write_register(hwmgr
->device
, mmCP_DFY_ADDR_LO
, section
->dfy_addr_lo
);
567 for (i
= 0; i
< section
->dfy_size
; i
++)
568 cgs_write_register(hwmgr
->device
, mmCP_DFY_DATA_0
, section
->dfy_data
[i
]);
571 int smu7_setup_pwr_virus(struct pp_hwmgr
*hwmgr
)
573 execute_pwr_table(hwmgr
, pwr_virus_table_pre
, ARRAY_SIZE(pwr_virus_table_pre
));
574 execute_pwr_dfy_table(hwmgr
, &pwr_virus_section1
);
575 execute_pwr_dfy_table(hwmgr
, &pwr_virus_section2
);
576 execute_pwr_dfy_table(hwmgr
, &pwr_virus_section3
);
577 execute_pwr_dfy_table(hwmgr
, &pwr_virus_section4
);
578 execute_pwr_dfy_table(hwmgr
, &pwr_virus_section5
);
579 execute_pwr_dfy_table(hwmgr
, &pwr_virus_section6
);
580 execute_pwr_table(hwmgr
, pwr_virus_table_post
, ARRAY_SIZE(pwr_virus_table_post
));
585 int smu7_init(struct pp_hwmgr
*hwmgr
)
587 struct smu7_smumgr
*smu_data
;
588 uint8_t *internal_buf
;
589 uint64_t mc_addr
= 0;
591 /* Allocate memory for backend private data */
592 smu_data
= (struct smu7_smumgr
*)(hwmgr
->smu_backend
);
593 smu_data
->header_buffer
.data_size
=
594 ((sizeof(struct SMU_DRAMData_TOC
) / 4096) + 1) * 4096;
596 /* Allocate FW image data structure and header buffer and
597 * send the header buffer address to SMU */
598 smu_allocate_memory(hwmgr
->device
,
599 smu_data
->header_buffer
.data_size
,
600 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB
,
603 &smu_data
->header_buffer
.kaddr
,
604 &smu_data
->header_buffer
.handle
);
606 smu_data
->header
= smu_data
->header_buffer
.kaddr
;
607 smu_data
->header_buffer
.mc_addr_high
= smu_upper_32_bits(mc_addr
);
608 smu_data
->header_buffer
.mc_addr_low
= smu_lower_32_bits(mc_addr
);
610 PP_ASSERT_WITH_CODE((NULL
!= smu_data
->header
),
612 kfree(hwmgr
->smu_backend
);
613 cgs_free_gpu_mem(hwmgr
->device
,
614 (cgs_handle_t
)smu_data
->header_buffer
.handle
);
617 if (cgs_is_virtualization_enabled(hwmgr
->device
))
620 smu_data
->smu_buffer
.data_size
= 200*4096;
621 smu_allocate_memory(hwmgr
->device
,
622 smu_data
->smu_buffer
.data_size
,
623 CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB
,
626 &smu_data
->smu_buffer
.kaddr
,
627 &smu_data
->smu_buffer
.handle
);
629 internal_buf
= smu_data
->smu_buffer
.kaddr
;
630 smu_data
->smu_buffer
.mc_addr_high
= smu_upper_32_bits(mc_addr
);
631 smu_data
->smu_buffer
.mc_addr_low
= smu_lower_32_bits(mc_addr
);
633 PP_ASSERT_WITH_CODE((NULL
!= internal_buf
),
635 kfree(hwmgr
->smu_backend
);
636 cgs_free_gpu_mem(hwmgr
->device
,
637 (cgs_handle_t
)smu_data
->smu_buffer
.handle
);
640 if (smum_is_hw_avfs_present(hwmgr
))
641 smu_data
->avfs
.avfs_btc_status
= AVFS_BTC_BOOT
;
643 smu_data
->avfs
.avfs_btc_status
= AVFS_BTC_NOTSUPPORTED
;
649 int smu7_smu_fini(struct pp_hwmgr
*hwmgr
)
651 kfree(hwmgr
->smu_backend
);
652 hwmgr
->smu_backend
= NULL
;
653 cgs_rel_firmware(hwmgr
->device
, CGS_UCODE_ID_SMU
);