]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
drm/amd: include <linux/delay.h> instead of "linux/delay.h"
[mirror_ubuntu-hirsute-kernel.git] / drivers / gpu / drm / amd / powerplay / smumgr / cz_smumgr.c
CommitLineData
4630f0fa
JZ
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
f90dee20
MY
23
24#include <linux/delay.h>
25#include <linux/gfp.h>
4630f0fa
JZ
26#include <linux/kernel.h>
27#include <linux/slab.h>
f90dee20
MY
28#include <linux/types.h>
29
4630f0fa
JZ
30#include "cgs_common.h"
31#include "smu/smu_8_0_d.h"
32#include "smu/smu_8_0_sh_mask.h"
33#include "smu8.h"
34#include "smu8_fusion.h"
35#include "cz_smumgr.h"
36#include "cz_ppsmc.h"
37#include "smu_ucode_xfer_cz.h"
38#include "gca/gfx_8_0_d.h"
39#include "gca/gfx_8_0_sh_mask.h"
40#include "smumgr.h"
41
/* Round @x up to the next multiple of 32 bytes (SMU scratch alignment). */
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
43
/*
 * Ucode images uploaded to the SMU on firmware (re)load, in the order
 * they are resolved by cz_smu_populate_firmware_entries().
 */
static const enum cz_scratch_entry firmware_list[] = {
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
54
55static int cz_smum_get_argument(struct pp_smumgr *smumgr)
56{
57 if (smumgr == NULL || smumgr->device == NULL)
58 return -EINVAL;
59
60 return cgs_read_register(smumgr->device,
61 mmSMU_MP1_SRBM2P_ARG_0);
62}
63
64static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
65 uint16_t msg)
66{
67 int result = 0;
68
69 if (smumgr == NULL || smumgr->device == NULL)
70 return -EINVAL;
71
72 result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
73 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
74 if (result != 0) {
634a24d8 75 pr_err("cz_send_msg_to_smc_async failed\n");
4630f0fa
JZ
76 return result;
77 }
78
79 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
80 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
81
82 return 0;
83}
84
85/* Send a message to the SMC, and wait for its response.*/
86static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
87{
88 int result = 0;
89
90 result = cz_send_msg_to_smc_async(smumgr, msg);
91 if (result != 0)
92 return result;
93
a3477255 94 return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
4630f0fa 95 SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
4630f0fa
JZ
96}
97
98static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
99 uint32_t smc_address, uint32_t limit)
100{
101 if (smumgr == NULL || smumgr->device == NULL)
102 return -EINVAL;
103
104 if (0 != (3 & smc_address)) {
634a24d8 105 pr_err("SMC address must be 4 byte aligned\n");
610ecfd6 106 return -EINVAL;
4630f0fa
JZ
107 }
108
109 if (limit <= (smc_address + 3)) {
634a24d8 110 pr_err("SMC address beyond the SMC RAM area\n");
610ecfd6 111 return -EINVAL;
4630f0fa
JZ
112 }
113
114 cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
115 SMN_MP1_SRAM_START_ADDR + smc_address);
116
117 return 0;
118}
119
120static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
121 uint32_t smc_address, uint32_t value, uint32_t limit)
122{
123 int result;
124
125 if (smumgr == NULL || smumgr->device == NULL)
126 return -EINVAL;
127
128 result = cz_set_smc_sram_address(smumgr, smc_address, limit);
610ecfd6
TSD
129 if (!result)
130 cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
4630f0fa 131
610ecfd6 132 return result;
4630f0fa
JZ
133}
134
135static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
136 uint16_t msg, uint32_t parameter)
137{
138 if (smumgr == NULL || smumgr->device == NULL)
139 return -EINVAL;
140
141 cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
142
143 return cz_send_msg_to_smc(smumgr, msg);
144}
145
4630f0fa
JZ
/*
 * Poll the UcodeLoadStatus word in the SMU firmware header until every
 * bit of @firmware is set, i.e. all requested ucodes report as loaded.
 * Returns 0 on success, -EINVAL on bad arguments or poll timeout.
 */
static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
				   uint32_t firmware)
{
	int i;
	/* SRAM address of SMU8_Firmware_Header.UcodeLoadStatus. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);

	/* Busy-poll (1us steps) up to the smumgr-configured timeout. */
	for (i = 0; i < smumgr->usec_timeout; i++) {
		if (firmware ==
			(cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
			break;
		udelay(1);
	}

	if (i >= smumgr->usec_timeout) {
		pr_err("SMU check loaded firmware failed.\n");
		return -EINVAL;
	}

	return 0;
}
173
/*
 * Point the MEC (compute microcontroller) instruction cache at the MEC
 * ucode image: halt both MEC pipes, reprogram the IC base control
 * fields, then write the firmware's GPU address into the base registers.
 * Returns 0 on success, -EINVAL if the firmware info cannot be fetched.
 */
static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
{
	uint32_t reg_data;
	uint32_t tmp;
	int ret = 0;
	struct cgs_firmware_info info = {0};
	struct cz_smumgr *cz_smu;

	if (smumgr == NULL || smumgr->device == NULL)
		return -EINVAL;

	cz_smu = (struct cz_smumgr *)smumgr->backend;
	ret = cgs_get_firmware_info(smumgr->device,
						CGS_UCODE_ID_CP_MEC, &info);

	if (ret)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = cgs_read_register(smumgr->device,
					mmCP_MEC_CNTL);
	tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);

	tmp = cgs_read_register(smumgr->device,
					mmCP_CPC_IC_BASE_CNTL);

	/* VMID=0, ATC=0, CACHE_POLICY=0, MTYPE=1 for the IC fetches. */
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);

	/* Low/high halves of the ucode GPU address, masked to field width. */
	reg_data = smu_lower_32_bits(info.mc_addr) &
			SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = smu_upper_32_bits(info.mc_addr) &
			SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
218
/*
 * Map a cz_scratch_entry to the task argument the SMU firmware expects
 * (UCODE_ID_* for ucode loads, TASK_ARG_* for data/metadata tasks).
 * Stoney has one SDMA engine and one MEC jump table, so SDMA1 and
 * CP_MEC_JT2 alias to their first instance on that chip.
 */
static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
			enum cz_scratch_entry firmware_enum)
{
	uint8_t ret = 0;

	switch (firmware_enum) {
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
		ret = UCODE_ID_SDMA0;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
		if (smumgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_SDMA0;
		else
			ret = UCODE_ID_SDMA1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
		ret = UCODE_ID_CP_CE;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
		ret = UCODE_ID_CP_PFP;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
		ret = UCODE_ID_CP_ME;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
		ret = UCODE_ID_CP_MEC_JT1;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
		if (smumgr->chip_id == CHIP_STONEY)
			ret = UCODE_ID_CP_MEC_JT1;
		else
			ret = UCODE_ID_CP_MEC_JT2;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
		ret = UCODE_ID_GMCON_RENG;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
		ret = UCODE_ID_RLC_G;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
		ret = UCODE_ID_RLC_SCRATCH;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
		ret = UCODE_ID_RLC_SRM_ARAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
		ret = UCODE_ID_RLC_SRM_DRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
		ret = UCODE_ID_DMCU_ERAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
		ret = UCODE_ID_DMCU_IRAM;
		break;
	case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
		ret = TASK_ARG_INIT_MM_PWR_LOG;
		break;
	/* All register save/restore blobs share the MMIO task argument. */
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
	case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
	case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
	case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
		ret = TASK_ARG_REG_MMIO;
		break;
	case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
		ret = TASK_ARG_INIT_CLK_TABLE;
		break;
	}

	return ret;
}
291
292static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
293{
294 enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
295
296 switch (fw_type) {
297 case UCODE_ID_SDMA0:
298 result = CGS_UCODE_ID_SDMA0;
299 break;
300 case UCODE_ID_SDMA1:
301 result = CGS_UCODE_ID_SDMA1;
302 break;
303 case UCODE_ID_CP_CE:
304 result = CGS_UCODE_ID_CP_CE;
305 break;
306 case UCODE_ID_CP_PFP:
307 result = CGS_UCODE_ID_CP_PFP;
308 break;
309 case UCODE_ID_CP_ME:
310 result = CGS_UCODE_ID_CP_ME;
311 break;
312 case UCODE_ID_CP_MEC_JT1:
313 result = CGS_UCODE_ID_CP_MEC_JT1;
314 break;
315 case UCODE_ID_CP_MEC_JT2:
316 result = CGS_UCODE_ID_CP_MEC_JT2;
317 break;
318 case UCODE_ID_RLC_G:
319 result = CGS_UCODE_ID_RLC_G;
320 break;
321 default:
322 break;
323 }
324
325 return result;
326}
327
/*
 * Append one scratch-buffer task (save/load/init, per @type) to the TOC.
 *
 * @fw_enum: scratch entry the task operates on
 * @is_last: terminate the task chain after this entry
 *
 * Returns 0 on success, -EINVAL if no scratch buffer was registered for
 * @fw_enum.  NOTE(review): toc_entry_used_count is incremented before
 * validation, so a failed lookup still consumes a half-filled TOC slot.
 */
static int cz_smu_populate_single_scratch_task(
			struct pp_smumgr *smumgr,
			enum cz_scratch_entry fw_enum,
			uint8_t type, bool is_last)
{
	uint8_t i;
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;

	/* Find the scratch buffer registered for this firmware ID. */
	for (i = 0; i < cz_smu->scratch_buffer_length; i++)
		if (cz_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= cz_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = cz_smu->scratch_buffer[i].mc_addr_low;
	task->addr.high = cz_smu->scratch_buffer[i].mc_addr_high;
	task->size_bytes = cz_smu->scratch_buffer[i].data_size;

	if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		/* Flag the IH register blob to be applied at load time. */
		struct cz_ih_meta_data *pIHReg_restore =
			(struct cz_ih_meta_data *)cz_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
364
/*
 * Append one TASK_TYPE_UCODE_LOAD task to the TOC, pointing at the
 * driver_buffer entry previously resolved for @fw_enum.
 *
 * Returns 0 on success, -EINVAL if the firmware was never populated.
 * NOTE(review): like the scratch variant, the TOC slot is consumed even
 * when the lookup fails.
 */
static int cz_smu_populate_single_ucode_load_task(
			struct pp_smumgr *smumgr,
			enum cz_scratch_entry fw_enum,
			bool is_last)
{
	uint8_t i;
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
	struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;

	/* Find the driver buffer holding this firmware's load info. */
	for (i = 0; i < cz_smu->driver_buffer_length; i++)
		if (cz_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= cz_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = cz_smu->driver_buffer[i].mc_addr_low;
	task->addr.high = cz_smu->driver_buffer[i].mc_addr_high;
	task->size_bytes = cz_smu->driver_buffer[i].data_size;

	return 0;
}
394
/* Record where the RLC ARAM save job starts, then queue it as a one-task chain. */
static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;

	cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(smumgr,
			CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
			TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
406
407static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
408{
409 int i;
410 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
411 struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
412
413 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
414 toc->JobList[i] = (uint8_t)IGNORE_JOB;
415
416 return 0;
417}
418
/*
 * Build the JOB_GFX_SAVE chain executed when vddgfx is powered down:
 * save RLC scratch, then RLC SRM DRAM (chain terminator).
 */
static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_SAVE, false);

	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_SAVE, true);

	return 0;
}
435
436
/*
 * Build the JOB_GFX_RESTORE chain executed when vddgfx is powered back
 * up: reload all GFX ucodes, then restore the saved RLC state.  Task
 * order matters — it defines the sequence the SMU replays.  On Stoney
 * the single MEC jump table (JT1) is loaded in place of JT2.
 */
static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
	struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;

	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;

	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);

	if (smumgr->chip_id == CHIP_STONEY)
		cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	else
		cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);

	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);

	/* populate scratch */
	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);

	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);

	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
478
/* Record and queue the multimedia power-log initialization task. */
static int cz_smu_construct_toc_for_power_profiling(
			struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;

	cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
				TASK_TYPE_INITIALIZE, true);
	return 0;
}
491
/*
 * Build the boot-time firmware load chain (all engines).  Stoney skips
 * SDMA1 and CP_MEC_JT2, which it does not have.  Order defines the
 * sequence the SMU loads the ucodes in; RLC_G terminates the chain.
 */
static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;

	cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;

	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
	if (smumgr->chip_id != CHIP_STONEY)
		cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	if (smumgr->chip_id != CHIP_STONEY)
		cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
	cz_smu_populate_single_ucode_load_task(smumgr,
				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);

	return 0;
}
519
/* Record and queue the fusion clock-table initialization task. */
static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;

	cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;

	cz_smu_populate_single_scratch_task(smumgr,
				CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
				TASK_TYPE_INITIALIZE, true);

	return 0;
}
532
/*
 * Rebuild the whole TOC from scratch.  The construction order is
 * significant: each helper records its job's starting index from the
 * shared toc_entry_used_count cursor as it runs.
 */
static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;

	cz_smu->toc_entry_used_count = 0;
	cz_smu_initialize_toc_empty_job_list(smumgr);
	cz_smu_construct_toc_for_rlc_aram_save(smumgr);
	cz_smu_construct_toc_for_vddgfx_enter(smumgr);
	cz_smu_construct_toc_for_vddgfx_exit(smumgr);
	cz_smu_construct_toc_for_power_profiling(smumgr);
	cz_smu_construct_toc_for_bootup(smumgr);
	cz_smu_construct_toc_for_clock_table(smumgr);

	return 0;
}
548
549static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
550{
551 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
552 uint32_t firmware_type;
553 uint32_t i;
554 int ret;
555 enum cgs_ucode_id ucode_id;
556 struct cgs_firmware_info info = {0};
557
558 cz_smu->driver_buffer_length = 0;
559
a7aabcc8 560 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
4630f0fa 561
08b21d30 562 firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
4630f0fa
JZ
563 firmware_list[i]);
564
565 ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
566
567 ret = cgs_get_firmware_info(smumgr->device,
568 ucode_id, &info);
569
570 if (ret == 0) {
571 cz_smu->driver_buffer[i].mc_addr_high =
572 smu_upper_32_bits(info.mc_addr);
573
574 cz_smu->driver_buffer[i].mc_addr_low =
575 smu_lower_32_bits(info.mc_addr);
576
577 cz_smu->driver_buffer[i].data_size = info.image_size;
578
579 cz_smu->driver_buffer[i].firmware_ID = firmware_list[i];
580 cz_smu->driver_buffer_length++;
581 }
582 }
583
584 return 0;
585}
586
587static int cz_smu_populate_single_scratch_entry(
588 struct pp_smumgr *smumgr,
589 enum cz_scratch_entry scratch_type,
590 uint32_t ulsize_byte,
591 struct cz_buffer_entry *entry)
592{
593 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
594 long long mc_addr =
595 ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
596 | cz_smu->smu_buffer.mc_addr_low;
597
598 uint32_t ulsize_aligned = SIZE_ALIGN_32(ulsize_byte);
599
600 mc_addr += cz_smu->smu_buffer_used_bytes;
601
602 entry->data_size = ulsize_byte;
603 entry->kaddr = (char *) cz_smu->smu_buffer.kaddr +
604 cz_smu->smu_buffer_used_bytes;
605 entry->mc_addr_low = smu_lower_32_bits(mc_addr);
606 entry->mc_addr_high = smu_upper_32_bits(mc_addr);
607 entry->firmware_ID = scratch_type;
608
609 cz_smu->smu_buffer_used_bytes += ulsize_aligned;
610
611 return 0;
612}
613
614static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
615{
616 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
617 unsigned long i;
618
619 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
620 if (cz_smu->scratch_buffer[i].firmware_ID
621 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
622 break;
623 }
624
625 *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
626
627 cz_send_msg_to_smc_with_parameter(smumgr,
628 PPSMC_MSG_SetClkTableAddrHi,
629 cz_smu->scratch_buffer[i].mc_addr_high);
630
631 cz_send_msg_to_smc_with_parameter(smumgr,
632 PPSMC_MSG_SetClkTableAddrLo,
633 cz_smu->scratch_buffer[i].mc_addr_low);
634
635 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
636 cz_smu->toc_entry_clock_table);
637
638 cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
639
640 return 0;
641}
642
643static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
644{
645 struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
646 unsigned long i;
647
648 for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
649 if (cz_smu->scratch_buffer[i].firmware_ID
650 == CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
651 break;
652 }
653
654 cz_send_msg_to_smc_with_parameter(smumgr,
655 PPSMC_MSG_SetClkTableAddrHi,
656 cz_smu->scratch_buffer[i].mc_addr_high);
657
658 cz_send_msg_to_smc_with_parameter(smumgr,
659 PPSMC_MSG_SetClkTableAddrLo,
660 cz_smu->scratch_buffer[i].mc_addr_low);
661
662 cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
663 cz_smu->toc_entry_clock_table);
664
665 cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
666
667 return 0;
668}
669
bcb5487b
RZ
/*
 * Rebuild the firmware TOC and ask the SMU to (re)load everything:
 * clear UcodeLoadStatus, hand the TOC's GPU address to the SMU, run
 * InitJobs, then execute the aram-save, power-profiling and initialize
 * task chains.  Skipped entirely when reload_fw is not set.
 */
static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
{
	struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
	uint32_t smc_address;

	if (!smumgr->reload_fw) {
		pr_info("skip reloading...\n");
		return 0;
	}

	cz_smu_populate_firmware_entries(smumgr);

	cz_smu_construct_toc(smumgr);

	/* Zero UcodeLoadStatus so cz_check_fw_load_finish() starts clean. */
	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);

	cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_DriverDramAddrHi,
					cz_smu->toc_buffer.mc_addr_high);

	cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_DriverDramAddrLo,
					cz_smu->toc_buffer.mc_addr_low);

	cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);

	cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_ExecuteJob,
					cz_smu->toc_entry_aram);
	cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
				cz_smu->toc_entry_power_profiling_index);

	return cz_send_msg_to_smc_with_parameter(smumgr,
					PPSMC_MSG_ExecuteJob,
					cz_smu->toc_entry_initialize_index);
}
709
/*
 * Bring up the SMU: request a full firmware load, wait for the load-
 * status bits, then hand the MEC its ucode.  Stoney masks out the
 * second SDMA engine and MEC jump table it does not have.
 *
 * NOTE(review): load/check failures are logged but deliberately not
 * returned — only the MEC load result reaches the caller, matching the
 * best-effort pattern used elsewhere in this driver; confirm before
 * tightening.
 */
static int cz_start_smu(struct pp_smumgr *smumgr)
{
	int ret = 0;
	uint32_t fw_to_check = 0;

	fw_to_check = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_JT1_MASK |
			UCODE_ID_CP_MEC_JT2_MASK;

	if (smumgr->chip_id == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	ret = cz_request_smu_load_fw(smumgr);
	if (ret)
		pr_err("SMU firmware load failed\n");

	cz_check_fw_load_finish(smumgr, fw_to_check);

	ret = cz_load_mec_firmware(smumgr);
	if (ret)
		pr_err("Mec Firmware load failed\n");

	return ret;
}
739
4630f0fa
JZ
740static int cz_smu_init(struct pp_smumgr *smumgr)
741{
4630f0fa
JZ
742 uint64_t mc_addr = 0;
743 int ret = 0;
63b55943
RZ
744 struct cz_smumgr *cz_smu;
745
746 cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL);
747 if (cz_smu == NULL)
748 return -ENOMEM;
749
750 smumgr->backend = cz_smu;
4630f0fa
JZ
751
752 cz_smu->toc_buffer.data_size = 4096;
753 cz_smu->smu_buffer.data_size =
754 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
755 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
756 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
757 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
758 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
759
760 ret = smu_allocate_memory(smumgr->device,
761 cz_smu->toc_buffer.data_size,
762 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
763 PAGE_SIZE,
764 &mc_addr,
765 &cz_smu->toc_buffer.kaddr,
766 &cz_smu->toc_buffer.handle);
767 if (ret != 0)
768 return -1;
769
770 cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
771 cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
772
773 ret = smu_allocate_memory(smumgr->device,
774 cz_smu->smu_buffer.data_size,
775 CGS_GPU_MEM_TYPE__GART_CACHEABLE,
776 PAGE_SIZE,
777 &mc_addr,
778 &cz_smu->smu_buffer.kaddr,
779 &cz_smu->smu_buffer.handle);
780 if (ret != 0)
781 return -1;
782
783 cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
784 cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
785
4630f0fa
JZ
786 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
787 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
788 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
789 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
634a24d8 790 pr_err("Error when Populate Firmware Entry.\n");
4630f0fa
JZ
791 return -1;
792 }
793
794 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
795 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
796 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
797 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
634a24d8 798 pr_err("Error when Populate Firmware Entry.\n");
4630f0fa
JZ
799 return -1;
800 }
801 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
802 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
803 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
804 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
634a24d8 805 pr_err("Error when Populate Firmware Entry.\n");
4630f0fa
JZ
806 return -1;
807 }
808
809 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
810 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
811 sizeof(struct SMU8_MultimediaPowerLogData),
812 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
634a24d8 813 pr_err("Error when Populate Firmware Entry.\n");
4630f0fa
JZ
814 return -1;
815 }
816
817 if (0 != cz_smu_populate_single_scratch_entry(smumgr,
818 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
819 sizeof(struct SMU8_Fusion_ClkTable),
820 &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
634a24d8 821 pr_err("Error when Populate Firmware Entry.\n");
4630f0fa
JZ
822 return -1;
823 }
4630f0fa
JZ
824
825 return 0;
826}
827
828static int cz_smu_fini(struct pp_smumgr *smumgr)
829{
830 struct cz_smumgr *cz_smu;
831
832 if (smumgr == NULL || smumgr->device == NULL)
833 return -EINVAL;
834
835 cz_smu = (struct cz_smumgr *)smumgr->backend;
2500a3c9 836 if (cz_smu) {
4630f0fa
JZ
837 cgs_free_gpu_mem(smumgr->device,
838 cz_smu->toc_buffer.handle);
839 cgs_free_gpu_mem(smumgr->device,
840 cz_smu->smu_buffer.handle);
841 kfree(cz_smu);
4630f0fa
JZ
842 }
843
844 return 0;
845}
846
/* smumgr entry points for the Carrizo/Stoney (smu8) SMU. */
const struct pp_smumgr_func cz_smu_funcs = {
	.smu_init = cz_smu_init,
	.smu_fini = cz_smu_fini,
	.start_smu = cz_start_smu,
	.check_fw_load_finish = cz_check_fw_load_finish,
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = cz_smum_get_argument,
	.send_msg_to_smc = cz_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = cz_send_msg_to_smc_with_parameter,
	.download_pptable_settings = cz_download_pptable_settings,
	.upload_pptable_settings = cz_upload_pptable_settings,
};
860