/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "smuio/smuio_11_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs nv_common_ip_funcs;

/*
 * Indirect register accessors
 */
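/*
 * The PCIE space is reached through an index/data register pair supplied
 * by the NBIO block: the target offset is written to the index register
 * and the value then moves through the data register. The dummy read-back
 * of the index register flushes the posted write before the data access,
 * and pcie_idx_lock keeps the two-step sequence atomic.
 */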
static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, reg);
	WREG32(data, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

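/*
 * nv_grbm_select - steer banked GC register access
 *
 * Programs GRBM_GFX_CNTL so that subsequent reads and writes of banked GC
 * registers hit the selected micro engine (me), pipe, queue and VMID.
 * Illustrative usage (values hypothetical); callers restore the default
 * selection when done:
 *
 *	nv_grbm_select(adev, me, pipe, queue, 0);
 *	...access the per-queue registers...
 *	nv_grbm_select(adev, 0, 0, 0, 0);
 */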
void nv_grbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
}

static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

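/*
 * Read the vbios image through the SMUIO ROM mirror: writing 0 to
 * ROM_INDEX rewinds the read pointer, and each subsequent ROM_DATA read
 * returns the next dword of the image, which is why the index register
 * is only touched once. APUs carry their vbios inside the system BIOS
 * image and are rejected here.
 */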
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}

static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};

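/*
 * Reads of GRBM-banked registers go through amdgpu_gfx_select_se_sh() to
 * pick the requested shader engine/array first; 0xffffffff means
 * "no specific instance/broadcast", in which case the selection registers
 * are left alone. grbm_idx_mutex serializes users of the selection.
 */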
static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t nv_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		if (reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
}
#endif

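/*
 * Mode1 reset: the whole ASIC is reset through the PSP. Bus mastering is
 * disabled and the PCI config space saved beforehand, since the reset
 * clobbers it; completion is detected by polling the memsize register
 * until it reads something other than 0xffffffff.
 */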
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static bool nv_asic_supports_baco(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	return smu_baco_is_support(smu);
}

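/*
 * BACO (Bus Active, Chip Off) is preferred when the SMU advertises
 * support for it and we are not running as an SR-IOV guest; otherwise
 * fall back to a PSP-based mode1 reset.
 */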
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}

static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void nv_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};

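/*
 * Register base initialization: preferably taken from the IP discovery
 * table exposed by the firmware; if that fails, or discovery is disabled
 * via the amdgpu_discovery module parameter, fall back to the hardcoded
 * per-ASIC legacy tables.
 */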
static int nv_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_discovery) {
		r = amdgpu_discovery_reg_base_init(adev);
		if (r) {
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
			goto legacy_init;
		}

		return 0;
	}

legacy_init:
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		navi10_reg_base_init(adev);
		break;
	case CHIP_NAVI14:
		navi14_reg_base_init(adev);
		break;
	case CHIP_NAVI12:
		navi12_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

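/*
 * IP blocks are added in hardware init order: common, GMC, IH, PSP, SMU
 * (when the PSP loads firmware), display, GFX, SDMA, SMU (direct firmware
 * load), VCN, JPEG and optionally MES; amdgpu later walks this list for
 * every init/fini/suspend/resume stage.
 */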
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	if (amdgpu_sriov_vf(adev)) {
		adev->virt.ops = &xgpu_nv_virt_ops;
		/* try send GPU_INIT_DATA request to host */
		amdgpu_virt_request_init_data(adev);
	}

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void nv_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	} else {
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
	}
}

static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}

static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm the sys driver
	 * and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}

static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};

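/*
 * Early init runs before any IP block is initialized: it wires up the
 * indirect register accessors, records the HDP register hole location for
 * later remapping, and derives the clock/power gating feature masks
 * (cg_flags/pg_flags) plus the external revision id for each supported
 * ASIC.
 */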
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* The guest VM gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0;
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * Work around this by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int nv_common_sw_fini(void *handle)
{
	return 0;
}

static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to user space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}

static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}

static int nv_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_fini(adev);
}

static int nv_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return nv_common_hw_init(adev);
}

static bool nv_common_is_idle(void *handle)
{
	return true;
}

static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}

static int nv_common_soft_reset(void *handle)
{
	return 0;
}

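/*
 * HDP memory power gating sequence: force the IPH and RC memory clocks on,
 * disable every power mode (HDP 5.0 cannot switch modes dynamically),
 * enable exactly one of LS/DS/SD according to cg_flags, then restore the
 * original clock overrides.
 */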
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before switching the clock/power mode,
	 * force the IPH and RC clocks on */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before making any change */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fall back to DS */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after the clock/power mode change */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}

static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}

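/*
 * Clockgating is owned by the host when running under SR-IOV, hence the
 * early return for VFs. On bare metal, NBIO gating is delegated to the
 * nbio callbacks while the HDP paths are handled locally above.
 */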
static int nv_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_mem_power_gating(adev,
				state == AMD_CG_STATE_GATE);
		nv_update_hdp_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}

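/*
 * Rather than trusting cached state, the current gating configuration is
 * reconstructed by reading the HDP registers back: a clear soft-override
 * mask means MGCG is active, and the LS/DS/SD bits report which memory
 * power mode is enabled.
 */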
static void nv_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};