drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

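	/*
	 * Reassemble the 48-bit, page-aligned fault address from the IV
	 * entry: src_data[0] carries bits 43:12 and the low nibble of
	 * src_data[1] carries bits 47:44.
	 */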
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
			"for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - decide whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle. Acquire a semaphore before the
	 * invalidation and release it afterwards so the power-gated state
	 * is never entered in between, as a workaround for the issue.
	 */

	/* TODO: Semaphore support for the GFXHUB still needs debugging. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
					    hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
				    hub->eng_distance * eng);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Semaphore support for the GFXHUB still needs debugging. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
			      hub->eng_distance * eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	/* At SRIOV run time, the driver shouldn't access registers through
	 * MMIO directly. Use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    !amdgpu_in_reset(adev)) {

		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

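	/*
	 * The job carries no real work (a single NOP, padded below); setting
	 * vm_needs_flush is what makes the invalidation happen, and it is
	 * executed from the SDMA engine itself.
	 */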
	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

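	/*
	 * With a live KIQ, flush by emitting a single invalidate-TLBs packet
	 * for the PASID; otherwise look up which VMID(s) the ATC maps to
	 * this PASID and flush those directly.
	 */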
	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle. Acquire a semaphore before the
	 * invalidation and release it afterwards so the power-gated state
	 * is never entered in between, as a workaround for the issue.
	 */

	/* TODO: Semaphore support for the GFXHUB still needs debugging. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Semaphore support for the GFXHUB still needs debugging. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

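	/* Each hub has its own VMID->PASID LUT in the IH (OSSSYS) block;
	 * pick the GFX or MM variant to match the ring's vmhub.
	 */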
	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
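		/*
		 * With translate_further, a PDB0 entry either maps a huge
		 * page directly (the PDE-as-PTE flag is dropped since PDB0
		 * entries are already treated as PTEs here) or points at a
		 * page table (marked with the translate-further bit).
		 */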
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

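	/*
	 * PRT (partially resident texture) mappings are never valid page
	 * table entries: they keep the PRT, snooped, log and system bits
	 * but drop the valid bit, so accesses go through the PRT handling
	 * path instead of being translated normally.
	 */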
	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v8_7_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		base = gfxhub_v2_1_get_fb_location(adev);
	else
		base = gfxhub_v2_0_get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* NBIO reports the memory size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

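	/* Each GART entry is one 64-bit PTE; GART pages are mapped uncached
	 * (MTYPE_UC) and executable, per the PTE layout documented above.
	 */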
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
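		/* Pre-OS framebuffer size = viewport height * pitch * 4
		 * bytes per pixel (assumes a 32bpp scanout surface).
		 */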
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
		DRM_ERROR("Warning: pre-OS buffer uses most of vram, \
				be aware of gart table overwrite\n");
		return 0;
	}

	return size;
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		gfxhub_v2_1_init(adev);
	else
		gfxhub_v2_0_init(adev);

	mmhub_v2_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if (adev->asic_type == CHIP_SIENNA_CICHLID && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

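	/* GMC v10 can handle up to 44-bit DMA addresses; request that mask
	 * for both streaming and coherent allocations.
	 */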
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v2_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_vga_size = gmc_v10_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		break;
	default:
		break;
	}
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		r = gfxhub_v2_1_gart_enable(adev);
	else
		r = gfxhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* Flush HDP after it is initialized */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		gfxhub_v2_1_set_fault_enable_default(adev, value);
	else
		gfxhub_v2_0_set_fault_enable_default(adev, value);
	mmhub_v2_0_set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		gfxhub_v2_1_gart_disable(adev);
	else
		gfxhub_v2_0_gart_disable(adev);
	mmhub_v2_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v10_0_gart_disable(adev);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = mmhub_v2_0_set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v2_0_get_clockgating(adev, flags);

	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER)
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};