/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

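/*
 * The DF (Data Fabric) register below is apparently not yet exported by the
 * generated vega10 headers, hence these local definitions.
 */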
#define mmDF_CS_AON0_DramBaseAddress0				0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX			0
/* DF_CS_AON0_DramBaseAddress0 */
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT	0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT	0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT	0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT	0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK	0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS			8

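/*
 * Golden register settings: {offset, mask of bits to clear, value to OR in}
 * triplets, consumed by amdgpu_program_register_sequence() from
 * gmc_v9_0_gart_enable() below.
 */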
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

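/*
 * Enable or disable the VM fault interrupt by setting or clearing the
 * per-hub protection bits in the sixteen VM_CONTEXT*_CNTL registers of
 * both the MM and GFX hubs.
 */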
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		bits = hub->get_vm_protection_bits();
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

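/*
 * VM fault handler: decode the faulting page address from the IV ring
 * entry, read and clear the protection fault status on the hub that
 * raised it, and log the fault.
 */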
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB];
	struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB];
	uint32_t status;
	u64 addr;

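	/*
	 * Reassemble the 48-bit page-aligned fault address: src_data[0]
	 * carries address bits 43:12, the low nibble of src_data[1]
	 * carries bits 47:44.
	 */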
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (entry->vm_id_src) {
		status = RREG32(mmhub->vm_l2_pro_fault_status);
		WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1);
	} else {
		status = RREG32(gfxhub->vm_l2_pro_fault_status);
		WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	DRM_ERROR("[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u) "
		  "at page 0x%016llx from client %d\n"
		  "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		  entry->vm_id_src ? "mmhub" : "gfxhub",
		  entry->src_id, entry->ring_id, entry->vm_id, entry->pas_id,
		  addr, entry->client_id, status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

/*
 * GART
 * VMID 0 is used by the kernel for the physical GPU address space.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use invalidation engine 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = hub->get_invalidate_req(vmid);

		WREG32(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
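	/*
	 * Illustrative example: addr = 0x123456789000 with
	 * flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
	 * stores 0x0000123456789061 -- the 4k page base in bits 47:12 plus
	 * valid, read and write in bits 0, 5 and 6.
	 */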
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

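/*
 * Translate the AMDGPU_VM_PAGE_* flags used by the VM code into the
 * hardware PTE bits described above, including the MTYPE encoding.
 */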
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

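/*
 * Convert an MC (VRAM aperture) address into the address the engines
 * should use: strip the VRAM start offset and rebase onto
 * vram_base_offset.
 */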
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}

static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = {
	.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
};

static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev)
{
	adev->mc.mc_funcs = &gmc_v9_0_mc_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_mc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = mmhub_v1_0_get_fb_location(adev);

	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* hbm memory channel size */
	chansize = 128;

	tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
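	/* Map the DF channel-interleave encoding to a memory channel count. */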
	switch (tmp) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 0;
		break;
	case 3:
		numchan = 4;
		break;
	case 4:
		numchan = 0;
		break;
	case 5:
		numchan = 8;
		break;
	case 6:
		numchan = 0;
		break;
	case 7:
		numchan = 16;
		break;
	case 8:
		numchan = 2;
		break;
	}
	adev->mc.vram_width = numchan * chansize;

	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* nbio_v6_1_get_memsize() reports the size in MB */
	adev->mc.mc_vram_size =
		nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* Unless the user has overridden it, set the gart size to the
	 * larger of 1024MB and the vram size.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

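/**
 * gmc_v9_0_gart_init - init the driver info for managing the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the GART page table in VRAM and set the default PTE flags
 * (uncached MTYPE, executable).
 */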
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/*
 * vm
 * VMID 0 is used by the kernel for the physical GPU address space.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vega10 specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15).
 * Returns 0 for success.
 */
static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	/* XXX This value is not zero for APUs. */
	adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v9_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup.
 */
static void gmc_v9_0_vm_fini(struct amdgpu_device *adev)
{
	return;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	spin_lock_init(&adev->mc.invalidate_lock);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently the default is 64GB ((16 << 20) 4k pages).
	 * Max GPUVM size is 48 bits.
	 */
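	/* amdgpu_vm_size is in GB; 1GB = 1 << 18 pages of 4KB, hence the shift. */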
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* Set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits.
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10.
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v9_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (vega10).
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v9_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					golden_settings_vega10_hdp,
					(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP. */
	nbio_v6_1_hdp_flush(adev);

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

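	/* Make HDP flushes also invalidate the HDP cache. */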
	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL));
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL));
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL), tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v9_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v9_0_hw_fini(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v9_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev,
				"vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};