/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

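/*
 * Local definitions for the Data Fabric (DF) DramBaseAddress0 register;
 * gmc_v9_0_mc_init() reads it to derive the number of interleaved DRAM
 * channels when the vram width is not available from atomfirmware.
 */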
#define mmDF_CS_AON0_DramBaseAddress0						0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX					0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT				0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT			0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT			0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT			0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT			0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK				0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK			0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK				0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK				0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK				0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

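/*
 * (offset, and-mask, or-value) triples consumed by
 * amdgpu_program_register_sequence() from gmc_v9_0_gart_enable().
 */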
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

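/**
 * gmc_v9_0_vm_fault_interrupt_state - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source
 * @type: interrupt type (unused)
 * @state: interrupt state to program
 *
 * Sets or clears the protection fault interrupt enable bits in the
 * VM_CONTEXT*_CNTL registers of all 16 VM contexts on both the MM
 * and GFX hubs.
 */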
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

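/**
 * gmc_v9_0_process_interrupt - VM fault interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: decoded interrupt vector entry
 *
 * Reassembles the faulting address from the IV ring entry, reads and
 * clears the L2 protection fault status (when not running as an SR-IOV
 * guest) and logs the fault, rate limited.
 */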
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

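/*
 * Build a VM_INVALIDATE_ENG0_REQ value that flushes every page table
 * level (L1 and L2 PTEs plus all PDE levels) for a single VMID using a
 * legacy (FLUSH_TYPE 0) invalidation.
 */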
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

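		/*
		 * The short busy wait did not see the ACK; fall back to
		 * polling with 1 usec delays, up to the driver timeout.
		 */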
		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

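/*
 * Translate the AMDGPU_VM_PAGE_* and AMDGPU_VM_MTYPE_* flags coming from
 * userspace mappings into the hardware PTE bits described above.
 */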
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

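/*
 * Convert a page directory address from the GPU's VRAM view to the MC
 * address space used in PDEs; the BUG_ON catches addresses that are not
 * 64-byte aligned or that spill outside the 48-bit MC range.
 */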
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

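/*
 * Assign each ring a dedicated VM invalidation engine on its hub,
 * starting from engine 3, and enable the VM fault interrupt; engine 17
 * stays reserved for GART flushes (see gmc_v9_0_gart_flush_gpu_tlb()).
 */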
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

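/*
 * Place VRAM and GART in the GPU's internal address space. Under SR-IOV
 * the framebuffer base is fixed at 0; on bare metal it is read back from
 * the MMHUB.
 */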
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = 0;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		/* hbm memory channel size */
		chansize = 128;

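		/*
		 * Decode the Data Fabric channel-interleave setting into a
		 * DRAM channel count.
		 */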
		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
		switch (tmp) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 0;
			break;
		case 3:
			numchan = 4;
			break;
		case 4:
			numchan = 0;
			break;
		case 5:
			numchan = 8;
			break;
		case 6:
			numchan = 0;
			break;
		case 7:
			numchan = 16;
			break;
		case 8:
			numchan = 2;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}

	/* Could aper size report 0? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* memsize is reported in MB */
	adev->mc.mc_vram_size =
		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	amdgpu_gart_set_defaults(adev);
	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

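/*
 * Allocate the common GART structures and the GART page table itself in
 * VRAM; GART pages are mapped uncached and executable.
 */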
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	mmhub_v1_0_init(adev);

	spin_lock_init(&adev->mc.invalidate_lock);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			adev->vm_manager.vm_size = 1U << 18;
			adev->vm_manager.block_size = 9;
			adev->vm_manager.num_level = 3;
			adev->vm_manager.fragment_size = 9;
		} else {
			/* vm_size is 64GB for legacy 2-level page support */
			amdgpu_vm_adjust_size(adev, 64);
			adev->vm_manager.num_level = 1;
			adev->vm_manager.fragment_size = 9;
		}
		break;
	case CHIP_VEGA10:
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		adev->vm_manager.num_level = 3;
		adev->vm_manager.fragment_size = 9;
		break;
	default:
		break;
	}

	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
		 adev->vm_manager.vm_size,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);

	if (r)
		return r;

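	/* vm_size is in GB; one GB holds 1 << 18 4KB pages, hence the shift. */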
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/*
	 * It needs to reserve 8M stolen memory for vega10
	 * TODO: Figure out how to avoid that...
	 */
	adev->mc.stolen_size = 8 * 1024 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		break;
	case CHIP_RAVEN:
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					 golden_settings_vega10_hdp,
					 (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP.*/
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

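	/* Make HDP flushes invalidate the HDP read cache as well. */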
	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		u32 tmp;

		/* Lockout access through VGA aperture*/
		tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_hw_fini(adev);

	return 0;
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

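	/* VMID state does not survive the reinit above; drop all allocated ids. */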
	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return mmhub_v1_0_set_clockgating(adev, state);
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};