/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"

#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

#include "nbio_v6_1.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"

#define mmDF_CS_AON0_DramBaseAddress0					0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX				0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT			0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT		0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT		0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT		0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT		0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK			0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK		0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK			0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK			0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK			0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8

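/*
 * Each entry below is a {register offset, AND mask, OR value} triplet;
 * amdgpu_program_register_sequence() applies them during GART enable.
 */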
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

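/*
 * Enable or disable VM fault interrupts. The VM_CONTEXT*_CNTL registers are
 * contiguous, so vm_context0_cntl + i addresses context i; the same mask of
 * fault-enable bits is applied to all 16 contexts on both hubs.
 */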
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

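/*
 * Report a VM protection fault. The IV entry carries bits 43:12 of the
 * faulting page address in src_data[0] and bits 47:44 in src_data[1].
 */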
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}

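/*
 * Build a VM_INVALIDATE_ENG*_REQ value that does a legacy (flush type 0)
 * invalidation of every TLB level (L1 PTEs, L2 PTEs and all PDE levels)
 * for the given VMID.
 */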
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	nbio_v6_1_hdp_flush(adev);

	spin_lock(&adev->mc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}

/**
 * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
					void *cpu_pt_addr,
					uint32_t gpu_page_idx,
					uint64_t addr,
					uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VEGA 10:
	 * 63:59 reserved
	 * 58:57 mtype
	 * 56 F
	 * 55 L
	 * 54 P
	 * 53 SW
	 * 52 T
	 * 50:48 reserved
	 * 47:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 Z
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VEGA 10:
	 * 63:59 block fragment size
	 * 58:55 reserved
	 * 54 P
	 * 53:48 reserved
	 * 47:6 physical base address of PD or PTE
	 * 5:3 reserved
	 * 2 C
	 * 1 system
	 * 0 valid
	 */

	/*
	 * The following is for PTE only. GART does not have PDEs.
	 */
	value = addr & 0x0000FFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));
	return 0;
}

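/*
 * Translate the AMDGPU_VM_PAGE_* mapping flags supplied by userspace into
 * hardware PTE bits, including the VEGA10 memory type (MTYPE) field.
 */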
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

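/*
 * Rebase an MC address from the VRAM aperture onto the driver's
 * vram_base_offset so the resulting address is usable in page tables.
 */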
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
	return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}

static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
};

static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
	unsigned i;

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 17 is used for GART flushes */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 17);

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}

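/*
 * Place the VRAM and GTT apertures in the GPU's address space. Under
 * SR-IOV the framebuffer base is programmed by the host, so it stays 0;
 * otherwise it is read back from the MMHUB.
 */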
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_mc *mc)
{
	u64 base = 0;
	if (!amdgpu_sriov_vf(adev))
		base = mmhub_v1_0_get_fb_location(adev);
	amdgpu_vram_location(adev, &adev->mc, base);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* hbm memory channel size */
	chansize = 128;

	tmp = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_CS_AON0_DramBaseAddress0));
	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	switch (tmp) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 0;
		break;
	case 3:
		numchan = 4;
		break;
	case 4:
		numchan = 0;
		break;
	case 5:
		numchan = 8;
		break;
	case 6:
		numchan = 0;
		break;
	case 7:
		numchan = 16;
		break;
	case 8:
		numchan = 2;
		break;
	}
	adev->mc.vram_width = numchan * chansize;

	/* Could the aperture size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* memsize is reported in MB */
	adev->mc.mc_vram_size =
		nbio_v6_1_get_memsize(adev) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = adev->mc.mc_vram_size;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart size to the
	 * larger of the default GTT size and the vram size.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
					adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v9_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

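/*
 * Create the common GART structures and allocate the GART table in VRAM.
 * Each of the num_gpu_pages entries is an 8-byte PTE, mapped uncached
 * (MTYPE_UC) and marked executable.
 */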
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v9_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits vega10 specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (vega10).
 * Returns 0 for success.
 */
static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;

	/* TODO: fix num_level for APU when updating vm size and block size */
	if (adev->flags & AMD_IS_APU)
		adev->vm_manager.num_level = 1;
	else
		adev->vm_manager.num_level = 3;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	/* XXX This value is not zero for APU */
	adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v9_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup.
 */
static void gmc_v9_0_vm_fini(struct amdgpu_device *adev)
{
	return;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	spin_lock_init(&adev->mc.invalidate_lock);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
		amdgpu_vm_adjust_size(adev, 64);
	} else {
		/* XXX Don't know how to get VRAM type yet. */
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		adev->vm_manager.vm_size = 1U << 18;
		adev->vm_manager.block_size = 9;
		DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
			 adev->vm_manager.vm_size,
			 adev->vm_manager.block_size);
	}

	/* These interrupts are VMC and UTCL2 page faults. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
				&adev->mc.vm_fault);
	if (r)
		return r;

	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* Set DMA mask + need_dma32 flags.
	 * PCIE - can handle 44-bits.
	 * IGP - can handle 44-bits.
	 * PCI - dma32 for legacy pci gart, 44 bits on vega10.
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 44;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v9_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}
	return r;
}

/**
 * gmc_v9_0_gart_fini - gart fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (vega10).
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v9_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v9_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
					golden_settings_vega10_hdp,
					(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* After HDP is initialized, flush HDP.*/
	nbio_v6_1_hdp_flush(adev);

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL));
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MISC_CNTL), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL));
	WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_HOST_PATH_CNTL), tmp);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);

	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

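/*
 * hw_fini disables the VM fault interrupt and tears down the GART;
 * it is also used on the suspend path below.
 */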
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_hw_fini(adev);

	return 0;
}

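/*
 * On resume, re-enable the hardware and then reset all VMIDs; any
 * VMID-to-page-table assignments from before suspend are stale.
 */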
static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

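/* amd_ip_funcs hooks that plug GMC v9.0 into the amdgpu IP block framework */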
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};