2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "amdgpu_amdkfd.h"
24 #include "amd_shared.h"
27 #include "amdgpu_gfx.h"
28 #include "amdgpu_dma_buf.h"
29 #include <linux/module.h>
30 #include <linux/dma-buf.h>
31 #include "amdgpu_xgmi.h"
/* Bitmask of hardware VMIDs handed to KFD for compute contexts: bits 8-15
 * (0xFF00). Consumed by amdgpu_amdkfd_is_kfd_vmid() via (1 << vmid) tests
 * and exported to KFD through kgd2kfd_shared_resources.compute_vmid_bitmap.
 */
static const unsigned int compute_vmid_bitmap = 0xFF00;
/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables.
 * Set from system RAM in amdgpu_amdkfd_init() and grown by each probed
 * GPU's real_vram_size in amdgpu_amdkfd_device_probe().
 */
uint64_t amdgpu_amdkfd_total_mem_size;
/* One-time module init: record total low system memory in bytes (later
 * grown per-GPU by device_probe) and prime KFD's GPUVM memory-limit
 * accounting.
 * NOTE(review): this extract is missing upstream lines (braces, the
 * struct sysinfo declaration and si_meminfo() call, and the return) —
 * restore from the upstream file before compiling.
 */
int amdgpu_amdkfd_init(void)
	/* Page counts (totalram minus highmem) scaled by mem_unit -> bytes. */
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;
	amdgpu_amdkfd_gpuvm_init_mem_limits();
/* Module teardown counterpart to amdgpu_amdkfd_init().
 * NOTE(review): function body is missing from this extract. */
void amdgpu_amdkfd_fini(void)
/* Select the kfd2kgd function table matching this ASIC generation, probe
 * the KFD device for it, and add this GPU's VRAM to the global memory
 * total used for page-table reservation estimates.
 * NOTE(review): the switch case labels, break statements and default/return
 * lines are missing from this extract; only the per-generation assignments
 * remain. Restore from upstream before compiling.
 */
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
	const struct kfd2kgd_calls *kfd2kgd;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	/* GFX7 (CIK) support is compile-time optional. */
	kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
	kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
	kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
	kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
	kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
	/* Unknown ASIC: log and leave adev->kfd.dev unprobed. */
	dev_info(adev->dev, "kfd not supported on this ASIC\n");

	/* The kgd_dev handle given to KFD is the amdgpu_device itself. */
	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, kfd2kgd);

	amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 * work with KFD (continuation truncated in this extract)
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		/* KFD gets the tail of the aperture past amdgpu's doorbells. */
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	/* NOTE(review): the else branch (zeroing all three outputs when
	 * nothing is left for KFD) and closing braces are missing from this
	 * extract. */
/* Build the kgd2kfd_shared_resources description of this GPU (compute
 * VMIDs, MEC pipe/queue topology, GPUVM aperture, doorbell layout) and
 * hand it to KFD via kgd2kfd_device_init().
 * NOTE(review): this extract is missing upstream lines (the adev->kfd.dev
 * guard, local variable declarations for i/last_valid_bit, the
 * KGD_MAX_QUEUES argument to bitmap_complement, braces) — restore before
 * compiling.
 */
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
	struct kgd2kfd_shared_resources gpu_resources = {
		.compute_vmid_bitmap = compute_vmid_bitmap,
		.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
		.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
		/* GPUVM space for KFD, capped at the GMC address hole. */
		.gpuvm_size = min(adev->vm_manager.max_pfn
				  << AMDGPU_GPU_PAGE_SHIFT,
				  AMDGPU_GMC_HOLE_START),
		.drm_render_minor = adev->ddev->render->index,
		.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,

	/* this is going to have a few of the MSBs set that we need to
	 * (comment continuation truncated in this extract) */
	bitmap_complement(gpu_resources.queue_bitmap,
			  adev->gfx.mec.queue_bitmap,

	/* remove the KIQ bit as well */
	if (adev->gfx.kiq.ring.sched.ready)
		clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
						      adev->gfx.kiq.ring.me - 1,
						      adev->gfx.kiq.ring.pipe,
						      adev->gfx.kiq.ring.queue),
			  gpu_resources.queue_bitmap);

	/* According to linux/bitmap.h we shouldn't use bitmap_clear if
	 * nbits is not compile time constant — so clear bit-by-bit. */
	last_valid_bit = 1 /* only first MEC can have compute queues */
			 * adev->gfx.mec.num_pipe_per_mec
			 * adev->gfx.mec.num_queue_per_pipe;
	for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
		clear_bit(i, gpu_resources.queue_bitmap);

	amdgpu_doorbell_get_kfd_info(adev,
				     &gpu_resources.doorbell_physical_address,
				     &gpu_resources.doorbell_aperture_size,
				     &gpu_resources.doorbell_start_offset);

	/* Since SOC15, BIF starts to statically use the
	 * lower 12 bits of doorbell addresses for routing
	 * based on settings in registers like
	 * SDMA0_DOORBELL_RANGE etc..
	 * In order to route a doorbell to CP engine, the lower
	 * 12 bits of its address has to be outside the range
	 * set for SDMA, VCN, and IH blocks.
	 */
	if (adev->asic_type >= CHIP_VEGA10) {
		gpu_resources.non_cp_doorbells_start =
			adev->doorbell_index.first_non_cp;
		gpu_resources.non_cp_doorbells_end =
			adev->doorbell_index.last_non_cp;

	kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
/* Tear down the KFD device and clear the handle so later calls see no KFD.
 * NOTE(review): braces and the usual adev->kfd.dev NULL-guard are missing
 * from this extract. */
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
	kgd2kfd_device_exit(adev->kfd.dev);
	adev->kfd.dev = NULL;
/* Forward one interrupt-handler ring entry to KFD for processing.
 * NOTE(review): braces and the usual adev->kfd.dev NULL-guard are missing
 * from this extract. */
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry)
	kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
/* Suspend the KFD side of the device (power management path).
 * NOTE(review): braces and the usual adev->kfd.dev NULL-guard are missing
 * from this extract. */
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
	kgd2kfd_suspend(adev->kfd.dev);
/* Resume the KFD side of the device; returns kgd2kfd_resume()'s status.
 * NOTE(review): braces, the declaration/initialization of r, the
 * adev->kfd.dev guard and the return statement are missing from this
 * extract. */
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
	r = kgd2kfd_resume(adev->kfd.dev);
/* Notify KFD before a GPU reset; returns kgd2kfd_pre_reset()'s status.
 * NOTE(review): braces, the declaration of r, the adev->kfd.dev guard and
 * the return statement are missing from this extract. */
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
	r = kgd2kfd_pre_reset(adev->kfd.dev);
/* Notify KFD after a GPU reset completes; returns kgd2kfd_post_reset()'s
 * status.
 * NOTE(review): braces, the declaration of r, the adev->kfd.dev guard and
 * the return statement are missing from this extract. */
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
	r = kgd2kfd_post_reset(adev->kfd.dev);
/* Entry point for KFD to request a GPU reset; defers to amdgpu's recovery
 * policy check before actually recovering. */
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
/* Allocate a pinned, GART-bound, kernel-mapped GTT BO for KFD, returning
 * the BO handle (*mem_obj), its GPU VA (*gpu_addr) and CPU mapping
 * (*cpu_ptr). @mqd_gfx9 requests the GFX9 MQD placement flag.
 * Error paths unwind in reverse acquisition order via goto-cleanup labels.
 * NOTE(review): this extract is missing upstream lines (declaration of r,
 * bp.size/bp.resv setup, the if (r) checks around each call, the success
 * return, the *mem_obj assignment, unpin in the error path, braces) —
 * restore before compiling.
 */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr, bool mqd_gfx9)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	/* Write-combined CPU mapping for the GTT BO. */
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	/* Conditional on mqd_gfx9 upstream — guard missing in extract. */
	bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
	r = amdgpu_bo_create(adev, &bp, &bo);
		"failed to allocate BO for amdkfd (%d)\n", r);

	/* map the buffer (reserve, pin, bind to GART, kmap) */
	r = amdgpu_bo_reserve(bo, true);
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;

	/* Bind to GART so the GPU offset below is valid. */
	r = amdgpu_ttm_alloc_gart(&bo->tbo);
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
		"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;

	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

/* Unwind in reverse order of acquisition (goto-cleanup pattern). */
allocate_mem_kmap_bo_failed:
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);
/* Free a BO previously returned by amdgpu_amdkfd_alloc_gtt_mem: unmap,
 * then drop the reference.
 * NOTE(review): the amdgpu_bo_unpin() call between kunmap and unreserve is
 * missing from this extract (the original line numbering skips it). */
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
/* Allocate a GWS (global wave sync) BO for KFD. GWS has no CPU access,
 * hence NO_CPU_ACCESS and no kmap/pin steps as in alloc_gtt_mem.
 * NOTE(review): this extract is missing upstream lines (the mem_obj
 * out-parameter in the signature, declaration of r, bp.size assignment,
 * the if (r) check, *mem_obj assignment and returns, braces). */
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;

	memset(&bp, 0, sizeof(bp));
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	r = amdgpu_bo_create(adev, &bp, &bo);
		"failed to allocate gws BO for amdkfd (%d)\n", r);
/* Release a GWS BO allocated by amdgpu_amdkfd_alloc_gws (drops the ref). */
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
/* Return the loaded firmware version for the requested engine type.
 * NOTE(review): the switch statement, several case labels (PFP/ME/CE/RLC
 * returns appear without their labels here), the default branch and
 * closing braces are missing from this extract. */
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gfx.pfp_fw_version;

	return adev->gfx.me_fw_version;

	return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;
/* Fill *mem_info with this GPU's VRAM split into CPU-visible (public) and
 * invisible (private) portions, plus VRAM width and max memory clock
 * (in MHz, sources vary by SR-IOV / powerplay / emulation mode).
 * Public VRAM is only reported when the whole aperture is reachable
 * through the device's DMA mask.
 * NOTE(review): this extract truncates the address_mask initializer
 * (the fallback value after ':' is missing), and drops the else keywords
 * and braces around the clock-selection branches. */
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	/* Bits outside the device's DMA mask; aperture must not touch them. */
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
		/* (else branch — keyword missing in extract) aperture not
		 * DMA-reachable: report everything as private. */
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		/* VF: use the default clock; scale 10 kHz units to MHz. */
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs) {
		if (amdgpu_emu_mode == 1)
			/* Emulation reports no memory clock. */
			mem_info->mem_clk_max = 0;
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
		/* (final else — keyword missing in extract) fallback value. */
		mem_info->mem_clk_max = 100;
/* Return the GPU clock counter via the per-ASIC gfx callback, if provided.
 * NOTE(review): braces and the fallback return for a NULL callback are
 * missing from this extract. */
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
/* Return the max shader engine clock in MHz. Sources differ for SR-IOV
 * VFs vs. powerplay-managed devices.
 * NOTE(review): braces and the final fallback return are missing from
 * this extract. */
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quantas of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
/* Copy compute-unit topology (counts, active/always-on masks, per-CU
 * limits) from adev->gfx into KFD's kfd_cu_info layout. Bails out if the
 * two bitmap representations disagree in size.
 * NOTE(review): braces and the early return after the size check are
 * missing from this extract. */
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	/* Struct copy — a local snapshot of the gfx CU info. */
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	/* Guard against a layout mismatch before the memcpy below. */
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
/* Inspect a dma-buf fd and, when it wraps an amdgpu VRAM/GTT BO from this
 * driver, report its owning device, size, optional metadata and KFD
 * allocation flags. Rejects foreign or non-graphics buffers.
 * NOTE(review): this extract is missing upstream lines (the final flags
 * parameter in the signature, declaration of r, IS_ERR check on dma_buf,
 * obj assignment from dma_buf->priv, goto out_put statements, NULL-guards
 * on the optional out-params, braces and returns). */
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;

	dma_buf = dma_buf_get(dma_buf_fd);
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */

	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */

	/* Re-point adev at the buffer's actual owning device. */
	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */

	*dma_buf_kgd = (struct kgd_dev *)adev;

	*bo_size = amdgpu_bo_size(bo);

	*metadata_size = bo->metadata_size;

	r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
				   metadata_size, &metadata_flags);

	*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
		*flags |= ALLOC_MEM_FLAGS_PUBLIC;

	dma_buf_put(dma_buf);
/* Report current VRAM usage in bytes via the TTM VRAM manager. */
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
/* Return the XGMI hive ID this GPU belongs to (0 when not in a hive —
 * TODO confirm the non-hive value upstream). */
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
/* Return the number of XGMI hops between two devices, logging an error on
 * failure. Note the dst device is the query origin (adev), src the peer.
 * NOTE(review): braces, the if (ret < 0) guard shape and the final return
 * statement are missing from this extract. */
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
		  adev->gmc.xgmi.physical_node_id,
		  peer_adev->gmc.xgmi.physical_node_id, ret);
/* Return the bus address of the remapped MMIO page exposed to KFD. */
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
/* Return the number of GWS (global wave sync) entries on this device. */
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
/* Build a one-IB job, schedule it on the selected engine's first ring and
 * wait synchronously for its fence. Used by KFD for direct submissions.
 * NOTE(review): this extract is missing upstream lines (declaration of
 * ret, the switch statement and break lines, the error-check after
 * job_alloc, ib = &job->ibs[0], the ib->ib_mc_address naming and VMID
 * setup, goto err labels, fence put and returns, braces). */
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;

	/* Map engine type to a ring; invalid engines are rejected below. */
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
	pr_err("Invalid engine in IB submission: %d\n", engine);

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);

	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;

	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
		DRM_ERROR("amdgpu: failed to schedule IB.\n");

	/* Synchronous submission: block until the IB's fence signals. */
	ret = dma_fence_wait(f, false);

	amdgpu_job_free(job);
/* Toggle the COMPUTE power profile depending on whether KFD reports the
 * compute units idle, via SW-SMU when supported, else powerplay.
 * NOTE(review): the trailing !idle argument of both calls, braces and
 * line terminators are missing from this extract. */
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (is_support_sw_smu(adev))
		smu_switch_power_profile(&adev->smu,
					 PP_SMC_POWER_PROFILE_COMPUTE,
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
/* True when @vmid falls inside the KFD compute VMID range (bit set in
 * compute_vmid_bitmap, i.e. VMIDs 8-15).
 * NOTE(review): braces and the return true / return false lines are
 * missing from this extract. */
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
	if ((1 << vmid) & compute_vmid_bitmap)
/* Report whether this device supports PCIe atomics (cached on adev). */
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->have_atomics_support;
#ifndef CONFIG_HSA_AMD
/* No-op stubs used when KFD (HSA) support is compiled out, so the rest of
 * amdgpu can call these unconditionally.
 * NOTE(review): all stub bodies (returns of false/0/NULL and empty braces)
 * are missing from this extract — only the signatures remain. */
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)

struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      const struct kfd2kgd_calls *f2g)

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)

void kgd2kfd_device_exit(struct kfd_dev *kfd)

void kgd2kfd_exit(void)

void kgd2kfd_suspend(struct kfd_dev *kfd)

int kgd2kfd_resume(struct kfd_dev *kfd)

int kgd2kfd_pre_reset(struct kfd_dev *kfd)

int kgd2kfd_post_reset(struct kfd_dev *kfd)

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)