/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
23 #include "amdgpu_amdkfd.h"
24 #include "amd_shared.h"
27 #include "amdgpu_gfx.h"
28 #include "amdgpu_dma_buf.h"
29 #include <linux/module.h>
30 #include <linux/dma-buf.h>
31 #include "amdgpu_xgmi.h"
32 #include <uapi/linux/kfd_ioctl.h>
34 /* Total memory size in system memory and all GPU VRAM. Used to
35 * estimate worst case amount of memory to reserve for page tables
37 uint64_t amdgpu_amdkfd_total_mem_size
;
39 int amdgpu_amdkfd_init(void)
45 amdgpu_amdkfd_total_mem_size
= si
.totalram
- si
.totalhigh
;
46 amdgpu_amdkfd_total_mem_size
*= si
.mem_unit
;
50 amdgpu_amdkfd_gpuvm_init_mem_limits();
/* Module teardown counterpart of amdgpu_amdkfd_init().
 * NOTE(review): body was elided in this view; restored from upstream.
 */
void amdgpu_amdkfd_fini(void)
{
	kgd2kfd_exit();
}
63 void amdgpu_amdkfd_device_probe(struct amdgpu_device
*adev
)
65 bool vf
= amdgpu_sriov_vf(adev
);
67 adev
->kfd
.dev
= kgd2kfd_probe((struct kgd_dev
*)adev
,
68 adev
->pdev
, adev
->asic_type
, vf
);
71 amdgpu_amdkfd_total_mem_size
+= adev
->gmc
.real_vram_size
;
75 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
78 * @adev: amdgpu_device pointer
79 * @aperture_base: output returning doorbell aperture base physical address
80 * @aperture_size: output returning doorbell aperture size in bytes
81 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
83 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
84 * takes doorbells required for its own rings and reports the setup to amdkfd.
85 * amdgpu reserved doorbells are at the start of the doorbell aperture.
87 static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device
*adev
,
88 phys_addr_t
*aperture_base
,
89 size_t *aperture_size
,
93 * The first num_doorbells are used by amdgpu.
94 * amdkfd takes whatever's left in the aperture.
96 if (adev
->doorbell
.size
> adev
->doorbell
.num_doorbells
* sizeof(u32
)) {
97 *aperture_base
= adev
->doorbell
.base
;
98 *aperture_size
= adev
->doorbell
.size
;
99 *start_offset
= adev
->doorbell
.num_doorbells
* sizeof(u32
);
107 void amdgpu_amdkfd_device_init(struct amdgpu_device
*adev
)
113 struct kgd2kfd_shared_resources gpu_resources
= {
114 .compute_vmid_bitmap
=
115 ((1 << AMDGPU_NUM_VMID
) - 1) -
116 ((1 << adev
->vm_manager
.first_kfd_vmid
) - 1),
117 .num_pipe_per_mec
= adev
->gfx
.mec
.num_pipe_per_mec
,
118 .num_queue_per_pipe
= adev
->gfx
.mec
.num_queue_per_pipe
,
119 .gpuvm_size
= min(adev
->vm_manager
.max_pfn
120 << AMDGPU_GPU_PAGE_SHIFT
,
121 AMDGPU_GMC_HOLE_START
),
122 .drm_render_minor
= adev
->ddev
->render
->index
,
123 .sdma_doorbell_idx
= adev
->doorbell_index
.sdma_engine
,
127 /* this is going to have a few of the MSBs set that we need to
130 bitmap_complement(gpu_resources
.cp_queue_bitmap
,
131 adev
->gfx
.mec
.queue_bitmap
,
134 /* According to linux/bitmap.h we shouldn't use bitmap_clear if
135 * nbits is not compile time constant
137 last_valid_bit
= 1 /* only first MEC can have compute queues */
138 * adev
->gfx
.mec
.num_pipe_per_mec
139 * adev
->gfx
.mec
.num_queue_per_pipe
;
140 for (i
= last_valid_bit
; i
< KGD_MAX_QUEUES
; ++i
)
141 clear_bit(i
, gpu_resources
.cp_queue_bitmap
);
143 amdgpu_doorbell_get_kfd_info(adev
,
144 &gpu_resources
.doorbell_physical_address
,
145 &gpu_resources
.doorbell_aperture_size
,
146 &gpu_resources
.doorbell_start_offset
);
148 /* Since SOC15, BIF starts to statically use the
149 * lower 12 bits of doorbell addresses for routing
150 * based on settings in registers like
151 * SDMA0_DOORBELL_RANGE etc..
152 * In order to route a doorbell to CP engine, the lower
153 * 12 bits of its address has to be outside the range
154 * set for SDMA, VCN, and IH blocks.
156 if (adev
->asic_type
>= CHIP_VEGA10
) {
157 gpu_resources
.non_cp_doorbells_start
=
158 adev
->doorbell_index
.first_non_cp
;
159 gpu_resources
.non_cp_doorbells_end
=
160 adev
->doorbell_index
.last_non_cp
;
163 kgd2kfd_device_init(adev
->kfd
.dev
, adev
->ddev
, &gpu_resources
);
167 void amdgpu_amdkfd_device_fini(struct amdgpu_device
*adev
)
170 kgd2kfd_device_exit(adev
->kfd
.dev
);
171 adev
->kfd
.dev
= NULL
;
175 void amdgpu_amdkfd_interrupt(struct amdgpu_device
*adev
,
176 const void *ih_ring_entry
)
179 kgd2kfd_interrupt(adev
->kfd
.dev
, ih_ring_entry
);
182 void amdgpu_amdkfd_suspend(struct amdgpu_device
*adev
, bool run_pm
)
185 kgd2kfd_suspend(adev
->kfd
.dev
, run_pm
);
188 int amdgpu_amdkfd_resume(struct amdgpu_device
*adev
, bool run_pm
)
193 r
= kgd2kfd_resume(adev
->kfd
.dev
, run_pm
);
198 int amdgpu_amdkfd_pre_reset(struct amdgpu_device
*adev
)
203 r
= kgd2kfd_pre_reset(adev
->kfd
.dev
);
208 int amdgpu_amdkfd_post_reset(struct amdgpu_device
*adev
)
213 r
= kgd2kfd_post_reset(adev
->kfd
.dev
);
218 void amdgpu_amdkfd_gpu_reset(struct kgd_dev
*kgd
)
220 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
222 if (amdgpu_device_should_recover_gpu(adev
))
223 amdgpu_device_gpu_recover(adev
, NULL
);
226 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev
*kgd
, size_t size
,
227 void **mem_obj
, uint64_t *gpu_addr
,
228 void **cpu_ptr
, bool cp_mqd_gfx9
)
230 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
231 struct amdgpu_bo
*bo
= NULL
;
232 struct amdgpu_bo_param bp
;
234 void *cpu_ptr_tmp
= NULL
;
236 memset(&bp
, 0, sizeof(bp
));
238 bp
.byte_align
= PAGE_SIZE
;
239 bp
.domain
= AMDGPU_GEM_DOMAIN_GTT
;
240 bp
.flags
= AMDGPU_GEM_CREATE_CPU_GTT_USWC
;
241 bp
.type
= ttm_bo_type_kernel
;
245 bp
.flags
|= AMDGPU_GEM_CREATE_CP_MQD_GFX9
;
247 r
= amdgpu_bo_create(adev
, &bp
, &bo
);
250 "failed to allocate BO for amdkfd (%d)\n", r
);
255 r
= amdgpu_bo_reserve(bo
, true);
257 dev_err(adev
->dev
, "(%d) failed to reserve bo for amdkfd\n", r
);
258 goto allocate_mem_reserve_bo_failed
;
261 r
= amdgpu_bo_pin(bo
, AMDGPU_GEM_DOMAIN_GTT
);
263 dev_err(adev
->dev
, "(%d) failed to pin bo for amdkfd\n", r
);
264 goto allocate_mem_pin_bo_failed
;
267 r
= amdgpu_ttm_alloc_gart(&bo
->tbo
);
269 dev_err(adev
->dev
, "%p bind failed\n", bo
);
270 goto allocate_mem_kmap_bo_failed
;
273 r
= amdgpu_bo_kmap(bo
, &cpu_ptr_tmp
);
276 "(%d) failed to map bo to kernel for amdkfd\n", r
);
277 goto allocate_mem_kmap_bo_failed
;
281 *gpu_addr
= amdgpu_bo_gpu_offset(bo
);
282 *cpu_ptr
= cpu_ptr_tmp
;
284 amdgpu_bo_unreserve(bo
);
288 allocate_mem_kmap_bo_failed
:
290 allocate_mem_pin_bo_failed
:
291 amdgpu_bo_unreserve(bo
);
292 allocate_mem_reserve_bo_failed
:
293 amdgpu_bo_unref(&bo
);
298 void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev
*kgd
, void *mem_obj
)
300 struct amdgpu_bo
*bo
= (struct amdgpu_bo
*) mem_obj
;
302 amdgpu_bo_reserve(bo
, true);
303 amdgpu_bo_kunmap(bo
);
305 amdgpu_bo_unreserve(bo
);
306 amdgpu_bo_unref(&(bo
));
309 int amdgpu_amdkfd_alloc_gws(struct kgd_dev
*kgd
, size_t size
,
312 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
313 struct amdgpu_bo
*bo
= NULL
;
314 struct amdgpu_bo_param bp
;
317 memset(&bp
, 0, sizeof(bp
));
320 bp
.domain
= AMDGPU_GEM_DOMAIN_GWS
;
321 bp
.flags
= AMDGPU_GEM_CREATE_NO_CPU_ACCESS
;
322 bp
.type
= ttm_bo_type_device
;
325 r
= amdgpu_bo_create(adev
, &bp
, &bo
);
328 "failed to allocate gws BO for amdkfd (%d)\n", r
);
/* Drop the reference to a GWS BO allocated by amdgpu_amdkfd_alloc_gws(). */
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}
343 uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev
*kgd
,
344 enum kgd_engine_type type
)
346 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
350 return adev
->gfx
.pfp_fw_version
;
353 return adev
->gfx
.me_fw_version
;
356 return adev
->gfx
.ce_fw_version
;
358 case KGD_ENGINE_MEC1
:
359 return adev
->gfx
.mec_fw_version
;
361 case KGD_ENGINE_MEC2
:
362 return adev
->gfx
.mec2_fw_version
;
365 return adev
->gfx
.rlc_fw_version
;
367 case KGD_ENGINE_SDMA1
:
368 return adev
->sdma
.instance
[0].fw_version
;
370 case KGD_ENGINE_SDMA2
:
371 return adev
->sdma
.instance
[1].fw_version
;
380 void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev
*kgd
,
381 struct kfd_local_mem_info
*mem_info
)
383 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
384 uint64_t address_mask
= adev
->dev
->dma_mask
? ~*adev
->dev
->dma_mask
:
386 resource_size_t aper_limit
= adev
->gmc
.aper_base
+ adev
->gmc
.aper_size
;
388 memset(mem_info
, 0, sizeof(*mem_info
));
389 if (!(adev
->gmc
.aper_base
& address_mask
|| aper_limit
& address_mask
)) {
390 mem_info
->local_mem_size_public
= adev
->gmc
.visible_vram_size
;
391 mem_info
->local_mem_size_private
= adev
->gmc
.real_vram_size
-
392 adev
->gmc
.visible_vram_size
;
394 mem_info
->local_mem_size_public
= 0;
395 mem_info
->local_mem_size_private
= adev
->gmc
.real_vram_size
;
397 mem_info
->vram_width
= adev
->gmc
.vram_width
;
399 pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
400 &adev
->gmc
.aper_base
, &aper_limit
,
401 mem_info
->local_mem_size_public
,
402 mem_info
->local_mem_size_private
);
404 if (amdgpu_sriov_vf(adev
))
405 mem_info
->mem_clk_max
= adev
->clock
.default_mclk
/ 100;
406 else if (adev
->pm
.dpm_enabled
) {
407 if (amdgpu_emu_mode
== 1)
408 mem_info
->mem_clk_max
= 0;
410 mem_info
->mem_clk_max
= amdgpu_dpm_get_mclk(adev
, false) / 100;
412 mem_info
->mem_clk_max
= 100;
415 uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev
*kgd
)
417 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
419 if (adev
->gfx
.funcs
->get_gpu_clock_counter
)
420 return adev
->gfx
.funcs
->get_gpu_clock_counter(adev
);
424 uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev
*kgd
)
426 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
428 /* the sclk is in quantas of 10kHz */
429 if (amdgpu_sriov_vf(adev
))
430 return adev
->clock
.default_sclk
/ 100;
431 else if (adev
->pm
.dpm_enabled
)
432 return amdgpu_dpm_get_sclk(adev
, false) / 100;
437 void amdgpu_amdkfd_get_cu_info(struct kgd_dev
*kgd
, struct kfd_cu_info
*cu_info
)
439 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
440 struct amdgpu_cu_info acu_info
= adev
->gfx
.cu_info
;
442 memset(cu_info
, 0, sizeof(*cu_info
));
443 if (sizeof(cu_info
->cu_bitmap
) != sizeof(acu_info
.bitmap
))
446 cu_info
->cu_active_number
= acu_info
.number
;
447 cu_info
->cu_ao_mask
= acu_info
.ao_cu_mask
;
448 memcpy(&cu_info
->cu_bitmap
[0], &acu_info
.bitmap
[0],
449 sizeof(acu_info
.bitmap
));
450 cu_info
->num_shader_engines
= adev
->gfx
.config
.max_shader_engines
;
451 cu_info
->num_shader_arrays_per_engine
= adev
->gfx
.config
.max_sh_per_se
;
452 cu_info
->num_cu_per_sh
= adev
->gfx
.config
.max_cu_per_sh
;
453 cu_info
->simd_per_cu
= acu_info
.simd_per_cu
;
454 cu_info
->max_waves_per_simd
= acu_info
.max_waves_per_simd
;
455 cu_info
->wave_front_size
= acu_info
.wave_front_size
;
456 cu_info
->max_scratch_slots_per_cu
= acu_info
.max_scratch_slots_per_cu
;
457 cu_info
->lds_size
= acu_info
.lds_size
;
460 int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev
*kgd
, int dma_buf_fd
,
461 struct kgd_dev
**dma_buf_kgd
,
462 uint64_t *bo_size
, void *metadata_buffer
,
463 size_t buffer_size
, uint32_t *metadata_size
,
466 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
467 struct dma_buf
*dma_buf
;
468 struct drm_gem_object
*obj
;
469 struct amdgpu_bo
*bo
;
470 uint64_t metadata_flags
;
473 dma_buf
= dma_buf_get(dma_buf_fd
);
475 return PTR_ERR(dma_buf
);
477 if (dma_buf
->ops
!= &amdgpu_dmabuf_ops
)
478 /* Can't handle non-graphics buffers */
482 if (obj
->dev
->driver
!= adev
->ddev
->driver
)
483 /* Can't handle buffers from different drivers */
486 adev
= obj
->dev
->dev_private
;
487 bo
= gem_to_amdgpu_bo(obj
);
488 if (!(bo
->preferred_domains
& (AMDGPU_GEM_DOMAIN_VRAM
|
489 AMDGPU_GEM_DOMAIN_GTT
)))
490 /* Only VRAM and GTT BOs are supported */
495 *dma_buf_kgd
= (struct kgd_dev
*)adev
;
497 *bo_size
= amdgpu_bo_size(bo
);
499 *metadata_size
= bo
->metadata_size
;
501 r
= amdgpu_bo_get_metadata(bo
, metadata_buffer
, buffer_size
,
502 metadata_size
, &metadata_flags
);
504 *flags
= (bo
->preferred_domains
& AMDGPU_GEM_DOMAIN_VRAM
) ?
505 KFD_IOC_ALLOC_MEM_FLAGS_VRAM
506 : KFD_IOC_ALLOC_MEM_FLAGS_GTT
;
508 if (bo
->flags
& AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
)
509 *flags
|= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC
;
513 dma_buf_put(dma_buf
);
517 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev
*kgd
)
519 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
521 return amdgpu_vram_mgr_usage(&adev
->mman
.bdev
.man
[TTM_PL_VRAM
]);
524 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev
*kgd
)
526 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
528 return adev
->gmc
.xgmi
.hive_id
;
531 uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev
*kgd
)
533 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
535 return adev
->unique_id
;
538 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev
*dst
, struct kgd_dev
*src
)
540 struct amdgpu_device
*peer_adev
= (struct amdgpu_device
*)src
;
541 struct amdgpu_device
*adev
= (struct amdgpu_device
*)dst
;
542 int ret
= amdgpu_xgmi_get_hops_count(adev
, peer_adev
);
545 DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
546 adev
->gmc
.xgmi
.physical_node_id
,
547 peer_adev
->gmc
.xgmi
.physical_node_id
, ret
);
553 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev
*kgd
)
555 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
557 return adev
->rmmio_remap
.bus_addr
;
560 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev
*kgd
)
562 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
564 return adev
->gds
.gws_size
;
567 uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev
*kgd
)
569 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
574 int amdgpu_amdkfd_submit_ib(struct kgd_dev
*kgd
, enum kgd_engine_type engine
,
575 uint32_t vmid
, uint64_t gpu_addr
,
576 uint32_t *ib_cmd
, uint32_t ib_len
)
578 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
579 struct amdgpu_job
*job
;
580 struct amdgpu_ib
*ib
;
581 struct amdgpu_ring
*ring
;
582 struct dma_fence
*f
= NULL
;
586 case KGD_ENGINE_MEC1
:
587 ring
= &adev
->gfx
.compute_ring
[0];
589 case KGD_ENGINE_SDMA1
:
590 ring
= &adev
->sdma
.instance
[0].ring
;
592 case KGD_ENGINE_SDMA2
:
593 ring
= &adev
->sdma
.instance
[1].ring
;
596 pr_err("Invalid engine in IB submission: %d\n", engine
);
601 ret
= amdgpu_job_alloc(adev
, 1, &job
, NULL
);
606 memset(ib
, 0, sizeof(struct amdgpu_ib
));
608 ib
->gpu_addr
= gpu_addr
;
610 ib
->length_dw
= ib_len
;
611 /* This works for NO_HWS. TODO: need to handle without knowing VMID */
614 ret
= amdgpu_ib_schedule(ring
, 1, ib
, job
, &f
);
616 DRM_ERROR("amdgpu: failed to schedule IB.\n");
620 ret
= dma_fence_wait(f
, false);
624 amdgpu_job_free(job
);
629 void amdgpu_amdkfd_set_compute_idle(struct kgd_dev
*kgd
, bool idle
)
631 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
633 amdgpu_dpm_switch_power_profile(adev
,
634 PP_SMC_POWER_PROFILE_COMPUTE
,
638 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device
*adev
, u32 vmid
)
641 return vmid
>= adev
->vm_manager
.first_kfd_vmid
;
646 int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev
*kgd
, uint16_t vmid
)
648 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
650 if (adev
->family
== AMDGPU_FAMILY_AI
) {
653 for (i
= 0; i
< adev
->num_vmhubs
; i
++)
654 amdgpu_gmc_flush_gpu_tlb(adev
, vmid
, i
, 0);
656 amdgpu_gmc_flush_gpu_tlb(adev
, vmid
, AMDGPU_GFXHUB_0
, 0);
662 int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev
*kgd
, uint16_t pasid
)
664 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
665 const uint32_t flush_type
= 0;
666 bool all_hub
= false;
668 if (adev
->family
== AMDGPU_FAMILY_AI
)
671 return amdgpu_gmc_flush_gpu_tlb_pasid(adev
, pasid
, flush_type
, all_hub
);
674 bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev
*kgd
)
676 struct amdgpu_device
*adev
= (struct amdgpu_device
*)kgd
;
678 return adev
->have_atomics_support
;
681 #ifndef CONFIG_HSA_AMD
682 bool amdkfd_fence_check_mm(struct dma_fence
*f
, struct mm_struct
*mm
)
687 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo
*bo
)
691 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo
*bo
)
696 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device
*adev
,
697 struct amdgpu_vm
*vm
)
701 struct amdgpu_amdkfd_fence
*to_amdgpu_amdkfd_fence(struct dma_fence
*f
)
706 int amdgpu_amdkfd_evict_userptr(struct kgd_mem
*mem
, struct mm_struct
*mm
)
711 struct kfd_dev
*kgd2kfd_probe(struct kgd_dev
*kgd
, struct pci_dev
*pdev
,
712 unsigned int asic_type
, bool vf
)
717 bool kgd2kfd_device_init(struct kfd_dev
*kfd
,
718 struct drm_device
*ddev
,
719 const struct kgd2kfd_shared_resources
*gpu_resources
)
724 void kgd2kfd_device_exit(struct kfd_dev
*kfd
)
728 void kgd2kfd_exit(void)
732 void kgd2kfd_suspend(struct kfd_dev
*kfd
, bool run_pm
)
736 int kgd2kfd_resume(struct kfd_dev
*kfd
, bool run_pm
)
741 int kgd2kfd_pre_reset(struct kfd_dev
*kfd
)
746 int kgd2kfd_post_reset(struct kfd_dev
*kfd
)
751 void kgd2kfd_interrupt(struct kfd_dev
*kfd
, const void *ih_ring_entry
)
755 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev
*kfd
)