/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

struct cik_sdma_rlc_registers;

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
			unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

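/* SRBM_GFX_CNTL banks the per-instance CP registers: it selects which
 * MEC, pipe, queue and VMID all following register accesses refer to.
 * lock_srbm()/unlock_srbm() serialize that selection with
 * adev->srbm_mutex so concurrent callers cannot clobber each other's
 * register window.
 */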
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

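/* amdkfd hands us a pipe_id that counts compute pipes linearly across
 * all MECs, while SRBM wants a (mec, pipe) pair. The +1 below accounts
 * for ME 0 being the graphics engine: compute MEC IDs start at 1.
 */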
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	/* Per-queue SDMA register offsets are not computed on VI; callers
	 * end up addressing the SDMA0 RLC0 register block directly.
	 */
	return 0;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

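/* Loading an HQD makes a queue live on the selected pipe: the MQD
 * (memory queue descriptor) fields are copied one-for-one into the HQD
 * (hardware queue descriptor) register block, the doorbell is enabled,
 * and CP_HQD_ACTIVE is finally set so the CP starts fetching from the
 * queue's ring buffer.
 */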
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	if (read_user_wptr(mm, wptr, wptr_val))
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	/* No-op on VI; SDMA user queues are not loaded through this hook. */
	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

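/* Destroying an HQD preempts the queue: after the IQ-timer workaround
 * below has run, the requested dequeue type (drain or reset) is written
 * to CP_HQD_DEQUEUE_REQUEST and CP_HQD_ACTIVE is polled until the CP
 * deactivates the queue or the caller's timeout expires.
 */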
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

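/* SDMA queue teardown: clear RB_ENABLE in the ring-buffer control
 * register, poll the context status until the engine reports idle,
 * then reset the doorbell and the ring-buffer pointers.
 */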
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* Test the IDLE field of the context-status register (the
		 * mask, not its shift value).
		 */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	/* Return the PASID field, not the VALID bit. */
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

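/* Address watchpoints are not implemented for gfx v8; the two hooks
 * below and kgd_address_watch_get_offset() further down are stubs kept
 * to satisfy the kfd2kgd interface.
 */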
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore GRBM_GFX_INDEX to broadcast mode before unlocking. */
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

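/* Look up the microcode version for the requested engine from the
 * firmware images that amdgpu loaded at init time; only the low 12 bits
 * of ucode_version are in use.
 */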
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bit in use */
	return hdr->common.ucode_version;
}