/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
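/*
 * SRBM register access below is banked by MEC/pipe/queue/VMID. lock_srbm()
 * selects the bank through mmSRBM_GFX_CNTL while holding adev->srbm_mutex;
 * unlock_srbm() restores the default bank and releases the mutex, so every
 * banked access must be bracketed by the two calls.
 */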
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
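/* Program the per-VMID shader memory apertures while banked to that VMID. */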
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
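/*
 * Bind a PASID to a hardware VMID in the ATC and mirror the mapping into the
 * IH block's VMID LUT; a pasid of 0 writes an entry without the VALID bit,
 * which clears the mapping.
 */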
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
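/* Enable time-stamp and opcode-error interrupts for one compute pipe. */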
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
			m->sdma_engine_id, m->sdma_queue_id, retval);

	return retval;
}
static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct vi_sdma_mqd *)mqd;
}
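/*
 * Load a compute MQD into its hardware queue descriptor (HQD): the MQD image
 * is copied register-by-register into the CP_HQD_* aperture, the doorbell is
 * enabled, the saved user write pointer is restored once, and the queue is
 * then activated.
 */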
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0*/
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_lock.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
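/*
 * Snapshot the static thread-management registers and the HQD register
 * aperture into a kmalloc'ed array of (byte offset, value) pairs; the caller
 * owns and frees the buffer.
 */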
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
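/*
 * Load an SDMA RLC queue from its MQD: disable the ring buffer, wait for the
 * context to go idle, program doorbell, pointers and ring base, then
 * re-enable the ring buffer.
 */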
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	unsigned long end_jiffies;
	uint32_t sdma_rlc_reg_offset;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdmax_rlcx_virtual_addr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
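/*
 * A queue slot is reported occupied when CP_HQD_ACTIVE is set and the
 * programmed ring base matches the queue address being probed.
 */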
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
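/*
 * Preempt or reset a compute queue: issue a dequeue request of the requested
 * type and poll CP_HQD_ACTIVE until the HQD goes idle or the caller's
 * timeout expires.
 */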
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}
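/* Return the PASID mapped to @vmid and whether that mapping is valid. */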
static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
			lower_32_bits(page_table_base));
}
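/* Entry points exported to the KFD driver for GFXv8 (VI) ASICs. */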
const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			get_atc_vmid_pasid_mapping_info,
	.set_scratch_backing_va = set_scratch_backing_va,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
};