/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
        RESET_WAVES
};

enum {
        MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
        MAX_WATCH_ADDRESSES = 4
};

enum {
        ADDRESS_WATCH_REG_ADDR_HI = 0,
        ADDRESS_WATCH_REG_ADDR_LO,
        ADDRESS_WATCH_REG_CNTL,
        ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
        ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
        ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
        ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
        /* extend the mask to 26 bits to match the low address field */
        ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
        ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

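/* TCP watch-point register offsets, grouped per watch point in
 * (ADDR_H, ADDR_L, CNTL) order so an entry can be looked up as
 * watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset].
 */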
static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
        mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
        mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
        mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
        mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

union TCP_WATCH_CNTL_BITS {
        struct {
                uint32_t mask:24;
                uint32_t vmid:4;
                uint32_t atc:1;
                uint32_t mode:2;
                uint32_t valid:1;
        } bitfields;
        uint32_t u32All;
};

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}

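/* SRBM indexed access: lock_srbm() takes srbm_mutex and points
 * SRBM_GFX_CNTL at the requested MEC/pipe/queue/VMID so that the
 * following register accesses reach that instance; unlock_srbm()
 * clears the selection and releases the mutex.
 */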
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
        unlock_srbm(kgd);
}

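/* Program the per-VMID shared memory configuration and aperture
 * registers; the SRBM selection makes the SH_MEM_* writes land in the
 * given VMID's register bank.
 */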
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        lock_srbm(kgd, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
                                        unsigned int vmid)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished and the
         * SW cleared it. So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}

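/* Enable the timestamp and opcode-error interrupt sources for the
 * compute pipe that owns pipe_id.
 */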
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(kgd, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                        CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

        unlock_srbm(kgd);

        return 0;
}

static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                        m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

        pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
                        m->sdma_engine_id, m->sdma_queue_id, retval);

        return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
        return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
        return (struct cik_sdma_rlc_registers *)mqd;
}

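/* Load a compute MQD (memory queue descriptor) into the hardware queue
 * descriptor (HQD) of the selected pipe/queue and activate it. The
 * cik_mqd fields starting at cp_mqd_base_addr_lo mirror the
 * CP_MQD_BASE_ADDR..CP_MQD_CONTROL register range, so they can be
 * copied out with a single loop.
 */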
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr,
                        uint32_t wptr_shift, uint32_t wptr_mask,
                        struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_mqd *m;
        uint32_t *mqd_hqd;
        uint32_t reg, wptr_val, data;
        bool valid_wptr = false;

        m = get_mqd(mqd);

        acquire_queue(kgd, pipe_id, queue_id);

        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
        mqd_hqd = &m->cp_mqd_base_addr_lo;

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Copy userspace write pointer value to register.
         * Activate doorbell logic to monitor subsequent changes.
         */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                        CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

        /* read_user_ptr may take the mm->mmap_lock.
         * release srbm_mutex to avoid circular dependency between
         * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(kgd);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
        acquire_queue(kgd, pipe_id, queue_id);
        if (valid_wptr)
                WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
        WREG32(mmCP_HQD_ACTIVE, data);

        release_queue(kgd);

        return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {                             \
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
                (*dump)[i++][1] = RREG32(addr);         \
        } while (0)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        acquire_queue(kgd, pipe_id, queue_id);

        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
                DUMP_REG(reg);

        release_queue(kgd);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

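/* Load an SDMA RLC queue: disable the ring buffer, wait for the engine
 * to report idle, program doorbell and ring-buffer state from the MQD,
 * then re-enable the ring buffer.
 */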
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
                        uint32_t __user *wptr, struct mm_struct *mm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        unsigned long end_jiffies;
        uint32_t sdma_rlc_reg_offset;
        uint32_t data;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

        end_jiffies = msecs_to_jiffies(2000) + jiffies;
        while (true) {
                data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
                        ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
                        m->sdma_rlc_rb_rptr);

        if (read_user_wptr(mm, wptr, data))
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
        else
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
                        m->sdma_rlc_rb_rptr);

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
                        m->sdma_rlc_virtual_addr);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);

        data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
                        RB_ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

        return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
                             uint32_t engine_id, uint32_t queue_id,
                             uint32_t (**dump)[2], uint32_t *n_regs)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
                queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
        uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

        *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
             reg++)
                DUMP_REG(sdma_offset + reg);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
                                uint32_t pipe_id, uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(kgd, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
        return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}

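/* Preempt (destroy) an active HQD: request a pipe drain or wave reset
 * depending on reset_type, then poll CP_HQD_ACTIVE until the queue is
 * off the hardware or utimeout (in ms) expires.
 */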
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
                                enum kfd_preempt_type reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t temp;
        enum hqd_dequeue_request_type type;
        unsigned long flags, end_jiffies;
        int retry;

        if (adev->in_gpu_reset)
                return -EIO;

        acquire_queue(kgd, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                type = DRAIN_PIPE;
                break;
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
        default:
                type = DRAIN_PIPE;
                break;
        }

        /* Workaround: If IQ timer is active and the wait time is close to or
         * equal to 0, dequeueing is not safe. Wait until either the wait time
         * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
         * cleared before continuing. Also, ensure wait times are set to at
         * least 0x3.
         */
        local_irq_save(flags);
        preempt_disable();
        retry = 5000; /* wait for 500 usecs at maximum */
        while (true) {
                temp = RREG32(mmCP_HQD_IQ_TIMER);
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
                        pr_debug("HW is processing IQ\n");
                        goto loop;
                }
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
                                        == 3) /* SEM-rearm is safe */
                                break;
                        /* Wait time 3 is safe for CP, but our MMIO read/write
                         * time is close to 1 microsecond, so check for 10 to
                         * leave more buffer room
                         */
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
                                        >= 10)
                                break;
                        pr_debug("IQ timer is active\n");
                } else
                        break;
loop:
                if (!retry) {
                        pr_err("CP HQD IQ timer status time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        retry = 1000;
        while (true) {
                temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
                if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
                        break;
                pr_debug("Dequeue request is pending\n");

                if (!retry) {
                        pr_err("CP HQD dequeue request time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        local_irq_restore(flags);
        preempt_enable();

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("cp queue preemption time out\n");
                        release_queue(kgd);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        release_queue(kgd);
        return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
                                unsigned int utimeout)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t temp;
        unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
                SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

        m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

        return 0;
}

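/* Invalidate all address watch points by clearing the valid bit in each
 * TCP_WATCHn_CNTL register.
 */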
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        union TCP_WATCH_CNTL_BITS cntl;
        unsigned int i;

        cntl.u32All = 0;

        cntl.bitfields.valid = 0;
        cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
        cntl.bitfields.atc = 1;

        /* Turning off this address until we set all the registers */
        for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
                WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
                        ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        uint32_t cntl_val,
                                        uint32_t addr_hi,
                                        uint32_t addr_lo)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        union TCP_WATCH_CNTL_BITS cntl;

        cntl.u32All = cntl_val;

        /* Turning off this watch point until we set all the registers */
        cntl.bitfields.valid = 0;
        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

        /* Enable the watch point */
        cntl.bitfields.valid = 1;

        WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
                ADDRESS_WATCH_REG_CNTL], cntl.u32All);

        return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t data;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        /* Restore the GRBM_GFX_INDEX register */

        data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

        WREG32(mmGRBM_GFX_INDEX, data);

        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
                                        unsigned int watch_point_id,
                                        unsigned int reg_offset)
{
        return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

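/* Publish the scratch (private memory) backing address for a VMID via
 * the SH_HIDDEN_PRIVATE_BASE_VMID register.
 */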
static void set_scratch_backing_va(struct kgd_dev *kgd,
                                        uint64_t va, uint32_t vmid)
{
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

        lock_srbm(kgd, 0, 0, 0, vmid);
        WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
        unlock_srbm(kgd);
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                        uint64_t page_table_base)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);

        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID\n");
                return;
        }
        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
                lower_32_bits(page_table_base));
}

/**
 * read_vmid_from_vmfault_reg - read vmid from register
 *
 * @kgd: kgd device pointer
 *
 * Read the vmid from the VM protection fault status register (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

        return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}

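/* KFD -> KGD callback table for GFX7 (CIK) hardware. */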
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_dump = kgd_hqd_dump,
        .hqd_sdma_dump = kgd_hqd_sdma_dump,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .address_watch_disable = kgd_address_watch_disable,
        .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
        .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
        .set_scratch_backing_va = set_scratch_backing_va,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
        .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};