/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdgpu.h"		/* struct amdgpu_device, RREG32/WREG32 */
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};
enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};
static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};
union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};
/*
 * Register access functions
 */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
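
/*
 * SRBM indexing: mmSRBM_GFX_CNTL selects which MEC/pipe/queue/VMID
 * instance the subsequent banked compute register accesses are routed
 * to. adev->srbm_mutex serializes the select/access/deselect sequence
 * so that concurrent callers cannot clobber each other's selection.
 */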
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
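
/*
 * Each SDMA engine exposes an identical block of per-queue (RLC)
 * registers. A queue's MMIO offset is derived from its engine index
 * (blocks SDMA1_REGISTER_OFFSET apart) plus its queue index (blocks
 * KFD_CIK_SDMA_QUEUE_OFFSET apart), relative to the SDMA0 RLC0 base.
 */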
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}
static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
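
/*
 * The cik_mqd structure mirrors the HQD register block: starting at
 * cp_mqd_base_addr_lo, its members are laid out in the same order as
 * the registers from mmCP_MQD_BASE_ADDR through mmCP_MQD_CONTROL,
 * which is what lets kgd_hqd_load() below program the HQD with a
 * simple copy loop.
 */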
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
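
/*
 * SDMA queue load sequence: disable the ring buffer, wait for the RLC
 * context to drain to idle, clear the resume-context flag on the
 * owning engine, then program the doorbell, ring pointers and base
 * addresses before re-enabling the ring via the saved RB_CNTL value.
 */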
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
				m->sdma_rlc_doorbell);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdma_rlc_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
			m->sdma_rlc_rb_cntl);

	return 0;
}
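
/*
 * CP_HQD_PQ_BASE holds the queue base address in units of 256 bytes,
 * hence the queue_address >> 8 below before comparing against the
 * BASE/BASE_HI register pair of the currently selected HQD.
 */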
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
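
/*
 * Queue destruction: the doorbell is disarmed first, then a dequeue
 * request (drain or reset, per the caller's preempt type) is issued
 * and CP_HQD_ACTIVE is polled until the queue goes inactive or the
 * caller-supplied timeout expires.
 */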
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* wait for the RLC context to drain to idle */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	return 0;
}
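
/*
 * Address watch points: each of the MAX_WATCH_ADDRESSES TCP watch
 * points is programmed through its ADDR_H/ADDR_L/CNTL register
 * triplet from the watchRegs[] table above. A watch point is only
 * marked valid once all of its registers have been written.
 */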
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
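
/*
 * ATC_VMID<n>_PASID_MAPPING packs the PASID into the low bits of the
 * register and a valid flag into the top bit; the two helpers below
 * each extract one of those fields from the same register read.
 */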
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
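
/*
 * The scratch (private memory) backing address is a per-VMID setting,
 * so it is written through the SRBM-selected register bank for the
 * given vmid.
 */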
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
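
/*
 * All amdgpu firmware images share a common header layout, so the
 * version query below only needs the common part of the
 * amdgpu_firmware_header union regardless of which engine's image is
 * being inspected.
 */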
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits are in use */
	return hdr->common.ucode_version;
}