/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

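/*
 * Each of the four TCP watch points owns three consecutive entries in
 * watchRegs below (ADDR_HI, ADDR_LO, CNTL), indexed as
 * watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset.  The
 * TCP_WATCH_CNTL_BITS union that follows is a local view of the
 * TCP_WATCHn_CNTL bit layout (mask/vmid/atc/mode/valid) used when a watch
 * point is armed or disarmed.
 */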
static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

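/*
 * kfd2kgd is the function table that amdkfd uses to call back into the
 * amdgpu driver for this ASIC generation; amdkfd obtains it through
 * amdgpu_amdkfd_gfx_7_get_functions() below.
 */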
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

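/*
 * lock_srbm()/unlock_srbm() bracket every per-queue register access in this
 * file: the PIPEID/MEID/VMID/QUEUEID value written to mmSRBM_GFX_CNTL selects
 * which MEC, pipe, queue and VMID instance the subsequent CP/SH register
 * reads and writes are routed to, and srbm_mutex keeps that selection from
 * being changed concurrently.  acquire_queue()/release_queue() are thin
 * wrappers that derive the MEC and pipe from a flat pipe_id.
 */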
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

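/*
 * Each SDMA engine exposes an identical block of RLC queue registers; the
 * helper below turns the engine and queue IDs stored in the SDMA MQD into a
 * register offset relative to the SDMA0 RLC0 block, so the mmSDMA0_RLC0_*
 * register names can be used for every engine/queue pair.
 */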
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

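/*
 * kgd_hqd_load() programs a compute hardware queue descriptor (HQD) from the
 * memory queue descriptor (MQD) prepared by amdkfd: with the SRBM pointed at
 * the target pipe/queue it copies the MQD registers, enables the doorbell,
 * writes the user-space write pointer (read outside the SRBM lock to avoid a
 * lock-order problem with mmap_sem) and finally sets CP_HQD_ACTIVE.
 */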
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_wptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

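/*
 * kgd_hqd_sdma_load() restores an SDMA user queue: the ring buffer is first
 * disabled and the RLC context polled until it reports idle, RESUME_CTX is
 * cleared on the owning engine, then the doorbell, pointers and ring-buffer
 * base are programmed from the MQD before RB_CNTL is rewritten (which
 * re-enables the ring if the MQD has RB_ENABLE set).
 */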
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
		m->sdma_rlc_doorbell);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
		m->sdma_rlc_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
			m->sdma_rlc_rb_cntl);

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

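/*
 * kgd_hqd_destroy() preempts a compute queue: the doorbell is disabled, the
 * CP_HQD_IQ_TIMER workaround below makes sure it is safe to issue a dequeue
 * request, the request type derived from the KFD preempt type is written to
 * CP_HQD_DEQUEUE_REQUEST, and CP_HQD_ACTIVE is then polled until the queue
 * drains or the caller-supplied timeout (in milliseconds) expires.
 */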
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* Test the IDLE bit mask (not the bit's shift value) so the
		 * check matches the idle wait in kgd_hqd_sdma_load().
		 */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	return 0;
}

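/*
 * Address watch helpers: a watch point is always disarmed (valid = 0 in its
 * CNTL register) before its address registers are reprogrammed, and only
 * armed again once ADDR_HI and ADDR_LO hold the new values, so the hardware
 * never sees a half-updated watch point.
 */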
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

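/*
 * kgd_wave_control_execute() targets SQ_CMD at the shader engines selected by
 * gfx_index_val and then restores GRBM_GFX_INDEX to broadcast mode so later
 * register writes reach all SEs/SHs/instances again; grbm_idx_mutex
 * serializes this against other users of GRBM_GFX_INDEX.
 */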
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
						uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	/* Return the PASID field; masking with the VALID bit (bit 31) would
	 * always truncate to 0 in the uint16_t return value.
	 */
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

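/*
 * get_fw_version() reports the ucode_version field from the common header of
 * whichever firmware image is already loaded for the requested engine; it
 * returns 0 for unknown engines or when no firmware data is available.
 */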
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}