/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

/*
 * GPU GFX IP block helper functions.
 */

int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev, int mec,
			    int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}
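
/* The bit index packs (mec, pipe, queue) as
 * queue + num_queue_per_pipe * (pipe + num_pipe_per_mec * mec);
 * amdgpu_gfx_bit_to_queue() below is the exact inverse of this mapping.
 */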

void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
			     int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		/* ffs() is 1-based, convert to a 0-based register index */
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}
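
/* A set bit in scratch.free_mask marks a register as available:
 * amdgpu_gfx_scratch_get() above clears the bit when handing out a register
 * and amdgpu_gfx_scratch_free() below sets it again.
 */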

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;

		p = next + 1;
	}
}
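
/* Example: a disable_cu string of "1.0.3,1.0.4" (comma-separated se.sh.cu
 * triples) disables CUs 3 and 4 of SE 1, SH 0.
 */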

static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}
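
/* Worked example for the ownership policy below: with 8 queues per pipe and
 * 4 pipes per MEC, the multipipe policy claims queues 0-1 of every pipe in
 * MEC 0 (bits 0, 1, 8, 9, 16, 17, 24, 25), while the single-pipe policy
 * claims all eight queues of MEC 0, pipe 0 (bits 0-7).
 */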

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe, mec;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);

	/* policy for amdgpu compute queue ownership */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		queue = i % adev->gfx.mec.num_queue_per_pipe;
		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
			% adev->gfx.mec.num_pipe_per_mec;
		mec = (i / adev->gfx.mec.num_queue_per_pipe)
			/ adev->gfx.mec.num_pipe_per_mec;

		/* we've run out of HW */
		if (mec >= adev->gfx.mec.num_mec)
			break;

		if (multipipe_policy) {
			/* policy: amdgpu owns the first two queues of the first MEC */
			if (mec == 0 && queue < 2)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		} else {
			/* policy: amdgpu owns all queues in the first pipe */
			if (mec == 0 && pipe == 0)
				set_bit(i, adev->gfx.mec.queue_bitmap);
		}
	}

	/* update the number of active compute rings */
	adev->gfx.num_compute_rings =
		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* If you hit this case and edited the policy, you probably just
	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
}
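
/* The KIQ takes its queue by scanning the bitmap downward from the highest
 * bit, so it lands on a queue that the compute-ring policy above (which
 * fills from bit 0) has not claimed.
 */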

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	/* pre-decrement so the last bit tested is 0, not -1 */
	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
	if (r)
		return r;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
			      struct amdgpu_irq_src *irq)
{
	amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute queue */
int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
				   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for
		 * SR-IOV the VRAM domain is a must: otherwise the hypervisor's
		 * SAVE_VF fails after the driver is unloaded, because by then
		 * the MQD has been deallocated and unbound from the GART. To
		 * avoid diverging code paths, use the VRAM domain for the KIQ
		 * MQD on both SR-IOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}
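
/* Teardown mirrors amdgpu_gfx_compute_mqd_sw_init(): each KCQ's MQD and
 * backup are freed first, then the KIQ MQD, whose backup lives in the extra
 * slot at index AMDGPU_MAX_COMPUTE_RINGS.
 */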

void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx IP after gfx CG/PG is
 *    enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the
 *    request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not send an enable request without a prior
 *    disable request.
 */

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
		return;

	if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
	} else if (!enable && adev->gfx.gfx_off_state) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false))
			adev->gfx.gfx_off_state = false;
	}

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}