/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "kfd_kernel_queue.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
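
/*
 * For illustration: with CIK_HPD_EOP_BYTES_LOG2 == 11, each pipe gets a
 * 1 << 11 = 2048-byte EOP buffer. The arithmetic follows directly from the
 * defines above; keeping the size in log2 form is presumably because the
 * HW register that receives it expects a power-of-two encoding (an
 * assumption based on the naming, not stated in this file).
 */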
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
				dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}
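
/*
 * Worked example of the queue_bitmap indexing above (numbers assumed for
 * illustration): callers in this file pass mec = 0, so pipe_offset reduces
 * to pipe * num_queue_per_pipe. With num_queue_per_pipe = 8, pipe 2 starts
 * at bit 16, and bits 16..23 describe that pipe's eight queues.
 */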
unsigned int get_mec_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);

	return dqm->dev->shared_resources.num_mec;
}
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.num_pipe_per_mec;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* Kaveri kfd vmid's starts from vmid 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}
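
/*
 * Example of the bitmap round trip above (values assumed for illustration):
 * with CIK_VMID_NUM == 8 and vmid_bitmap == 0xFF, find_first_bit() returns
 * 0, the bit is cleared, and the queue runs under HW vmid
 * 0 + KFD_VMID_START_OFFSET, i.e. vmid 8 per the Kaveri comment above.
 * deallocate_vmid() below subtracts the offset and sets the bit again.
 */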
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return 0;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}
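
/*
 * Sketch of the round-robin behaviour above: if pipe 2 satisfied this
 * allocation, the next call starts scanning at pipe 3, so HQDs spread
 * horizontally across enabled pipes instead of filling one pipe before
 * moving to the next.
 */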
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe, q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type is invalid (%d)\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);

	if (retval != 0)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if ((q->properties.is_active) && (!prev_active))
		dqm->queue_count++;
	else if ((!q->properties.is_active) && (prev_active))
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}
static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	pr_debug("In func %s\n", __func__);

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
			(uint32_t)pasid |
			ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}
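
/*
 * Illustration of the mapping word built above: a non-zero pasid is passed
 * through with ATC_VMID_PASID_MAPPING_VALID or'ed in, while pasid 0 writes
 * a zero word, which is how deallocate_vmid() invalidates a vmid's mapping.
 * (The exact bit position of the valid flag is defined elsewhere and not
 * assumed here.)
 */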
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	BUG_ON(dqm == NULL);

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval = 0;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	return retval;
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_per_mec(dqm); i++)
		dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	init_scheduler(dqm);
	return 0;
}
static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is:    %d\n", q->sdma_id);
	pr_debug("    sdma queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("    sdma engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, NULL);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}
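
/*
 * Worked example of the sdma_id split above, assuming both
 * CIK_SDMA_QUEUES_PER_ENGINE and CIK_SDMA_ENGINE_NUM are 2 (two engines
 * with two queues each): sdma_id 3 maps to sdma_queue_id 3 % 2 = 1 on
 * sdma_engine_id 3 / 2 = 1, i.e. the second queue of the second engine.
 */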
/*
 * Device Queue Manager implementation for cp scheduler
 */
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating */
		if (WARN_ON(i > (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"      queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
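
/*
 * Example of the queue_mask construction above (numbers assumed for
 * illustration): with num_queue_per_pipe = 8 and num_pipe_per_mec = 4,
 * global queue ids 0..31 compute mec = 0, so a fully-enabled first MEC
 * yields queue_mask = 0xFFFFFFFF; ids 32 and up belong to higher MECs and
 * are skipped by the "mec > 0" check.
 */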
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
*dqm
,
783 struct kernel_queue
*kq
,
784 struct qcm_process_device
*qpd
)
786 BUG_ON(!dqm
|| !kq
|| !qpd
);
788 pr_debug("kfd: In func %s\n", __func__
);
790 mutex_lock(&dqm
->lock
);
791 if (dqm
->total_queue_count
>= max_num_of_queues_per_device
) {
792 pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
793 dqm
->total_queue_count
);
794 mutex_unlock(&dqm
->lock
);
799 * Unconditionally increment this counter, regardless of the queue's
800 * type or whether the queue is active.
802 dqm
->total_queue_count
++;
803 pr_debug("Total of %d queues are accountable so far\n",
804 dqm
->total_queue_count
);
806 list_add(&kq
->list
, &qpd
->priv_queue_list
);
808 qpd
->is_debug
= true;
809 execute_queues_cpsch(dqm
, false);
810 mutex_unlock(&dqm
->lock
);
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	/* here we actually preempt the DIQ */
	destroy_queues_cpsch(dqm, true, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}
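
/*
 * Note on the static counter above: sdma_id alternates 0, 1, 0, 1, ...
 * across all callers, so consecutive SDMA queues land on alternating
 * engines. The "% 2" presumably matches the two SDMA engines on this HW
 * generation (consistent with CIK_SDMA_ENGINE_NUM used elsewhere in this
 * file).
 */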
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}
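
/*
 * Usage sketch (mirrors how destroy_queues_cpsch() below drives this
 * helper): the caller primes the fence word, asks the packet manager to
 * write KFD_FENCE_COMPLETED, then polls until the write lands:
 *
 *	*dqm->fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
 *				KFD_FENCE_COMPLETED);
 *	retval = amdkfd_fence_wait_timeout(dqm->fence_addr,
 *				KFD_FENCE_COMPLETED,
 *				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
 *
 * A non-zero return means the fence never flipped and the preemption is
 * treated as failed.
 */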
static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock)
{
	int retval;
	enum kfd_preempt_type_filter preempt_type;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (!dqm->active_runlist)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	preempt_type = preempt_static_queues ?
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			preempt_type, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval != 0) {
		pdd = kfd_get_process_device_data(dqm->dev,
				kfd_get_process(current));
		pdd->reset_wavefronts = true;
		goto out;
	}
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	BUG_ON(!dqm || !qpd || !q);

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return retval;

out:
	mutex_unlock(&dqm->lock);
	return false;
}
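
/*
 * Worked example of the APE1 conversion above (hypothetical addresses):
 * base = 0x0000700000000000 with size = 0x10000 gives
 * limit = 0x000070000000FFFF. Both pass the APE1_FIXED_BITS_MASK checks,
 * so the registers receive base >> 16 = 0x70000000 and
 * limit >> 16 = 0x70000000 (the limit's low 16 bits are implied 0xFFFF
 * per the FSA64 format described in the comment).
 */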
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	pr_debug("kfd: loading device queue manager\n");

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	}

	if (dqm->ops.initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}