/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;

	return KFD_MQD_TYPE_CP;
}

unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.first_compute_pipe;
}

unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.compute_pipe_count;
}

static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

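/*
 * VMID handling for the non-HWS path: dqm->vmid_bitmap tracks which of the
 * CIK_VMID_NUM VMIDs owned by KFD are free. The bit index maps to the
 * hardware VMID as (bit + KFD_VMID_START_OFFSET); the PASID<->VMID mapping
 * is then programmed through kfd2kgd and the process' SH_MEM settings are
 * applied for that VMID.
 */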
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* On Kaveri, KFD VMIDs start from VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return 0;
}

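/*
 * HQD allocation: each compute pipe has a bitmap of free queue slots in
 * dqm->allocated_queues[]. The search starts at next_pipe_to_allocate and
 * walks the pipes round-robin, taking the first free slot it finds, so
 * queues are spread horizontally across pipes rather than filling one
 * pipe first.
 */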
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set = false;
	int pipe, bit, i;

	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
			pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe, q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type is invalid (%d)\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);
	if (retval != 0)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

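/*
 * update_queue() re-writes the MQD from q->properties and keeps
 * dqm->queue_count consistent when a queue toggles between active and
 * inactive. Under a HWS scheduling policy it also re-submits the runlist
 * so the CP scheduler sees the new state.
 */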
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active == true)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if ((q->properties.is_active == true) && (prev_active == false))
		dqm->queue_count++;
	else if ((q->properties.is_active == false) && (prev_active == true))
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	BUG_ON(!list_empty(&qpd->queues_list));

	pr_debug("kfd: In func %s\n", __func__);

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
			(uint32_t)pasid |
			ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

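/*
 * One CIK_HPD_EOP_BYTES (2 KB) EOP buffer is carved out of a single GTT
 * allocation for every pipe initialized here. The size handed to
 * init_pipeline is encoded as log2(bytes / 4) - 1, i.e.
 * CIK_HPD_EOP_BYTES_LOG2 - 3 = 8 for the 2 KB per-pipe buffer.
 */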
int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */

	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
					&dqm->pipeline_mem);
	if (err) {
		pr_err("kfd: error allocate vidmem num pipes: %d\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL) {
		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		inx = i + first_pipe;
		/*
		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
		 * space in GTT for pipelines we don't initialize
		 */
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* = log2(bytes/4)-1 */
		dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}

static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
	return retval;
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	init_scheduler(dqm);
	return 0;
}

static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

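/*
 * SDMA queues are tracked with dqm->sdma_bitmap, one bit per queue across
 * both SDMA engines. The allocated id is later split into an engine id and
 * a per-engine queue id when the queue is created (see
 * create_sdma_queue_nocpsch() below).
 */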
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
				struct qcm_process_device *qpd)
{
	uint32_t value = SDMA_ATC;

	if (q->process->is_32bit_user_mode)
		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
							qpd_to_pdd(qpd)));
	q->properties.sdma_vm_addr = value;
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is:     %d\n", q->sdma_id);
	pr_debug("     sdma queue id:  %d\n", q->properties.sdma_queue_id);
	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);

	init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, NULL);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

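/*
 * set_sched_resources() tells the CP scheduler which resources it owns:
 * the KFD VMIDs (VMID_PER_DEVICE bits shifted up by KFD_VMID_START_OFFSET)
 * and the HQD slots of the compute pipes reserved for scheduling, expressed
 * as a bitmask starting at the first KFD pipe.
 */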
static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"	vmid mask: 0x%8X\n"
			"	queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	destroy_queues_cpsch(dqm, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

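/* Round-robin newly created SDMA queues between the two SDMA engines. */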
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

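/*
 * Busy-wait until the scheduler writes the expected fence value to the GART
 * fence location (see destroy_queues_cpsch(), which primes *fence_addr with
 * KFD_FENCE_INIT and asks the packet manager for KFD_FENCE_COMPLETED), or
 * until the jiffies-based timeout expires.
 */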
static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
			sdma_engine);
}

static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (dqm->active_runlist == false)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

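/*
 * execute_queues_cpsch() rebuilds the runlist: it first preempts whatever is
 * currently mapped via destroy_queues_cpsch(), then, if there are active
 * queues and registered processes and no runlist is already active, sends a
 * fresh runlist through the packet manager and marks the runlist active.
 */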
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !qpd || !q);

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

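/*
 * Worked example of the encoding checked in set_cache_memory_policy() below:
 * a hypothetical user-mode base of 0x0000700000000000 with a 64 KB aperture
 * gives limit 0x000070000000FFFF; the base satisfies APE1_FIXED_BITS_MASK,
 * the limit's masked bits equal APE1_LIMIT_ALIGNMENT, and the registers are
 * programmed with base >> 16 and limit >> 16.
 */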
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return retval;

out:
	mutex_unlock(&dqm->lock);
	return false;
}

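/*
 * The dqm->ops table is filled according to the scheduling policy: the HWS
 * policies route queue management through the CP scheduler (the *_cpsch
 * entry points), while KFD_SCHED_POLICY_NO_HWS programs HQDs directly (the
 * *_nocpsch entry points). ASIC-specific hooks are then chosen by family.
 */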
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	pr_debug("kfd: loading device queue manager\n");

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	}

	if (dqm->ops.initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}