// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */

#include "habanalabs.h"

#include <linux/slab.h>
/*
 * hl_hw_queue_add_ptr - add a value to a pi or ci and handle wrap-around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. The result can reach up to twice the queue length before
 * it wraps around.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
	ptr += val;
	ptr &= ((HL_QUEUE_LENGTH << 1) - 1);

	return ptr;
}
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
	int delta = (q->pi - q->ci);

	if (delta >= 0)
		return (queue_len - delta);
	else
		return (abs(delta) - queue_len);
}
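/*
 * Example of the arithmetic above: pi and ci run over [0, 2 * queue_len), so
 * an empty queue (pi == ci) can be told apart from a full one
 * (pi == ci + queue_len modulo 2 * queue_len) without sacrificing a slot.
 * With queue_len = 256, pi = 300 and ci = 50 give delta = 250 and 6 free
 * slots; once pi wraps, pi = 10 and ci = 300 give delta = -290 and
 * 290 - 256 = 34 free slots. (The numbers are only an illustration.)
 */

/*
 * hl_int_hw_queue_update_ci - update ci of internal queues for a given CS
 *
 * @cs: pointer to the CS; the ci of every internal queue is advanced by the
 *      number of jobs this CS placed on it
 */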
void hl_int_hw_queue_update_ci(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_hw_queue *q;
	int i;

	hdev->asic_funcs->hw_queues_lock(hdev);

	q = &hdev->kernel_queues[0];
	for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
		if (q->queue_type == QUEUE_TYPE_INT) {
			q->ci += cs->jobs_in_queue_cnt[i];
			q->ci &= ((q->int_queue_len << 1) - 1);
		}
	}

	hdev->asic_funcs->hw_queues_unlock(hdev);
}
/*
 * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
 *
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer (bus address of the CB)
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device-specific
 * function to set the pi (and ring the doorbell).
 *
 * This function must be called when the scheduler mutex is taken.
 */
static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
				u32 ctl, u32 len, u64 ptr)
{
	struct hl_bd *bd;

	bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
	bd += hl_pi_2_offset(q->pi);
	bd->ctl = __cpu_to_le32(ctl);
	bd->len = __cpu_to_le32(len);
	bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address);

	q->pi = hl_queue_inc_ptr(q->pi);
	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
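/*
 * The BD is fully written into the slot that pi points at before pi is
 * advanced and published through the doorbell, so the device only learns
 * about the descriptor after it has been populated; any ordering/barriers
 * that the hardware needs are left to the ASIC-specific ring_doorbell()
 * implementation. The asic_prop.host_phys_base_address offset appears to
 * translate the host bus address into the device's view of host memory
 * (inferred from the field name, not stated in this file).
 */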
/*
 * ext_queue_sanity_checks - perform some sanity checks on an external queue
 *
 * @hdev             : pointer to hl_device structure
 * @q                : pointer to hl_hw_queue structure
 * @num_of_entries   : how many entries to check for space
 * @reserve_cq_entry : whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
				struct hl_hw_queue *q, int num_of_entries,
				bool reserve_cq_entry)
{
	atomic_t *free_slots =
			&hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	if (reserve_cq_entry) {
		/*
		 * Check we have enough space in the completion queue.
		 * Subtract num_of_entries from the counter; if the result is
		 * negative, the CQ is full so we can't submit a new CB
		 * because we won't get an ack on its completion.
		 * atomic_add_negative() returns true in that case, so the
		 * reservation is undone before bailing out.
		 */
		if (atomic_add_negative(num_of_entries * -1, free_slots)) {
			dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
				num_of_entries, q->hw_queue_id);
			atomic_add(num_of_entries, free_slots);
			return -EAGAIN;
		}
	}

	return 0;
}
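/*
 * When ext_queue_sanity_checks() succeeds with reserve_cq_entry == true, the
 * caller owns num_of_entries credits on the completion queue. If the
 * submission is aborted later on, those credits must be returned with
 * atomic_add() on free_slots_cnt, as done in the unroll_cq_resv path of
 * hl_hw_queue_schedule_cs() below.
 */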
/*
 * int_queue_sanity_checks - perform some sanity checks on an internal queue
 *
 * @hdev           : pointer to hl_device structure
 * @q              : pointer to hl_hw_queue structure
 * @num_of_entries : how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
					struct hl_hw_queue *q,
					int num_of_entries)
{
	int free_slots_cnt;

	/* Check we have enough space in the queue */
	free_slots_cnt = queue_free_slots(q, q->int_queue_len);

	if (free_slots_cnt < num_of_entries) {
		dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
			q->hw_queue_id, num_of_entries);
		return -EAGAIN;
	}

	return 0;
}
/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: the ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, which must NOT generate a completion entry.
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
				u32 cb_size, u64 cb_ptr)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
	int rc;

	/*
	 * The CPU queue is a synchronous queue with an effective depth of
	 * a single entry (although it is allocated with room for multiple
	 * entries). Therefore, there is a different lock, called
	 * send_cpu_message_lock, that serializes accesses to the CPU queue.
	 * As a result, we don't need to lock the access to the entire H/W
	 * queues module when submitting a JOB to the CPU queue.
	 */
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_lock(hdev);

	if (hdev->disabled) {
		rc = -EPERM;
		goto out;
	}

	rc = ext_queue_sanity_checks(hdev, q, 1, false);
	if (rc)
		goto out;

	ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
	if (q->queue_type != QUEUE_TYPE_CPU)
		hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}
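/*
 * Illustrative usage (the queue id and CB size below are arbitrary example
 * values, not taken from this file): an ASIC-specific caller that has a
 * pre-built CB and does not want a completion entry could do
 *
 *	rc = hl_hw_queue_send_cb_no_cmpl(hdev, 0, 64, cb->bus_address);
 *	if (rc)
 *		dev_err(hdev->dev, "failed to send CB\n");
 */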
/*
 * ext_hw_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken.
 */
static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_cq_entry cq_pkt;
	struct hl_cq *cq;
	struct hl_cb *cb;
	u64 cq_addr;
	u32 ctl;
	u32 len;
	u64 ptr;

	/*
	 * Update the JOB ID inside the BD CTL so the device would know what
	 * to write in the completion queue
	 */
	ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

	cb = job->patched_cb;
	len = job->job_cb_size;
	ptr = cb->bus_address;

	cq_pkt.data = __cpu_to_le32(
			((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
				& CQ_ENTRY_SHADOW_INDEX_MASK) |
			(1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
			(1 << CQ_ENTRY_READY_SHIFT));

	/*
	 * No need to protect pi_offset because scheduling to the
	 * H/W queues is done under the scheduler mutex
	 *
	 * No need to check if CQ is full because it was already
	 * checked in ext_queue_sanity_checks
	 */
	cq = &hdev->completion_queue[q->hw_queue_id];
	cq_addr = cq->bus_address + hdev->asic_prop.host_phys_base_address;
	cq_addr += cq->pi * sizeof(struct hl_cq_entry);

	hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
						cq_addr,
						__le32_to_cpu(cq_pkt.data),
						q->hw_queue_id);

	q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

	cq->pi = hl_cq_inc_ptr(cq->pi);

	ext_queue_submit_bd(hdev, q, ctl, len, ptr);
}
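/*
 * The job pointer is kept in q->shadow_queue at the same offset that is
 * encoded as the shadow index in both the BD control word and the CQ packet
 * above, which allows the completion path (outside this file) to map a CQ
 * entry back to the job that produced it. cq->pi is advanced here because
 * one CQ entry was reserved for this BD in ext_queue_sanity_checks().
 */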
/*
 * int_hw_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken.
 */
static void int_hw_queue_schedule_job(struct hl_cs_job *job)
{
	struct hl_device *hdev = job->cs->ctx->hdev;
	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
	struct hl_bd bd;
	u64 *pi, *pbd = (u64 *) &bd;

	bd.ctl = 0;
	bd.len = __cpu_to_le32(job->job_cb_size);
	bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);

	pi = (u64 *) (uintptr_t) (q->kernel_address +
		((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));

	/* Copy the BD (two 64-bit words) into the queue slot pointed by pi */
	pi[0] = pbd[0];
	pi[1] = pbd[1];

	q->pi++;
	q->pi &= ((q->int_queue_len << 1) - 1);

	/* Flush PQ entry write. Relevant only for specific ASICs */
	hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);

	hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
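/*
 * Unlike the external-queue path above, an internal-queue BD is written by
 * the host directly into the queue memory, no completion-queue packet is
 * prepared and no shadow-queue entry is kept; the consumer index of internal
 * queues is instead advanced by hl_int_hw_queue_update_ci().
 */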
/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 *
 * @cs: pointer to the CS
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_hw_queue *q;
	int rc = 0, i, cq_cnt;

	hdev->asic_funcs->hw_queues_lock(hdev);

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_err(hdev->dev,
			"device is disabled or in reset, CS rejected!\n");
		rc = -EPERM;
		goto out;
	}

	q = &hdev->kernel_queues[0];
	/* This loop assumes all external queues are consecutive */
	for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
		if (q->queue_type == QUEUE_TYPE_EXT) {
			if (cs->jobs_in_queue_cnt[i]) {
				rc = ext_queue_sanity_checks(hdev, q,
					cs->jobs_in_queue_cnt[i], true);
				if (rc)
					goto unroll_cq_resv;
				cq_cnt++;
			}
		} else if (q->queue_type == QUEUE_TYPE_INT) {
			if (cs->jobs_in_queue_cnt[i]) {
				rc = int_queue_sanity_checks(hdev, q,
					cs->jobs_in_queue_cnt[i]);
				if (rc)
					goto unroll_cq_resv;
			}
		}
	}

	spin_lock(&hdev->hw_queues_mirror_lock);
	list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);

	/* Queue TDR if the CS is the first entry and if timeout is wanted */
	if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
			(list_first_entry(&hdev->hw_queues_mirror_list,
					struct hl_cs, mirror_node) == cs)) {
		cs->tdr_active = true;
		schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
		spin_unlock(&hdev->hw_queues_mirror_lock);
	} else {
		spin_unlock(&hdev->hw_queues_mirror_lock);
	}

	atomic_inc(&hdev->cs_active_cnt);

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		if (job->ext_queue)
			ext_hw_queue_schedule_job(job);
		else
			int_hw_queue_schedule_job(job);

	cs->submitted = true;

	goto out;

unroll_cq_resv:
	/* This loop assumes all external queues are consecutive */
	q = &hdev->kernel_queues[0];
	for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
		if ((q->queue_type == QUEUE_TYPE_EXT) &&
				(cs->jobs_in_queue_cnt[i])) {
			atomic_t *free_slots =
				&hdev->completion_queue[i].free_slots_cnt;
			atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
			cq_cnt--;
		}
	}

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);

	return rc;
}
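/*
 * Locking summary for hl_hw_queue_schedule_cs(): hw_queues_lock is held for
 * the whole function, from the sanity checks through scheduling the
 * individual jobs, and hw_queues_mirror_lock is only ever taken nested
 * inside it. Every exit path, including the CQ-credit unroll, releases the
 * lock through the out label.
 */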
/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: the ID of the queue whose ci should be incremented
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

	q->ci = hl_queue_inc_ptr(q->ci);
}
static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
					struct hl_hw_queue *q)
{
	void *p;
	int rc;

	p = hdev->asic_funcs->dma_alloc_coherent(hdev,
				HL_QUEUE_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->kernel_address = (u64) (uintptr_t) p;

	q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
					sizeof(*q->shadow_queue),
					GFP_KERNEL);
	if (!q->shadow_queue) {
		dev_err(hdev->dev,
			"Failed to allocate shadow queue for H/W queue %d\n",
			q->hw_queue_id);
		rc = -ENOMEM;
		goto free_queue;
	}

	/* Make sure read/write pointers are initialized to start of queue */
	q->ci = 0;
	q->pi = 0;

	return 0;

free_queue:
	hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
			(void *) (uintptr_t) q->kernel_address, q->bus_address);

	return rc;
}
static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	void *p;

	p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
					&q->bus_address, &q->int_queue_len);
	if (!p) {
		dev_err(hdev->dev,
			"Failed to get base address for internal queue %d\n",
			q->hw_queue_id);
		return -EFAULT;
	}

	q->kernel_address = (u64) (uintptr_t) p;
	q->pi = 0;
	q->ci = 0;

	return 0;
}
static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_hw_queue_init(hdev, q);
}

static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
	return ext_and_cpu_hw_queue_init(hdev, q);
}
/*
 * hw_queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
				u32 hw_queue_id)
{
	int rc;

	BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);

	q->hw_queue_id = hw_queue_id;

	switch (q->queue_type) {
	case QUEUE_TYPE_EXT:
		rc = ext_hw_queue_init(hdev, q);
		break;

	case QUEUE_TYPE_INT:
		rc = int_hw_queue_init(hdev, q);
		break;

	case QUEUE_TYPE_CPU:
		rc = cpu_hw_queue_init(hdev, q);
		break;

	default:
		dev_crit(hdev->dev, "wrong queue type %d during init\n",
			q->queue_type);
		rc = -EINVAL;
		break;
	}

	return rc;
}
/*
 * hw_queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
	/*
	 * If we arrived here, there are no jobs waiting on this queue
	 * so we can safely remove it.
	 * This is because this function can only be called when:
	 * 1. Either a context is deleted, which only can occur if all its
	 *    jobs were finished
	 * 2. A context wasn't able to be created due to failure or timeout,
	 *    which means there are no jobs on the queue yet
	 *
	 * The only exception are the queues of the kernel context, but
	 * if they are being destroyed, it means that the entire module is
	 * being removed. If the module is removed, it means there is no open
	 * user context. It also means that if a job was submitted by
	 * the kernel driver (e.g. context creation), the job itself was
	 * released by the kernel driver when a timeout occurred on its
	 * completion. Thus, we don't need to release it again.
	 */

	if (q->queue_type == QUEUE_TYPE_INT)
		return;

	kfree(q->shadow_queue);

	hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES,
			(void *) (uintptr_t) q->kernel_address, q->bus_address);
}
int hl_hw_queues_create(struct hl_device *hdev)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hl_hw_queue *q;
	int i, rc, q_ready_cnt;

	hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
				sizeof(*hdev->kernel_queues), GFP_KERNEL);

	if (!hdev->kernel_queues) {
		dev_err(hdev->dev, "Not enough memory for H/W queues\n");
		return -ENOMEM;
	}

	/* Initialize the H/W queues */
	for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
			i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {

		q->queue_type = asic->hw_queues_props[i].type;
		rc = hw_queue_init(hdev, q, i);
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize queue %d\n", i);
			goto release_queues;
		}
	}

	return 0;

release_queues:
	for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
		hw_queue_fini(hdev, q);

	kfree(hdev->kernel_queues);

	return rc;
}
void hl_hw_queues_destroy(struct hl_device *hdev)
{
	struct hl_hw_queue *q;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
		hw_queue_fini(hdev, q);

	kfree(hdev->kernel_queues);
}
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
	struct hl_hw_queue *q;
	int i;

	for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
		if ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU))
			continue;

		q->ci = 0;
		q->pi = 0;
	}
}