// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_CS_FLAGS_TYPE_MASK	(HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
				HL_CS_FLAGS_COLLECTIVE_WAIT)

/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};

static void job_wq_completion(struct work_struct *work);
static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
				u64 timeout_us, u64 seq,
				enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);

static void hl_sob_reset(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_dbg(hdev->dev, "reset sob id %u\n", hw_sob->sob_id);

	hdev->asic_funcs->reset_sob(hdev, hw_sob);

	hw_sob->need_reset = false;
}

void hl_sob_reset_error(struct kref *ref)
{
	struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
							kref);
	struct hl_device *hdev = hw_sob->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
		hw_sob->q_idx, hw_sob->sob_id);
}

void hw_sob_put(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset);
}

static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset_error);
}

void hw_sob_get(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_get(&hw_sob->kref);
}

/**
 * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet
 * @sob_base: sob base id
 * @sob_mask: sob user mask, each bit represents a sob offset from sob base
 * @mask: generated mask
 *
 * Return: 0 if given parameters are valid
 */
int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{
	int i;

	if (sob_mask == 0)
		return -EINVAL;

	if (sob_mask == 0x1) {
		*mask = ~(1 << (sob_base & 0x7));
	} else {
		/* find msb in order to verify sob range is valid */
		for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--)
			if (BIT(i) & sob_mask)
				break;

		if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1))
			return -EINVAL;

		*mask = ~sob_mask;
	}

	return 0;
}
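
/*
 * Worked example for hl_gen_sob_mask() (illustrative only, assuming
 * HL_MAX_SOBS_PER_MONITOR == 8):
 * - sob_base = 10, sob_mask = 0x1:
 *   sob_base & 0x7 == 2, so *mask = ~(1 << 2) = 0xfb (the mask is
 *   inverted before being written to the monitor arm packet).
 * - sob_base = 10, sob_mask = 0x7 (three consecutive sobs):
 *   msb of sob_mask is bit 2, which fits in the 8 - 2 - 1 = 5 remaining
 *   offsets past the base, so *mask = ~0x7 = 0xf8.
 * - sob_base = 14, sob_mask = 0xf0 fails: msb is bit 7 while only
 *   8 - 6 - 1 = 1 offset remains past sob_base, hence -EINVAL.
 */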

static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence =
		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);

	kfree(hl_cs_cmpl);
}

void hl_fence_put(struct hl_fence *fence)
{
	if (IS_ERR_OR_NULL(fence))
		return;
	kref_put(&fence->refcount, hl_fence_release);
}

void hl_fences_put(struct hl_fence **fence, int len)
{
	int i;

	for (i = 0; i < len; i++, fence++)
		hl_fence_put(*fence);
}

void hl_fence_get(struct hl_fence *fence)
{
	if (fence)
		kref_get(&fence->refcount);
}

static void hl_fence_init(struct hl_fence *fence, u64 sequence)
{
	kref_init(&fence->refcount);
	fence->cs_sequence = sequence;
	fence->error = 0;
	fence->timestamp = ktime_set(0, 0);
	init_completion(&fence->completion);
}

void cs_get(struct hl_cs *cs)
{
	kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
	return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
	kref_put(&cs->refcount, cs_do_release);
}

static void cs_job_do_release(struct kref *ref)
{
	struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount);

	kfree(job);
}

static void cs_job_put(struct hl_cs_job *job)
{
	kref_put(&job->refcount, cs_job_do_release);
}

bool cs_needs_completion(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the last CS in sequence should
	 * get a completion, any non staged CS will always get a completion
	 */
	if (cs->staged_cs && !cs->staged_last)
		return false;

	return true;
}

bool cs_needs_timeout(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the first CS in sequence should
	 * get a timeout, any non staged CS will always get a timeout
	 */
	if (cs->staged_cs && !cs->staged_first)
		return false;

	return true;
}
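
/*
 * Summary of the two helpers above, for a staged submission of three CS's
 * (first, mid, last) versus a regular, non-staged CS (derived directly
 * from the checks above):
 *
 *            needs completion    needs timeout
 * first            no                 yes
 * mid              no                 no
 * last             yes                no
 * regular          yes                yes
 */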

static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/*
	 * Patched CB is created for external queues jobs, and for H/W queues
	 * jobs if the user CB was allocated by driver and MMU is disabled.
	 */
	return (job->queue_type == QUEUE_TYPE_EXT ||
			(job->queue_type == QUEUE_TYPE_HW &&
					job->is_kernel_allocated_cb &&
					!hdev->mmu_enable));
}

/*
 * cs_parser - parse the user command submission
 *
 * @hpriv	: pointer to the private data of the fd
 * @job	: pointer to the job that holds the command submission info
 *
 * The function parses the command submission of the user. It calls the
 * ASIC specific parser, which returns a list of memory blocks to send
 * to the device as different command buffers
 *
 */
static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_parser parser;
	int rc;

	parser.ctx_id = job->cs->ctx->asid;
	parser.cs_sequence = job->cs->sequence;
	parser.job_id = job->id;

	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.patched_cb = NULL;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.queue_type = job->queue_type;
	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
	job->patched_cb = NULL;
	parser.completion = cs_needs_completion(job->cs);

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);

	if (is_cb_patched(hdev, job)) {
		if (!rc) {
			job->patched_cb = parser.patched_cb;
			job->job_cb_size = parser.patched_cb_size;
			job->contains_dma_pkt = parser.contains_dma_pkt;
			atomic_inc(&job->patched_cb->cs_cnt);
		}

		/*
		 * Whether the parsing worked or not, we don't need the
		 * original CB anymore because it was already parsed and
		 * won't be accessed again for this CS
		 */
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
		job->user_cb = NULL;
	} else if (!rc) {
		job->job_cb_size = job->user_cb_size;
	}

	return rc;
}

static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_cs *cs = job->cs;

	if (is_cb_patched(hdev, job)) {
		hl_userptr_delete_list(hdev, &job->userptr_list);

		/*
		 * We might arrive here from rollback and patched CB wasn't
		 * created, so we need to check it's not NULL
		 */
		if (job->patched_cb) {
			atomic_dec(&job->patched_cb->cs_cnt);
			hl_cb_put(job->patched_cb);
		}
	}

	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
	 * enabled, the user CB isn't released in cs_parser() and thus should be
	 * released here.
	 * This is also true for INT queues jobs which were allocated by driver
	 */
	if (job->is_kernel_allocated_cb &&
		((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
				job->queue_type == QUEUE_TYPE_INT)) {
		atomic_dec(&job->user_cb->cs_cnt);
		hl_cb_put(job->user_cb);
	}

	/*
	 * This is the only place where there can be multiple threads
	 * modifying the list at the same time
	 */
	spin_lock(&cs->job_lock);
	list_del(&job->cs_node);
	spin_unlock(&cs->job_lock);

	hl_debugfs_remove_job(hdev, job);

	/* We decrement reference only for a CS that gets completion
	 * because the reference was incremented only for this kind of CS
	 * right before it was scheduled.
	 *
	 * In staged submission, only the last CS marked as 'staged_last'
	 * gets completion, hence its release function will be called from here.
	 * As for all the rest CS's in the staged submission which do not get
	 * completion, their CS reference will be decremented by the
	 * 'staged_last' CS during the CS release flow.
	 * All relevant PQ CI counters will be incremented during the CS release
	 * flow by calling 'hl_hw_queue_update_ci'.
	 */
	if (cs_needs_completion(cs) &&
		(job->queue_type == QUEUE_TYPE_EXT ||
			job->queue_type == QUEUE_TYPE_HW))
		cs_put(cs);

	cs_job_put(job);
}

/*
 * hl_staged_cs_find_first - locate the first CS in this staged submission
 *
 * @hdev: pointer to device structure
 * @cs_seq: staged submission sequence number
 *
 * @note: This function must be called under 'hdev->cs_mirror_lock'
 *
 * Find and return a CS pointer with the given sequence
 */
struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{
	struct hl_cs *cs;

	list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
		if (cs->staged_cs && cs->staged_first &&
				cs->sequence == cs_seq)
			return cs;

	return NULL;
}

/*
 * is_staged_cs_last_exists - returns true if the last CS in sequence exists
 *
 * @hdev: pointer to device structure
 * @cs: staged submission member
 *
 */
bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs *last_entry;

	last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs,
								staged_cs_node);

	if (last_entry->staged_last)
		return true;

	return false;
}

/*
 * staged_cs_get - get CS reference if this CS is a part of a staged CS
 *
 * @hdev: pointer to device structure
 * @cs: current CS
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets completion.
 */
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
	/* Only the last CS in this staged submission will get a completion.
	 * We must increment the reference for all other CS's in this
	 * staged submission.
	 * Once we get a completion we will release the whole staged submission.
	 */
	if (!cs->staged_last)
		cs_get(cs);
}

/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 * @cs: CS to put
 *
 * This function decrements a CS reference (for a non completion CS)
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	/* We release all CS's in a staged submission except the last
	 * CS which we have never incremented its reference.
	 */
	if (!cs_needs_completion(cs))
		cs_put(cs);
}

static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
	bool next_entry_found = false;
	struct hl_cs *next, *first_cs;

	if (!cs_needs_timeout(cs))
		return;

	spin_lock(&hdev->cs_mirror_lock);

	/* We need to handle tdr only once for the complete staged submission.
	 * Hence, we choose the CS that reaches this function first which is
	 * the CS marked as 'staged_last'.
	 * In case a single staged cs was submitted which has both first and
	 * last indications, then "cs_find_first" below will return NULL, since
	 * we removed the cs node from the list before getting here.
	 * In such cases just continue with the cs to cancel its TDR work.
	 */
	if (cs->staged_cs && cs->staged_last) {
		first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
		if (first_cs)
			cs = first_cs;
	}

	spin_unlock(&hdev->cs_mirror_lock);

	/* Don't cancel TDR in case this CS was timedout because we might be
	 * running from the TDR context
	 */
	if (cs && (cs->timedout ||
			hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
		return;

	if (cs && cs->tdr_active)
		cancel_delayed_work_sync(&cs->work_tdr);

	spin_lock(&hdev->cs_mirror_lock);

	/* queue TDR for next CS */
	list_for_each_entry(next, &hdev->cs_mirror_list, mirror_node)
		if (cs_needs_timeout(next)) {
			next_entry_found = true;
			break;
		}

	if (next_entry_found && !next->tdr_active) {
		next->tdr_active = true;
		schedule_delayed_work(&next->work_tdr, next->timeout_jiffies);
	}

	spin_unlock(&hdev->cs_mirror_lock);
}

/*
 * force_complete_multi_cs - complete all contexts that wait on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 */
static void force_complete_multi_cs(struct hl_device *hdev)
{
	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];

		spin_lock(&mcs_compl->lock);

		if (!mcs_compl->used) {
			spin_unlock(&mcs_compl->lock);
			continue;
		}

		/* when calling force complete no context should be waiting on
		 * multi-CS.
		 * We are calling the function as a protection for such case
		 * to free any pending context and print error message
		 */
		dev_err(hdev->dev,
			"multi-CS completion context %d still waiting when calling force completion\n",
			i);
		complete_all(&mcs_compl->completion);
		spin_unlock(&mcs_compl->lock);
	}
}

/*
 * complete_multi_cs - complete all waiting entities on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 * @cs: CS structure
 * The function signals a waiting entity whose stream masters overlap
 * with those of the completed CS.
 * For example:
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
 *   common stream master QID
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 4. send signal as stream
 *   master QID 4 is common
 */
static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_fence *fence = cs->fence;
	int i;

	/* in case of multi CS check for completion only for the first CS */
	if (cs->staged_cs && !cs->staged_first)
		return;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];
		if (!mcs_compl->used)
			continue;

		spin_lock(&mcs_compl->lock);

		/*
		 * complete if:
		 * 1. still waiting for completion
		 * 2. the completed CS has at least one overlapping stream
		 *    master with the stream masters in the completion
		 */
		if (mcs_compl->used &&
				(fence->stream_master_qid_map &
					mcs_compl->stream_master_qid_map)) {
			/* extract the timestamp only of first completed CS */
			if (!mcs_compl->timestamp)
				mcs_compl->timestamp =
						ktime_to_ns(fence->timestamp);
			complete_all(&mcs_compl->completion);
		}

		spin_unlock(&mcs_compl->lock);
	}
}

static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
					struct hl_cs *cs,
					struct hl_cs_compl *hl_cs_cmpl)
{
	/* Skip this handler if the cs wasn't submitted, to avoid putting
	 * the hw_sob twice, since this case is already handled at this point.
	 * Also skip if the hw_sob pointer wasn't set.
	 */
	if (!hl_cs_cmpl->hw_sob || !cs->submitted)
		return;

	spin_lock(&hl_cs_cmpl->lock);

	/*
	 * we get refcount upon reservation of signals or signal/wait cs for the
	 * hw_sob object, and need to put it when the first staged cs
	 * (which contains the encaps signals) or cs signal/wait is completed.
	 */
	if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
			(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
			(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
			(!!hl_cs_cmpl->encaps_signals)) {
		dev_dbg(hdev->dev,
				"CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
				hl_cs_cmpl->cs_seq,
				hl_cs_cmpl->type,
				hl_cs_cmpl->hw_sob->sob_id,
				hl_cs_cmpl->sob_val);

		hw_sob_put(hl_cs_cmpl->hw_sob);

		if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
			hdev->asic_funcs->reset_sob_group(hdev,
					hl_cs_cmpl->sob_group);
	}

	spin_unlock(&hl_cs_cmpl->lock);
}

static void cs_do_release(struct kref *ref)
{
	struct hl_cs *cs = container_of(ref, struct hl_cs, refcount);
	struct hl_device *hdev = cs->ctx->hdev;
	struct hl_cs_job *job, *tmp;
	struct hl_cs_compl *hl_cs_cmpl =
			container_of(cs->fence, struct hl_cs_compl, base_fence);

	cs->completed = true;

	/*
	 * Although reaching here means that all external jobs have finished
	 * (each of them took a refcnt to the CS), we still need to go over
	 * the internal jobs and complete them. Otherwise, we will have leaked
	 * memory and, what's worse, the CS object (and potentially the CTX
	 * object) could be released while a JOB still holds a pointer to them
	 * (but no reference).
	 */
	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);

	if (!cs->submitted) {
		/*
		 * In case the wait for signal CS was submitted, the fence put
		 * occurs in init_signal_wait_cs() or collective_wait_init_cs()
		 * right before hanging on the PQ.
		 */
		if (cs->type == CS_TYPE_WAIT ||
				cs->type == CS_TYPE_COLLECTIVE_WAIT)
			hl_fence_put(cs->signal_fence);

		goto out;
	}

	/* Need to update CI for all queue jobs that do not get completion */
	hl_hw_queue_update_ci(cs);

	/* remove CS from CS mirror list */
	spin_lock(&hdev->cs_mirror_lock);
	list_del_init(&cs->mirror_node);
	spin_unlock(&hdev->cs_mirror_lock);

	cs_handle_tdr(hdev, cs);

	if (cs->staged_cs) {
		/* the completion CS decrements reference for the entire
		 * staged submission
		 */
		if (cs->staged_last) {
			struct hl_cs *staged_cs, *tmp;

			list_for_each_entry_safe(staged_cs, tmp,
					&cs->staged_cs_node, staged_cs_node)
				staged_cs_put(hdev, staged_cs);
		}

		/* A staged CS will be a member in the list only after it
		 * was submitted. We used 'cs_mirror_lock' when inserting
		 * it to list so we will use it again when removing it
		 */
		if (cs->submitted) {
			spin_lock(&hdev->cs_mirror_lock);
			list_del(&cs->staged_cs_node);
			spin_unlock(&hdev->cs_mirror_lock);
		}

		/* decrement refcount to handle when first staged cs
		 * with encaps signals is completed.
		 */
		if (hl_cs_cmpl->encaps_signals)
			kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
					hl_encaps_handle_do_release);
	}

	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
			&& cs->encaps_signals)
		kref_put(&cs->encaps_sig_hdl->refcount,
					hl_encaps_handle_do_release);

out:
	/* Must be called before hl_ctx_put because inside we use ctx to get
	 * the device
	 */
	hl_debugfs_remove_cs(cs);

	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
		cs->fence->error = -EIO;
	else if (!cs->submitted)
		cs->fence->error = -EBUSY;

	if (unlikely(cs->skip_reset_on_timeout)) {
		dev_err(hdev->dev,
			"Command submission %llu completed after %llu (s)\n",
			cs->sequence,
			div_u64(jiffies - cs->submission_time_jiffies, HZ));
	}

	if (cs->timestamp)
		cs->fence->timestamp = ktime_get();
	complete_all(&cs->fence->completion);
	complete_multi_cs(hdev, cs);

	cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);

	hl_fence_put(cs->fence);

	kfree(cs->jobs_in_queue_cnt);
	kfree(cs);
}

static void cs_timedout(struct work_struct *work)
{
	struct hl_device *hdev;
	int rc;
	struct hl_cs *cs = container_of(work, struct hl_cs,
						work_tdr.work);
	bool skip_reset_on_timeout = cs->skip_reset_on_timeout;

	rc = cs_get_unless_zero(cs);
	if (!rc)
		return;

	if ((!cs->submitted) || (cs->completed)) {
		cs_put(cs);
		return;
	}

	/* Mark the CS is timed out so we won't try to cancel its TDR */
	if (likely(!skip_reset_on_timeout))
		cs->timedout = true;

	hdev = cs->ctx->hdev;

	switch (cs->type) {
	case CS_TYPE_SIGNAL:
		dev_err(hdev->dev,
			"Signal command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_WAIT:
		dev_err(hdev->dev,
			"Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	case CS_TYPE_COLLECTIVE_WAIT:
		dev_err(hdev->dev,
			"Collective Wait command submission %llu has not finished in time!\n",
			cs->sequence);
		break;

	default:
		dev_err(hdev->dev,
			"Command submission %llu has not finished in time!\n",
			cs->sequence);
		break;
	}

	rc = hl_state_dump(hdev);
	if (rc)
		dev_err(hdev->dev, "Error during system state dump %d\n", rc);

	cs_put(cs);

	if (likely(!skip_reset_on_timeout)) {
		if (hdev->reset_on_lockup)
			hl_device_reset(hdev, HL_RESET_TDR);
		else
			hdev->needs_reset = true;
	}
}

static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, u64 user_sequence,
			struct hl_cs **cs_new, u32 flags, u32 timeout)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_fence *other = NULL;
	struct hl_cs_compl *cs_cmpl;
	struct hl_cs *cs;
	int rc;

	cntr = &hdev->aggregated_cs_counters;

	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
	if (!cs)
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);

	if (!cs) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	/* increment refcnt for context */
	hl_ctx_get(hdev, ctx);

	cs->ctx = ctx;
	cs->submitted = false;
	cs->completed = false;
	cs->type = cs_type;
	cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP);
	cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
	cs->timeout_jiffies = timeout;
	cs->skip_reset_on_timeout =
		hdev->skip_reset_on_timeout ||
		!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
	cs->submission_time_jiffies = jiffies;
	INIT_LIST_HEAD(&cs->job_list);
	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
	kref_init(&cs->refcount);
	spin_lock_init(&cs->job_lock);

	cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
	if (!cs_cmpl)
		cs_cmpl = kzalloc(sizeof(*cs_cmpl), GFP_KERNEL);

	if (!cs_cmpl) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs;
	}

	cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
			sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
	if (!cs->jobs_in_queue_cnt)
		cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
				sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL);

	if (!cs->jobs_in_queue_cnt) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		rc = -ENOMEM;
		goto free_cs_cmpl;
	}

	cs_cmpl->hdev = hdev;
	cs_cmpl->type = cs->type;
	spin_lock_init(&cs_cmpl->lock);
	cs->fence = &cs_cmpl->base_fence;

	spin_lock(&ctx->cs_lock);

	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];

	if (other && !completion_done(&other->completion)) {
		/* If the following statement is true, it means we have reached
		 * a point in which only part of the staged submission was
		 * submitted and we don't have enough room in the 'cs_pending'
		 * array for the rest of the submission.
		 * This causes a deadlock because this CS will never be
		 * completed as it depends on future CS's for completion.
		 */
		if (other->cs_sequence == user_sequence)
			dev_crit_ratelimited(hdev->dev,
				"Staged CS %llu deadlock due to lack of resources",
				user_sequence);

		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flight CS\n");
		atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
		atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
		rc = -EAGAIN;
		goto free_fence;
	}

	/* init hl_fence */
	hl_fence_init(&cs_cmpl->base_fence, cs_cmpl->cs_seq);

	cs->sequence = cs_cmpl->cs_seq;

	ctx->cs_pending[cs_cmpl->cs_seq &
			(hdev->asic_prop.max_pending_cs - 1)] =
							&cs_cmpl->base_fence;
	ctx->cs_sequence++;

	hl_fence_get(&cs_cmpl->base_fence);

	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

	*cs_new = cs;

	return 0;

free_fence:
	spin_unlock(&ctx->cs_lock);
	kfree(cs->jobs_in_queue_cnt);
free_cs_cmpl:
	kfree(cs_cmpl);
free_cs:
	kfree(cs);
	hl_ctx_put(ctx);
	return rc;
}
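
/*
 * Note on the 'cs_pending' indexing above (illustrative): the array acts
 * as a power-of-2 ring keyed by the low bits of the sequence number, e.g.
 * with max_pending_cs == 64, CS seq 130 maps to slot 130 & 63 == 2. A new
 * CS is rejected with -EAGAIN only if the fence currently occupying its
 * slot has not completed yet, i.e. a full ring of CS's is still in flight.
 */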

static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_cs_job *job, *tmp;

	staged_cs_put(hdev, cs);

	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
		complete_job(hdev, job);
}

void hl_cs_rollback_all(struct hl_device *hdev)
{
	int i;
	struct hl_cs *cs, *tmp;

	flush_workqueue(hdev->sob_reset_wq);

	/* flush all completions before iterating over the CS mirror list in
	 * order to avoid a race with the release functions
	 */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		flush_workqueue(hdev->cq_wq[i]);

	/* Make sure we don't have leftovers in the CS mirror list */
	list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
		cs_get(cs);
		cs->aborted = true;
		dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
					cs->ctx->asid, cs->sequence);
		cs_rollback(hdev, cs);
		cs_put(cs);
	}

	force_complete_multi_cs(hdev);
}

static void
wake_pending_user_interrupt_threads(struct hl_user_interrupt *interrupt)
{
	struct hl_user_pending_interrupt *pend;
	unsigned long flags;

	spin_lock_irqsave(&interrupt->wait_list_lock, flags);
	list_for_each_entry(pend, &interrupt->wait_list_head, wait_list_node) {
		pend->fence.error = -EIO;
		complete_all(&pend->fence.completion);
	}
	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
}

void hl_release_pending_user_interrupts(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_user_interrupt *interrupt;
	int i;

	if (!prop->user_interrupt_count)
		return;

	/* We iterate through the user interrupt requests and wake up all
	 * user threads waiting for interrupt completion. We iterate the
	 * list under a lock, this is why all user threads, once awake,
	 * will wait on the same lock and will release the waiting object upon
	 * unlock.
	 */

	for (i = 0 ; i < prop->user_interrupt_count ; i++) {
		interrupt = &hdev->user_interrupt[i];
		wake_pending_user_interrupt_threads(interrupt);
	}

	interrupt = &hdev->common_user_interrupt;
	wake_pending_user_interrupt_threads(interrupt);
}

static void job_wq_completion(struct work_struct *work)
{
	struct hl_cs_job *job = container_of(work, struct hl_cs_job,
						finish_work);
	struct hl_cs *cs = job->cs;
	struct hl_device *hdev = cs->ctx->hdev;

	/* job is no longer needed */
	complete_job(hdev, job);
}

static int validate_queue_index(struct hl_device *hdev,
				struct hl_cs_chunk *chunk,
				enum hl_queue_type *queue_type,
				bool *is_kernel_allocated_cb)
{
	struct asic_fixed_properties *asic = &hdev->asic_prop;
	struct hw_queue_properties *hw_queue_prop;

	/* This must be checked here to prevent out-of-bounds access to
	 * hw_queues_props array
	 */
	if (chunk->queue_index >= asic->max_queues) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];

	if (hw_queue_prop->type == QUEUE_TYPE_NA) {
		dev_err(hdev->dev, "Queue index %d is invalid\n",
			chunk->queue_index);
		return -EINVAL;
	}

	if (hw_queue_prop->driver_only) {
		dev_err(hdev->dev,
			"Queue index %d is restricted for the kernel driver\n",
			chunk->queue_index);
		return -EINVAL;
	}

	/* When hw queue type isn't QUEUE_TYPE_HW,
	 * USER_ALLOC_CB flag shall be treated as "don't care".
	 */
	if (hw_queue_prop->type == QUEUE_TYPE_HW) {
		if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) {
			if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support user CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = false;
		} else {
			if (!(hw_queue_prop->cb_alloc_flags &
					CB_ALLOC_KERNEL)) {
				dev_err(hdev->dev,
					"Queue index %d doesn't support kernel CB\n",
					chunk->queue_index);
				return -EINVAL;
			}

			*is_kernel_allocated_cb = true;
		}
	} else {
		*is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags
						& CB_ALLOC_KERNEL);
	}

	*queue_type = hw_queue_prop->type;
	return 0;
}
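
/*
 * Decision summary for the helper above (derived from the code):
 * - QUEUE_TYPE_HW with USER_ALLOC_CB flag -> user-allocated CB (queue must
 *   advertise CB_ALLOC_USER);
 * - QUEUE_TYPE_HW without the flag -> kernel-allocated CB (queue must
 *   advertise CB_ALLOC_KERNEL);
 * - any other queue type -> the flag is ignored and the queue's
 *   CB_ALLOC_KERNEL capability alone decides.
 */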

static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
					struct hl_cb_mgr *cb_mgr,
					struct hl_cs_chunk *chunk)
{
	struct hl_cb *cb;
	u32 cb_handle;

	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);

	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
	if (!cb) {
		dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle);
		return NULL;
	}

	if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) {
		dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size);
		goto release_cb;
	}

	atomic_inc(&cb->cs_cnt);

	return cb;

release_cb:
	hl_cb_put(cb);
	return NULL;
}
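
/*
 * Note (illustrative): userspace passes the CB handle pre-shifted left by
 * PAGE_SHIFT, so the helper above shifts right to recover the idr id, e.g.
 * with 4 KiB pages a cb_handle of 0x3000 maps back to id 3.
 */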

struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
	struct hl_cs_job *job;

	job = kzalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		job = kzalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return NULL;

	kref_init(&job->refcount);
	job->queue_type = queue_type;
	job->is_kernel_allocated_cb = is_kernel_allocated_cb;

	if (is_cb_patched(hdev, job))
		INIT_LIST_HEAD(&job->userptr_list);

	if (job->queue_type == QUEUE_TYPE_EXT)
		INIT_WORK(&job->finish_work, job_wq_completion);

	return job;
}

static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
{
	if (cs_type_flags & HL_CS_FLAGS_SIGNAL)
		return CS_TYPE_SIGNAL;
	else if (cs_type_flags & HL_CS_FLAGS_WAIT)
		return CS_TYPE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT)
		return CS_TYPE_COLLECTIVE_WAIT;
	else if (cs_type_flags & HL_CS_FLAGS_RESERVE_SIGNALS_ONLY)
		return CS_RESERVE_SIGNALS;
	else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
		return CS_UNRESERVE_SIGNALS;
	else
		return CS_TYPE_DEFAULT;
}

static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 cs_type_flags, num_chunks;
	enum hl_device_status status;
	enum hl_cs_type cs_type;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't submit new CS\n",
			hdev->status[status]);
		return -EBUSY;
	}

	if ((args->in.cs_flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!hdev->supports_staged_submission) {
		dev_err(hdev->dev, "staged submission not supported");
		return -EPERM;
	}

	cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK;

	if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) {
		dev_err(hdev->dev,
			"CS type flags are mutually exclusive, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	cs_type = hl_cs_get_cs_type(cs_type_flags);
	num_chunks = args->in.num_chunks_execute;

	if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
					!hdev->supports_sync_stream)) {
		dev_err(hdev->dev, "Sync stream CS is not supported\n");
		return -EINVAL;
	}

	if (cs_type == CS_TYPE_DEFAULT) {
		if (!num_chunks) {
			dev_err(hdev->dev,
				"Got execute CS with 0 chunks, context %d\n",
				ctx->asid);
			return -EINVAL;
		}
	} else if (num_chunks != 1) {
		dev_err(hdev->dev,
			"Sync stream CS mandates one chunk only, context %d\n",
			ctx->asid);
		return -EINVAL;
	}

	return 0;
}

static int hl_cs_copy_chunk_array(struct hl_device *hdev,
					struct hl_cs_chunk **cs_chunk_array,
					void __user *chunks, u32 num_chunks,
					struct hl_ctx *ctx)
{
	u32 size_to_copy;

	if (num_chunks > HL_MAX_JOBS_PER_CS) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Number of chunks can NOT be larger than %d\n",
			HL_MAX_JOBS_PER_CS);
		return -EINVAL;
	}

	*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
					GFP_ATOMIC);
	if (!*cs_chunk_array)
		*cs_chunk_array = kmalloc_array(num_chunks,
					sizeof(**cs_chunk_array), GFP_KERNEL);
	if (!*cs_chunk_array) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
	if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
		kfree(*cs_chunk_array);
		return -EFAULT;
	}

	return 0;
}

static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
				u64 sequence, u32 flags,
				u32 encaps_signal_handle)
{
	if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
		return 0;

	cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
	cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);

	if (cs->staged_first) {
		/* Staged CS sequence is the first CS sequence */
		INIT_LIST_HEAD(&cs->staged_cs_node);
		cs->staged_sequence = cs->sequence;

		if (cs->encaps_signals)
			cs->encaps_sig_hdl_id = encaps_signal_handle;
	} else {
		/* User sequence will be validated in 'hl_hw_queue_schedule_cs'
		 * under the cs_mirror_lock
		 */
		cs->staged_sequence = sequence;
	}

	/* Increment CS reference if needed */
	staged_cs_get(hdev, cs);

	cs->staged_cs = true;

	return 0;
}

static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
{
	int i;

	for (i = 0; i < hdev->stream_master_qid_arr_size; i++)
		if (qid == hdev->stream_master_qid_arr[i])
			return BIT(i);

	return 0;
}
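
/*
 * Example (illustrative values): if stream_master_qid_arr were {4, 8, 12,
 * 16}, get_stream_master_qid_mask(hdev, 12) would return BIT(2) == 0x4,
 * and a CS that used QIDs 4 and 12 would accumulate the map 0x5.
 * complete_multi_cs() later ANDs such maps to detect an overlap between a
 * completed CS and a multi-CS waiter.
 */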

static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
				u32 num_chunks, u64 *cs_seq, u32 flags,
				u32 encaps_signals_handle, u32 timeout)
{
	bool staged_mid, int_queues_only = true;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_chunk *cs_chunk_array;
	struct hl_cs_counters_atomic *cntr;
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
	u64 user_sequence;
	u8 stream_master_qid_map = 0;
	int rc, i;

	cntr = &hdev->aggregated_cs_counters;
	user_sequence = *cs_seq;
	*cs_seq = ULLONG_MAX;

	rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
			hpriv->ctx);
	if (rc)
		goto out;

	if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
			!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
		staged_mid = true;
	else
		staged_mid = false;

	rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT,
			staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
			timeout);
	if (rc)
		goto free_cs_chunk_array;

	*cs_seq = cs->sequence;

	hl_debugfs_add_cs(cs);

	rc = cs_staged_submission(hdev, cs, user_sequence, flags,
						encaps_signals_handle);
	if (rc)
		goto free_cs_object;

	/* If this is a staged submission we must return the staged sequence
	 * rather than the internal CS sequence
	 */
	if (cs->staged_cs)
		*cs_seq = cs->staged_sequence;

	/* Validate ALL the CS chunks before submitting the CS */
	for (i = 0 ; i < num_chunks ; i++) {
		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
		enum hl_queue_type queue_type;
		bool is_kernel_allocated_cb;

		rc = validate_queue_index(hdev, chunk, &queue_type,
						&is_kernel_allocated_cb);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
			atomic64_inc(&cntr->validation_drop_cnt);
			goto free_cs_object;
		}

		if (is_kernel_allocated_cb) {
			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
			if (!cb) {
				atomic64_inc(
					&ctx->cs_counters.validation_drop_cnt);
				atomic64_inc(&cntr->validation_drop_cnt);
				rc = -EINVAL;
				goto free_cs_object;
			}
		} else {
			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
		}

		if (queue_type == QUEUE_TYPE_EXT ||
						queue_type == QUEUE_TYPE_HW) {
			int_queues_only = false;

			/*
			 * store which streams are being used for external/HW
			 * queues of this CS
			 */
			if (hdev->supports_wait_for_multi_cs)
				stream_master_qid_map |=
					get_stream_master_qid_mask(hdev,
							chunk->queue_index);
		}

		job = hl_cs_allocate_job(hdev, queue_type,
						is_kernel_allocated_cb);
		if (!job) {
			atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
			atomic64_inc(&cntr->out_of_mem_drop_cnt);
			dev_err(hdev->dev, "Failed to allocate a new job\n");
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;

			goto free_cs_object;
		}

		job->id = i + 1;
		job->cs = cs;
		job->user_cb = cb;
		job->user_cb_size = chunk->cb_size;
		job->hw_queue_id = chunk->queue_index;

		cs->jobs_in_queue_cnt[job->hw_queue_id]++;

		list_add_tail(&job->cs_node, &cs->job_list);

		/*
		 * Increment CS reference. When CS reference is 0, CS is
		 * done and can be signaled to user and free all its resources
		 * Only increment for JOB on external or H/W queues, because
		 * only for those JOBs we get completion
		 */
		if (cs_needs_completion(cs) &&
			(job->queue_type == QUEUE_TYPE_EXT ||
				job->queue_type == QUEUE_TYPE_HW))
			cs_get(cs);

		hl_debugfs_add_job(hdev, job);

		rc = cs_parser(hpriv, job);
		if (rc) {
			atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
			atomic64_inc(&cntr->parsing_drop_cnt);
			dev_err(hdev->dev,
				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
				cs->ctx->asid, cs->sequence, job->id, rc);
			goto free_cs_object;
		}
	}

	/* We allow a CS with any queue type combination as long as it does
	 * not get a completion
	 */
	if (int_queues_only && cs_needs_completion(cs)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&cntr->validation_drop_cnt);
		dev_err(hdev->dev,
			"Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
			cs->ctx->asid, cs->sequence);
		rc = -EINVAL;
		goto free_cs_object;
	}

	/*
	 * store the (external/HW queues) streams used by the CS in the
	 * fence object for multi-CS completion
	 */
	if (hdev->supports_wait_for_multi_cs)
		cs->fence->stream_master_qid_map = stream_master_qid_map;

	rc = hl_hw_queue_schedule_cs(cs);
	if (rc) {
		if (rc != -EAGAIN)
			dev_err(hdev->dev,
				"Failed to submit CS %d.%llu to H/W queues, error %d\n",
				cs->ctx->asid, cs->sequence, rc);
		goto free_cs_object;
	}

	rc = HL_CS_STATUS_SUCCESS;
	goto put_cs;

release_cb:
	atomic_dec(&cb->cs_cnt);
	hl_cb_put(cb);
free_cs_object:
	cs_rollback(hdev, cs);
	*cs_seq = ULLONG_MAX;
	/* The path below is both for good and erroneous exits */
put_cs:
	/* We finished with the CS in this function, so put the ref */
	cs_put(cs);
free_cs_chunk_array:
	kfree(cs_chunk_array);
out:
	return rc;
}

static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
				u64 *cs_seq)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	bool need_soft_reset = false;
	int rc = 0, do_ctx_switch;
	void __user *chunks;
	u32 num_chunks, tmp;
	int ret;

	do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

	if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
		mutex_lock(&hpriv->restore_phase_mutex);

		if (do_ctx_switch) {
			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
			if (rc) {
				dev_err_ratelimited(hdev->dev,
					"Failed to switch to context %d, rejecting CS! %d\n",
					ctx->asid, rc);
				/*
				 * If we timedout, or if the device is not IDLE
				 * while we want to do context-switch (-EBUSY),
				 * we need to soft-reset because QMAN is
				 * probably stuck. However, we can't call to
				 * reset here directly because of deadlock, so
				 * need to do it at the very end of this
				 * function
				 */
				if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
					need_soft_reset = true;
				mutex_unlock(&hpriv->restore_phase_mutex);
				goto out;
			}
		}

		hdev->asic_funcs->restore_phase_topology(hdev);

		chunks = (void __user *) (uintptr_t) args->in.chunks_restore;
		num_chunks = args->in.num_chunks_restore;

		if (!num_chunks) {
			dev_dbg(hdev->dev,
				"Need to run restore phase but restore CS is empty\n");
			rc = 0;
		} else {
			rc = cs_ioctl_default(hpriv, chunks, num_chunks,
					cs_seq, 0, 0, hdev->timeout_jiffies);
		}

		mutex_unlock(&hpriv->restore_phase_mutex);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to submit restore CS for context %d (%d)\n",
				ctx->asid, rc);
			goto out;
		}

		/* Need to wait for restore completion before execution phase */
		if (num_chunks) {
			enum hl_cs_wait_status status;
wait_again:
			ret = _hl_cs_wait_ioctl(hdev, ctx,
					jiffies_to_usecs(hdev->timeout_jiffies),
					*cs_seq, &status, NULL);
			if (ret) {
				if (ret == -ERESTARTSYS) {
					usleep_range(100, 200);
					goto wait_again;
				}

				dev_err(hdev->dev,
					"Restore CS for context %d failed to complete %d\n",
					ctx->asid, ret);
				rc = -ENOEXEC;
				goto out;
			}
		}

		ctx->thread_ctx_switch_wait_token = 1;

	} else if (!ctx->thread_ctx_switch_wait_token) {
		rc = hl_poll_timeout_memory(hdev,
			&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
			100, jiffies_to_usecs(hdev->timeout_jiffies), false);

		if (rc == -ETIMEDOUT) {
			dev_err(hdev->dev,
				"context switch phase timeout (%d)\n", tmp);
			goto out;
		}
	}

out:
	if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset))
		hl_device_reset(hdev, 0);

	return rc;
}

/*
 * hl_cs_signal_sob_wraparound_handler: handle SOB value wraparound case.
 * If the SOB value reaches the max value, move to the other SOB reserved
 * for the queue.
 * @hdev: pointer to device structure
 * @q_idx: stream queue index
 * @hw_sob: the H/W SOB used in this signal CS.
 * @count: signals count
 * @encaps_sig: tells whether it's reservation for encaps signals or not.
 *
 * Note that this function must be called while hw_queues_lock is taken.
 */
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
{
	struct hl_sync_stream_properties *prop;
	struct hl_hw_sob *sob = *hw_sob, *other_sob;
	u8 other_sob_offset;

	prop = &hdev->kernel_queues[q_idx].sync_stream_prop;

	hw_sob_get(sob);

	/* check for wraparound */
	if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount right before calling this
		 * function.
		 */
		hw_sob_put_err(sob);

		/*
		 * check the other sob value; if it is still in use then fail,
		 * otherwise make the switch
		 */
		other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
		other_sob = &prop->hw_sob[other_sob_offset];

		if (kref_read(&other_sob->kref) != 1) {
			dev_err(hdev->dev, "error: Cannot switch SOBs q_idx: %d\n",
								q_idx);
			return -EINVAL;
		}

		/*
		 * next_sob_val always points to the next available signal
		 * in the sob, so in encaps signals it will be the next one
		 * after reserving the required amount.
		 */
		if (encaps_sig)
			prop->next_sob_val = count + 1;
		else
			prop->next_sob_val = count;

		/* only two SOBs are currently in use */
		prop->curr_sob_offset = other_sob_offset;
		*hw_sob = other_sob;

		/*
		 * check if other_sob needs reset, then do it before using it
		 * for the reservation or the next signal cs.
		 * we do it here, and for both encaps and regular signal cs
		 * cases in order to avoid possible races of two kref_put
		 * of the sob which can occur at the same time if we move the
		 * sob reset(kref_put) to cs_do_release function.
		 * in addition, if we have combination of cs signal and
		 * encaps, and at the point we need to reset the sob there was
		 * no more reservations and only signal cs keep coming,
		 * in such case we need signal_cs to put the refcount and
		 * reset the sob.
		 */
		if (other_sob->need_reset)
			hw_sob_put(other_sob);

		if (encaps_sig) {
			/* set reset indication for the sob */
			sob->need_reset = true;
			hw_sob_get(other_sob);
		}

		dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
				prop->curr_sob_offset, q_idx);
	} else {
		prop->next_sob_val += count;
	}

	return 0;
}
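
/*
 * Worked example for the wraparound path above (illustrative, assuming
 * HL_MAX_SOB_VAL == 1 << 15): with next_sob_val == 32700 and count == 100,
 * 32700 + 100 >= 32768, so the stream switches to the other reserved SOB
 * and restarts counting there: next_sob_val becomes count (regular signal
 * CS) or count + 1 (encapsulated signals reservation, which leaves
 * next_sob_val pointing just past the reserved values).
 */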

static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
		struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx,
		bool encaps_signals)
{
	u64 *signal_seq_arr = NULL;
	u32 size_to_copy, signal_seq_arr_len;
	int rc = 0;

	if (encaps_signals) {
		*signal_seq = chunk->encaps_signal_seq;
		return 0;
	}

	signal_seq_arr_len = chunk->num_signal_seq_arr;

	/* currently only one signal seq is supported */
	if (signal_seq_arr_len != 1) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Wait for signal CS supports only one signal CS seq\n");
		return -EINVAL;
	}

	signal_seq_arr = kmalloc_array(signal_seq_arr_len,
					sizeof(*signal_seq_arr),
					GFP_ATOMIC);
	if (!signal_seq_arr)
		signal_seq_arr = kmalloc_array(signal_seq_arr_len,
					sizeof(*signal_seq_arr),
					GFP_KERNEL);
	if (!signal_seq_arr) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
		return -ENOMEM;
	}

	size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr);
	if (copy_from_user(signal_seq_arr,
				u64_to_user_ptr(chunk->signal_seq_arr),
				size_to_copy)) {
		atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
		atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
		dev_err(hdev->dev,
			"Failed to copy signal seq array from user\n");
		rc = -EFAULT;
		goto out;
	}

	/* currently it is guaranteed to have only one signal seq */
	*signal_seq = signal_seq_arr[0];

out:
	kfree(signal_seq_arr);

	return rc;
}

static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
		struct hl_ctx *ctx, struct hl_cs *cs,
		enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)
{
	struct hl_cs_counters_atomic *cntr;
	struct hl_cs_job *job;
	struct hl_cb *cb;
	u32 cb_size;

	cntr = &hdev->aggregated_cs_counters;

	job = hl_cs_allocate_job(hdev, q_type, true);
	if (!job) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		return -ENOMEM;
	}

	if (cs->type == CS_TYPE_WAIT)
		cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
	else
		cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);

	cb = hl_cb_kernel_create(hdev, cb_size,
				q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
	if (!cb) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		kfree(job);
		return -EFAULT;
	}

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = q_idx;

	if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
			&& cs->encaps_signals)
		job->encaps_sig_wait_offset = encaps_signal_offset;
	/*
	 * No need for parsing, the user CB is the patched CB.
	 * We call hl_cb_destroy() for two reasons - we don't need the CB in
	 * the CB idr anymore and to decrement its refcount as it was
	 * incremented inside hl_cb_kernel_create().
	 */
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	/* increment refcount as for external queues we get completion */
	cs_get(cs);

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);

	hl_debugfs_add_job(hdev, job);

	return 0;
}
1757
dadf17ab 1758static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
1759 u32 q_idx, u32 count,
1760 u32 *handle_id, u32 *sob_addr,
1761 u32 *signals_count)
1762{
1763 struct hw_queue_properties *hw_queue_prop;
1764 struct hl_sync_stream_properties *prop;
1765 struct hl_device *hdev = hpriv->hdev;
1766 struct hl_cs_encaps_sig_handle *handle;
1767 struct hl_encaps_signals_mgr *mgr;
1768 struct hl_hw_sob *hw_sob;
1769 int hdl_id;
1770 int rc = 0;
1771
1772 if (count >= HL_MAX_SOB_VAL) {
1773 dev_err(hdev->dev, "signals count(%u) exceeds the max SOB value\n",
1774 count);
1775 rc = -EINVAL;
1776 goto out;
1777 }
1778
1779 if (q_idx >= hdev->asic_prop.max_queues) {
1780 dev_err(hdev->dev, "Queue index %d is invalid\n",
1781 q_idx);
1782 rc = -EINVAL;
1783 goto out;
1784 }
1785
1786 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
1787
1788 if (!hw_queue_prop->supports_sync_stream) {
1789 dev_err(hdev->dev,
1790 "Queue index %d does not support sync stream operations\n",
1791 q_idx);
1792 rc = -EINVAL;
1793 goto out;
1794 }
1795
1796 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1797
1798 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1799 if (!handle) {
1800 rc = -ENOMEM;
1801 goto out;
1802 }
1803
1804 handle->count = count;
1805 mgr = &hpriv->ctx->sig_mgr;
1806
1807 spin_lock(&mgr->lock);
e4cdccd2 1808 hdl_id = idr_alloc(&mgr->handles, handle, 1, 0, GFP_ATOMIC);
dadf17ab 1809 spin_unlock(&mgr->lock);
1810
1811 if (hdl_id < 0) {
1812 dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
1813 rc = -EINVAL;
1814 goto out;
1815 }
1816
1817 handle->id = hdl_id;
1818 handle->q_idx = q_idx;
1819 handle->hdev = hdev;
1820 kref_init(&handle->refcount);
1821
1822 hdev->asic_funcs->hw_queues_lock(hdev);
1823
1824 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1825
	/*
	 * Increment the SOB value by the user-requested count in order to
	 * reserve those signals.
	 * Check whether the amount of signals to reserve exceeds the max SOB
	 * value; if so, switch to a new SOB.
	 */
1832 rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count,
e4cdccd2 1833 true);
dadf17ab 1834 if (rc) {
1835 dev_err(hdev->dev, "Failed to switch SOB\n");
1836 hdev->asic_funcs->hw_queues_unlock(hdev);
1837 rc = -EINVAL;
1838 goto remove_idr;
1839 }
	/* Set the hw_sob in the handle only after calling the SOB wraparound
	 * handler, since the SOB could have changed.
	 */
1843 handle->hw_sob = hw_sob;
1844
	/* Store the current SOB value for the unreserve validity check and
	 * for signal offset support.
	 */
1848 handle->pre_sob_val = prop->next_sob_val - handle->count;
1849
1850 *signals_count = prop->next_sob_val;
1851 hdev->asic_funcs->hw_queues_unlock(hdev);
1852
1853 *sob_addr = handle->hw_sob->sob_addr;
1854 *handle_id = hdl_id;
1855
1856 dev_dbg(hdev->dev,
e4cdccd2 1857 "Signals reserved, sob_id: %d, sob addr: 0x%x, last sob_val: %u, q_idx: %d, hdl_id: %d\n",
dadf17ab 1858 hw_sob->sob_id, handle->hw_sob->sob_addr,
e4cdccd2 1859 prop->next_sob_val - 1, q_idx, hdl_id);
dadf17ab 1860 goto out;
1861
1862remove_idr:
1863 spin_lock(&mgr->lock);
1864 idr_remove(&mgr->handles, hdl_id);
1865 spin_unlock(&mgr->lock);
1866
1867 kfree(handle);
1868out:
1869 return rc;
1870}
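/*
 * [Illustrative sketch, not driver code] The reservation arithmetic that
 * hl_cs_signal_sob_wraparound_handler() is expected to perform for the
 * function above, under the assumption that HL_MAX_SOB_VAL is the SOB
 * counter limit. demo_reserve() is a hypothetical name.
 */
static void demo_reserve(u32 *next_sob_val, u32 count, bool *switch_sob)
{
	/*
	 * e.g. next_sob_val = 100, count = 8 -> next_sob_val becomes 108,
	 * and pre_sob_val (above) is 108 - 8 = 100, i.e. the value the
	 * counter had before this reservation.
	 */
	if (*next_sob_val + count >= HL_MAX_SOB_VAL) {
		/* counter would overflow: move to a fresh SOB and restart */
		*switch_sob = true;
		*next_sob_val = count;
	} else {
		*switch_sob = false;
		*next_sob_val += count;
	}
}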
1871
1872static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
1873{
1874 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1875 struct hl_sync_stream_properties *prop;
1876 struct hl_device *hdev = hpriv->hdev;
1877 struct hl_encaps_signals_mgr *mgr;
1878 struct hl_hw_sob *hw_sob;
1879 u32 q_idx, sob_addr;
1880 int rc = 0;
1881
1882 mgr = &hpriv->ctx->sig_mgr;
1883
1884 spin_lock(&mgr->lock);
1885 encaps_sig_hdl = idr_find(&mgr->handles, handle_id);
1886 if (encaps_sig_hdl) {
1887 dev_dbg(hdev->dev, "unreserve signals, handle: %u, SOB:0x%x, count: %u\n",
1888 handle_id, encaps_sig_hdl->hw_sob->sob_addr,
1889 encaps_sig_hdl->count);
1890
1891 hdev->asic_funcs->hw_queues_lock(hdev);
1892
1893 q_idx = encaps_sig_hdl->q_idx;
1894 prop = &hdev->kernel_queues[q_idx].sync_stream_prop;
1895 hw_sob = &prop->hw_sob[prop->curr_sob_offset];
1896 sob_addr = hdev->asic_funcs->get_sob_addr(hdev, hw_sob->sob_id);
1897
		/* Check whether the SOB value got out of sync, either due to
		 * other signal submission requests that were handled between
		 * the reserve and unreserve calls, or due to a SOB switch
		 * upon reaching the max SOB value.
		 */
1903 if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
1904 != prop->next_sob_val ||
1905 sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
1906 dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
1907 encaps_sig_hdl->pre_sob_val,
1908 (prop->next_sob_val - encaps_sig_hdl->count));
1909
1910 hdev->asic_funcs->hw_queues_unlock(hdev);
1911 rc = -EINVAL;
1912 goto out;
1913 }
1914
		/*
		 * Decrement the SOB value by the user-requested count in
		 * order to unreserve those signals.
		 */
1919 prop->next_sob_val -= encaps_sig_hdl->count;
1920
1921 hdev->asic_funcs->hw_queues_unlock(hdev);
1922
1923 hw_sob_put(hw_sob);
1924
1925 /* Release the id and free allocated memory of the handle */
1926 idr_remove(&mgr->handles, handle_id);
1927 kfree(encaps_sig_hdl);
1928 } else {
1929 rc = -EINVAL;
		dev_err(hdev->dev, "failed to unreserve signals, cannot find handle\n");
1931 }
1932out:
1933 spin_unlock(&mgr->lock);
1934
1935 return rc;
1936}
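/*
 * Worked example for the unreserve validity check above (illustrative):
 * a reservation of count = 8 made while next_sob_val was 92 bumps
 * next_sob_val to 100 and records pre_sob_val = 100 - 8 = 92. Unreserve
 * is valid only while pre_sob_val + count == next_sob_val (92 + 8 == 100),
 * i.e. no other signal submission and no SOB switch happened in between;
 * if next_sob_val has meanwhile moved to 105, then 92 + 8 != 105 and the
 * reservation can no longer be rolled back.
 */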
1937
b75f2250
OS
1938static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
1939 void __user *chunks, u32 num_chunks,
cf393950 1940 u64 *cs_seq, u32 flags, u32 timeout)
b75f2250 1941{
e4cdccd2 1942 struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
1943 bool handle_found = false, is_wait_cs = false,
1944 wait_cs_submitted = false,
1945 cs_encaps_signals = false;
b75f2250 1946 struct hl_cs_chunk *cs_chunk_array, *chunk;
e4cdccd2 1947 bool staged_cs_with_encaps_signals = false;
b75f2250 1948 struct hw_queue_properties *hw_queue_prop;
6de3d769 1949 struct hl_device *hdev = hpriv->hdev;
06f791f7 1950 struct hl_cs_compl *sig_waitcs_cmpl;
6de3d769 1951 u32 q_idx, collective_engine_id = 0;
a3fd2830 1952 struct hl_cs_counters_atomic *cntr;
6de3d769
TT
1953 struct hl_fence *sig_fence = NULL;
1954 struct hl_ctx *ctx = hpriv->ctx;
06f791f7 1955 enum hl_queue_type q_type;
6de3d769 1956 struct hl_cs *cs;
06f791f7 1957 u64 signal_seq;
b75f2250
OS
1958 int rc;
1959
a3fd2830 1960 cntr = &hdev->aggregated_cs_counters;
b75f2250
OS
1961 *cs_seq = ULLONG_MAX;
1962
a3fd2830
AM
1963 rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
1964 ctx);
6de3d769 1965 if (rc)
b75f2250 1966 goto out;
b75f2250
OS
1967
1968 /* currently it is guaranteed to have only one chunk */
1969 chunk = &cs_chunk_array[0];
1cff1197
OB
1970
1971 if (chunk->queue_index >= hdev->asic_prop.max_queues) {
a3fd2830
AM
1972 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1973 atomic64_inc(&cntr->validation_drop_cnt);
1cff1197
OB
1974 dev_err(hdev->dev, "Queue index %d is invalid\n",
1975 chunk->queue_index);
1976 rc = -EINVAL;
1977 goto free_cs_chunk_array;
1978 }
1979
b75f2250
OS
1980 q_idx = chunk->queue_index;
1981 hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
21e7a346 1982 q_type = hw_queue_prop->type;
b75f2250 1983
becce5f9 1984 if (!hw_queue_prop->supports_sync_stream) {
a3fd2830
AM
1985 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1986 atomic64_inc(&cntr->validation_drop_cnt);
becce5f9
OG
1987 dev_err(hdev->dev,
1988 "Queue index %d does not support sync stream operations\n",
1989 q_idx);
b75f2250
OS
1990 rc = -EINVAL;
1991 goto free_cs_chunk_array;
1992 }
1993
5fe1c17d
OB
1994 if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
1995 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
a3fd2830
AM
1996 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
1997 atomic64_inc(&cntr->validation_drop_cnt);
5fe1c17d
OB
1998 dev_err(hdev->dev,
1999 "Queue index %d is invalid\n", q_idx);
2000 rc = -EINVAL;
2001 goto free_cs_chunk_array;
2002 }
2003
d09ff62c
OB
2004 if (!hdev->nic_ports_mask) {
2005 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2006 atomic64_inc(&cntr->validation_drop_cnt);
2007 dev_err(hdev->dev,
2008 "Collective operations not supported when NIC ports are disabled");
2009 rc = -EINVAL;
2010 goto free_cs_chunk_array;
2011 }
2012
5fe1c17d
OB
2013 collective_engine_id = chunk->collective_engine_id;
2014 }
2015
e4cdccd2 2016 is_wait_cs = !!(cs_type == CS_TYPE_WAIT ||
2017 cs_type == CS_TYPE_COLLECTIVE_WAIT);
8ca2072e 2018
e4cdccd2 2019 cs_encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
2020
2021 if (is_wait_cs) {
2022 rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
2023 ctx, cs_encaps_signals);
06f791f7 2024 if (rc)
b75f2250 2025 goto free_cs_chunk_array;
b75f2250 2026
e4cdccd2 2027 if (cs_encaps_signals) {
			/* check whether the signal CS sequence has an
			 * encapsulated signals handle
			 */
2031 struct idr *idp;
2032 u32 id;
2033
2034 spin_lock(&ctx->sig_mgr.lock);
2035 idp = &ctx->sig_mgr.handles;
2036 idr_for_each_entry(idp, encaps_sig_hdl, id) {
2037 if (encaps_sig_hdl->cs_seq == signal_seq) {
2038 handle_found = true;
					/* Take a refcount to protect the
					 * handle from being removed from
					 * the idr; needed when multiple
					 * wait CSes with offsets wait on
					 * the same reserved encaps signals.
					 */
2044 kref_get(&encaps_sig_hdl->refcount);
2045 break;
2046 }
2047 }
2048 spin_unlock(&ctx->sig_mgr.lock);
2049
2050 if (!handle_found) {
beb71ee3 2051 /* treat as signal CS already finished */
2052 dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
e4cdccd2 2053 signal_seq);
beb71ee3 2054 rc = 0;
e4cdccd2 2055 goto free_cs_chunk_array;
2056 }
2057
2058 /* validate also the signal offset value */
2059 if (chunk->encaps_signal_offset >
2060 encaps_sig_hdl->count) {
				dev_err(hdev->dev, "offset(%u) value exceeds max reserved signals count(%u)!\n",
2062 chunk->encaps_signal_offset,
2063 encaps_sig_hdl->count);
2064 rc = -EINVAL;
2065 goto free_cs_chunk_array;
2066 }
2067 }
2068
b75f2250
OS
2069 sig_fence = hl_ctx_get_fence(ctx, signal_seq);
2070 if (IS_ERR(sig_fence)) {
a3fd2830
AM
2071 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2072 atomic64_inc(&cntr->validation_drop_cnt);
b75f2250
OS
2073 dev_err(hdev->dev,
2074 "Failed to get signal CS with seq 0x%llx\n",
2075 signal_seq);
2076 rc = PTR_ERR(sig_fence);
06f791f7 2077 goto free_cs_chunk_array;
b75f2250
OS
2078 }
2079
2080 if (!sig_fence) {
2081 /* signal CS already finished */
2082 rc = 0;
06f791f7 2083 goto free_cs_chunk_array;
b75f2250
OS
2084 }
2085
2086 sig_waitcs_cmpl =
2087 container_of(sig_fence, struct hl_cs_compl, base_fence);
2088
e4cdccd2 2089 staged_cs_with_encaps_signals = !!
2090 (sig_waitcs_cmpl->type == CS_TYPE_DEFAULT &&
2091 (flags & HL_CS_FLAGS_ENCAP_SIGNALS));
2092
2093 if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
2094 !staged_cs_with_encaps_signals) {
a3fd2830
AM
2095 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2096 atomic64_inc(&cntr->validation_drop_cnt);
b75f2250 2097 dev_err(hdev->dev,
e4cdccd2 2098 "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
b75f2250 2099 signal_seq);
a98d73c7 2100 hl_fence_put(sig_fence);
b75f2250 2101 rc = -EINVAL;
06f791f7 2102 goto free_cs_chunk_array;
b75f2250
OS
2103 }
2104
a98d73c7 2105 if (completion_done(&sig_fence->completion)) {
b75f2250 2106 /* signal CS already finished */
a98d73c7 2107 hl_fence_put(sig_fence);
b75f2250 2108 rc = 0;
06f791f7 2109 goto free_cs_chunk_array;
b75f2250
OS
2110 }
2111 }
2112
cf393950 2113 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);
b75f2250 2114 if (rc) {
8ca2072e 2115 if (is_wait_cs)
a98d73c7 2116 hl_fence_put(sig_fence);
8ca2072e 2117
06f791f7 2118 goto free_cs_chunk_array;
b75f2250
OS
2119 }
2120
	/*
	 * Save the signal CS fence for later initialization, right before
	 * hanging the wait CS on the queue.
	 * For the encapsulated signals case, we save the CS sequence and the
	 * handle pointer for later initialization.
	 */
e4cdccd2 2127 if (is_wait_cs) {
b75f2250 2128 cs->signal_fence = sig_fence;
		/* Store the handle pointer, so we don't have to look it up
		 * again later in the flow, when we need to set the SOB info
		 * in the hw_queue.
		 */
2133 if (cs->encaps_signals)
2134 cs->encaps_sig_hdl = encaps_sig_hdl;
2135 }
b75f2250
OS
2136
2137 hl_debugfs_add_cs(cs);
2138
2139 *cs_seq = cs->sequence;
2140
06f791f7
OB
2141 if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL)
2142 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
e4cdccd2 2143 q_idx, chunk->encaps_signal_offset);
e716ad3c 2144 else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
5fe1c17d 2145 rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
e4cdccd2 2146 cs, q_idx, collective_engine_id,
2147 chunk->encaps_signal_offset);
a3fd2830
AM
2148 else {
2149 atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
2150 atomic64_inc(&cntr->validation_drop_cnt);
e716ad3c 2151 rc = -EINVAL;
a3fd2830 2152 }
a04b7cd9 2153
06f791f7 2154 if (rc)
5de406c0 2155 goto free_cs_object;
b75f2250 2156
b75f2250
OS
2157 rc = hl_hw_queue_schedule_cs(cs);
2158 if (rc) {
		/* In case a wait CS failed here, it means the signal CS
		 * already completed. We want to free all its related
		 * objects, but we don't want to fail the ioctl.
		 */
2163 if (is_wait_cs)
2164 rc = 0;
2165 else if (rc != -EAGAIN)
b75f2250
OS
2166 dev_err(hdev->dev,
2167 "Failed to submit CS %d.%llu to H/W queues, error %d\n",
2168 ctx->asid, cs->sequence, rc);
2169 goto free_cs_object;
2170 }
2171
2172 rc = HL_CS_STATUS_SUCCESS;
e4cdccd2 2173 if (is_wait_cs)
2174 wait_cs_submitted = true;
b75f2250
OS
2175 goto put_cs;
2176
2177free_cs_object:
2178 cs_rollback(hdev, cs);
2179 *cs_seq = ULLONG_MAX;
2180 /* The path below is both for good and erroneous exits */
2181put_cs:
2182 /* We finished with the CS in this function, so put the ref */
2183 cs_put(cs);
b75f2250 2184free_cs_chunk_array:
e4cdccd2 2185 if (!wait_cs_submitted && cs_encaps_signals && handle_found &&
2186 is_wait_cs)
2187 kref_put(&encaps_sig_hdl->refcount,
2188 hl_encaps_handle_do_release);
b75f2250
OS
2189 kfree(cs_chunk_array);
2190out:
2191 return rc;
2192}
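/*
 * [Illustrative sketch, not driver code] The lookup-then-ref pattern used
 * above for encapsulated-signal handles: the kref must be taken under the
 * idr lock, so a concurrent cs_ioctl_unreserve_signals() cannot free the
 * handle between the lookup and the kref_get(). demo_find_handle() is a
 * hypothetical name.
 */
static struct hl_cs_encaps_sig_handle *
demo_find_handle(struct hl_encaps_signals_mgr *mgr, u64 signal_seq)
{
	struct hl_cs_encaps_sig_handle *hdl;
	u32 id;

	spin_lock(&mgr->lock);
	idr_for_each_entry(&mgr->handles, hdl, id) {
		if (hdl->cs_seq == signal_seq) {
			kref_get(&hdl->refcount);
			spin_unlock(&mgr->lock);
			return hdl;
		}
	}
	spin_unlock(&mgr->lock);

	return NULL;
}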
2193
eff6f4a0
OG
2194int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
2195{
eff6f4a0 2196 union hl_cs_args *args = data;
dadf17ab 2197 enum hl_cs_type cs_type = 0;
	u64 cs_seq = ULLONG_MAX;
6de3d769 2199 void __user *chunks;
dadf17ab 2200 u32 num_chunks, flags, timeout,
2201 signals_count = 0, sob_addr = 0, handle_id = 0;
6de3d769 2202 int rc;
b75f2250 2203
6de3d769
TT
2204 rc = hl_cs_sanity_checks(hpriv, args);
2205 if (rc)
f9e5f295 2206 goto out;
f9e5f295 2207
6de3d769
TT
2208 rc = hl_cs_ctx_switch(hpriv, args, &cs_seq);
2209 if (rc)
1718a45b 2210 goto out;
eff6f4a0 2211
6de3d769
TT
2212 cs_type = hl_cs_get_cs_type(args->in.cs_flags &
2213 ~HL_CS_FLAGS_FORCE_RESTORE);
2214 chunks = (void __user *) (uintptr_t) args->in.chunks_execute;
2215 num_chunks = args->in.num_chunks_execute;
c209e742
OB
2216 flags = args->in.cs_flags;
2217
2218 /* In case this is a staged CS, user should supply the CS sequence */
2219 if ((flags & HL_CS_FLAGS_STAGED_SUBMISSION) &&
2220 !(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST))
2221 cs_seq = args->in.seq;
6de3d769 2222
cf393950
AM
2223 timeout = flags & HL_CS_FLAGS_CUSTOM_TIMEOUT
2224 ? msecs_to_jiffies(args->in.timeout * 1000)
2225 : hpriv->hdev->timeout_jiffies;
2226
6de3d769
TT
2227 switch (cs_type) {
2228 case CS_TYPE_SIGNAL:
2229 case CS_TYPE_WAIT:
2230 case CS_TYPE_COLLECTIVE_WAIT:
2231 rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
cf393950 2232 &cs_seq, args->in.cs_flags, timeout);
6de3d769 2233 break;
dadf17ab 2234 case CS_RESERVE_SIGNALS:
2235 rc = cs_ioctl_reserve_signals(hpriv,
2236 args->in.encaps_signals_q_idx,
2237 args->in.encaps_signals_count,
2238 &handle_id, &sob_addr, &signals_count);
2239 break;
2240 case CS_UNRESERVE_SIGNALS:
2241 rc = cs_ioctl_unreserve_signals(hpriv,
2242 args->in.encaps_sig_handle_id);
2243 break;
6de3d769 2244 default:
bd2f477f 2245 rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
e4cdccd2 2246 args->in.cs_flags,
2247 args->in.encaps_sig_handle_id,
2248 timeout);
6de3d769 2249 break;
eff6f4a0 2250 }
eff6f4a0
OG
2251out:
2252 if (rc != -EAGAIN) {
2253 memset(args, 0, sizeof(*args));
dadf17ab 2254
2255 if (cs_type == CS_RESERVE_SIGNALS) {
2256 args->out.handle_id = handle_id;
2257 args->out.sob_base_addr_offset = sob_addr;
2258 args->out.count = signals_count;
2259 } else {
2260 args->out.seq = cs_seq;
2261 }
eff6f4a0 2262 args->out.status = rc;
eff6f4a0
OG
2263 }
2264
eff6f4a0
OG
2265 return rc;
2266}
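/*
 * [Illustrative user-space sketch] Reserving encapsulated signals through
 * the CS_RESERVE_SIGNALS path above. Assumptions: HL_IOCTL_CS and the
 * HL_CS_FLAGS_RESERVE_SIGNALS_ONLY flag come from the installed uapi
 * header (include path may differ); error handling is elided.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int demo_reserve_signals(int fd, __u32 q_idx, __u32 count,
				__u32 *handle_id, __u32 *sob_base,
				__u32 *sig_cnt)
{
	union hl_cs_args args;
	int rc;

	memset(&args, 0, sizeof(args));
	args.in.cs_flags = HL_CS_FLAGS_RESERVE_SIGNALS_ONLY;
	args.in.encaps_signals_q_idx = q_idx;
	args.in.encaps_signals_count = count;

	rc = ioctl(fd, HL_IOCTL_CS, &args);
	if (!rc) {
		*handle_id = args.out.handle_id;
		*sob_base = args.out.sob_base_addr_offset;
		*sig_cnt = args.out.count;
	}
	return rc;
}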
2267
c457d5ab
OS
2268static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
2269 enum hl_cs_wait_status *status, u64 timeout_us,
2270 s64 *timestamp)
eff6f4a0 2271{
c457d5ab 2272 struct hl_device *hdev = ctx->hdev;
9d127ad5 2273 long completion_rc;
c457d5ab 2274 int rc = 0;
eff6f4a0 2275
eff6f4a0
OG
2276 if (IS_ERR(fence)) {
2277 rc = PTR_ERR(fence);
b75f2250
OS
2278 if (rc == -EINVAL)
2279 dev_notice_ratelimited(hdev->dev,
0eab4f89 2280 "Can't wait on CS %llu because current CS is at seq %llu\n",
b75f2250 2281 seq, ctx->cs_sequence);
c457d5ab
OS
2282 return rc;
2283 }
a98d73c7 2284
c457d5ab 2285 if (!fence) {
b75f2250
OS
2286 dev_dbg(hdev->dev,
2287 "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
c457d5ab
OS
2288 seq, ctx->cs_sequence);
2289
9d127ad5 2290 *status = CS_WAIT_STATUS_GONE;
c457d5ab
OS
2291 return 0;
2292 }
2293
2294 if (!timeout_us) {
2295 completion_rc = completion_done(&fence->completion);
2296 } else {
2297 unsigned long timeout;
2298
2299 timeout = (timeout_us == MAX_SCHEDULE_TIMEOUT) ?
2300 timeout_us : usecs_to_jiffies(timeout_us);
2301 completion_rc =
2302 wait_for_completion_interruptible_timeout(
2303 &fence->completion, timeout);
2304 }
2305
2306 if (completion_rc > 0) {
2307 *status = CS_WAIT_STATUS_COMPLETED;
2308 if (timestamp)
2309 *timestamp = ktime_to_ns(fence->timestamp);
2310 } else {
2311 *status = CS_WAIT_STATUS_BUSY;
b75f2250 2312 }
eff6f4a0 2313
c457d5ab
OS
2314 if (fence->error == -ETIMEDOUT)
2315 rc = -ETIMEDOUT;
2316 else if (fence->error == -EIO)
2317 rc = -EIO;
2318
2319 return rc;
2320}
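/*
 * [Illustrative sketch, not driver code] How hl_wait_for_fence() above
 * doubles as a non-sleeping poll: with timeout_us == 0 it only consults
 * completion_done(), which is what hl_cs_poll_fences() below relies on.
 * demo_fence_is_done() is a hypothetical name.
 */
static bool demo_fence_is_done(struct hl_ctx *ctx, u64 seq,
				struct hl_fence *fence)
{
	enum hl_cs_wait_status status = CS_WAIT_STATUS_BUSY;

	/* timeout 0 -> pure poll, never sleeps */
	if (hl_wait_for_fence(ctx, seq, fence, &status, 0, NULL))
		return false;

	return status == CS_WAIT_STATUS_COMPLETED ||
		status == CS_WAIT_STATUS_GONE;
}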
2321
215f0c17
OS
2322/*
2323 * hl_cs_poll_fences - iterate CS fences to check for CS completion
2324 *
2325 * @mcs_data: multi-CS internal data
2326 *
2327 * @return 0 on success, otherwise non 0 error code
2328 *
2329 * The function iterates on all CS sequence in the list and set bit in
2330 * completion_bitmap for each completed CS.
2331 * while iterating, the function can extracts the stream map to be later
2332 * used by the waiting function.
2333 * this function shall be called after taking context ref
2334 */
2335static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
2336{
2337 struct hl_fence **fence_ptr = mcs_data->fence_arr;
2338 struct hl_device *hdev = mcs_data->ctx->hdev;
2339 int i, rc, arr_len = mcs_data->arr_len;
2340 u64 *seq_arr = mcs_data->seq_arr;
2341 ktime_t max_ktime, first_cs_time;
2342 enum hl_cs_wait_status status;
2343
2344 memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
2345
2346 /* get all fences under the same lock */
2347 rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
2348 if (rc)
2349 return rc;
2350
	/*
	 * Set to the maximum time to verify the timestamp is valid: if this
	 * value is maintained at the end, no timestamp was updated.
	 */
2355 max_ktime = ktime_set(KTIME_SEC_MAX, 0);
2356 first_cs_time = max_ktime;
2357
2358 for (i = 0; i < arr_len; i++, fence_ptr++) {
2359 struct hl_fence *fence = *fence_ptr;
2360
		/*
		 * The function won't sleep here, as it is called with
		 * timeout 0 (i.e. it just polls the fence).
		 */
2365 rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
2366 &status, 0, NULL);
2367 if (rc) {
2368 dev_err(hdev->dev,
2369 "wait_for_fence error :%d for CS seq %llu\n",
2370 rc, seq_arr[i]);
2371 break;
2372 }
2373
72d66255 2374 mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
215f0c17
OS
2375
2376 if (status == CS_WAIT_STATUS_BUSY)
2377 continue;
2378
2379 mcs_data->completion_bitmap |= BIT(i);
2380
		/*
		 * Best effort to extract the timestamp. A few notes:
		 * - if even a single fence is gone, we cannot extract the
		 *   timestamp (as the fence no longer exists)
		 * - for all completed CSs we take the earliest timestamp.
		 *   For this we have to validate that:
		 *   1. the given timestamp was indeed set
		 *   2. the timestamp is the earliest of all timestamps so far
		 */
2390
2391 if (status == CS_WAIT_STATUS_GONE) {
2392 mcs_data->update_ts = false;
2393 mcs_data->gone_cs = true;
2394 } else if (mcs_data->update_ts &&
2395 (ktime_compare(fence->timestamp,
2396 ktime_set(0, 0)) > 0) &&
2397 (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
2398 first_cs_time = fence->timestamp;
2399 }
2400 }
2401
2402 hl_fences_put(mcs_data->fence_arr, arr_len);
2403
2404 if (mcs_data->update_ts &&
2405 (ktime_compare(first_cs_time, max_ktime) != 0))
2406 mcs_data->timestamp = ktime_to_ns(first_cs_time);
2407
2408 return rc;
2409}
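/*
 * Worked example for the timestamp extraction above (illustrative): three
 * CSs complete at t = 30, t = 10 and t = 50. first_cs_time starts at
 * KTIME_SEC_MAX; the first fence lowers it to 30, the second to 10, and
 * the third is ignored since 50 > 10, so the reported multi-CS timestamp
 * is t = 10. If any polled fence is already gone, update_ts is cleared and
 * no timestamp is reported at all, because the earliest completion time
 * can no longer be determined.
 */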
2410
c457d5ab
OS
2411static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2412 u64 timeout_us, u64 seq,
2413 enum hl_cs_wait_status *status, s64 *timestamp)
2414{
2415 struct hl_fence *fence;
2416 int rc = 0;
2417
2418 if (timestamp)
2419 *timestamp = 0;
2420
2421 hl_ctx_get(hdev, ctx);
2422
2423 fence = hl_ctx_get_fence(ctx, seq);
2424
2425 rc = hl_wait_for_fence(ctx, seq, fence, status, timeout_us, timestamp);
2426 hl_fence_put(fence);
eff6f4a0
OG
2427 hl_ctx_put(ctx);
2428
2429 return rc;
2430}
2431
215f0c17
OS
2432/*
2433 * hl_wait_multi_cs_completion_init - init completion structure
2434 *
2435 * @hdev: pointer to habanalabs device structure
72d66255
OS
2436 * @stream_master_bitmap: stream master QIDs map, set bit indicates stream
2437 * master QID to wait on
215f0c17
OS
2438 *
2439 * @return valid completion struct pointer on success, otherwise error pointer
2440 *
2441 * up to MULTI_CS_MAX_USER_CTX calls can be done concurrently to the driver.
2442 * the function gets the first available completion (by marking it "used")
2443 * and initialize its values.
2444 */
2445static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
2446 struct hl_device *hdev,
72d66255 2447 u8 stream_master_bitmap)
215f0c17
OS
2448{
2449 struct multi_cs_completion *mcs_compl;
2450 int i;
2451
2452 /* find free multi_cs completion structure */
2453 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2454 mcs_compl = &hdev->multi_cs_completion[i];
2455 spin_lock(&mcs_compl->lock);
2456 if (!mcs_compl->used) {
2457 mcs_compl->used = 1;
2458 mcs_compl->timestamp = 0;
72d66255 2459 mcs_compl->stream_master_qid_map = stream_master_bitmap;
215f0c17
OS
2460 reinit_completion(&mcs_compl->completion);
2461 spin_unlock(&mcs_compl->lock);
2462 break;
2463 }
2464 spin_unlock(&mcs_compl->lock);
2465 }
2466
2467 if (i == MULTI_CS_MAX_USER_CTX) {
2468 dev_err(hdev->dev,
2469 "no available multi-CS completion structure\n");
2470 return ERR_PTR(-ENOMEM);
2471 }
2472 return mcs_compl;
2473}
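/*
 * [Illustrative sketch, not driver code] The fixed-pool slot claiming done
 * above, isolated: each slot is protected by its own spinlock, so up to
 * MULTI_CS_MAX_USER_CTX waiters proceed without a global lock, and a full
 * pool fails fast instead of blocking. demo_claim_slot() is a hypothetical
 * name.
 */
static struct multi_cs_completion *
demo_claim_slot(struct multi_cs_completion *pool, int pool_len)
{
	int i;

	for (i = 0; i < pool_len; i++) {
		spin_lock(&pool[i].lock);
		if (!pool[i].used) {
			pool[i].used = 1;
			spin_unlock(&pool[i].lock);
			return &pool[i];
		}
		spin_unlock(&pool[i].lock);
	}

	return NULL;	/* caller maps this to an error pointer */
}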
2474
2475/*
2476 * hl_wait_multi_cs_completion_fini - return completion structure and set as
2477 * unused
2478 *
2479 * @mcs_compl: pointer to the completion structure
2480 */
2481static void hl_wait_multi_cs_completion_fini(
2482 struct multi_cs_completion *mcs_compl)
2483{
	/*
	 * Free the completion structure; do it under the lock to stay in
	 * sync with the thread that signals completion.
	 */
2488 spin_lock(&mcs_compl->lock);
2489 mcs_compl->used = 0;
2490 spin_unlock(&mcs_compl->lock);
2491}
2492
2493/*
2494 * hl_wait_multi_cs_completion - wait for first CS to complete
2495 *
2496 * @mcs_data: multi-CS internal data
2497 *
2498 * @return 0 on success, otherwise non 0 error code
2499 */
2500static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
2501{
2502 struct hl_device *hdev = mcs_data->ctx->hdev;
2503 struct multi_cs_completion *mcs_compl;
2504 long completion_rc;
2505
2506 mcs_compl = hl_wait_multi_cs_completion_init(hdev,
72d66255 2507 mcs_data->stream_master_qid_map);
215f0c17
OS
2508 if (IS_ERR(mcs_compl))
2509 return PTR_ERR(mcs_compl);
2510
2511 completion_rc = wait_for_completion_interruptible_timeout(
2512 &mcs_compl->completion,
2513 usecs_to_jiffies(mcs_data->timeout_us));
2514
2515 /* update timestamp */
2516 if (completion_rc > 0)
2517 mcs_data->timestamp = mcs_compl->timestamp;
2518
2519 hl_wait_multi_cs_completion_fini(mcs_compl);
2520
2521 mcs_data->wait_status = completion_rc;
2522
2523 return 0;
2524}
2525
2526/*
2527 * hl_multi_cs_completion_init - init array of multi-CS completion structures
2528 *
2529 * @hdev: pointer to habanalabs device structure
2530 */
2531void hl_multi_cs_completion_init(struct hl_device *hdev)
2532{
2533 struct multi_cs_completion *mcs_cmpl;
2534 int i;
2535
2536 for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
2537 mcs_cmpl = &hdev->multi_cs_completion[i];
2538 mcs_cmpl->used = 0;
2539 spin_lock_init(&mcs_cmpl->lock);
2540 init_completion(&mcs_cmpl->completion);
2541 }
2542}
2543
2544/*
2545 * hl_multi_cs_wait_ioctl - implementation of the multi-CS wait ioctl
2546 *
2547 * @hpriv: pointer to the private data of the fd
2548 * @data: pointer to multi-CS wait ioctl in/out args
 */
2551static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2552{
2553 struct hl_device *hdev = hpriv->hdev;
2554 struct multi_cs_data mcs_data = {0};
2555 union hl_wait_cs_args *args = data;
2556 struct hl_ctx *ctx = hpriv->ctx;
2557 struct hl_fence **fence_arr;
2558 void __user *seq_arr;
2559 u32 size_to_copy;
2560 u64 *cs_seq_arr;
2561 u8 seq_arr_len;
2562 int rc;
2563
2564 if (!hdev->supports_wait_for_multi_cs) {
2565 dev_err(hdev->dev, "Wait for multi CS is not supported\n");
2566 return -EPERM;
2567 }
2568
2569 seq_arr_len = args->in.seq_arr_len;
2570
2571 if (seq_arr_len > HL_WAIT_MULTI_CS_LIST_MAX_LEN) {
2572 dev_err(hdev->dev, "Can wait only up to %d CSs, input sequence is of length %u\n",
2573 HL_WAIT_MULTI_CS_LIST_MAX_LEN, seq_arr_len);
2574 return -EINVAL;
2575 }
2576
2577 /* allocate memory for sequence array */
2578 cs_seq_arr =
2579 kmalloc_array(seq_arr_len, sizeof(*cs_seq_arr), GFP_KERNEL);
2580 if (!cs_seq_arr)
2581 return -ENOMEM;
2582
2583 /* copy CS sequence array from user */
2584 seq_arr = (void __user *) (uintptr_t) args->in.seq;
2585 size_to_copy = seq_arr_len * sizeof(*cs_seq_arr);
2586 if (copy_from_user(cs_seq_arr, seq_arr, size_to_copy)) {
2587 dev_err(hdev->dev, "Failed to copy multi-cs sequence array from user\n");
2588 rc = -EFAULT;
2589 goto free_seq_arr;
2590 }
2591
2592 /* allocate array for the fences */
2593 fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
2594 if (!fence_arr) {
2595 rc = -ENOMEM;
2596 goto free_seq_arr;
2597 }
2598
2599 /* initialize the multi-CS internal data */
2600 mcs_data.ctx = ctx;
2601 mcs_data.seq_arr = cs_seq_arr;
2602 mcs_data.fence_arr = fence_arr;
2603 mcs_data.arr_len = seq_arr_len;
2604
2605 hl_ctx_get(hdev, ctx);
2606
2607 /* poll all CS fences, extract timestamp */
2608 mcs_data.update_ts = true;
2609 rc = hl_cs_poll_fences(&mcs_data);
2610 /*
2611 * skip wait for CS completion when one of the below is true:
2612 * - an error on the poll function
2613 * - one or more CS in the list completed
2614 * - the user called ioctl with timeout 0
2615 */
2616 if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
2617 goto put_ctx;
2618
2619 /* wait (with timeout) for the first CS to be completed */
2620 mcs_data.timeout_us = args->in.timeout_us;
2621 rc = hl_wait_multi_cs_completion(&mcs_data);
2622 if (rc)
2623 goto put_ctx;
2624
2625 if (mcs_data.wait_status > 0) {
		/*
		 * Poll the fences once again to update the CS map.
		 * No timestamp should be updated this time.
		 */
2630 mcs_data.update_ts = false;
2631 rc = hl_cs_poll_fences(&mcs_data);
2632
		/*
		 * If hl_wait_multi_cs_completion() returned before the
		 * timeout (i.e. it got a completion), we expect to see at
		 * least one CS completed after the poll function.
		 */
2638 if (!mcs_data.completion_bitmap) {
3d3200ae
OB
2639 dev_warn_ratelimited(hdev->dev,
2640 "Multi-CS got completion on wait but no CS completed\n");
215f0c17
OS
2641 rc = -EFAULT;
2642 }
2643 }
2644
2645put_ctx:
2646 hl_ctx_put(ctx);
2647 kfree(fence_arr);
2648
2649free_seq_arr:
2650 kfree(cs_seq_arr);
2651
2652 /* update output args */
2653 memset(args, 0, sizeof(*args));
2654 if (rc)
2655 return rc;
2656
2657 if (mcs_data.completion_bitmap) {
2658 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2659 args->out.cs_completion_map = mcs_data.completion_bitmap;
2660
		/* if the timestamp is not 0, it is valid */
2662 if (mcs_data.timestamp) {
2663 args->out.timestamp_nsec = mcs_data.timestamp;
2664 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2665 }
2666
		/* update if some CS was gone */
		if (mcs_data.gone_cs)
			args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2670 } else if (mcs_data.wait_status == -ERESTARTSYS) {
2671 args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
2672 } else {
2673 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2674 }
2675
2676 return 0;
2677}
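/*
 * [Illustrative user-space sketch] Driving the multi-CS wait flow above.
 * Assumptions: HL_IOCTL_WAIT_CS and HL_WAIT_CS_FLAGS_MULTI_CS come from
 * the installed uapi header (include path may differ); error handling is
 * elided. Bit i of out.cs_completion_map corresponds to seq_arr[i].
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int demo_wait_multi_cs(int fd, const __u64 *seq_arr, __u8 seq_arr_len,
				__u64 timeout_us, __u64 *completion_map)
{
	union hl_wait_cs_args args;
	int rc;

	memset(&args, 0, sizeof(args));
	args.in.seq = (__u64) (uintptr_t) seq_arr;
	args.in.seq_arr_len = seq_arr_len;
	args.in.timeout_us = timeout_us;
	args.in.flags = HL_WAIT_CS_FLAGS_MULTI_CS;

	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &args);
	if (!rc && args.out.status == HL_WAIT_CS_STATUS_COMPLETED)
		*completion_map = args.out.cs_completion_map;
	return rc;
}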
2678
ab5f5c30 2679static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
eff6f4a0
OG
2680{
2681 struct hl_device *hdev = hpriv->hdev;
2682 union hl_wait_cs_args *args = data;
9d127ad5 2683 enum hl_cs_wait_status status;
eff6f4a0 2684 u64 seq = args->in.seq;
bd2f477f 2685 s64 timestamp;
9d127ad5 2686 int rc;
eff6f4a0 2687
9d127ad5 2688 rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
bd2f477f 2689 &status, &timestamp);
eff6f4a0
OG
2690
2691 memset(args, 0, sizeof(*args));
2692
9d127ad5 2693 if (rc) {
eff6f4a0 2694 if (rc == -ERESTARTSYS) {
0eab4f89
OG
2695 dev_err_ratelimited(hdev->dev,
2696 "user process got signal while waiting for CS handle %llu\n",
2697 seq);
eff6f4a0
OG
2698 args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
2699 rc = -EINTR;
2700 } else if (rc == -ETIMEDOUT) {
0eab4f89
OG
2701 dev_err_ratelimited(hdev->dev,
2702 "CS %llu has timed-out while user process is waiting for it\n",
2703 seq);
eff6f4a0
OG
2704 args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT;
2705 } else if (rc == -EIO) {
0eab4f89
OG
2706 dev_err_ratelimited(hdev->dev,
2707 "CS %llu has been aborted while user process is waiting for it\n",
2708 seq);
eff6f4a0
OG
2709 args->out.status = HL_WAIT_CS_STATUS_ABORTED;
2710 }
2711 return rc;
2712 }
2713
bd2f477f
OB
2714 if (timestamp) {
2715 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
2716 args->out.timestamp_nsec = timestamp;
2717 }
2718
9d127ad5
OB
2719 switch (status) {
2720 case CS_WAIT_STATUS_GONE:
2721 args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
2722 fallthrough;
2723 case CS_WAIT_STATUS_COMPLETED:
eff6f4a0 2724 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
9d127ad5
OB
2725 break;
2726 case CS_WAIT_STATUS_BUSY:
2727 default:
2728 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2729 break;
2730 }
eff6f4a0
OG
2731
2732 return 0;
2733}
ab5f5c30
OB
2734
2735static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
2736 u32 timeout_us, u64 user_address,
2737 u32 target_value, u16 interrupt_offset,
2738 enum hl_cs_wait_status *status)
2739{
2740 struct hl_user_pending_interrupt *pend;
2741 struct hl_user_interrupt *interrupt;
176d23a7 2742 unsigned long timeout, flags;
ab5f5c30 2743 u32 completion_value;
176d23a7 2744 long completion_rc;
ab5f5c30
OB
2745 int rc = 0;
2746
366addb0 2747 if (timeout_us == U32_MAX)
ab5f5c30
OB
2748 timeout = timeout_us;
2749 else
2750 timeout = usecs_to_jiffies(timeout_us);
2751
2752 hl_ctx_get(hdev, ctx);
2753
d5eb8373 2754 pend = kmalloc(sizeof(*pend), GFP_KERNEL);
ab5f5c30
OB
2755 if (!pend) {
2756 hl_ctx_put(ctx);
2757 return -ENOMEM;
2758 }
2759
2760 hl_fence_init(&pend->fence, ULONG_MAX);
2761
2762 if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
2763 interrupt = &hdev->common_user_interrupt;
2764 else
2765 interrupt = &hdev->user_interrupt[interrupt_offset];
2766
d53c6659
OB
2767 /* Add pending user interrupt to relevant list for the interrupt
2768 * handler to monitor
2769 */
2770 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
2771 list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
2772 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2773
	/* We check the completion value, as the interrupt could have been
	 * received before we added the node to the wait list.
	 */
698f744a
OG
2777 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
2778 dev_err(hdev->dev, "Failed to copy completion value from user\n");
ab5f5c30 2779 rc = -EFAULT;
d53c6659 2780 goto remove_pending_user_interrupt;
ab5f5c30
OB
2781 }
2782
2783 if (completion_value >= target_value)
2784 *status = CS_WAIT_STATUS_COMPLETED;
2785 else
2786 *status = CS_WAIT_STATUS_BUSY;
2787
2788 if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
d53c6659 2789 goto remove_pending_user_interrupt;
ab5f5c30
OB
2790
2791wait_again:
2792 /* Wait for interrupt handler to signal completion */
698f744a
OG
2793 completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
2794 timeout);
ab5f5c30
OB
2795
2796 /* If timeout did not expire we need to perform the comparison.
2797 * If comparison fails, keep waiting until timeout expires
2798 */
2799 if (completion_rc > 0) {
d53c6659
OB
2800 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
		/* reinit_completion must be called before we check the user
		 * completion value; otherwise, if the interrupt is received
		 * after the comparison and before the next
		 * wait_for_completion, we will reach the timeout and fail.
		 */
2806 reinit_completion(&pend->fence.completion);
2807 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
2808
698f744a
OG
2809 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
2810 dev_err(hdev->dev, "Failed to copy completion value from user\n");
ab5f5c30 2811 rc = -EFAULT;
d5546d78 2812
ab5f5c30
OB
2813 goto remove_pending_user_interrupt;
2814 }
2815
2816 if (completion_value >= target_value) {
2817 *status = CS_WAIT_STATUS_COMPLETED;
2818 } else {
24a10709 2819 timeout = completion_rc;
ab5f5c30
OB
2820 goto wait_again;
2821 }
429d77ca
OG
2822 } else if (completion_rc == -ERESTARTSYS) {
2823 dev_err_ratelimited(hdev->dev,
2824 "user process got signal while waiting for interrupt ID %d\n",
2825 interrupt->interrupt_id);
2826 *status = HL_WAIT_CS_STATUS_INTERRUPTED;
2827 rc = -EINTR;
ab5f5c30
OB
2828 } else {
2829 *status = CS_WAIT_STATUS_BUSY;
2830 }
2831
2832remove_pending_user_interrupt:
176d23a7 2833 spin_lock_irqsave(&interrupt->wait_list_lock, flags);
ab5f5c30 2834 list_del(&pend->wait_list_node);
176d23a7 2835 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
698f744a 2836
ab5f5c30
OB
2837 kfree(pend);
2838 hl_ctx_put(ctx);
2839
2840 return rc;
2841}
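/*
 * Ordering recap for _hl_interrupt_wait_ioctl() above (illustrative):
 *
 *   1. arm: add pend to interrupt->wait_list_head under wait_list_lock
 *   2. read the user value; if >= target, done without sleeping
 *   3. sleep in wait_for_completion_interruptible_timeout()
 *   4. on wakeup, reinit_completion() under wait_list_lock, then re-read
 *      the user value; if still below target, wait again with the
 *      remaining jiffies returned in completion_rc
 *
 * Arming before reading (1 before 2) closes the race where the update and
 * its interrupt land between the read and the list insertion; re-arming
 * before re-reading on the wakeup path (4) closes the mirror-image race.
 */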
2842
2843static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2844{
2845 u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
2846 struct hl_device *hdev = hpriv->hdev;
2847 struct asic_fixed_properties *prop;
2848 union hl_wait_cs_args *args = data;
2849 enum hl_cs_wait_status status;
2850 int rc;
2851
2852 prop = &hdev->asic_prop;
2853
2854 if (!prop->user_interrupt_count) {
2855 dev_err(hdev->dev, "no user interrupts allowed");
2856 return -EPERM;
2857 }
2858
2859 interrupt_id =
2860 FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
2861
2862 first_interrupt = prop->first_available_user_msix_interrupt;
2863 last_interrupt = prop->first_available_user_msix_interrupt +
2864 prop->user_interrupt_count - 1;
2865
2866 if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
2867 interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
2868 dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
2869 return -EINVAL;
2870 }
2871
2872 if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
2873 interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
2874 else
2875 interrupt_offset = interrupt_id - first_interrupt;
2876
2877 rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
2878 args->in.interrupt_timeout_us, args->in.addr,
2879 args->in.target, interrupt_offset, &status);
2880
2881 memset(args, 0, sizeof(*args));
2882
2883 if (rc) {
429d77ca
OG
2884 if (rc != -EINTR)
2885 dev_err_ratelimited(hdev->dev,
2886 "interrupt_wait_ioctl failed (%d)\n", rc);
ab5f5c30
OB
2887
2888 return rc;
2889 }
2890
2891 switch (status) {
2892 case CS_WAIT_STATUS_COMPLETED:
2893 args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
2894 break;
2895 case CS_WAIT_STATUS_BUSY:
2896 default:
2897 args->out.status = HL_WAIT_CS_STATUS_BUSY;
2898 break;
2899 }
2900
2901 return 0;
2902}
2903
2904int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
2905{
2906 union hl_wait_cs_args *args = data;
2907 u32 flags = args->in.flags;
2908 int rc;
2909
053caa26
OG
2910 /* If the device is not operational, no point in waiting for any command submission or
2911 * user interrupt
2912 */
2913 if (!hl_device_operational(hpriv->hdev, NULL))
2914 return -EPERM;
2915
ab5f5c30
OB
2916 if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
2917 rc = hl_interrupt_wait_ioctl(hpriv, data);
215f0c17
OS
2918 else if (flags & HL_WAIT_CS_FLAGS_MULTI_CS)
2919 rc = hl_multi_cs_wait_ioctl(hpriv, data);
ab5f5c30
OB
2920 else
2921 rc = hl_cs_wait_ioctl(hpriv, data);
2922
2923 return rc;
2924}