/* drivers/dma-buf/dma-fence.c — mirrored from the Ubuntu Hirsute kernel tree
 * (git.proxmox.com, mirror_ubuntu-hirsute-kernel.git).
 */
1/*
2 * Fence mechanism for dma-buf and to allow for asynchronous dma access
3 *
4 * Copyright (C) 2012 Canonical Ltd
5 * Copyright (C) 2012 Texas Instruments
6 *
7 * Authors:
8 * Rob Clark <robdclark@gmail.com>
9 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published by
13 * the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 */
20
21#include <linux/slab.h>
22#include <linux/export.h>
23#include <linux/atomic.h>
f54d1867 24#include <linux/dma-fence.h>
e941759c
ML
25
26#define CREATE_TRACE_POINTS
f54d1867 27#include <trace/events/dma_fence.h>
e941759c 28
f54d1867
CW
/* Tracepoints exported for use by fence implementations in drivers. */
EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
e941759c
ML
39
40/**
f54d1867 41 * dma_fence_context_alloc - allocate an array of fence contexts
e941759c
ML
42 * @num: [in] amount of contexts to allocate
43 *
44 * This function will return the first index of the number of fences allocated.
45 * The fence context is used for setting fence->context to a unique number.
46 */
f54d1867 47u64 dma_fence_context_alloc(unsigned num)
e941759c
ML
48{
49 BUG_ON(!num);
f54d1867 50 return atomic64_add_return(num, &dma_fence_context_counter) - num;
e941759c 51}
f54d1867 52EXPORT_SYMBOL(dma_fence_context_alloc);
e941759c
ML
53
54/**
f54d1867 55 * dma_fence_signal_locked - signal completion of a fence
e941759c
ML
56 * @fence: the fence to signal
57 *
58 * Signal completion for software callbacks on a fence, this will unblock
f54d1867
CW
59 * dma_fence_wait() calls and run all the callbacks added with
60 * dma_fence_add_callback(). Can be called multiple times, but since a fence
e941759c
ML
61 * can only go from unsignaled to signaled state, it will only be effective
62 * the first time.
63 *
f54d1867 64 * Unlike dma_fence_signal, this function must be called with fence->lock held.
e941759c 65 */
f54d1867 66int dma_fence_signal_locked(struct dma_fence *fence)
e941759c 67{
f54d1867 68 struct dma_fence_cb *cur, *tmp;
e941759c
ML
69 int ret = 0;
70
78010cd9
RC
71 lockdep_assert_held(fence->lock);
72
e941759c
ML
73 if (WARN_ON(!fence))
74 return -EINVAL;
75
76 if (!ktime_to_ns(fence->timestamp)) {
77 fence->timestamp = ktime_get();
78 smp_mb__before_atomic();
79 }
80
f54d1867 81 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
e941759c
ML
82 ret = -EINVAL;
83
84 /*
f54d1867 85 * we might have raced with the unlocked dma_fence_signal,
e941759c
ML
86 * still run through all callbacks
87 */
88 } else
f54d1867 89 trace_dma_fence_signaled(fence);
e941759c
ML
90
91 list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
92 list_del_init(&cur->node);
93 cur->func(fence, cur);
94 }
95 return ret;
96}
f54d1867 97EXPORT_SYMBOL(dma_fence_signal_locked);
e941759c
ML
98
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Returns 0 on success, -EINVAL if @fence is NULL or already signaled.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	/* Record the first signaling time; the barrier orders the
	 * timestamp store before the SIGNALED bit is set below. */
	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	/* Only the first signaler proceeds; later calls are no-ops. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_dma_fence_signaled(fence);

	/* Callbacks can only have been queued after sw signaling was
	 * enabled, so skip taking the lock otherwise. */
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(dma_fence_signal);
e941759c
ML
139
140/**
f54d1867 141 * dma_fence_wait_timeout - sleep until the fence gets signaled
e941759c
ML
142 * or until timeout elapses
143 * @fence: [in] the fence to wait on
144 * @intr: [in] if true, do an interruptible wait
145 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
146 *
147 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
148 * remaining timeout in jiffies on success. Other error values may be
149 * returned on custom implementations.
150 *
151 * Performs a synchronous wait on this fence. It is assumed the caller
152 * directly or indirectly (buf-mgr between reservation and committing)
153 * holds a reference to the fence, otherwise the fence might be
154 * freed before return, resulting in undefined behavior.
155 */
156signed long
f54d1867 157dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
e941759c
ML
158{
159 signed long ret;
160
161 if (WARN_ON(timeout < 0))
162 return -EINVAL;
163
847b19a3 164 if (timeout == 0)
f54d1867 165 return dma_fence_is_signaled(fence);
847b19a3 166
f54d1867 167 trace_dma_fence_wait_start(fence);
e941759c 168 ret = fence->ops->wait(fence, intr, timeout);
f54d1867 169 trace_dma_fence_wait_end(fence);
e941759c
ML
170 return ret;
171}
f54d1867 172EXPORT_SYMBOL(dma_fence_wait_timeout);
e941759c 173
/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * Called on the last fence reference drop; delegates to the
 * implementation's ->release hook if one exists, otherwise frees the
 * fence with dma_fence_free(). A fence must have no pending callbacks
 * left when it is released.
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	/* Releasing a fence with callbacks still queued is a bug. */
	BUG_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);
e941759c 189
/**
 * dma_fence_free - default fence free callback
 * @fence: fence to free
 *
 * Frees the fence after an RCU grace period, so that concurrent RCU
 * readers of the fence remain safe.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);
e941759c
ML
195
/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: [in] the fence to enable
 *
 * this will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	/* Only the first caller flips ENABLE_SIGNAL, and only if the
	 * fence has not signaled already. */
	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			      &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		trace_dma_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		/* A false return from ->enable_signaling means the fence
		 * is already done; signal it here under the lock. */
		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
e941759c
ML
221
/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: [in] the fence to wait on
 * @cb: [in] the callback to register
 * @func: [in] the function to call
 *
 * cb will be initialized by dma_fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback)
 *
 * Add a software callback to the fence. Same restrictions apply to
 * refcount as it does to dma_fence_wait, however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with dma_fence_signal. The callback itself can be called
 * from irq context.
 *
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	/* Fast path: no point taking the lock for a signaled fence. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	/* Remember whether we are the one turning signaling on. */
	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	/* Re-check under the lock: we may have raced with a signaler. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		trace_dma_fence_enable_signal(fence);

		/* ->enable_signaling returning false means the fence is
		 * already done; signal it and report -ENOENT. */
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	/* Queue the callback only on success; otherwise leave cb->node
	 * in a state that dma_fence_remove_callback can handle. */
	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
e941759c
ML
286
287/**
f54d1867 288 * dma_fence_remove_callback - remove a callback from the signaling list
e941759c
ML
289 * @fence: [in] the fence to wait on
290 * @cb: [in] the callback to remove
291 *
292 * Remove a previously queued callback from the fence. This function returns
f353d71f 293 * true if the callback is successfully removed, or false if the fence has
e941759c
ML
294 * already been signaled.
295 *
296 * *WARNING*:
297 * Cancelling a callback should only be done if you really know what you're
298 * doing, since deadlocks and race conditions could occur all too easily. For
299 * this reason, it should only ever be done on hardware lockup recovery,
300 * with a reference held to the fence.
301 */
302bool
f54d1867 303dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
e941759c
ML
304{
305 unsigned long flags;
306 bool ret;
307
308 spin_lock_irqsave(fence->lock, flags);
309
310 ret = !list_empty(&cb->node);
311 if (ret)
312 list_del_init(&cb->node);
313
314 spin_unlock_irqrestore(fence->lock, flags);
315
316 return ret;
317}
f54d1867 318EXPORT_SYMBOL(dma_fence_remove_callback);
e941759c
ML
319
/* Ties a fence callback to the task that should be woken when the fence
 * signals; used by the default wait implementations below. */
struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

/* Fence callback for the default waits: wake the sleeping waiter. */
static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}
333
/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: [in] the fence to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout;
	bool was_set;

	/* Fast path: already signaled, the full timeout "remains". */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* Turn on sw signaling if nobody did yet. */
	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	/* Re-check under the lock in case we raced with a signaler. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_dma_fence_enable_signal(fence);

		/* false means the fence is effectively done already. */
		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	/* Queue a wake-up callback for this task and sleep until either
	 * the fence signals or the timeout runs out. */
	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		/* Drop the lock while sleeping so the signaler can run
		 * the callback list. */
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	/* If our callback has not fired yet, unhook it ourselves. */
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
e941759c 404
a519435a 405static bool
f54d1867 406dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count)
a519435a
CK
407{
408 int i;
409
410 for (i = 0; i < count; ++i) {
f54d1867
CW
411 struct dma_fence *fence = fences[i];
412 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
a519435a
CK
413 return true;
414 }
415 return false;
416}
417
/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: [in] array of fences to wait on
 * @count: [in] number of fences to wait on
 * @intr: [in] if true, do an interruptible wait
 * @timeout: [in] timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronous waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	/* Zero timeout: just poll all fences once. */
	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i]))
				return 1;

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	/* Register a wake-up callback on every fence. If any fence has a
	 * custom wait implementation we cannot multiplex it here. */
	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (fence->ops->wait != dma_fence_default_wait) {
			ret = -EINVAL;
			goto fence_rm_cb;
		}

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/* Check after setting the task state to avoid missing a
		 * wake-up between the test and the sleep. */
		if (dma_fence_test_signaled_any(fences, count))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	/* Unhook the callbacks registered so far (indices below i). */
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
a519435a 502
e941759c 503/**
f54d1867 504 * dma_fence_init - Initialize a custom fence.
e941759c 505 * @fence: [in] the fence to initialize
f54d1867 506 * @ops: [in] the dma_fence_ops for operations on this fence
e941759c
ML
507 * @lock: [in] the irqsafe spinlock to use for locking this fence
508 * @context: [in] the execution context this fence is run on
509 * @seqno: [in] a linear increasing sequence number for this context
510 *
511 * Initializes an allocated fence, the caller doesn't have to keep its
512 * refcount after committing with this fence, but it will need to hold a
f54d1867 513 * refcount again if dma_fence_ops.enable_signaling gets called. This can
e941759c
ML
514 * be used for other implementing other types of fence.
515 *
516 * context and seqno are used for easy comparison between fences, allowing
f54d1867 517 * to check which fence is later by simply using dma_fence_later.
e941759c
ML
518 */
519void
f54d1867
CW
520dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
521 spinlock_t *lock, u64 context, unsigned seqno)
e941759c
ML
522{
523 BUG_ON(!lock);
524 BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
525 !ops->get_driver_name || !ops->get_timeline_name);
526
527 kref_init(&fence->refcount);
528 fence->ops = ops;
529 INIT_LIST_HEAD(&fence->cb_list);
530 fence->lock = lock;
531 fence->context = context;
532 fence->seqno = seqno;
533 fence->flags = 0UL;
534
f54d1867 535 trace_dma_fence_init(fence);
e941759c 536}
f54d1867 537EXPORT_SYMBOL(dma_fence_init);