2 * Copyright 2017 Red Hat
3 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * DRM synchronisation objects (syncobj) are a persistent objects,
33 * that contain an optional fence. The fence can be updated with a new
36 * syncobj's can be waited upon, where it will wait for the underlying
39 * syncobj's can be export to fd's and back, these fd's are opaque and
40 * have no other use case, except passing the syncobj between processes.
42 * Their primary use-case is to implement Vulkan fences and semaphores.
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
50 #include <linux/file.h>
52 #include <linux/anon_inodes.h>
53 #include <linux/sync_file.h>
54 #include <linux/sched/signal.h>
56 #include "drm_internal.h"
57 #include <drm/drm_syncobj.h>
60 * drm_syncobj_find - lookup and reference a sync object.
61 * @file_private: drm file private pointer
62 * @handle: sync object handle to lookup.
64 * Returns a reference to the syncobj pointed to by handle or NULL.
66 struct drm_syncobj
*drm_syncobj_find(struct drm_file
*file_private
,
69 struct drm_syncobj
*syncobj
;
71 spin_lock(&file_private
->syncobj_table_lock
);
73 /* Check if we currently have a reference on the object */
74 syncobj
= idr_find(&file_private
->syncobj_idr
, handle
);
76 drm_syncobj_get(syncobj
);
78 spin_unlock(&file_private
->syncobj_table_lock
);
82 EXPORT_SYMBOL(drm_syncobj_find
);
84 static void drm_syncobj_add_callback_locked(struct drm_syncobj
*syncobj
,
85 struct drm_syncobj_cb
*cb
,
86 drm_syncobj_func_t func
)
89 list_add_tail(&cb
->node
, &syncobj
->cb_list
);
92 static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj
*syncobj
,
93 struct dma_fence
**fence
,
94 struct drm_syncobj_cb
*cb
,
95 drm_syncobj_func_t func
)
99 *fence
= drm_syncobj_fence_get(syncobj
);
103 spin_lock(&syncobj
->lock
);
104 /* We've already tried once to get a fence and failed. Now that we
105 * have the lock, try one more time just to be sure we don't add a
106 * callback when a fence has already been set.
108 if (syncobj
->fence
) {
109 *fence
= dma_fence_get(syncobj
->fence
);
113 drm_syncobj_add_callback_locked(syncobj
, cb
, func
);
116 spin_unlock(&syncobj
->lock
);
122 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
123 * @syncobj: Sync object to which to add the callback
124 * @cb: Callback to add
125 * @func: Func to use when initializing the drm_syncobj_cb struct
127 * This adds a callback to be called next time the fence is replaced
129 void drm_syncobj_add_callback(struct drm_syncobj
*syncobj
,
130 struct drm_syncobj_cb
*cb
,
131 drm_syncobj_func_t func
)
133 spin_lock(&syncobj
->lock
);
134 drm_syncobj_add_callback_locked(syncobj
, cb
, func
);
135 spin_unlock(&syncobj
->lock
);
137 EXPORT_SYMBOL(drm_syncobj_add_callback
);
140 * drm_syncobj_add_callback - removes a callback to syncobj::cb_list
141 * @syncobj: Sync object from which to remove the callback
142 * @cb: Callback to remove
144 void drm_syncobj_remove_callback(struct drm_syncobj
*syncobj
,
145 struct drm_syncobj_cb
*cb
)
147 spin_lock(&syncobj
->lock
);
148 list_del_init(&cb
->node
);
149 spin_unlock(&syncobj
->lock
);
151 EXPORT_SYMBOL(drm_syncobj_remove_callback
);
154 * drm_syncobj_replace_fence - replace fence in a sync object.
155 * @syncobj: Sync object to replace fence in
156 * @fence: fence to install in sync file.
158 * This replaces the fence on a sync object.
160 void drm_syncobj_replace_fence(struct drm_syncobj
*syncobj
,
161 struct dma_fence
*fence
)
163 struct dma_fence
*old_fence
;
164 struct drm_syncobj_cb
*cur
, *tmp
;
167 dma_fence_get(fence
);
169 spin_lock(&syncobj
->lock
);
171 old_fence
= syncobj
->fence
;
172 syncobj
->fence
= fence
;
174 if (fence
!= old_fence
) {
175 list_for_each_entry_safe(cur
, tmp
, &syncobj
->cb_list
, node
) {
176 list_del_init(&cur
->node
);
177 cur
->func(syncobj
, cur
);
181 spin_unlock(&syncobj
->lock
);
183 dma_fence_put(old_fence
);
185 EXPORT_SYMBOL(drm_syncobj_replace_fence
);
187 struct drm_syncobj_null_fence
{
188 struct dma_fence base
;
/* Driver/timeline name callback for the null fence. */
static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}
197 static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence
*fence
)
199 dma_fence_enable_sw_signaling(fence
);
200 return !dma_fence_is_signaled(fence
);
203 static const struct dma_fence_ops drm_syncobj_null_fence_ops
= {
204 .get_driver_name
= drm_syncobj_null_fence_get_name
,
205 .get_timeline_name
= drm_syncobj_null_fence_get_name
,
206 .enable_signaling
= drm_syncobj_null_fence_enable_signaling
,
207 .wait
= dma_fence_default_wait
,
211 static int drm_syncobj_assign_null_handle(struct drm_syncobj
*syncobj
)
213 struct drm_syncobj_null_fence
*fence
;
214 fence
= kzalloc(sizeof(*fence
), GFP_KERNEL
);
218 spin_lock_init(&fence
->lock
);
219 dma_fence_init(&fence
->base
, &drm_syncobj_null_fence_ops
,
221 dma_fence_signal(&fence
->base
);
223 drm_syncobj_replace_fence(syncobj
, &fence
->base
);
225 dma_fence_put(&fence
->base
);
230 int drm_syncobj_find_fence(struct drm_file
*file_private
,
232 struct dma_fence
**fence
)
234 struct drm_syncobj
*syncobj
= drm_syncobj_find(file_private
, handle
);
240 *fence
= drm_syncobj_fence_get(syncobj
);
244 drm_syncobj_put(syncobj
);
247 EXPORT_SYMBOL(drm_syncobj_find_fence
);
250 * drm_syncobj_free - free a sync object.
251 * @kref: kref to free.
253 * Only to be called from kref_put in drm_syncobj_put.
255 void drm_syncobj_free(struct kref
*kref
)
257 struct drm_syncobj
*syncobj
= container_of(kref
,
260 drm_syncobj_replace_fence(syncobj
, NULL
);
263 EXPORT_SYMBOL(drm_syncobj_free
);
266 * drm_syncobj_create - create a new syncobj
267 * @out_syncobj: returned syncobj
268 * @flags: DRM_SYNCOBJ_* flags
269 * @fence: if non-NULL, the syncobj will represent this fence
271 int drm_syncobj_create(struct drm_syncobj
**out_syncobj
, uint32_t flags
,
272 struct dma_fence
*fence
)
275 struct drm_syncobj
*syncobj
;
277 syncobj
= kzalloc(sizeof(struct drm_syncobj
), GFP_KERNEL
);
281 kref_init(&syncobj
->refcount
);
282 INIT_LIST_HEAD(&syncobj
->cb_list
);
283 spin_lock_init(&syncobj
->lock
);
285 if (flags
& DRM_SYNCOBJ_CREATE_SIGNALED
) {
286 ret
= drm_syncobj_assign_null_handle(syncobj
);
288 drm_syncobj_put(syncobj
);
294 drm_syncobj_replace_fence(syncobj
, fence
);
296 *out_syncobj
= syncobj
;
299 EXPORT_SYMBOL(drm_syncobj_create
);
302 * drm_syncobj_get_handle - get a handle from a syncobj
304 int drm_syncobj_get_handle(struct drm_file
*file_private
,
305 struct drm_syncobj
*syncobj
, u32
*handle
)
309 /* take a reference to put in the idr */
310 drm_syncobj_get(syncobj
);
312 idr_preload(GFP_KERNEL
);
313 spin_lock(&file_private
->syncobj_table_lock
);
314 ret
= idr_alloc(&file_private
->syncobj_idr
, syncobj
, 1, 0, GFP_NOWAIT
);
315 spin_unlock(&file_private
->syncobj_table_lock
);
320 drm_syncobj_put(syncobj
);
327 EXPORT_SYMBOL(drm_syncobj_get_handle
);
329 static int drm_syncobj_create_as_handle(struct drm_file
*file_private
,
330 u32
*handle
, uint32_t flags
)
333 struct drm_syncobj
*syncobj
;
335 ret
= drm_syncobj_create(&syncobj
, flags
, NULL
);
339 ret
= drm_syncobj_get_handle(file_private
, syncobj
, handle
);
340 drm_syncobj_put(syncobj
);
344 static int drm_syncobj_destroy(struct drm_file
*file_private
,
347 struct drm_syncobj
*syncobj
;
349 spin_lock(&file_private
->syncobj_table_lock
);
350 syncobj
= idr_remove(&file_private
->syncobj_idr
, handle
);
351 spin_unlock(&file_private
->syncobj_table_lock
);
356 drm_syncobj_put(syncobj
);
360 static int drm_syncobj_file_release(struct inode
*inode
, struct file
*file
)
362 struct drm_syncobj
*syncobj
= file
->private_data
;
364 drm_syncobj_put(syncobj
);
368 static const struct file_operations drm_syncobj_file_fops
= {
369 .release
= drm_syncobj_file_release
,
372 static int drm_syncobj_alloc_file(struct drm_syncobj
*syncobj
)
374 struct file
*file
= anon_inode_getfile("syncobj_file",
375 &drm_syncobj_file_fops
,
378 return PTR_ERR(file
);
380 drm_syncobj_get(syncobj
);
381 if (cmpxchg(&syncobj
->file
, NULL
, file
)) {
389 int drm_syncobj_get_fd(struct drm_syncobj
*syncobj
, int *p_fd
)
394 fd
= get_unused_fd_flags(O_CLOEXEC
);
398 if (!syncobj
->file
) {
399 ret
= drm_syncobj_alloc_file(syncobj
);
405 fd_install(fd
, syncobj
->file
);
409 EXPORT_SYMBOL(drm_syncobj_get_fd
);
411 static int drm_syncobj_handle_to_fd(struct drm_file
*file_private
,
412 u32 handle
, int *p_fd
)
414 struct drm_syncobj
*syncobj
= drm_syncobj_find(file_private
, handle
);
420 ret
= drm_syncobj_get_fd(syncobj
, p_fd
);
421 drm_syncobj_put(syncobj
);
425 static struct drm_syncobj
*drm_syncobj_fdget(int fd
)
427 struct file
*file
= fget(fd
);
431 if (file
->f_op
!= &drm_syncobj_file_fops
)
434 return file
->private_data
;
440 static int drm_syncobj_fd_to_handle(struct drm_file
*file_private
,
443 struct drm_syncobj
*syncobj
= drm_syncobj_fdget(fd
);
449 /* take a reference to put in the idr */
450 drm_syncobj_get(syncobj
);
452 idr_preload(GFP_KERNEL
);
453 spin_lock(&file_private
->syncobj_table_lock
);
454 ret
= idr_alloc(&file_private
->syncobj_idr
, syncobj
, 1, 0, GFP_NOWAIT
);
455 spin_unlock(&file_private
->syncobj_table_lock
);
466 int drm_syncobj_import_sync_file_fence(struct drm_file
*file_private
,
469 struct dma_fence
*fence
= sync_file_get_fence(fd
);
470 struct drm_syncobj
*syncobj
;
475 syncobj
= drm_syncobj_find(file_private
, handle
);
477 dma_fence_put(fence
);
481 drm_syncobj_replace_fence(syncobj
, fence
);
482 dma_fence_put(fence
);
483 drm_syncobj_put(syncobj
);
487 int drm_syncobj_export_sync_file(struct drm_file
*file_private
,
488 int handle
, int *p_fd
)
491 struct dma_fence
*fence
;
492 struct sync_file
*sync_file
;
493 int fd
= get_unused_fd_flags(O_CLOEXEC
);
498 ret
= drm_syncobj_find_fence(file_private
, handle
, &fence
);
502 sync_file
= sync_file_create(fence
);
504 dma_fence_put(fence
);
511 fd_install(fd
, sync_file
->file
);
520 * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
521 * @file_private: drm file-private structure to set up
523 * Called at device open time, sets up the structure for handling refcounting
527 drm_syncobj_open(struct drm_file
*file_private
)
529 idr_init(&file_private
->syncobj_idr
);
530 spin_lock_init(&file_private
->syncobj_table_lock
);
/* idr_for_each() callback: drop the idr's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
543 * drm_syncobj_release - release file-private sync object resources
544 * @file_private: drm file-private structure to clean up
546 * Called at close time when the filp is going away.
548 * Releases any remaining references on objects by this filp.
551 drm_syncobj_release(struct drm_file
*file_private
)
553 idr_for_each(&file_private
->syncobj_idr
,
554 &drm_syncobj_release_handle
, file_private
);
555 idr_destroy(&file_private
->syncobj_idr
);
559 drm_syncobj_create_ioctl(struct drm_device
*dev
, void *data
,
560 struct drm_file
*file_private
)
562 struct drm_syncobj_create
*args
= data
;
564 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
567 /* no valid flags yet */
568 if (args
->flags
& ~DRM_SYNCOBJ_CREATE_SIGNALED
)
571 return drm_syncobj_create_as_handle(file_private
,
572 &args
->handle
, args
->flags
);
576 drm_syncobj_destroy_ioctl(struct drm_device
*dev
, void *data
,
577 struct drm_file
*file_private
)
579 struct drm_syncobj_destroy
*args
= data
;
581 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
584 /* make sure padding is empty */
587 return drm_syncobj_destroy(file_private
, args
->handle
);
591 drm_syncobj_handle_to_fd_ioctl(struct drm_device
*dev
, void *data
,
592 struct drm_file
*file_private
)
594 struct drm_syncobj_handle
*args
= data
;
596 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
602 if (args
->flags
!= 0 &&
603 args
->flags
!= DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
)
606 if (args
->flags
& DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
)
607 return drm_syncobj_export_sync_file(file_private
, args
->handle
,
610 return drm_syncobj_handle_to_fd(file_private
, args
->handle
,
615 drm_syncobj_fd_to_handle_ioctl(struct drm_device
*dev
, void *data
,
616 struct drm_file
*file_private
)
618 struct drm_syncobj_handle
*args
= data
;
620 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
626 if (args
->flags
!= 0 &&
627 args
->flags
!= DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
)
630 if (args
->flags
& DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
)
631 return drm_syncobj_import_sync_file_fence(file_private
,
635 return drm_syncobj_fd_to_handle(file_private
, args
->fd
,
639 struct syncobj_wait_entry
{
640 struct task_struct
*task
;
641 struct dma_fence
*fence
;
642 struct dma_fence_cb fence_cb
;
643 struct drm_syncobj_cb syncobj_cb
;
646 static void syncobj_wait_fence_func(struct dma_fence
*fence
,
647 struct dma_fence_cb
*cb
)
649 struct syncobj_wait_entry
*wait
=
650 container_of(cb
, struct syncobj_wait_entry
, fence_cb
);
652 wake_up_process(wait
->task
);
655 static void syncobj_wait_syncobj_func(struct drm_syncobj
*syncobj
,
656 struct drm_syncobj_cb
*cb
)
658 struct syncobj_wait_entry
*wait
=
659 container_of(cb
, struct syncobj_wait_entry
, syncobj_cb
);
661 /* This happens inside the syncobj lock */
662 wait
->fence
= dma_fence_get(syncobj
->fence
);
663 wake_up_process(wait
->task
);
666 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj
**syncobjs
,
672 struct syncobj_wait_entry
*entries
;
673 struct dma_fence
*fence
;
675 uint32_t signaled_count
, i
;
677 entries
= kcalloc(count
, sizeof(*entries
), GFP_KERNEL
);
681 /* Walk the list of sync objects and initialize entries. We do
682 * this up-front so that we can properly return -EINVAL if there is
683 * a syncobj with a missing fence and then never have the chance of
684 * returning -EINVAL again.
687 for (i
= 0; i
< count
; ++i
) {
688 entries
[i
].task
= current
;
689 entries
[i
].fence
= drm_syncobj_fence_get(syncobjs
[i
]);
690 if (!entries
[i
].fence
) {
691 if (flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
) {
695 goto cleanup_entries
;
699 if (dma_fence_is_signaled(entries
[i
].fence
)) {
700 if (signaled_count
== 0 && idx
)
706 /* Initialize ret to the max of timeout and 1. That way, the
707 * default return value indicates a successful wait and not a
710 ret
= max_t(signed long, timeout
, 1);
712 if (signaled_count
== count
||
713 (signaled_count
> 0 &&
714 !(flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
)))
715 goto cleanup_entries
;
717 /* There's a very annoying laxness in the dma_fence API here, in
718 * that backends are not required to automatically report when a
719 * fence is signaled prior to fence->ops->enable_signaling() being
720 * called. So here if we fail to match signaled_count, we need to
721 * fallthough and try a 0 timeout wait!
724 if (flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
) {
725 for (i
= 0; i
< count
; ++i
) {
726 drm_syncobj_fence_get_or_add_callback(syncobjs
[i
],
728 &entries
[i
].syncobj_cb
,
729 syncobj_wait_syncobj_func
);
734 set_current_state(TASK_INTERRUPTIBLE
);
737 for (i
= 0; i
< count
; ++i
) {
738 fence
= entries
[i
].fence
;
742 if (dma_fence_is_signaled(fence
) ||
743 (!entries
[i
].fence_cb
.func
&&
744 dma_fence_add_callback(fence
,
745 &entries
[i
].fence_cb
,
746 syncobj_wait_fence_func
))) {
747 /* The fence has been signaled */
748 if (flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
) {
758 if (signaled_count
== count
)
762 /* If we are doing a 0 timeout wait and we got
763 * here, then we just timed out.
769 ret
= schedule_timeout(ret
);
771 if (ret
> 0 && signal_pending(current
))
776 __set_current_state(TASK_RUNNING
);
779 for (i
= 0; i
< count
; ++i
) {
780 if (entries
[i
].syncobj_cb
.func
)
781 drm_syncobj_remove_callback(syncobjs
[i
],
782 &entries
[i
].syncobj_cb
);
783 if (entries
[i
].fence_cb
.func
)
784 dma_fence_remove_callback(entries
[i
].fence
,
785 &entries
[i
].fence_cb
);
786 dma_fence_put(entries
[i
].fence
);
794 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
796 * @timeout_nsec: timeout nsec component in ns, 0 for poll
798 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
800 static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec
)
802 ktime_t abs_timeout
, now
;
803 u64 timeout_ns
, timeout_jiffies64
;
805 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
806 if (timeout_nsec
== 0)
809 abs_timeout
= ns_to_ktime(timeout_nsec
);
812 if (!ktime_after(abs_timeout
, now
))
815 timeout_ns
= ktime_to_ns(ktime_sub(abs_timeout
, now
));
817 timeout_jiffies64
= nsecs_to_jiffies64(timeout_ns
);
818 /* clamp timeout to avoid infinite timeout */
819 if (timeout_jiffies64
>= MAX_SCHEDULE_TIMEOUT
- 1)
820 return MAX_SCHEDULE_TIMEOUT
- 1;
822 return timeout_jiffies64
+ 1;
825 static int drm_syncobj_array_wait(struct drm_device
*dev
,
826 struct drm_file
*file_private
,
827 struct drm_syncobj_wait
*wait
,
828 struct drm_syncobj
**syncobjs
)
830 signed long timeout
= drm_timeout_abs_to_jiffies(wait
->timeout_nsec
);
834 ret
= drm_syncobj_array_wait_timeout(syncobjs
,
841 wait
->first_signaled
= first
;
847 static int drm_syncobj_array_find(struct drm_file
*file_private
,
848 void *user_handles
, uint32_t count_handles
,
849 struct drm_syncobj
***syncobjs_out
)
851 uint32_t i
, *handles
;
852 struct drm_syncobj
**syncobjs
;
855 handles
= kmalloc_array(count_handles
, sizeof(*handles
), GFP_KERNEL
);
859 if (copy_from_user(handles
, user_handles
,
860 sizeof(uint32_t) * count_handles
)) {
862 goto err_free_handles
;
865 syncobjs
= kmalloc_array(count_handles
, sizeof(*syncobjs
), GFP_KERNEL
);
866 if (syncobjs
== NULL
) {
868 goto err_free_handles
;
871 for (i
= 0; i
< count_handles
; i
++) {
872 syncobjs
[i
] = drm_syncobj_find(file_private
, handles
[i
]);
875 goto err_put_syncobjs
;
880 *syncobjs_out
= syncobjs
;
885 drm_syncobj_put(syncobjs
[i
]);
893 static void drm_syncobj_array_free(struct drm_syncobj
**syncobjs
,
897 for (i
= 0; i
< count
; i
++)
898 drm_syncobj_put(syncobjs
[i
]);
903 drm_syncobj_wait_ioctl(struct drm_device
*dev
, void *data
,
904 struct drm_file
*file_private
)
906 struct drm_syncobj_wait
*args
= data
;
907 struct drm_syncobj
**syncobjs
;
910 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
913 if (args
->flags
& ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
|
914 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
))
917 if (args
->count_handles
== 0)
920 ret
= drm_syncobj_array_find(file_private
,
921 u64_to_user_ptr(args
->handles
),
927 ret
= drm_syncobj_array_wait(dev
, file_private
,
930 drm_syncobj_array_free(syncobjs
, args
->count_handles
);
936 drm_syncobj_reset_ioctl(struct drm_device
*dev
, void *data
,
937 struct drm_file
*file_private
)
939 struct drm_syncobj_array
*args
= data
;
940 struct drm_syncobj
**syncobjs
;
944 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
950 if (args
->count_handles
== 0)
953 ret
= drm_syncobj_array_find(file_private
,
954 u64_to_user_ptr(args
->handles
),
960 for (i
= 0; i
< args
->count_handles
; i
++)
961 drm_syncobj_replace_fence(syncobjs
[i
], NULL
);
963 drm_syncobj_array_free(syncobjs
, args
->count_handles
);
969 drm_syncobj_signal_ioctl(struct drm_device
*dev
, void *data
,
970 struct drm_file
*file_private
)
972 struct drm_syncobj_array
*args
= data
;
973 struct drm_syncobj
**syncobjs
;
977 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
983 if (args
->count_handles
== 0)
986 ret
= drm_syncobj_array_find(file_private
,
987 u64_to_user_ptr(args
->handles
),
993 for (i
= 0; i
< args
->count_handles
; i
++) {
994 ret
= drm_syncobj_assign_null_handle(syncobjs
[i
]);
999 drm_syncobj_array_free(syncobjs
, args
->count_handles
);