2 * Copyright 2017 Red Hat
3 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * DRM synchronisation objects (syncobj) are persistent objects
33 * that contain an optional fence. The fence can be updated with a new
36 * syncobj's can be waited upon, where it will wait for the underlying
39 * syncobj's can be exported to fd's and back; these fd's are opaque and
40 * have no other use case, except passing the syncobj between processes.
42 * Their primary use-case is to implement Vulkan fences and semaphores.
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
50 #include <linux/file.h>
52 #include <linux/anon_inodes.h>
53 #include <linux/sync_file.h>
54 #include <linux/sched/signal.h>
56 #include "drm_internal.h"
57 #include <drm/drm_syncobj.h>
60 * drm_syncobj_find - lookup and reference a sync object.
61 * @file_private: drm file private pointer
62 * @handle: sync object handle to lookup.
64 * Returns a reference to the syncobj pointed to by handle or NULL.
66 struct drm_syncobj
*drm_syncobj_find(struct drm_file
*file_private
,
69 struct drm_syncobj
*syncobj
;
71 spin_lock(&file_private
->syncobj_table_lock
);
73 /* Check if we currently have a reference on the object */
74 syncobj
= idr_find(&file_private
->syncobj_idr
, handle
);
76 drm_syncobj_get(syncobj
);
78 spin_unlock(&file_private
->syncobj_table_lock
);
82 EXPORT_SYMBOL(drm_syncobj_find
);
84 static void drm_syncobj_add_callback_locked(struct drm_syncobj
*syncobj
,
85 struct drm_syncobj_cb
*cb
,
86 drm_syncobj_func_t func
)
89 list_add_tail(&cb
->node
, &syncobj
->cb_list
);
92 static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj
*syncobj
,
93 struct dma_fence
**fence
,
94 struct drm_syncobj_cb
*cb
,
95 drm_syncobj_func_t func
)
99 *fence
= drm_syncobj_fence_get(syncobj
);
103 spin_lock(&syncobj
->lock
);
104 /* We've already tried once to get a fence and failed. Now that we
105 * have the lock, try one more time just to be sure we don't add a
106 * callback when a fence has already been set.
108 if (syncobj
->fence
) {
109 *fence
= dma_fence_get(syncobj
->fence
);
113 drm_syncobj_add_callback_locked(syncobj
, cb
, func
);
116 spin_unlock(&syncobj
->lock
);
122 * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
123 * @syncobj: Sync object to which to add the callback
124 * @cb: Callback to add
125 * @func: Func to use when initializing the drm_syncobj_cb struct
127 * This adds a callback to be called next time the fence is replaced
129 void drm_syncobj_add_callback(struct drm_syncobj
*syncobj
,
130 struct drm_syncobj_cb
*cb
,
131 drm_syncobj_func_t func
)
133 spin_lock(&syncobj
->lock
);
134 drm_syncobj_add_callback_locked(syncobj
, cb
, func
);
135 spin_unlock(&syncobj
->lock
);
137 EXPORT_SYMBOL(drm_syncobj_add_callback
);
140 * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
141 * @syncobj: Sync object from which to remove the callback
142 * @cb: Callback to remove
144 void drm_syncobj_remove_callback(struct drm_syncobj
*syncobj
,
145 struct drm_syncobj_cb
*cb
)
147 spin_lock(&syncobj
->lock
);
148 list_del_init(&cb
->node
);
149 spin_unlock(&syncobj
->lock
);
151 EXPORT_SYMBOL(drm_syncobj_remove_callback
);
154 * drm_syncobj_replace_fence - replace fence in a sync object.
155 * @syncobj: Sync object to replace fence in
156 * @fence: fence to install in sync file.
158 * This replaces the fence on a sync object.
160 void drm_syncobj_replace_fence(struct drm_syncobj
*syncobj
,
161 struct dma_fence
*fence
)
163 struct dma_fence
*old_fence
;
164 struct drm_syncobj_cb
*cur
, *tmp
;
167 dma_fence_get(fence
);
169 spin_lock(&syncobj
->lock
);
171 old_fence
= syncobj
->fence
;
172 syncobj
->fence
= fence
;
174 if (fence
!= old_fence
) {
175 list_for_each_entry_safe(cur
, tmp
, &syncobj
->cb_list
, node
) {
176 list_del_init(&cur
->node
);
177 cur
->func(syncobj
, cur
);
181 spin_unlock(&syncobj
->lock
);
183 dma_fence_put(old_fence
);
185 EXPORT_SYMBOL(drm_syncobj_replace_fence
);
187 struct drm_syncobj_null_fence
{
188 struct dma_fence base
;
/* Driver/timeline name for the stub fence (same string for both). */
static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
{
	return "syncobjnull";
}
197 static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence
*fence
)
199 dma_fence_enable_sw_signaling(fence
);
200 return !dma_fence_is_signaled(fence
);
203 static const struct dma_fence_ops drm_syncobj_null_fence_ops
= {
204 .get_driver_name
= drm_syncobj_null_fence_get_name
,
205 .get_timeline_name
= drm_syncobj_null_fence_get_name
,
206 .enable_signaling
= drm_syncobj_null_fence_enable_signaling
,
207 .wait
= dma_fence_default_wait
,
211 static int drm_syncobj_assign_null_handle(struct drm_syncobj
*syncobj
)
213 struct drm_syncobj_null_fence
*fence
;
214 fence
= kzalloc(sizeof(*fence
), GFP_KERNEL
);
218 spin_lock_init(&fence
->lock
);
219 dma_fence_init(&fence
->base
, &drm_syncobj_null_fence_ops
,
221 dma_fence_signal(&fence
->base
);
223 drm_syncobj_replace_fence(syncobj
, &fence
->base
);
225 dma_fence_put(&fence
->base
);
230 int drm_syncobj_find_fence(struct drm_file
*file_private
,
232 struct dma_fence
**fence
)
234 struct drm_syncobj
*syncobj
= drm_syncobj_find(file_private
, handle
);
240 *fence
= drm_syncobj_fence_get(syncobj
);
244 drm_syncobj_put(syncobj
);
247 EXPORT_SYMBOL(drm_syncobj_find_fence
);
250 * drm_syncobj_free - free a sync object.
251 * @kref: kref to free.
253 * Only to be called from kref_put in drm_syncobj_put.
255 void drm_syncobj_free(struct kref
*kref
)
257 struct drm_syncobj
*syncobj
= container_of(kref
,
260 drm_syncobj_replace_fence(syncobj
, NULL
);
263 EXPORT_SYMBOL(drm_syncobj_free
);
265 static int drm_syncobj_create(struct drm_file
*file_private
,
266 u32
*handle
, uint32_t flags
)
269 struct drm_syncobj
*syncobj
;
271 syncobj
= kzalloc(sizeof(struct drm_syncobj
), GFP_KERNEL
);
275 kref_init(&syncobj
->refcount
);
276 INIT_LIST_HEAD(&syncobj
->cb_list
);
277 spin_lock_init(&syncobj
->lock
);
279 if (flags
& DRM_SYNCOBJ_CREATE_SIGNALED
) {
280 ret
= drm_syncobj_assign_null_handle(syncobj
);
282 drm_syncobj_put(syncobj
);
287 idr_preload(GFP_KERNEL
);
288 spin_lock(&file_private
->syncobj_table_lock
);
289 ret
= idr_alloc(&file_private
->syncobj_idr
, syncobj
, 1, 0, GFP_NOWAIT
);
290 spin_unlock(&file_private
->syncobj_table_lock
);
295 drm_syncobj_put(syncobj
);
303 static int drm_syncobj_destroy(struct drm_file
*file_private
,
306 struct drm_syncobj
*syncobj
;
308 spin_lock(&file_private
->syncobj_table_lock
);
309 syncobj
= idr_remove(&file_private
->syncobj_idr
, handle
);
310 spin_unlock(&file_private
->syncobj_table_lock
);
315 drm_syncobj_put(syncobj
);
319 static int drm_syncobj_file_release(struct inode
*inode
, struct file
*file
)
321 struct drm_syncobj
*syncobj
= file
->private_data
;
323 drm_syncobj_put(syncobj
);
327 static const struct file_operations drm_syncobj_file_fops
= {
328 .release
= drm_syncobj_file_release
,
331 static int drm_syncobj_alloc_file(struct drm_syncobj
*syncobj
)
333 struct file
*file
= anon_inode_getfile("syncobj_file",
334 &drm_syncobj_file_fops
,
337 return PTR_ERR(file
);
339 drm_syncobj_get(syncobj
);
340 if (cmpxchg(&syncobj
->file
, NULL
, file
)) {
348 static int drm_syncobj_handle_to_fd(struct drm_file
*file_private
,
349 u32 handle
, int *p_fd
)
351 struct drm_syncobj
*syncobj
= drm_syncobj_find(file_private
, handle
);
358 fd
= get_unused_fd_flags(O_CLOEXEC
);
360 drm_syncobj_put(syncobj
);
364 if (!syncobj
->file
) {
365 ret
= drm_syncobj_alloc_file(syncobj
);
369 fd_install(fd
, syncobj
->file
);
370 drm_syncobj_put(syncobj
);
375 drm_syncobj_put(syncobj
);
379 static struct drm_syncobj
*drm_syncobj_fdget(int fd
)
381 struct file
*file
= fget(fd
);
385 if (file
->f_op
!= &drm_syncobj_file_fops
)
388 return file
->private_data
;
394 static int drm_syncobj_fd_to_handle(struct drm_file
*file_private
,
397 struct drm_syncobj
*syncobj
= drm_syncobj_fdget(fd
);
403 /* take a reference to put in the idr */
404 drm_syncobj_get(syncobj
);
406 idr_preload(GFP_KERNEL
);
407 spin_lock(&file_private
->syncobj_table_lock
);
408 ret
= idr_alloc(&file_private
->syncobj_idr
, syncobj
, 1, 0, GFP_NOWAIT
);
409 spin_unlock(&file_private
->syncobj_table_lock
);
420 static int drm_syncobj_import_sync_file_fence(struct drm_file
*file_private
,
423 struct dma_fence
*fence
= sync_file_get_fence(fd
);
424 struct drm_syncobj
*syncobj
;
429 syncobj
= drm_syncobj_find(file_private
, handle
);
431 dma_fence_put(fence
);
435 drm_syncobj_replace_fence(syncobj
, fence
);
436 dma_fence_put(fence
);
437 drm_syncobj_put(syncobj
);
441 static int drm_syncobj_export_sync_file(struct drm_file
*file_private
,
442 int handle
, int *p_fd
)
445 struct dma_fence
*fence
;
446 struct sync_file
*sync_file
;
447 int fd
= get_unused_fd_flags(O_CLOEXEC
);
452 ret
= drm_syncobj_find_fence(file_private
, handle
, &fence
);
456 sync_file
= sync_file_create(fence
);
458 dma_fence_put(fence
);
465 fd_install(fd
, sync_file
->file
);
474 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
475 * @file_private: drm file-private structure to set up
477 * Called at device open time, sets up the structure for handling refcounting
481 drm_syncobj_open(struct drm_file
*file_private
)
483 idr_init(&file_private
->syncobj_idr
);
484 spin_lock_init(&file_private
->syncobj_table_lock
);
/* idr_for_each callback: drop the idr's reference on each syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
497 * drm_syncobj_release - release file-private sync object resources
498 * @file_private: drm file-private structure to clean up
500 * Called at close time when the filp is going away.
502 * Releases any remaining references on objects by this filp.
505 drm_syncobj_release(struct drm_file
*file_private
)
507 idr_for_each(&file_private
->syncobj_idr
,
508 &drm_syncobj_release_handle
, file_private
);
509 idr_destroy(&file_private
->syncobj_idr
);
513 drm_syncobj_create_ioctl(struct drm_device
*dev
, void *data
,
514 struct drm_file
*file_private
)
516 struct drm_syncobj_create
*args
= data
;
518 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
521 /* no valid flags yet */
522 if (args
->flags
& ~DRM_SYNCOBJ_CREATE_SIGNALED
)
525 return drm_syncobj_create(file_private
,
526 &args
->handle
, args
->flags
);
530 drm_syncobj_destroy_ioctl(struct drm_device
*dev
, void *data
,
531 struct drm_file
*file_private
)
533 struct drm_syncobj_destroy
*args
= data
;
535 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
538 /* make sure padding is empty */
541 return drm_syncobj_destroy(file_private
, args
->handle
);
545 drm_syncobj_handle_to_fd_ioctl(struct drm_device
*dev
, void *data
,
546 struct drm_file
*file_private
)
548 struct drm_syncobj_handle
*args
= data
;
550 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
556 if (args
->flags
!= 0 &&
557 args
->flags
!= DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
)
560 if (args
->flags
& DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
)
561 return drm_syncobj_export_sync_file(file_private
, args
->handle
,
564 return drm_syncobj_handle_to_fd(file_private
, args
->handle
,
569 drm_syncobj_fd_to_handle_ioctl(struct drm_device
*dev
, void *data
,
570 struct drm_file
*file_private
)
572 struct drm_syncobj_handle
*args
= data
;
574 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
580 if (args
->flags
!= 0 &&
581 args
->flags
!= DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
)
584 if (args
->flags
& DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
)
585 return drm_syncobj_import_sync_file_fence(file_private
,
589 return drm_syncobj_fd_to_handle(file_private
, args
->fd
,
593 struct syncobj_wait_entry
{
594 struct task_struct
*task
;
595 struct dma_fence
*fence
;
596 struct dma_fence_cb fence_cb
;
597 struct drm_syncobj_cb syncobj_cb
;
600 static void syncobj_wait_fence_func(struct dma_fence
*fence
,
601 struct dma_fence_cb
*cb
)
603 struct syncobj_wait_entry
*wait
=
604 container_of(cb
, struct syncobj_wait_entry
, fence_cb
);
606 wake_up_process(wait
->task
);
609 static void syncobj_wait_syncobj_func(struct drm_syncobj
*syncobj
,
610 struct drm_syncobj_cb
*cb
)
612 struct syncobj_wait_entry
*wait
=
613 container_of(cb
, struct syncobj_wait_entry
, syncobj_cb
);
615 /* This happens inside the syncobj lock */
616 wait
->fence
= dma_fence_get(syncobj
->fence
);
617 wake_up_process(wait
->task
);
620 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj
**syncobjs
,
626 struct syncobj_wait_entry
*entries
;
627 struct dma_fence
*fence
;
629 uint32_t signaled_count
, i
;
631 entries
= kcalloc(count
, sizeof(*entries
), GFP_KERNEL
);
635 /* Walk the list of sync objects and initialize entries. We do
636 * this up-front so that we can properly return -EINVAL if there is
637 * a syncobj with a missing fence and then never have the chance of
638 * returning -EINVAL again.
641 for (i
= 0; i
< count
; ++i
) {
642 entries
[i
].task
= current
;
643 entries
[i
].fence
= drm_syncobj_fence_get(syncobjs
[i
]);
644 if (!entries
[i
].fence
) {
645 if (flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
) {
649 goto cleanup_entries
;
653 if (dma_fence_is_signaled(entries
[i
].fence
)) {
654 if (signaled_count
== 0 && idx
)
660 /* Initialize ret to the max of timeout and 1. That way, the
661 * default return value indicates a successful wait and not a
664 ret
= max_t(signed long, timeout
, 1);
666 if (signaled_count
== count
||
667 (signaled_count
> 0 &&
668 !(flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
)))
669 goto cleanup_entries
;
671 /* There's a very annoying laxness in the dma_fence API here, in
672 * that backends are not required to automatically report when a
673 * fence is signaled prior to fence->ops->enable_signaling() being
674 * called. So here if we fail to match signaled_count, we need to
675 * fallthough and try a 0 timeout wait!
678 if (flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
) {
679 for (i
= 0; i
< count
; ++i
) {
680 drm_syncobj_fence_get_or_add_callback(syncobjs
[i
],
682 &entries
[i
].syncobj_cb
,
683 syncobj_wait_syncobj_func
);
688 set_current_state(TASK_INTERRUPTIBLE
);
691 for (i
= 0; i
< count
; ++i
) {
692 fence
= entries
[i
].fence
;
696 if (dma_fence_is_signaled(fence
) ||
697 (!entries
[i
].fence_cb
.func
&&
698 dma_fence_add_callback(fence
,
699 &entries
[i
].fence_cb
,
700 syncobj_wait_fence_func
))) {
701 /* The fence has been signaled */
702 if (flags
& DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
) {
712 if (signaled_count
== count
)
716 /* If we are doing a 0 timeout wait and we got
717 * here, then we just timed out.
723 ret
= schedule_timeout(ret
);
725 if (ret
> 0 && signal_pending(current
))
730 __set_current_state(TASK_RUNNING
);
733 for (i
= 0; i
< count
; ++i
) {
734 if (entries
[i
].syncobj_cb
.func
)
735 drm_syncobj_remove_callback(syncobjs
[i
],
736 &entries
[i
].syncobj_cb
);
737 if (entries
[i
].fence_cb
.func
)
738 dma_fence_remove_callback(entries
[i
].fence
,
739 &entries
[i
].fence_cb
);
740 dma_fence_put(entries
[i
].fence
);
748 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
750 * @timeout_nsec: timeout nsec component in ns, 0 for poll
752 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
754 static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec
)
756 ktime_t abs_timeout
, now
;
757 u64 timeout_ns
, timeout_jiffies64
;
759 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
760 if (timeout_nsec
== 0)
763 abs_timeout
= ns_to_ktime(timeout_nsec
);
766 if (!ktime_after(abs_timeout
, now
))
769 timeout_ns
= ktime_to_ns(ktime_sub(abs_timeout
, now
));
771 timeout_jiffies64
= nsecs_to_jiffies64(timeout_ns
);
772 /* clamp timeout to avoid infinite timeout */
773 if (timeout_jiffies64
>= MAX_SCHEDULE_TIMEOUT
- 1)
774 return MAX_SCHEDULE_TIMEOUT
- 1;
776 return timeout_jiffies64
+ 1;
779 static int drm_syncobj_array_wait(struct drm_device
*dev
,
780 struct drm_file
*file_private
,
781 struct drm_syncobj_wait
*wait
,
782 struct drm_syncobj
**syncobjs
)
784 signed long timeout
= drm_timeout_abs_to_jiffies(wait
->timeout_nsec
);
788 ret
= drm_syncobj_array_wait_timeout(syncobjs
,
795 wait
->first_signaled
= first
;
801 static int drm_syncobj_array_find(struct drm_file
*file_private
,
802 void *user_handles
, uint32_t count_handles
,
803 struct drm_syncobj
***syncobjs_out
)
805 uint32_t i
, *handles
;
806 struct drm_syncobj
**syncobjs
;
809 handles
= kmalloc_array(count_handles
, sizeof(*handles
), GFP_KERNEL
);
813 if (copy_from_user(handles
, user_handles
,
814 sizeof(uint32_t) * count_handles
)) {
816 goto err_free_handles
;
819 syncobjs
= kmalloc_array(count_handles
, sizeof(*syncobjs
), GFP_KERNEL
);
820 if (syncobjs
== NULL
) {
822 goto err_free_handles
;
825 for (i
= 0; i
< count_handles
; i
++) {
826 syncobjs
[i
] = drm_syncobj_find(file_private
, handles
[i
]);
829 goto err_put_syncobjs
;
834 *syncobjs_out
= syncobjs
;
839 drm_syncobj_put(syncobjs
[i
]);
847 static void drm_syncobj_array_free(struct drm_syncobj
**syncobjs
,
851 for (i
= 0; i
< count
; i
++)
852 drm_syncobj_put(syncobjs
[i
]);
857 drm_syncobj_wait_ioctl(struct drm_device
*dev
, void *data
,
858 struct drm_file
*file_private
)
860 struct drm_syncobj_wait
*args
= data
;
861 struct drm_syncobj
**syncobjs
;
864 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
867 if (args
->flags
& ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
|
868 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
))
871 if (args
->count_handles
== 0)
874 ret
= drm_syncobj_array_find(file_private
,
875 u64_to_user_ptr(args
->handles
),
881 ret
= drm_syncobj_array_wait(dev
, file_private
,
884 drm_syncobj_array_free(syncobjs
, args
->count_handles
);
890 drm_syncobj_reset_ioctl(struct drm_device
*dev
, void *data
,
891 struct drm_file
*file_private
)
893 struct drm_syncobj_array
*args
= data
;
894 struct drm_syncobj
**syncobjs
;
898 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
904 if (args
->count_handles
== 0)
907 ret
= drm_syncobj_array_find(file_private
,
908 u64_to_user_ptr(args
->handles
),
914 for (i
= 0; i
< args
->count_handles
; i
++)
915 drm_syncobj_replace_fence(syncobjs
[i
], NULL
);
917 drm_syncobj_array_free(syncobjs
, args
->count_handles
);
923 drm_syncobj_signal_ioctl(struct drm_device
*dev
, void *data
,
924 struct drm_file
*file_private
)
926 struct drm_syncobj_array
*args
= data
;
927 struct drm_syncobj
**syncobjs
;
931 if (!drm_core_check_feature(dev
, DRIVER_SYNCOBJ
))
937 if (args
->count_handles
== 0)
940 ret
= drm_syncobj_array_find(file_private
,
941 u64_to_user_ptr(args
->handles
),
947 for (i
= 0; i
< args
->count_handles
; i
++) {
948 ret
= drm_syncobj_assign_null_handle(syncobjs
[i
]);
953 drm_syncobj_array_free(syncobjs
, args
->count_handles
);