2 * Copyright 2017 Red Hat
3 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
33 * persistent objects that contain an optional fence. The fence can be updated
34 * with a new fence, or be NULL.
36 * syncobj's can be waited upon, where it will wait for the underlying
39 * syncobjs can be exported to fds and back, these fds are opaque and
40 * have no other use case, except passing the syncobj between processes.
42 * Their primary use-case is to implement Vulkan fences and semaphores.
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
49 #include <linux/anon_inodes.h>
50 #include <linux/file.h>
52 #include <linux/sched/signal.h>
53 #include <linux/sync_file.h>
54 #include <linux/uaccess.h>
56 #include <drm/drm_drv.h>
57 #include <drm/drm_file.h>
58 #include <drm/drm_gem.h>
59 #include <drm/drm_print.h>
60 #include <drm/drm_syncobj.h>
62 #include "drm_internal.h"
64 struct syncobj_wait_entry {
65 struct list_head node;
66 struct task_struct *task;
67 struct dma_fence *fence;
68 struct dma_fence_cb fence_cb;
72 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
73 struct syncobj_wait_entry *wait);
76 * drm_syncobj_find - lookup and reference a sync object.
77 * @file_private: drm file private pointer
78 * @handle: sync object handle to lookup.
80 * Returns a reference to the syncobj pointed to by handle or NULL. The
81 * reference must be released by calling drm_syncobj_put().
83 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
86 struct drm_syncobj *syncobj;
88 spin_lock(&file_private->syncobj_table_lock);
90 /* Check if we currently have a reference on the object */
91 syncobj = idr_find(&file_private->syncobj_idr, handle);
93 drm_syncobj_get(syncobj);
95 spin_unlock(&file_private->syncobj_table_lock);
99 EXPORT_SYMBOL(drm_syncobj_find);
101 static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
102 struct syncobj_wait_entry *wait)
104 struct dma_fence *fence;
109 spin_lock(&syncobj->lock);
110 /* We've already tried once to get a fence and failed. Now that we
111 * have the lock, try one more time just to be sure we don't add a
112 * callback when a fence has already been set.
114 fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
115 if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
116 dma_fence_put(fence);
117 list_add_tail(&wait->node, &syncobj->cb_list);
119 wait->fence = dma_fence_get_stub();
123 spin_unlock(&syncobj->lock);
126 static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
127 struct syncobj_wait_entry *wait)
129 if (!wait->node.next)
132 spin_lock(&syncobj->lock);
133 list_del_init(&wait->node);
134 spin_unlock(&syncobj->lock);
138 * drm_syncobj_add_point - add new timeline point to the syncobj
139 * @syncobj: sync object to add timeline point do
140 * @chain: chain node to use to add the point
141 * @fence: fence to encapsulate in the chain node
142 * @point: sequence number to use for the point
144 * Add the chain node as new timeline point to the syncobj.
146 void drm_syncobj_add_point(struct drm_syncobj *syncobj,
147 struct dma_fence_chain *chain,
148 struct dma_fence *fence,
151 struct syncobj_wait_entry *cur, *tmp;
152 struct dma_fence *prev;
154 dma_fence_get(fence);
156 spin_lock(&syncobj->lock);
158 prev = drm_syncobj_fence_get(syncobj);
159 /* You are adding an unorder point to timeline, which could cause payload returned from query_ioctl is 0! */
160 if (prev && prev->seqno >= point)
161 DRM_ERROR("You are adding an unorder point to timeline!\n");
162 dma_fence_chain_init(chain, prev, fence, point);
163 rcu_assign_pointer(syncobj->fence, &chain->base);
165 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
166 syncobj_wait_syncobj_func(syncobj, cur);
167 spin_unlock(&syncobj->lock);
169 /* Walk the chain once to trigger garbage collection */
170 dma_fence_chain_for_each(fence, prev);
173 EXPORT_SYMBOL(drm_syncobj_add_point);
176 * drm_syncobj_replace_fence - replace fence in a sync object.
177 * @syncobj: Sync object to replace fence in
178 * @fence: fence to install in sync file.
180 * This replaces the fence on a sync object.
182 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
183 struct dma_fence *fence)
185 struct dma_fence *old_fence;
186 struct syncobj_wait_entry *cur, *tmp;
189 dma_fence_get(fence);
191 spin_lock(&syncobj->lock);
193 old_fence = rcu_dereference_protected(syncobj->fence,
194 lockdep_is_held(&syncobj->lock));
195 rcu_assign_pointer(syncobj->fence, fence);
197 if (fence != old_fence) {
198 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
199 syncobj_wait_syncobj_func(syncobj, cur);
202 spin_unlock(&syncobj->lock);
204 dma_fence_put(old_fence);
206 EXPORT_SYMBOL(drm_syncobj_replace_fence);
/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}
222 /* 5s default for wait submission */
223 #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
225 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
226 * @file_private: drm file private pointer
227 * @handle: sync object handle to lookup.
228 * @point: timeline point
229 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
230 * @fence: out parameter for the fence
232 * This is just a convenience function that combines drm_syncobj_find() and
233 * drm_syncobj_fence_get().
235 * Returns 0 on success or a negative error value on failure. On success @fence
236 * contains a reference to the fence, which must be released by calling
239 int drm_syncobj_find_fence(struct drm_file *file_private,
240 u32 handle, u64 point, u64 flags,
241 struct dma_fence **fence)
243 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
244 struct syncobj_wait_entry wait;
245 u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
251 *fence = drm_syncobj_fence_get(syncobj);
252 drm_syncobj_put(syncobj);
255 ret = dma_fence_chain_find_seqno(fence, point);
258 dma_fence_put(*fence);
263 if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
266 memset(&wait, 0, sizeof(wait));
269 drm_syncobj_fence_add_wait(syncobj, &wait);
272 set_current_state(TASK_INTERRUPTIBLE);
282 if (signal_pending(current)) {
287 timeout = schedule_timeout(timeout);
290 __set_current_state(TASK_RUNNING);
294 drm_syncobj_remove_wait(syncobj, &wait);
298 EXPORT_SYMBOL(drm_syncobj_find_fence);
301 * drm_syncobj_free - free a sync object.
302 * @kref: kref to free.
304 * Only to be called from kref_put in drm_syncobj_put.
306 void drm_syncobj_free(struct kref *kref)
308 struct drm_syncobj *syncobj = container_of(kref,
311 drm_syncobj_replace_fence(syncobj, NULL);
314 EXPORT_SYMBOL(drm_syncobj_free);
317 * drm_syncobj_create - create a new syncobj
318 * @out_syncobj: returned syncobj
319 * @flags: DRM_SYNCOBJ_* flags
320 * @fence: if non-NULL, the syncobj will represent this fence
322 * This is the first function to create a sync object. After creating, drivers
323 * probably want to make it available to userspace, either through
324 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
326 * Returns 0 on success or a negative error value on failure.
328 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
329 struct dma_fence *fence)
331 struct drm_syncobj *syncobj;
333 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
337 kref_init(&syncobj->refcount);
338 INIT_LIST_HEAD(&syncobj->cb_list);
339 spin_lock_init(&syncobj->lock);
341 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
342 drm_syncobj_assign_null_handle(syncobj);
345 drm_syncobj_replace_fence(syncobj, fence);
347 *out_syncobj = syncobj;
350 EXPORT_SYMBOL(drm_syncobj_create);
353 * drm_syncobj_get_handle - get a handle from a syncobj
354 * @file_private: drm file private pointer
355 * @syncobj: Sync object to export
356 * @handle: out parameter with the new handle
358 * Exports a sync object created with drm_syncobj_create() as a handle on
359 * @file_private to userspace.
361 * Returns 0 on success or a negative error value on failure.
363 int drm_syncobj_get_handle(struct drm_file *file_private,
364 struct drm_syncobj *syncobj, u32 *handle)
368 /* take a reference to put in the idr */
369 drm_syncobj_get(syncobj);
371 idr_preload(GFP_KERNEL);
372 spin_lock(&file_private->syncobj_table_lock);
373 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
374 spin_unlock(&file_private->syncobj_table_lock);
379 drm_syncobj_put(syncobj);
386 EXPORT_SYMBOL(drm_syncobj_get_handle);
388 static int drm_syncobj_create_as_handle(struct drm_file *file_private,
389 u32 *handle, uint32_t flags)
392 struct drm_syncobj *syncobj;
394 ret = drm_syncobj_create(&syncobj, flags, NULL);
398 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
399 drm_syncobj_put(syncobj);
403 static int drm_syncobj_destroy(struct drm_file *file_private,
406 struct drm_syncobj *syncobj;
408 spin_lock(&file_private->syncobj_table_lock);
409 syncobj = idr_remove(&file_private->syncobj_idr, handle);
410 spin_unlock(&file_private->syncobj_table_lock);
415 drm_syncobj_put(syncobj);
419 static int drm_syncobj_file_release(struct inode *inode, struct file *file)
421 struct drm_syncobj *syncobj = file->private_data;
423 drm_syncobj_put(syncobj);
427 static const struct file_operations drm_syncobj_file_fops = {
428 .release = drm_syncobj_file_release,
432 * drm_syncobj_get_fd - get a file descriptor from a syncobj
433 * @syncobj: Sync object to export
434 * @p_fd: out parameter with the new file descriptor
436 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
438 * Returns 0 on success or a negative error value on failure.
440 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
445 fd = get_unused_fd_flags(O_CLOEXEC);
449 file = anon_inode_getfile("syncobj_file",
450 &drm_syncobj_file_fops,
454 return PTR_ERR(file);
457 drm_syncobj_get(syncobj);
458 fd_install(fd, file);
463 EXPORT_SYMBOL(drm_syncobj_get_fd);
465 static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
466 u32 handle, int *p_fd)
468 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
474 ret = drm_syncobj_get_fd(syncobj, p_fd);
475 drm_syncobj_put(syncobj);
479 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
482 struct drm_syncobj *syncobj;
483 struct fd f = fdget(fd);
489 if (f.file->f_op != &drm_syncobj_file_fops) {
494 /* take a reference to put in the idr */
495 syncobj = f.file->private_data;
496 drm_syncobj_get(syncobj);
498 idr_preload(GFP_KERNEL);
499 spin_lock(&file_private->syncobj_table_lock);
500 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
501 spin_unlock(&file_private->syncobj_table_lock);
508 drm_syncobj_put(syncobj);
514 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
517 struct dma_fence *fence = sync_file_get_fence(fd);
518 struct drm_syncobj *syncobj;
523 syncobj = drm_syncobj_find(file_private, handle);
525 dma_fence_put(fence);
529 drm_syncobj_replace_fence(syncobj, fence);
530 dma_fence_put(fence);
531 drm_syncobj_put(syncobj);
535 static int drm_syncobj_export_sync_file(struct drm_file *file_private,
536 int handle, int *p_fd)
539 struct dma_fence *fence;
540 struct sync_file *sync_file;
541 int fd = get_unused_fd_flags(O_CLOEXEC);
546 ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
550 sync_file = sync_file_create(fence);
552 dma_fence_put(fence);
559 fd_install(fd, sync_file->file);
568 * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
569 * @file_private: drm file-private structure to set up
571 * Called at device open time, sets up the structure for handling refcounting
575 drm_syncobj_open(struct drm_file *file_private)
577 idr_init_base(&file_private->syncobj_idr, 1);
578 spin_lock_init(&file_private->syncobj_table_lock);
/* idr_for_each() callback: drop the idr's reference on each syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
591 * drm_syncobj_release - release file-private sync object resources
592 * @file_private: drm file-private structure to clean up
594 * Called at close time when the filp is going away.
596 * Releases any remaining references on objects by this filp.
599 drm_syncobj_release(struct drm_file *file_private)
601 idr_for_each(&file_private->syncobj_idr,
602 &drm_syncobj_release_handle, file_private);
603 idr_destroy(&file_private->syncobj_idr);
607 drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
608 struct drm_file *file_private)
610 struct drm_syncobj_create *args = data;
612 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
615 /* no valid flags yet */
616 if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
619 return drm_syncobj_create_as_handle(file_private,
620 &args->handle, args->flags);
624 drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
625 struct drm_file *file_private)
627 struct drm_syncobj_destroy *args = data;
629 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
632 /* make sure padding is empty */
635 return drm_syncobj_destroy(file_private, args->handle);
639 drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
640 struct drm_file *file_private)
642 struct drm_syncobj_handle *args = data;
644 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
650 if (args->flags != 0 &&
651 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
654 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
655 return drm_syncobj_export_sync_file(file_private, args->handle,
658 return drm_syncobj_handle_to_fd(file_private, args->handle,
663 drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
664 struct drm_file *file_private)
666 struct drm_syncobj_handle *args = data;
668 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
674 if (args->flags != 0 &&
675 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
678 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
679 return drm_syncobj_import_sync_file_fence(file_private,
683 return drm_syncobj_fd_to_handle(file_private, args->fd,
687 static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
688 struct drm_syncobj_transfer *args)
690 struct drm_syncobj *timeline_syncobj = NULL;
691 struct dma_fence *fence;
692 struct dma_fence_chain *chain;
695 timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
696 if (!timeline_syncobj) {
699 ret = drm_syncobj_find_fence(file_private, args->src_handle,
700 args->src_point, args->flags,
704 chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
709 drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
711 dma_fence_put(fence);
713 drm_syncobj_put(timeline_syncobj);
719 drm_syncobj_transfer_to_binary(struct drm_file *file_private,
720 struct drm_syncobj_transfer *args)
722 struct drm_syncobj *binary_syncobj = NULL;
723 struct dma_fence *fence;
726 binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
729 ret = drm_syncobj_find_fence(file_private, args->src_handle,
730 args->src_point, args->flags, &fence);
733 drm_syncobj_replace_fence(binary_syncobj, fence);
734 dma_fence_put(fence);
736 drm_syncobj_put(binary_syncobj);
741 drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
742 struct drm_file *file_private)
744 struct drm_syncobj_transfer *args = data;
747 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
754 ret = drm_syncobj_transfer_to_timeline(file_private, args);
756 ret = drm_syncobj_transfer_to_binary(file_private, args);
761 static void syncobj_wait_fence_func(struct dma_fence *fence,
762 struct dma_fence_cb *cb)
764 struct syncobj_wait_entry *wait =
765 container_of(cb, struct syncobj_wait_entry, fence_cb);
767 wake_up_process(wait->task);
770 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
771 struct syncobj_wait_entry *wait)
773 struct dma_fence *fence;
775 /* This happens inside the syncobj lock */
776 fence = rcu_dereference_protected(syncobj->fence,
777 lockdep_is_held(&syncobj->lock));
778 dma_fence_get(fence);
779 if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
780 dma_fence_put(fence);
783 wait->fence = dma_fence_get_stub();
788 wake_up_process(wait->task);
789 list_del_init(&wait->node);
792 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
793 void __user *user_points,
799 struct syncobj_wait_entry *entries;
800 struct dma_fence *fence;
802 uint32_t signaled_count, i;
804 points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
809 memset(points, 0, count * sizeof(uint64_t));
811 } else if (copy_from_user(points, user_points,
812 sizeof(uint64_t) * count)) {
814 goto err_free_points;
817 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
820 goto err_free_points;
822 /* Walk the list of sync objects and initialize entries. We do
823 * this up-front so that we can properly return -EINVAL if there is
824 * a syncobj with a missing fence and then never have the chance of
825 * returning -EINVAL again.
828 for (i = 0; i < count; ++i) {
829 struct dma_fence *fence;
831 entries[i].task = current;
832 entries[i].point = points[i];
833 fence = drm_syncobj_fence_get(syncobjs[i]);
834 if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
835 dma_fence_put(fence);
836 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
840 goto cleanup_entries;
845 entries[i].fence = fence;
847 entries[i].fence = dma_fence_get_stub();
849 if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
850 dma_fence_is_signaled(entries[i].fence)) {
851 if (signaled_count == 0 && idx)
857 if (signaled_count == count ||
858 (signaled_count > 0 &&
859 !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
860 goto cleanup_entries;
862 /* There's a very annoying laxness in the dma_fence API here, in
863 * that backends are not required to automatically report when a
864 * fence is signaled prior to fence->ops->enable_signaling() being
865 * called. So here if we fail to match signaled_count, we need to
866 * fallthough and try a 0 timeout wait!
869 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
870 for (i = 0; i < count; ++i)
871 drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
875 set_current_state(TASK_INTERRUPTIBLE);
878 for (i = 0; i < count; ++i) {
879 fence = entries[i].fence;
883 if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
884 dma_fence_is_signaled(fence) ||
885 (!entries[i].fence_cb.func &&
886 dma_fence_add_callback(fence,
887 &entries[i].fence_cb,
888 syncobj_wait_fence_func))) {
889 /* The fence has been signaled */
890 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
900 if (signaled_count == count)
908 if (signal_pending(current)) {
909 timeout = -ERESTARTSYS;
913 timeout = schedule_timeout(timeout);
917 __set_current_state(TASK_RUNNING);
920 for (i = 0; i < count; ++i) {
921 drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
922 if (entries[i].fence_cb.func)
923 dma_fence_remove_callback(entries[i].fence,
924 &entries[i].fence_cb);
925 dma_fence_put(entries[i].fence);
936 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
938 * @timeout_nsec: timeout nsec component in ns, 0 for poll
940 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
942 signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
944 ktime_t abs_timeout, now;
945 u64 timeout_ns, timeout_jiffies64;
947 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
948 if (timeout_nsec == 0)
951 abs_timeout = ns_to_ktime(timeout_nsec);
954 if (!ktime_after(abs_timeout, now))
957 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
959 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
960 /* clamp timeout to avoid infinite timeout */
961 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
962 return MAX_SCHEDULE_TIMEOUT - 1;
964 return timeout_jiffies64 + 1;
966 EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
968 static int drm_syncobj_array_wait(struct drm_device *dev,
969 struct drm_file *file_private,
970 struct drm_syncobj_wait *wait,
971 struct drm_syncobj_timeline_wait *timeline_wait,
972 struct drm_syncobj **syncobjs, bool timeline)
974 signed long timeout = 0;
978 timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
979 timeout = drm_syncobj_array_wait_timeout(syncobjs,
986 wait->first_signaled = first;
988 timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
989 timeout = drm_syncobj_array_wait_timeout(syncobjs,
990 u64_to_user_ptr(timeline_wait->points),
991 timeline_wait->count_handles,
992 timeline_wait->flags,
996 timeline_wait->first_signaled = first;
1001 static int drm_syncobj_array_find(struct drm_file *file_private,
1002 void __user *user_handles,
1003 uint32_t count_handles,
1004 struct drm_syncobj ***syncobjs_out)
1006 uint32_t i, *handles;
1007 struct drm_syncobj **syncobjs;
1010 handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
1011 if (handles == NULL)
1014 if (copy_from_user(handles, user_handles,
1015 sizeof(uint32_t) * count_handles)) {
1017 goto err_free_handles;
1020 syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
1021 if (syncobjs == NULL) {
1023 goto err_free_handles;
1026 for (i = 0; i < count_handles; i++) {
1027 syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
1030 goto err_put_syncobjs;
1035 *syncobjs_out = syncobjs;
1040 drm_syncobj_put(syncobjs[i]);
1048 static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1052 for (i = 0; i < count; i++)
1053 drm_syncobj_put(syncobjs[i]);
1058 drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1059 struct drm_file *file_private)
1061 struct drm_syncobj_wait *args = data;
1062 struct drm_syncobj **syncobjs;
1065 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1068 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1069 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
1072 if (args->count_handles == 0)
1075 ret = drm_syncobj_array_find(file_private,
1076 u64_to_user_ptr(args->handles),
1077 args->count_handles,
1082 ret = drm_syncobj_array_wait(dev, file_private,
1083 args, NULL, syncobjs, false);
1085 drm_syncobj_array_free(syncobjs, args->count_handles);
1091 drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
1092 struct drm_file *file_private)
1094 struct drm_syncobj_timeline_wait *args = data;
1095 struct drm_syncobj **syncobjs;
1098 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1101 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1102 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1103 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
1106 if (args->count_handles == 0)
1109 ret = drm_syncobj_array_find(file_private,
1110 u64_to_user_ptr(args->handles),
1111 args->count_handles,
1116 ret = drm_syncobj_array_wait(dev, file_private,
1117 NULL, args, syncobjs, true);
1119 drm_syncobj_array_free(syncobjs, args->count_handles);
1126 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1127 struct drm_file *file_private)
1129 struct drm_syncobj_array *args = data;
1130 struct drm_syncobj **syncobjs;
1134 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1140 if (args->count_handles == 0)
1143 ret = drm_syncobj_array_find(file_private,
1144 u64_to_user_ptr(args->handles),
1145 args->count_handles,
1150 for (i = 0; i < args->count_handles; i++)
1151 drm_syncobj_replace_fence(syncobjs[i], NULL);
1153 drm_syncobj_array_free(syncobjs, args->count_handles);
1159 drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1160 struct drm_file *file_private)
1162 struct drm_syncobj_array *args = data;
1163 struct drm_syncobj **syncobjs;
1167 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1173 if (args->count_handles == 0)
1176 ret = drm_syncobj_array_find(file_private,
1177 u64_to_user_ptr(args->handles),
1178 args->count_handles,
1183 for (i = 0; i < args->count_handles; i++)
1184 drm_syncobj_assign_null_handle(syncobjs[i]);
1186 drm_syncobj_array_free(syncobjs, args->count_handles);
1192 drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
1193 struct drm_file *file_private)
1195 struct drm_syncobj_timeline_array *args = data;
1196 struct drm_syncobj **syncobjs;
1197 struct dma_fence_chain **chains;
1202 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1208 if (args->count_handles == 0)
1211 ret = drm_syncobj_array_find(file_private,
1212 u64_to_user_ptr(args->handles),
1213 args->count_handles,
1218 points = kmalloc_array(args->count_handles, sizeof(*points),
1224 if (!u64_to_user_ptr(args->points)) {
1225 memset(points, 0, args->count_handles * sizeof(uint64_t));
1226 } else if (copy_from_user(points, u64_to_user_ptr(args->points),
1227 sizeof(uint64_t) * args->count_handles)) {
1232 chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
1237 for (i = 0; i < args->count_handles; i++) {
1238 chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
1240 for (j = 0; j < i; j++)
1247 for (i = 0; i < args->count_handles; i++) {
1248 struct dma_fence *fence = dma_fence_get_stub();
1250 drm_syncobj_add_point(syncobjs[i], chains[i],
1252 dma_fence_put(fence);
1259 drm_syncobj_array_free(syncobjs, args->count_handles);
1264 int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
1265 struct drm_file *file_private)
1267 struct drm_syncobj_timeline_array *args = data;
1268 struct drm_syncobj **syncobjs;
1269 uint64_t __user *points = u64_to_user_ptr(args->points);
1273 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1279 if (args->count_handles == 0)
1282 ret = drm_syncobj_array_find(file_private,
1283 u64_to_user_ptr(args->handles),
1284 args->count_handles,
1289 for (i = 0; i < args->count_handles; i++) {
1290 struct dma_fence_chain *chain;
1291 struct dma_fence *fence;
1294 fence = drm_syncobj_fence_get(syncobjs[i]);
1295 chain = to_dma_fence_chain(fence);
1297 struct dma_fence *iter, *last_signaled = NULL;
1299 dma_fence_chain_for_each(iter, fence) {
1302 dma_fence_put(last_signaled);
1303 last_signaled = dma_fence_get(iter);
1304 if (!to_dma_fence_chain(last_signaled)->prev_seqno)
1305 /* It is most likely that timeline has
1306 * unorder points. */
1309 point = dma_fence_is_signaled(last_signaled) ?
1310 last_signaled->seqno :
1311 to_dma_fence_chain(last_signaled)->prev_seqno;
1312 dma_fence_put(last_signaled);
1316 ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
1317 ret = ret ? -EFAULT : 0;
1321 drm_syncobj_array_free(syncobjs, args->count_handles);