/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

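/*
 * Half of the 32-bit seqno space: an unsigned seqno difference below this
 * value is read as "at or ahead of" in the wrap-safe comparisons used
 * throughout this file.
 */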
#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending drm event that controls the event
 * delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

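/*
 * fman_from_fence - Recover the fence manager from a fence object.
 *
 * The dma_fence base is initialized with &fman->lock as its lock (see
 * vmw_fence_obj_init() below), so container_of() on that lock pointer
 * leads back to the manager without an extra back-pointer per fence.
 */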
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

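/*
 * Illustrative sketch (not part of the original driver): the wrap-safe
 * seqno test used below. Seqnos are 32-bit and wrap, so "has passed" is
 * decided with unsigned subtraction against VMW_FENCE_WRAP rather than
 * a plain ">=" comparison.
 */
static inline bool vmw_seqno_passed_example(u32 passed, u32 submitted)
{
	/*
	 * E.g. passed == 0x2 just after a wrap and submitted == 0xfffffffe:
	 * passed - submitted == 0x4 < VMW_FENCE_WRAP, so the fence counts
	 * as signaled even though passed < submitted numerically.
	 */
	return (passed - submitted) < VMW_FENCE_WRAP;
}
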
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

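/*
 * vmw_fence_enable_signaling - dma_fence_ops callback.
 *
 * Returning false here tells the dma_fence core that the seqno has
 * already passed, so the fence can be signaled right away; otherwise we
 * ping the host to keep the FIFO moving and return true.
 */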
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

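	/*
	 * Open-coded wait loop: as noted above, fences are not signaled
	 * unless someone updates them, so each wakeup re-reads the device
	 * seqno via __vmw_fences_update() rather than relying on the
	 * signaled bit alone.
	 */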
	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * Hence, fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return 1;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

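	/*
	 * Sanity check of the shifts below: 1/2^20 + 1/2^24 - 1/2^26 =
	 * (64 + 4 - 1)/2^26 = 67/67108864 ~= 1/1001625, i.e. division by
	 * 1000000 to within about 0.16%, which is plenty accurate for a
	 * wait timeout.
	 */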
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * event's tv_sec timestamp when the fence signals.
 * @tv_usec: Like @tv_sec, but for the tv_usec part of the timestamp.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}