/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
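
/*
 * Seqno arithmetic is wrap-safe: a fence with seqno s counts as passed
 * by device seqno p when (p - s) < VMW_FENCE_WRAP in unsigned 32-bit
 * arithmetic. E.g. p == 2, s == 0xfffffffe gives p - s == 4, so the
 * fence has passed even though p is numerically smaller.
 */

/**
 * struct vmw_fence_manager - Per-device fence bookkeeping.
 *
 * Tracks all fence objects created on @dev_priv together with the
 * actions queued for the cleanup worker @work. @lock protects the
 * fence and cleanup lists; @goal_irq_mutex protects the fence goal
 * irq state.
 */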
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending drm event that controls the event
 * delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

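/**
 * fman_from_fence - Return the fence manager of a fence object.
 * @fence: The fence object.
 *
 * The embedded struct dma_fence shares the fence manager's spinlock,
 * so the manager can be recovered from the lock pointer.
 */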
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new
 * fences, fence objects may not be signaled. This is perfectly OK, since
 * there are no consumers of the signaled data, but that is NOT OK when
 * there are fence actions attached to a fence. The fencing subsystem then
 * makes use of the FENCE_GOAL irq and sets the fence goal seqno to that of
 * the next fence object with an action attached, and each time
 * vmw_fences_update is called, the subsystem makes sure the fence goal
 * seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

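/**
 * vmw_fence_obj_destroy - The dma_fence release callback.
 * @f: The embedded struct dma_fence.
 *
 * Unlinks the fence from the fence manager's list and invokes the
 * fence's type-specific destructor.
 */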
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

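/**
 * vmw_fence_enable_signaling - The dma_fence enable_signaling callback.
 * @f: The embedded struct dma_fence.
 *
 * Returns false if the fence seqno has already passed. Otherwise pings
 * the host so a fence irq will eventually be raised, and returns true.
 */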
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

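/*
 * Context for waking up a task sleeping in vmw_fence_wait() when its
 * fence signals.
 */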
struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

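/**
 * vmw_fence_wait - The dma_fence wait callback.
 * @f: The embedded struct dma_fence.
 * @intr: Whether the wait should be interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Sleeps until the fence signals, the timeout expires or, for
 * interruptible waits, a signal becomes pending. Returns the remaining
 * timeout, 0 if the wait timed out, or -ERESTARTSYS if interrupted.
 */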
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

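/**
 * vmw_fence_manager_init - Set up a fence manager for a device.
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Returns the new fence manager, or NULL on allocation failure.
 */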
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

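/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 * @fman: The fence manager.
 *
 * All fences must have been signaled and removed beforehand, typically
 * by a prior call to vmw_fence_fifo_down().
 */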
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

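/**
 * vmw_fence_obj_init - Initialize a fence and add it to the fence manager.
 * @fman: The fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno the fence will signal on.
 * @destroy: Type-specific destructor, called on the final fence put.
 *
 * Returns -EBUSY if the fifo is down and no new fences may be created,
 * 0 otherwise.
 */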
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

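/**
 * vmw_fences_perform_actions - Run and retire a list of fence actions.
 * @fman: The fence manager.
 * @list: The list of actions to perform.
 *
 * Called with the fence manager lock held. The seq_passed callbacks are
 * run immediately; cleanup is deferred to the fence manager worker.
 */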
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

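/**
 * __vmw_fences_update - Signal all fences passed by the device.
 * @fman: The fence manager.
 *
 * Called with the fence manager lock held. Signals all fences whose
 * seqno the device reports as passed, performs their actions, updates
 * the fence goal if needed and schedules the cleanup worker.
 */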
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

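/**
 * vmw_fences_update - Signal all fences passed by the device.
 * @fman: The fence manager.
 *
 * Lock-taking wrapper around __vmw_fences_update().
 */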
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

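/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 * @fence: The fence object.
 *
 * Returns the cached signaled state if already set; otherwise updates
 * the fence manager state first.
 */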
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

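/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 * @fence: The fence object.
 * @lazy: Unused; waiting is always irq-driven.
 * @interruptible: Whether the wait should be interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 if the fence signaled, -EBUSY on timeout, or a negative
 * error code on failure.
 */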
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

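/**
 * vmw_fence_obj_flush - Ping the host to have it process pending fences.
 * @fence: The fence object to flush.
 */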
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

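/**
 * vmw_fence_create - Create a kernel-side fence object.
 * @fman: The fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 *
 * Returns 0 on success, or a negative error code on failure.
 */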
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

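/**
 * vmw_user_fence_destroy - The user fence destructor.
 * @fence: The fence embedded in a struct vmw_user_fence.
 *
 * Frees the user fence and releases the memory space accounted for it.
 */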
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

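/**
 * vmw_user_fence_base_release - Drop the fence reference held by the
 * ttm base object.
 * @p_base: Pointer to the base object pointer; cleared on return.
 */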
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

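/**
 * vmw_user_fence_create - Create a fence object visible to user-space.
 * @file_priv: The file the fence handle is registered with.
 * @fman: The fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the user-space handle on success.
 *
 * Like vmw_fence_create(), but also sets up a ttm base object so the
 * fence can be looked up and referenced from user-space.
 */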
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference.
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - Signal all unsignaled fence objects.
 * @fman: The fence manager.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

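/**
 * vmw_fence_fifo_up - Allow new fence objects to be created again.
 * @fman: The fence manager.
 */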
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

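/**
 * vmw_fence_obj_wait_ioctl - Ioctl entry point for waiting on a user
 * fence object.
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_wait_arg argument.
 * @file_priv: The calling file.
 *
 * The first call computes an absolute timeout and stores it in the
 * kernel cookie, so that a restarted call doesn't extend the total
 * wait time.
 */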
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);
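
	/*
	 * The shifts above evaluate to 2^-20 + 2^-24 - 2^-26 ~= 0.9984e-6,
	 * so the result is roughly 0.16 percent short of an exact division
	 * by 10^6, which is harmless for a timeout.
	 */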

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

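/**
 * vmw_fence_obj_signaled_ioctl - Ioctl entry point for checking whether
 * a user fence object has signaled.
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_signaled_arg argument.
 * @file_priv: The calling file.
 */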
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

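/**
 * vmw_fence_obj_unref_ioctl - Ioctl entry point for dropping a user
 * fence object reference.
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_arg argument.
 * @file_priv: The calling file.
 */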
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver the drm event of an event
 * fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup - Destroy an event fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

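/**
 * vmw_event_fence_action_create - Allocate a drm event and queue it for
 * delivery when a fence object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: If DRM_VMW_FE_FLAG_REQ_TIME is set, the signal time is
 * reported back in the event.
 * @user_data: Opaque data to be returned to user-space with the event.
 * @interruptible: Interruptible waits if possible.
 */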
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

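/**
 * vmw_fence_event_ioctl - Ioctl entry point for attaching an event to a
 * fence object.
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_event_arg argument.
 * @file_priv: The calling file.
 *
 * Looks up or creates the fence object, optionally gives user-space a
 * new reference to it, and queues an event to be delivered when the
 * fence signals.
 */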
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup_for_ref(dev_priv->tdev,
						       arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}