/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work, ping_work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	unsigned ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head used by the file private's list of pending events.
 * @event: A pointer to the pending drm event to deliver when the fence
 * signals.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

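/*
 * fman_from_fence - Return the fence manager that owns an embedded fence.
 *
 * This works because vmw_fence_obj_init() initializes the base fence with
 * &fman->lock as its lock, so container_of() on that lock pointer recovers
 * the owning struct vmw_fence_manager.
 */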
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct fence *f)
{
	return "svga";
}

static void vmw_fence_ping_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, ping_work);

	vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
}

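/*
 * vmw_fence_enable_signaling - fence_ops::enable_signaling callback.
 *
 * Returns false if the device has already passed the fence seqno. Otherwise
 * the host is asked to process the fifo, either by pinging it directly or,
 * if the hw_mutex is contended, by deferring the ping to a worker.
 */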
static bool vmw_fence_enable_signaling(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	if (mutex_trylock(&dev_priv->hw_mutex)) {
		vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
		mutex_unlock(&dev_priv->hw_mutex);
	} else
		schedule_work(&fman->ping_work);

	return true;
}

struct vmwgfx_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

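/*
 * vmw_fence_wait - fence_ops::wait callback.
 *
 * Sleeps with a registered fence_cb that wakes the process when the fence
 * signals, and additionally polls the device seqno with
 * __vmw_fences_update() on each wakeup, since fence objects are not
 * necessarily signaled from irq context (see the note on irq usage above).
 */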
static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static struct fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

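/*
 * vmw_fence_manager_init - Create and initialize a fence manager for
 * @dev_priv. Returns NULL on allocation failure. The object sizes cached
 * here are used for TTM memory accounting of user-space-created objects.
 */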
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);
	(void) cancel_work_sync(&fman->ping_work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		   fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->base.seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

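/*
 * __vmw_fences_update - Signal all fence objects whose seqno the device has
 * passed, and run their attached actions. Since the fence list is ordered
 * by submission, the scan stops at the first fence not yet passed. Caller
 * must hold fman->lock.
 */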
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return 1;

	vmw_fences_update(fman);

	return fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

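/*
 * A minimal usage sketch for the kernel-side API above (hypothetical
 * caller, error handling trimmed): create a fence object for a submitted
 * seqno, block on it, then drop the reference:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	if (vmw_fence_create(fman, seqno, &fence) == 0) {
 *		(void) vmw_fence_obj_wait(fence, false, true,
 *					  VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */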
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

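/*
 * vmw_user_fence_create - Create a fence object visible to user-space.
 *
 * Wraps a vmw_fence_obj in a ttm base object so user-space can look it up
 * through @p_handle, and charges the allocation against the TTM memory
 * accounting, since creation may be triggered by a user-space request.
 */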
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

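/*
 * vmw_fence_obj_wait_ioctl - Wait for a user fence handle to signal.
 *
 * On the first call, an absolute jiffies deadline is computed and stored in
 * the caller-supplied cookie, so that a wait restarted from user-space does
 * not extend the total timeout.
 */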
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not present on 32-bit systems, so do an
	 * approximate division by 1000000:
	 * (x >> 20) + (x >> 24) - (x >> 26) = x * 67 / 2^26, and
	 * 67 / 2^26 ~= 1e-6, underestimating the quotient by about 0.16%.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

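/*
 * struct vmw_event_fence_pending - A struct drm_pending_event wrapping the
 * struct drm_vmw_event_fence payload that is delivered to user-space.
 */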
struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(*event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}

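/*
 * vmw_fence_event_ioctl - Attach a DRM_VMW_EVENT_FENCE_SIGNALED event to a
 * fence. The fence is either looked up from the supplied handle or newly
 * created through vmw_execbuf_fence_commands().
 */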
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup_for_ref(dev_priv->tdev,
						       arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}