/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

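/**
 * struct vmw_fence_manager - Bookkeeping for the fence objects of a device.
 *
 * All live fence objects are kept on @fence_list, protected by @lock.
 * Fence actions whose cleanup must not run in atomic context are collected
 * on @cleanup_list and drained by the @work worker. While @fifo_down is
 * true, creation of new fence objects is refused.
 */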
struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        u32 next_seqno;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        bool fifo_down;
        struct list_head cleanup_list;
};

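/**
 * struct vmw_user_fence - A fence object together with the ttm base object
 * that exposes it to user-space as a handle.
 */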
struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};

/**
 * vmw_fence_obj_destroy_locked - Kref release callback for fence objects.
 *
 * Called with the fence manager lock held and interrupts disabled. Removes
 * the fence from the manager's fence list and hands it to its destroy
 * callback, or kfree()s it if no destroy callback was given. The manager
 * lock is dropped around the destroy call and re-acquired before returning.
 */

static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
        struct vmw_fence_obj *fence =
                container_of(kref, struct vmw_fence_obj, kref);

        struct vmw_fence_manager *fman = fence->fman;
        unsigned int num_fences;

        list_del_init(&fence->head);
        num_fences = --fman->num_fence_objects;
        spin_unlock_irq(&fman->lock);
        if (fence->destroy)
                fence->destroy(fence);
        else
                kfree(fence);

        spin_lock_irq(&fman->lock);
}


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;

        do {
                INIT_LIST_HEAD(&list);
                spin_lock_irq(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                spin_unlock_irq(&fman->lock);

                if (list_empty(&list))
                        return;

                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 */

                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        action->cleanup(action);
                }
        } while (1);
}

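/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * Sets up the fence and cleanup lists, the cleanup worker and the
 * accounting sizes for kernel and user-space fence objects. The manager
 * starts out with fifo_down set, so vmw_fence_fifo_up() must be called
 * before fence objects can be created. Returns NULL on allocation failure.
 */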
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(fman == NULL))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));

        return fman;
}

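/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * Flushes the cleanup worker and checks that both the fence list and the
 * cleanup list are empty before freeing the manager, so all fence objects
 * must have been released by the time this is called.
 */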
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
                list_empty(&fman->cleanup_list);
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        BUG_ON(!lists_empty);
        kfree(fman);
}

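/**
 * vmw_fence_obj_init - Initialize a pre-allocated fence object.
 *
 * Sets up the fence with the given seqno and signal mask, takes the
 * initial kref reference and adds the fence to the manager's fence list.
 * Returns -EBUSY if the fifo is down and new fences can't be accepted.
 */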
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence,
                              u32 seqno,
                              uint32_t mask,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        unsigned long irq_flags;
        unsigned int num_fences;
        int ret = 0;

        fence->seqno = seqno;
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->fman = fman;
        fence->signaled = 0;
        fence->signal_mask = mask;
        kref_init(&fence->kref);
        fence->destroy = destroy;
        init_waitqueue_head(&fence->queue);

        spin_lock_irqsave(&fman->lock, irq_flags);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        num_fences = ++fman->num_fence_objects;

out_unlock:
        spin_unlock_irqrestore(&fman->lock, irq_flags);
        return ret;

}

struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
        if (unlikely(fence == NULL))
                return NULL;

        kref_get(&fence->kref);
        return fence;
}

/**
 * vmw_fence_obj_unreference
 *
 * Note that this function may not be entered with disabled irqs since
 * it may re-enable them in the destroy function.
 *
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
        struct vmw_fence_obj *fence = *fence_p;
        struct vmw_fence_manager *fman;

        if (unlikely(fence == NULL))
                return;

        fman = fence->fman;
        *fence_p = NULL;
        spin_lock_irq(&fman->lock);
        BUG_ON(atomic_read(&fence->kref.refcount) == 0);
        kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        spin_unlock_irq(&fman->lock);
}

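/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of a list of
 * fence actions and queue their cleanup.
 *
 * Called with the fence manager lock held. Actions that have a cleanup
 * callback are moved to the manager's cleanup list, to be cleaned up
 * later by the worker outside atomic context.
 */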
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */

                if (action->cleanup != NULL)
                        list_add_tail(&action->head, &fman->cleanup_list);
        }
}

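/**
 * vmw_fences_update - Mark fences whose seqno has passed as signaled.
 *
 * Walks the fence list and, for each fence whose seqno @seqno has reached
 * or passed (compared modulo 2^32 using VMW_FENCE_WRAP), sets
 * DRM_VMW_FENCE_FLAG_EXEC, runs its seq_passed actions and wakes up any
 * waiters. Schedules the cleanup worker if cleanup actions were queued.
 */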
void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
{
        unsigned long flags;
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;

        spin_lock_irqsave(&fman->lock, flags);
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }

        }
        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
        spin_unlock_irqrestore(&fman->lock, flags);
}

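/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * Returns true if all flags in @flags (restricted to the fence's signal
 * mask) have signaled. If the EXEC flag has not yet been seen, the device
 * seqno is re-read from the FIFO and vmw_fences_update() is called before
 * re-checking, so this function also acts as a poll.
 */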
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                            uint32_t flags)
{
        struct vmw_fence_manager *fman = fence->fman;
        unsigned long irq_flags;
        uint32_t signaled;

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        flags &= fence->signal_mask;
        if ((signaled & flags) == flags)
                return 1;

        if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
                struct vmw_private *dev_priv = fman->dev_priv;
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                u32 seqno;

                seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
                vmw_fences_update(fman, seqno);
        }

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        return ((signaled & flags) == flags);
}

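/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * Pings the host, registers a seqno waiter and then sleeps (interruptibly
 * if requested) until the fence signals or @timeout jiffies have elapsed.
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS if interrupted.
 */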
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
                       uint32_t flags, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;
        long ret;

        if (likely(vmw_fence_obj_signaled(fence, flags)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);
        else
                ret = wait_event_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

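/**
 * vmw_fence_obj_flush - Ping the host to have it process the command FIFO,
 * advancing fence signaling.
 */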
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fence->fman;

        kfree(fence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->fence_size);
}

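/**
 * vmw_fence_create - Allocate and initialize a kernel-only fence object.
 *
 * Accounts the allocation against the TTM global memory accounting before
 * allocating the fence, and undoes both on failure. The fence is destroyed
 * through vmw_fence_destroy() when its last reference is dropped.
 */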
int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     uint32_t mask,
                     struct vmw_fence_obj **p_fence)
{
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct vmw_fence_obj *fence;
        int ret;

        ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, fence, seqno, mask,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->fence_size);
        return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fence->fman;

        kfree(ufence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}

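/**
 * vmw_user_fence_create - Allocate a fence object and a ttm base object
 * that exposes it to user-space.
 *
 * The fence is accounted against the TTM global memory accounting since it
 * may be created on behalf of a user-space request. On success, *p_handle
 * holds the handle that user-space can use to look up the fence, and the
 * base object owns a fence reference that is dropped in
 * vmw_user_fence_base_release().
 */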
int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          uint32_t mask,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        int ret;

        /*
         * Kernel memory space accounting, since this object may
         * be created by a user-space request.
         */

        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(ufence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 mask, vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);


        if (unlikely(ret != 0)) {
                /*
                 * Free the base object's reference
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.hash.key;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                kref_get(&fence->kref);
                spin_unlock_irq(&fman->lock);

                ret = vmw_fence_obj_wait(fence, fence->signal_mask,
                                         false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }

                spin_lock_irq(&fman->lock);

                BUG_ON(!list_empty(&fence->head));
                kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        }
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}

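/**
 * vmw_fence_fifo_up - Allow creation of new fence objects again after
 * vmw_fence_fifo_down().
 */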
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = false;
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}

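/**
 * vmw_fence_obj_wait_ioctl - Ioctl to wait for a user-space fence handle
 * to signal.
 *
 * Converts the user-supplied timeout in microseconds to jiffies, using a
 * cookie so that repeated restarts of the ioctl don't extend the total
 * wait beyond the original deadline. Optionally drops the caller's handle
 * reference on a successful wait if DRM_VMW_WAIT_OPTION_UNREF is set.
 */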
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division is not available on all 32-bit architectures,
         * so approximate the division by 1000000 (us -> s) with shifts:
         * 1/2^20 + 1/2^24 - 1/2^26 = 67/2^26, which is within about 0.2%
         * of 1/1000000.
         */

        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Wait invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */

        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}

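/**
 * vmw_fence_obj_signaled_ioctl - Ioctl to query the signaled state of a
 * user-space fence handle.
 *
 * Reports whether the requested flags have signaled, together with the
 * fence's current signaled flags and the last seqno the device is known
 * to have passed.
 */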
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Fence signaled invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fence->fman;

        arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
        spin_lock_irq(&fman->lock);

        arg->signaled_flags = fence->signaled;
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock_irq(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}

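/**
 * vmw_fence_obj_unref_ioctl - Ioctl to drop a user-space reference to a
 * fence object handle.
 */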
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}