/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;
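
		/*
		 * Unbind the resource from its backup MOB while the buffer
		 * is reserved, so the device stops referencing the backing
		 * pages before the backup buffer itself is released below.
		 */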
		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);
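
	/*
	 * Preallocate idr memory with GFP_KERNEL outside the lock so the
	 * actual allocation can use GFP_NOWAIT under the resource_lock.
	 */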
	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find the resource.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
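
	/*
	 * Check under the resource lock that the resource is still
	 * available and really is of the converter's type; comparing
	 * the res_free destructor doubles as a type check.
	 */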
	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
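
	/*
	 * The per-object struct overhead is constant, so compute it once
	 * and cache it in function-local statics on first use.
	 */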
	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * assigned. May be NULL.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf,
			  struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;
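
	/*
	 * Take an extra reference on the underlying TTM buffer object.
	 * It is handed over to the prime/base object and dropped again in
	 * vmw_user_dmabuf_release(), or here if initialization fails.
	 */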
	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;
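
	/*
	 * If further command submission is allowed, only wait for all
	 * current fences on the buffer instead of taking a synccpu
	 * write grab that would block subsequent submissions.
	 */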
	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
					nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
					     &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf,
				    NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out,
			   struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		pr_err("Invalid buffer object handle 0x%08lx\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		pr_err("Invalid buffer object handle 0x%08lx\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;
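
	/*
	 * The pitch is the scanline width in bytes, rounding bits-per-pixel
	 * up to whole bytes: e.g. width 1024 at bpp 32 gives a 4096-byte
	 * pitch, and the buffer size is pitch * height.
	 */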
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf, NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;
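
	/*
	 * Validation is a two-step process: first create the hardware
	 * resource if it doesn't exist yet, then bind it to the backup
	 * buffer if one is required and provided.
	 */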
	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_dmabuf_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;
->func
->may_evict
|| res
->id
== -1 || res
->pin_count
)
949 write_lock(&dev_priv
->resource_lock
);
950 list_add_tail(&res
->lru_head
,
951 &res
->dev_priv
->res_lru
[res
->func
->res_type
]);
952 write_unlock(&dev_priv
->resource_lock
);

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;
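
	/*
	 * Eviction transfers the resource contents to the backup buffer
	 * (unbind) and then destroys the hardware resource; the data can
	 * be restored from the now-dirty backup on the next validation.
	 */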
	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
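
	/*
	 * Retry validation as long as it fails with -EBUSY, evicting the
	 * least recently used resource of the same type each round, and
	 * give up after VMW_RES_EVICT_ERR_COUNT consecutive eviction errors.
	 */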
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
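
	/*
	 * If the buffer is leaving MOB memory, unbind every resource
	 * still bound to it and wait for the unbind commands to complete
	 * before TTM moves the underlying pages.
	 */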
	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false);
	}
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;
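
	/*
	 * SVGA commands are a fixed header followed by a command-specific
	 * body, so an anonymous struct overlaying the reserved FIFO space
	 * is the usual way to format them in this driver.
	 */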

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
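		/*
		 * Read back the query states, then fence the buffer and
		 * wait for idle so the readback has completed before the
		 * move invalidates the MOB contents.
		 */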
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_fence_single_bo(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
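
	/*
	 * Walk the type's LRU list front to back, evicting each resource
	 * in turn; a failed eviction is requeued at the tail, and the
	 * loop bails out after VMW_RES_EVICT_ERR_COUNT failures.
	 */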
	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
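		/*
		 * First pin reference: pin the backup buffer in its
		 * placement and validate the resource itself so it stays
		 * resident for as long as the pin count is non-zero.
		 */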
		struct vmw_dma_buffer *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 interruptible, false);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_dma_buffer *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}