/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

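/*
 * Downcast helpers: struct vmw_dma_buffer embeds its TTM buffer object,
 * and struct vmw_user_dma_buffer in turn embeds the dma buffer, so both
 * conversions are plain container_of() lookups.
 */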
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

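/**
 * vmw_resource_reference - Take a reference on a resource.
 *
 * @res: Pointer to the resource.
 *
 * Increments the resource's refcount and returns the resource pointer,
 * which makes the function convenient to use in assignments.
 */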
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

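/**
 * vmw_resource_release - Final kref callback that destroys a resource.
 *
 * @kref: Pointer to the resource's embedded kref.
 *
 * Called with the resource lock held for write; the lock is dropped and
 * retaken around the potentially sleeping unbind and destructor calls.
 * Any bound backup buffer is unbound and unreferenced, the hardware
 * resource is destroyed, and finally the resource id is released.
 */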
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

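/**
 * vmw_resource_unreference - Drop a reference on a resource.
 *
 * @p_res: Double pointer to the resource; set to NULL on return.
 *
 * The final kref_put() must run under the resource lock, since
 * vmw_resource_release() expects to be called with that lock held.
 */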
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

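/**
 * vmw_resource_lookup - Look up an activated resource by id.
 *
 * @dev_priv: Pointer to a device private struct.
 * @idr: The idr to search, typically one of @dev_priv->res_idr.
 * @id: The resource id.
 *
 * Returns a referenced pointer to the resource if it was found and has
 * been activated, or NULL otherwise.
 */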
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}

/**
 * Buffer management.
 */
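/**
 * vmw_dmabuf_bo_free - TTM destroy callback for plain (non-user) dma
 * buffers allocated with kmalloc.
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 */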
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

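/**
 * vmw_dmabuf_init - Initialize a struct vmw_dma_buffer embedding a TTM
 * buffer object.
 *
 * @dev_priv: Pointer to the device private struct.
 * @vmw_bo: The buffer to initialize; zeroed by this function.
 * @size: Buffer size in bytes.
 * @placement: Initial TTM placement of the buffer.
 * @interruptible: Whether waits during initialization should be
 * interruptible.
 * @bo_free: TTM destroy callback; must not be NULL.
 */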
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}

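/**
 * vmw_user_dmabuf_destroy - TTM destroy callback for user dma buffers.
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object.
 *
 * Frees the enclosing struct vmw_user_dma_buffer including its embedded
 * base object.
 */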
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_base_object_kfree(vmw_user_bo, base);
}

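/**
 * vmw_user_dmabuf_release - TTM base-object release callback.
 *
 * @p_base: Double pointer to the base object; set to NULL on return.
 *
 * Drops the buffer-object reference held by the base object.
 */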
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_base_object_init(tfile,
				   &user_bo->base,
				   shareable,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);
	return (vmw_user_bo->base.tfile == tfile ||
		vmw_user_bo->base.shareable) ? 0 : -EPERM;
}

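/**
 * vmw_dmabuf_alloc_ioctl - Entry point for the DRM_VMW_ALLOC_DMABUF ioctl.
 *
 * @dev: The DRM device.
 * @data: Pointer to a union drm_vmw_alloc_dmabuf_arg.
 * @file_priv: Identifies the calling file.
 *
 * Allocates a user dma buffer and fills in the handle, map handle and
 * GMR id of the reply.
 */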
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = dma_buf->base.addr_space_offset;
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

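/**
 * vmw_dmabuf_unref_ioctl - Entry point for the DRM_VMW_UNREF_DMABUF ioctl.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_unref_dmabuf_arg.
 * @file_priv: Identifies the calling file.
 *
 * Drops the per-file usage reference on the buffer's base object. The
 * buffer itself is freed once the last reference is gone.
 */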
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

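/**
 * vmw_user_dmabuf_lookup - Look up a user dma buffer from a handle.
 *
 * @tfile: Identifies the calling file.
 * @handle: The user-space handle.
 * @out: On success, a referenced pointer to the struct vmw_dma_buffer.
 *
 * Returns -ESRCH if the handle is unknown, and -EINVAL if it refers to
 * an object of the wrong type.
 */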
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

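/**
 * vmw_user_dmabuf_reference - Add a usage reference for a user dma buffer.
 *
 * @tfile: Identifies the calling file.
 * @dma_buf: The buffer to reference.
 *
 * Adds a TTM_REF_USAGE reference on the buffer's base object so that the
 * caller's handle stays valid.
 */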
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

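/**
 * vmw_stream_destroy - hw_destroy callback for stream resources.
 *
 * @res: The stream resource.
 *
 * Returns the stream id to the overlay pool.
 */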
static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

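/**
 * vmw_stream_init - Initialize a stream resource and claim a stream id.
 *
 * @dev_priv: Pointer to the device private struct.
 * @stream: The stream to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * On success, the resource is activated with vmw_stream_destroy as its
 * hardware destructor.
 */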
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

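/**
 * vmw_stream_unref_ioctl - Entry point for the DRM_VMW_UNREF_STREAM ioctl.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_stream_arg.
 * @file_priv: Identifies the calling file.
 *
 * Verifies that the stream exists and is owned by the caller before
 * dropping the usage reference on its base object.
 */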
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

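/**
 * vmw_stream_claim_ioctl - Entry point for the DRM_VMW_CLAIM_STREAM ioctl.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_stream_arg.
 * @file_priv: Identifies the calling file.
 *
 * Accounts the stream against the graphics memory limit, creates a user
 * stream resource and returns its id in the argument struct.
 */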
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

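/**
 * vmw_user_stream_lookup - Look up a stream resource owned by the caller.
 *
 * @dev_priv: Pointer to the device private struct.
 * @tfile: Identifies the calling file, which must own the stream.
 * @inout_id: On entry, the user-space stream id; on successful return,
 * replaced by the device stream id.
 * @out: On success, a referenced pointer to the stream resource.
 */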
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}

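/**
 * vmw_dumb_create - Create a dumb buffer for the DRM dumb-buffer API.
 *
 * @file_priv: Identifies the calling file.
 * @dev: The DRM device.
 * @args: Dumb-buffer creation arguments; the pitch, size and handle
 * fields are filled in on return.
 *
 * The dumb buffer is backed by a user dma buffer with VRAM / system
 * placement.
 */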
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (vmw_user_bo == NULL)
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (ret != 0) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (ret != 0)
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

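/**
 * vmw_dumb_map_offset - Look up the map offset of a dumb buffer.
 *
 * @file_priv: Identifies the calling file.
 * @dev: The DRM device.
 * @handle: Handle of the dumb buffer.
 * @offset: On success, the offset to use when mmapping the buffer.
 */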
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = out_buf->base.addr_space_offset;
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

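/**
 * vmw_dumb_destroy - Destroy a dumb buffer by dropping its usage reference.
 *
 * @file_priv: Identifies the calling file.
 * @dev: The DRM device.
 * @handle: Handle of the dumb buffer.
 */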
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
	    (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
int vmw_resource_check_buffer(struct vmw_resource *res,
			      bool interruptible,
			      struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(&val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @no_backup: Whether to skip backup buffer allocation.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer.
 *
 * @val_buf: Backup buffer information.
 */
void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(&val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 */
int vmw_resource_do_evict(struct vmw_resource *res)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, true, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device id entries "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
	    container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}