/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

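/*
 * A gen8+ GGTT PTE carries the page address in its upper bits; mask off the
 * low flag bits (valid bit, cache attributes) to recover the DMA address.
 */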
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

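/*
 * Pin/unpin the guest framebuffer page backing @dma_addr with the
 * hypervisor, so the mapping cannot go away while the host-side GEM
 * object still references it.
 */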
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
				unsigned long size,
				dma_addr_t dma_addr)
{
	int ret = 0;

	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
		ret = -EINVAL;

	return ret;
}

static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
				   dma_addr_t dma_addr)
{
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

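/*
 * Back the proxy GEM object with the guest framebuffer: walk the GGTT
 * entries covering the framebuffer, pin each guest page and record its DMA
 * address in a freshly allocated sg_table. On failure, unpin every page
 * pinned so far and release the table.
 */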
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (WARN_ON(!fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (WARN_ON(!vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				vgpu_unpin_dma_address(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}

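/* Undo vgpu_gem_get_pages(): unpin the guest pages and free the sg_table. */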
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			vgpu_unpin_dma_address(vgpu,
					       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}

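/*
 * kref release callback: drop the dmabuf_obj from its vGPU's list and free
 * it, or free it directly if the vGPU is already gone (the object is then
 * an orphan that was kept alive only by the exported dma-buf).
 */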
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

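/* Refcount helpers; the final put frees via dmabuf_gem_object_free(). */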
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

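/*
 * GEM release hook: drop this GEM object's reference on the dmabuf_obj,
 * taking the vGPU's dmabuf_lock when the vGPU still exists.
 */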
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

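/* Ops for the proxy GEM object; it owns no pages of its own. */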
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

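/*
 * Allocate a read-only proxy GEM object for the decoded plane described by
 * @info, deriving tiling mode and stride from the plane's drm_format_mod
 * (gen9+ understands the fb modifiers; older gens only know X-tiling).
 */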
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

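/* A cursor hotspot is valid only if it lies within the cursor plane. */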
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

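/*
 * Decode the guest's primary or cursor plane into @info: base address,
 * geometry, format and modifier, plus a size computed from the stride and
 * the tile-height-aligned plane height. The resulting GGTT range is
 * validated before it is exposed.
 */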
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

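/*
 * Look up an already-exposed dmabuf_obj whose framebuffer description
 * matches @latest_info, so the same plane is not exported twice.
 */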
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
					  list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

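/* Look up a dmabuf_obj by the id handed out by intel_vgpu_query_plane(). */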
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
					  list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

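/* Copy the decoded plane description into the VFIO plane-info reply. */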
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
			   struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

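/**
 * intel_vgpu_query_plane - handle the VFIO_DEVICE_QUERY_GFX_PLANE ioctl
 * @vgpu: the vGPU whose plane is queried
 * @args: a struct vfio_device_gfx_plane_info, filled in on success
 *
 * Decode the requested guest plane and either reuse a matching dmabuf_obj
 * that was exposed earlier or create a new one and assign it a dmabuf id.
 *
 * Returns 0 on success (including when the plane is simply not ready yet),
 * negative error code on failure.
 */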
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				      VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

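/**
 * intel_vgpu_get_dmabuf - handle the VFIO_DEVICE_GET_GFX_DMABUF ioctl
 * @vgpu: the vGPU that owns the exposed plane
 * @dmabuf_id: the id returned earlier by intel_vgpu_query_plane()
 *
 * Associate an exported dma-buf with the dmabuf_obj: create the proxy GEM
 * object, export it as a dma-buf and install a file descriptor for it.
 *
 * Returns the dma-buf fd on success, negative error code on failure.
 */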
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    " file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

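/**
 * intel_vgpu_dmabuf_cleanup - release all dmabuf_objs when a vGPU goes away
 * @vgpu: the vGPU being destroyed
 *
 * Detach every dmabuf_obj from the vGPU and drop the initial reference taken
 * in intel_vgpu_query_plane(); objects still referenced by an exported
 * dma-buf live on as orphans until their last user closes the buffer.
 */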
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
					  list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}