/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

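/*
 * Pin the backing pages for a vGPU framebuffer object. The guest's
 * framebuffer already lives in graphics memory, so instead of allocating
 * pages we walk the GGTT entries covering the framebuffer and decode each
 * PTE (bits 63:12 hold the page address on gen8+) into a DMA address.
 */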
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (WARN_ON(!fb_info))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}

	/* Read each guest PTE out of the GGTT and decode the page address. */
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, fb_info->size, i) {
		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_address(sg) =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		sg_dma_len(sg) = PAGE_SIZE;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);

	return 0;
}

static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

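/*
 * Final kref release for a dmabuf_obj. If the vGPU is still active, the
 * object is also unhooked from the vGPU's list and IDR; otherwise it is
 * an orphan left behind at vGPU teardown and only needs freeing.
 */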
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				list_del(pos);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

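/*
 * GEM release hook: drop the reference the GEM object holds on its
 * dmabuf_obj. Take dmabuf_lock only while the vGPU still exists; once the
 * vGPU has been removed, the lock and list are gone with it.
 */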
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

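/*
 * The vGPU GEM object is a proxy: it owns no pages of its own, so it is
 * flagged I915_GEM_OBJECT_IS_PROXY and its get/put_pages hooks map the
 * guest framebuffer instead of allocating memory.
 */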
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};

static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		info->size << PAGE_SHIFT);
	i915_gem_object_init(obj, &intel_vgpu_gem_ops);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
		return true;
	else
		return false;
}

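/*
 * Decode the guest's plane programming into an intel_vgpu_fb_info and
 * sanity-check the result: non-zero size, page-aligned start address, and
 * a range that falls entirely inside the GGTT.
 */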
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = (info->stride * roundup(info->height, tile_height)
		      + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (((info->start >> PAGE_SHIFT) + info->size) >
		ggtt_total_entries(&dev_priv->ggtt)) {
		gvt_vgpu_err("Invalid GTT offset or size\n");
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}

static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}

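/*
 * intel_vgpu_query_plane() backs the VFIO_DEVICE_QUERY_GFX_PLANE ioctl and
 * intel_vgpu_get_dmabuf() backs VFIO_DEVICE_GET_GFX_DMABUF. A minimal
 * userspace sketch (illustrative only; error handling omitted, and
 * "device_fd" is an assumed, already-opened vfio device fd):
 *
 *	struct vfio_device_gfx_plane_info plane = {
 *		.argsz = sizeof(plane),
 *		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *	};
 *	ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *	int fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF,
 *		       &plane.dmabuf_id);
 */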
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	/* A probe with the DMABUF flag just confirms that dma-buf planes
	 * are supported; any other combination must be a plain DMABUF
	 * request.
	 */
	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	i915_gem_object_put(obj);

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    " file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}

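/*
 * Called on vGPU destruction: detach every dmabuf_obj from the vGPU and
 * drop the initial references. Objects still pinned by exported dma-bufs
 * survive as orphans until dmabuf_gem_object_free() runs.
 */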
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}