/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
                              const struct drm_virtgpu_3d_box *src)
{
        dst->x = cpu_to_le32(src->x);
        dst->y = cpu_to_le32(src->y);
        dst->z = cpu_to_le32(src->z);
        dst->w = cpu_to_le32(src->w);
        dst->h = cpu_to_le32(src->h);
        dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

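/*
 * Each vbuffer comes out of the "virtio-gpu-vbufs" slab as a single object of
 * VBUFFER_SIZE bytes: the struct itself, followed by room for an inline
 * command (up to MAX_INLINE_CMD_SIZE) and an inline response (up to
 * MAX_INLINE_RESP_SIZE).  Larger responses must be passed in via resp_buf.
 */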
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE ||
               size < sizeof(struct virtio_gpu_ctrl_hdr));
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
        /* this assumes a vbuf contains a command that starts with a
         * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
         * virtqueues.
         */
        return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer **vbuffer_p,
                                     int size,
                                     virtio_gpu_resp_cb cb)
{
        return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
                                         sizeof(struct virtio_gpu_ctrl_hdr),
                                         NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
                                DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
                                                      le32_to_cpu(resp->type),
                                                      le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sg(sgt->sgl, sg, *sg_ents, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}

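/*
 * Add a prepared set of scatterlists to the control virtqueue.  If the ring
 * does not have enough free descriptors, kick the host and sleep on
 * ack_queue until completions make room.  The fence is emitted while the
 * queue lock is held so that fence ids reach the ring in order.
 */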
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
                                      struct virtio_gpu_vbuffer *vbuf,
                                      struct virtio_gpu_fence *fence,
                                      int elemcnt,
                                      struct scatterlist **sgs,
                                      int outcnt,
                                      int incnt)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int ret, idx;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                if (fence && vbuf->objs)
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                free_vbuf(vgdev, vbuf);
                return;
        }

        if (vgdev->has_indirect)
                elemcnt = 1;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        if (vq->num_free < elemcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                virtio_gpu_notify(vgdev);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
                goto again;
        }

        /* now that the position of the vbuf in the virtqueue is known, we can
         * finally set the fence id
         */
        if (fence) {
                virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
                                      fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }

        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        WARN_ON(ret);

        trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

        atomic_inc(&vgdev->pending_commands);

        spin_unlock(&vgdev->ctrlq.qlock);

        drm_dev_exit(idx);
}

static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct virtio_gpu_fence *fence)
{
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        struct sg_table *sgt = NULL;
        int elemcnt = 0, outcnt = 0, incnt = 0;

        /* set up vcmd */
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        elemcnt++;
        sgs[outcnt] = &vcmd;
        outcnt++;

        /* set up vout */
        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        int sg_ents;

                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &sg_ents);
                        if (!sgt) {
                                if (fence && vbuf->objs)
                                        virtio_gpu_array_unlock_resv(vbuf->objs);
                                return;
                        }

                        elemcnt += sg_ents;
                        sgs[outcnt] = sgt->sgl;
                } else {
                        sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                        elemcnt++;
                        sgs[outcnt] = &vout;
                }
                outcnt++;
        }

        /* set up vresp */
        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                elemcnt++;
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

        virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
                                  incnt);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
}

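/*
 * Control commands are only queued above; the host is kicked lazily.  Each
 * queued command bumps pending_commands, and virtio_gpu_notify() clears the
 * counter and issues a single virtqueue kick for the whole batch.
 */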
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
        bool notify;

        if (!atomic_read(&vgdev->pending_commands))
                return;

        spin_lock(&vgdev->ctrlq.qlock);
        atomic_set(&vgdev->pending_commands, 0);
        notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
        spin_unlock(&vgdev->ctrlq.qlock);

        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                         struct virtio_gpu_vbuffer *vbuf)
{
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

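/*
 * Cursor updates bypass the batching above: they go on the dedicated cursor
 * virtqueue and the host is notified immediately.
 */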
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int idx, ret, outcnt;
        bool notify;

        if (!drm_dev_enter(vgdev->ddev, &idx)) {
                free_vbuf(vgdev, vbuf);
                return;
        }

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                                           virtio_gpu_vbuf_ctrl_hdr(vbuf));

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);

        drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
        bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo;

        bo = vbuf->resp_cb_data;
        vbuf->resp_cb_data = NULL;

        virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_object *bo)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
                                        virtio_gpu_cmd_unref_cb);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->resp_cb_data = bo;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        uint32_t width, uint32_t height,
                                        uint32_t x, uint32_t y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       shmem->pages->sgl, shmem->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       unsigned int nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        /* ents is handed over to the vbuffer and kvfree'd in free_vbuf() */
        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

*vgdev
,
649 struct virtio_gpu_vbuffer
*vbuf
)
651 struct virtio_gpu_resp_display_info
*resp
=
652 (struct virtio_gpu_resp_display_info
*)vbuf
->resp_buf
;
655 spin_lock(&vgdev
->display_info_lock
);
656 for (i
= 0; i
< vgdev
->num_scanouts
; i
++) {
657 vgdev
->outputs
[i
].info
= resp
->pmodes
[i
];
658 if (resp
->pmodes
[i
].enabled
) {
659 DRM_DEBUG("output %d: %dx%d+%d+%d", i
,
660 le32_to_cpu(resp
->pmodes
[i
].r
.width
),
661 le32_to_cpu(resp
->pmodes
[i
].r
.height
),
662 le32_to_cpu(resp
->pmodes
[i
].r
.x
),
663 le32_to_cpu(resp
->pmodes
[i
].r
.y
));
665 DRM_DEBUG("output %d: disabled", i
);
669 vgdev
->display_info_pending
= false;
670 spin_unlock(&vgdev
->display_info_lock
);
671 wake_up(&vgdev
->resp_wq
);
673 if (!drm_helper_hpd_irq_event(vgdev
->ddev
))
674 drm_kms_helper_hotplug_event(vgdev
->ddev
);
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        if (vgdev->capsets) {
                vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
                vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
                vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        } else {
                DRM_ERROR("invalid capset memory.");
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -EINVAL;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

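/*
 * Fetch one capability set from the host.  A new cache entry is allocated up
 * front; the cap_cache list is then searched under display_info_lock and, if
 * another task already added a matching entry, the freshly allocated one is
 * freed and the existing entry is returned instead.
 */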
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       shmem->pages->sgl, shmem->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj,
                              struct virtio_gpu_mem_entry *ents,
                              unsigned int nents)
{
        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
                                            struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *obj =
                gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
        struct virtio_gpu_resp_resource_uuid *resp =
                (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);

        spin_lock(&vgdev->resource_export_lock);
        WARN_ON(obj->uuid_state != UUID_INITIALIZING);

        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
            obj->uuid_state == UUID_INITIALIZING) {
                memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
                obj->uuid_state = UUID_INITIALIZED;
        } else {
                obj->uuid_state = UUID_INITIALIZATION_FAILED;
        }
        spin_unlock(&vgdev->resource_export_lock);

        wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_resource_assign_uuid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_resp_resource_uuid *resp_buf;

        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
                bo->uuid_state = UUID_INITIALIZATION_FAILED;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

        vbuf->objs = objs;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}