/*
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
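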
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
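
/*
 * Dispatch one cursor-queue command: UPDATE_CURSOR (re)defines the
 * cursor image from a resource, while MOVE_CURSOR only repositions
 * the pointer on the scanout.
 */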
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }

    s->cursor.pos.x = cursor->pos.x;
    s->cursor.pos.y = cursor->pos.y;

    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
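
/*
 * Like virtio_gpu_find_resource(), but logs a guest error and stores
 * a virtio-gpu error code on failure; with require_backing set it
 * also insists that guest pages (and an image or blob) are attached.
 */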
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
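
/*
 * pixman pads each scanline to a 32-bit boundary: for example a
 * 10-pixel-wide 24bpp image needs 240 bits per line, which rounds up
 * to 8 words = 32 bytes of stride.
 */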
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check; pixman_image_create_bits will fail in case it
     * overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif
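
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman
 * image for the resource, charging its size against the max_hostmem
 * limit.
 */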
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
        }
#endif
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
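
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: a blob resource has no pixman
 * image; it is backed directly by the guest-memory entries mapped via
 * virtio_gpu_create_mapping_iov() (and a udmabuf, when available).
 */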
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
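
/*
 * Tear down a resource: detach it from any scanouts still showing it,
 * free the image, unmap the guest pages and drop the hostmem
 * accounting.
 */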
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
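
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy guest backing pages into
 * the host image.  A full-width transfer is done with one copy of
 * stride * height bytes; a partial rectangle has to be copied line by
 * line.
 */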
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
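
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for blob resources shown on a GL
 * console the update goes through dpy_gl_update(); otherwise the
 * flush rectangle is clipped against every scanout showing the
 * resource and forwarded with dpy_gfx_update().
 */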
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
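
/*
 * Common backend for SET_SCANOUT and SET_SCANOUT_BLOB: validate the
 * rectangle against the framebuffer, then either hand a dmabuf to a
 * GL console or wrap the resource memory in a display surface.
 */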
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
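
/*
 * Translate the guest's scatter-gather entries into a host iovec,
 * using dma_memory_map() for each entry; a single entry may be split
 * across several iovec slots when it spans mappings.  Both output
 * arrays are grown in chunks of 16.
 */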
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
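
/*
 * Drain the queued control commands.  Commands that do not finish
 * synchronously are parked on fenceq until the renderer flushes;
 * processing stops early while the renderer is blocked.
 */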
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
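
/*
 * Migration: per-scanout state is described here; resource contents
 * are streamed by virtio_gpu_save()/virtio_gpu_load() below as a list
 * terminated by a zero resource id.
 */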
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }
#ifdef WIN32
        pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
#endif

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}

static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}

static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}

static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }

        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }

        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}

static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}
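
/*
 * Reset may be invoked from a vCPU thread, but resources must be
 * destroyed from the main loop: in that case schedule reset_bh and
 * wait on reset_cond until it signals completion.
 */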
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        virtio_gpu_reset_bh(g);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu/blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.
 * Instead the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)