/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
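
/*
 * Size check illustration (derived from the code above): update_cursor()
 * below allocates the host cursor with cursor_alloc(64, 64), so with that
 * default cursor the guest-provided backing must hold at least
 * 64 * 64 pixels * 4 bytes = 16384 bytes of ARGB data for the memcpy()
 * above to stay in bounds.
 */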
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }

    s->cursor.pos.x = cursor->pos.x;
    s->cursor.pos.y = cursor->pos.y;

    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}
static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
        .refresh_rate = b->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /*
     * Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
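
/*
 * Worked example of the stride/hostmem math above (illustrative numbers
 * only): a 1024x768 resource in a 32 bpp format gives
 *     stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes per row,
 * and therefore 768 * 4096 = 3 MiB of host memory, which is what gets
 * accounted against conf_max_hostmem when the resource is created.
 */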
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
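
/*
 * Offset arithmetic example for the transfer above (illustrative numbers):
 * with a 1024-pixel-wide 32 bpp resource the pixman stride is 4096 bytes,
 * so copying a 128x32 rectangle at (16, 8) takes the partial-width path
 * and copies 128 * 4 = 512 bytes per row, reading row h at guest offset
 * t2d.offset + 4096 * h and writing it at (8 + h) * 4096 + 16 * 4 in the
 * host image.  Only a full-width rectangle starting at x == 0 is copied
 * in a single iov_to_buf() call.
 */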
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
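
/*
 * Region math example for the flush loop above (illustrative numbers):
 * with two 1024x768 scanouts side by side (scanout 1 at x = 1024), a guest
 * flush of the rectangle (512, 0) 1024x768 intersects scanout 1 over
 * (1024, 0)-(1536, 768); after translating by (-1024, 0) the extents become
 * (0, 0)-(512, 768), so dpy_gfx_update() refreshes only the left 512x768
 * strip of that console.
 */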
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }
        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->blob) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
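    /*
     * fb.offset example (illustrative numbers): for a 4-byte-per-pixel
     * format with a 4096-byte stride, a scanout rectangle starting at
     * (100, 50) yields an offset of 100 * 4 + 50 * 4096 = 205200 bytes
     * into the resource, i.e. the first visible pixel of the scanout.
     */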

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
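    /*
     * fbend is the first byte past the last pixel the scanout rectangle can
     * touch.  For example, with offset 0, a 4096-byte stride, 4 bytes per
     * pixel and a 1024x768 rectangle it comes to 4096 * 767 + 4 * 1024 =
     * 3145728 bytes, so the backing blob must be at least 3 MiB.
     */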
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
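    /*
     * Sizing note (derived from the check above): each guest entry is a
     * 16-byte struct virtio_gpu_mem_entry (64-bit addr, 32-bit length,
     * 32-bit padding), so the 16384-entry cap bounds this temporary copy
     * at 16384 * 16 = 256 KiB.
     */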
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    g_free(ents);

    *niov = v;
    return 0;
}
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}
static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}
static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}
static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}
static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */
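    /*
     * Resulting stream layout, as written above: for each resource a be32
     * resource_id, width, height, format and iov_cnt, then one be64 address
     * plus be32 length per iov entry, then the raw pixel data
     * (stride * height bytes); a resource_id of 0 terminates the list and
     * the scanout vmstate follows.
     */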

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}
static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}
static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)