/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

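/*
 * All fields in the virtio-gpu command structures are little-endian on the
 * wire, so the helpers below byteswap them in place; on little-endian hosts
 * the le*_to_cpus() calls compile down to no-ops.
 */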
static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN
    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }
#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

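/*
 * Example: with virgl compiled in,
 *     VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
 *           g, s, resource_id);
 * expands to a runtime check of g->use_virgl_renderer that picks the
 * virgl variant when the 3D renderer is active; without CONFIG_VIRGL
 * only the simple 2D variant is ever called.
 */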
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }

    s->cursor.pos.x = cursor->pos.x;
    s->cursor.pos.y = cursor->pos.y;
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

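/*
 * events_read in the config space is written by the device and acknowledged
 * by the guest: the guest writes the bits it has consumed to events_clear,
 * and virtio_gpu_set_config() below clears exactly those bits from
 * events_read.
 */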
static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

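/*
 * Worked example (all the 2D formats above are 32 bpp): for a 1024x768
 * resource, stride = ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes, so the
 * estimated host memory is 768 * 4096 = 3 MiB, which is checked against
 * conf.max_hostmem before the pixman image is allocated.
 */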
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

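    /*
     * Two copy strategies below: a transfer with a non-zero offset, or one
     * that covers only a sub-rectangle, is copied scanline by scanline into
     * the right spot of the pixman image; a full-width transfer starting at
     * offset 0 is copied as a single contiguous block.
     */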
    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

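/*
 * Scanout surfaces share pixels with the guest resource: the display
 * surface created below wraps a pointer into the resource's pixman image,
 * and virtio_unref_resource() above drops the extra reference taken on
 * that image once the wrapping surface goes away.
 */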
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

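/*
 * Backing storage for a resource is a guest-supplied scatter list: each
 * virtio_gpu_mem_entry is a guest-physical address plus length, which is
 * turned into a host iovec via cpu_physical_memory_map(). The entry count
 * is capped (16384) to bound the allocation a guest can trigger.
 */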
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    if (!iov) {
        return;
    }

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

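/*
 * Commands queued by virtio_gpu_handle_ctrl() are drained here. A command
 * that finishes synchronously is freed once its response has been sent;
 * a fenced command still in flight (cmd->finished == false) is parked on
 * fenceq until the renderer completes it (for virgl, that happens from
 * the fence handling in the 3d code path).
 */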
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

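/*
 * Resource stream layout used by virtio_gpu_save()/virtio_gpu_load():
 * for each resource: resource_id, width, height, format, iov_cnt, then
 * iov_cnt (addr, len) pairs, then the raw pixel data (stride * height
 * bytes); a resource_id of 0 terminates the list, followed by the
 * scanout vmstate.
 */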
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              res->iov[i].iov_len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        error_setg(errp, "virtio-gpu does not support vIOMMU yet");
        return;
    }

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);