/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);

void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

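/*
 * Explanatory note (added for clarity, not from the original commit
 * history): the cursor image is allocated once as 64x64 32-bit pixels
 * above, and update_cursor_data() silently ignores a resource whose
 * dimensions (or blob size, for blob resources) do not match the
 * current cursor, so the guest must provide a matching 64x64 resource
 * for the pointer image to change.
 */
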
struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

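/*
 * Illustrative note (not part of the original comments): for a fenced
 * command the response above echoes the guest's fence back, e.g. a
 * RESOURCE_FLUSH submitted with VIRTIO_GPU_FLAG_FENCE and fence_id 42
 * is completed by a RESP_OK_NODATA whose header carries the same flag,
 * fence_id 42 and the originating ctx_id.
 */
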
void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check; pixman_image_create_bits will fail in case it
     * overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

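/*
 * Worked example (illustrative, not from the original source): for a
 * 24 bpp format and width = 10,
 *     stride = ((10 * 24 + 0x1f) >> 5) * sizeof(uint32_t)
 *            = (271 >> 5) * 4 = 8 * 4 = 32 bytes,
 * i.e. each 30-byte row is padded up to the next 32-bit boundary, and
 * a height of 100 rows costs 100 * 32 = 3200 bytes of host memory.
 */
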
#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

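/*
 * Note (added for clarity): the pixman allocation above is only attempted
 * while res->hostmem + g->hostmem stays below the "max_hostmem" property,
 * so a guest that tries to exceed the budget leaves res->image NULL and
 * gets VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY instead of growing host memory
 * without bound.
 */
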
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

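/*
 * Illustrative note: the else branch above is a fast path. When the
 * transfer rectangle spans full rows (x == 0 and width equal to the
 * image width), the rows are contiguous both in the guest iovec and in
 * the host image, so a single iov_to_buf() of stride * height bytes
 * replaces the per-row copy loop.
 */
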
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}

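/*
 * Note (added for clarity): for blob resources the flush rectangle is
 * tested against each scanout with a standard interval-overlap check
 * (rf.x < s.x + s.width && rf.x + rf.width >= s.x, and likewise for y),
 * and a GL-capable console then redraws the whole scanout rather than
 * just the flushed sub-rectangle.
 */
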
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

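/*
 * Worked example (illustrative): the fbend check above computes the last
 * byte the scanout can touch. For offset 0, stride 4096, bytes_pp 4 and
 * a 1024x768 rect it is 4096 * 767 + 4 * 1024 = 3145728 bytes (3 MiB),
 * which must not exceed the blob size the guest declared at creation
 * time.
 */
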
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

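/*
 * Note (added for clarity): the mapping loop above grows *iov (and the
 * optional *addr array) in chunks of 16 entries via g_renew(), and a
 * guest region that dma_memory_map() can only map partially is split
 * across several iovec entries by the inner do/while loop, which is why
 * the resulting *niov can exceed nr_entries.
 */
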
void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

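/*
 * Note (added for clarity): the processing_cmdq flag above makes the
 * function non-reentrant; a command handler that indirectly triggers
 * more queue activity (or a renderer_blocked transition) cannot restart
 * queue processing in the middle of an earlier invocation. Unfinished
 * (fenced) commands migrate to fenceq and are completed later by
 * virtio_gpu_process_fenceq() below.
 */
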
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

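/*
 * Illustrative summary of the 2d stream produced above: each non-blob
 * resource is written as be32 resource_id, width, height, format and
 * iov_cnt, then one be64 guest address plus be32 length per iovec
 * entry, then the raw image bytes (stride * height); a resource_id of
 * 0 terminates the list, followed by the scanout vmstate.
 */
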
static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }
#ifdef WIN32
        pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
#endif

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}

static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}

static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}

static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}

static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        virtio_gpu_reset_bh(g);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

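/*
 * Note (added for clarity): when reset is requested from a vCPU thread,
 * the actual teardown runs in the reset bottom half on the main loop,
 * while the vCPU thread spins on reset_finished, waiting on reset_cond
 * under the BQL via qemu_cond_wait_bql(); a reset from any other thread
 * simply calls virtio_gpu_reset_bh() directly.
 */
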
static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

1657static void virtio_register_types(void)
1658{
1659 type_register_static(&virtio_gpu_info);
1660}
1661
1662type_init(virtio_register_types)