/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
static void virtio_gpu_reset_bh(void *opaque);

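/*
 * Copy the cursor pixel data out of the referenced resource (either a
 * blob or a pixman image) into the scanout's current cursor buffer,
 * after checking that the resource is large enough for the cursor.
 */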
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

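/*
 * Handle a cursor command: MOVE_CURSOR only repositions the pointer,
 * while UPDATE_CURSOR also (re)defines the cursor image and hotspot
 * from the referenced resource.
 */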
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

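/*
 * Look up a resource, logging a guest error (and optionally storing a
 * virtio-gpu error code in *error) if it does not exist or, when
 * require_backing is set, has no backing storage attached.
 */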
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

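/*
 * Write a response into the command's in-sg, propagating the fence id
 * and context id when the request carried VIRTIO_GPU_FLAG_FENCE, then
 * push the element back onto the virtqueue and notify the guest.
 */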
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

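/*
 * Host memory needed for a width x height image: pixman rounds each
 * row up to a whole number of 32-bit words, so the stride is that word
 * count times sizeof(uint32_t), and the total is height * stride.
 */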
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /*
     * Stride computation copied from pixman/pixman-bits-image.c, skipping
     * the integer overflow check; pixman_image_create_bits() will fail in
     * case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif

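/*
 * RESOURCE_CREATE_2D: validate the resource id and pixel format, then
 * allocate a host-side pixman image for the resource, accounting the
 * allocation against the max_hostmem limit.
 */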
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image,
                                              win32_pixman_image_destroy,
                                              res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

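/*
 * RESOURCE_CREATE_BLOB: create a resource backed directly by guest
 * memory pages mapped through an iovec (exported via udmabuf where
 * supported) instead of a host-side pixman image.
 */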
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

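/*
 * Detach whatever resource currently drives this scanout and clear the
 * scanout state; replacing the surface with NULL lets the display core
 * fall back to its placeholder surface.
 */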
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

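/*
 * TRANSFER_TO_HOST_2D: copy rectangle t2d.r from the guest-supplied
 * backing iovec into the host pixman image. A full-width rectangle is
 * copied in a single pass; otherwise each scanline is copied on its own.
 */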
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

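/*
 * RESOURCE_FLUSH: propagate a dirty rectangle to every scanout showing
 * the resource. Blob resources on GL-capable consoles are flushed with
 * dpy_gl_update(); otherwise the flush region is clipped against each
 * scanout and pushed out with dpy_gfx_update().
 */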
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

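/*
 * Common back end for SET_SCANOUT and SET_SCANOUT_BLOB: validate the
 * scanout rectangle against the framebuffer, then either hand a blob
 * off as a dmabuf (on GL consoles) or (re)create a display surface
 * that aliases the resource's pixel data.
 */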
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle,
                                             fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

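/*
 * Read nr_entries virtio_gpu_mem_entry structs that follow the command
 * header and dma_memory_map() each guest range, growing *iov (and
 * optionally *addr) in chunks of 16 slots; an entry that cannot be
 * mapped in one piece is split across several iovec slots.
 */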
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

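/*
 * Drain the queued control commands. Fenced commands that have not
 * finished yet are moved to fenceq and counted as inflight; processing
 * stops early while the renderer is blocked. The processing_cmdq flag
 * guards against reentrancy.
 */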
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

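/*
 * Migration: write each resource as id, geometry, format, backing iovec
 * layout and raw pixel data, terminated by a zero resource id, followed
 * by the scanout state.
 */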
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

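/*
 * Migration: the inverse of virtio_gpu_save(). Recreate each resource's
 * pixman image, restore its pixel contents and guest DMA mappings, then
 * reattach resources to scanouts and redefine cursors.
 */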
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }
#ifdef WIN32
        pixman_image_set_destroy_function(res->image,
                                          win32_pixman_image_destroy,
                                          res->handle);
#endif

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}

static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}

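/*
 * Device reset. When called from a vCPU thread the resource teardown is
 * bounced to the main loop via reset_bh and waited for on reset_cond,
 * since it touches display state; pending and fenced commands are then
 * discarded.
 */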
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_iothread(&g->reset_cond);
        }
    } else {
        virtio_gpu_reset_bh(g);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core. Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)