hw/display/virtio-gpu.c (mirror_qemu.git)
1 /*
2 * Virtio GPU Device
3 *
4 * Copyright Red Hat, Inc. 2013-2014
5 *
6 * Authors:
7 * Dave Airlie <airlied@redhat.com>
8 * Gerd Hoffmann <kraxel@redhat.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/units.h"
16 #include "qemu/iov.h"
17 #include "ui/console.h"
18 #include "trace.h"
19 #include "sysemu/dma.h"
20 #include "sysemu/sysemu.h"
21 #include "hw/virtio/virtio.h"
22 #include "migration/qemu-file-types.h"
23 #include "hw/virtio/virtio-gpu.h"
24 #include "hw/virtio/virtio-gpu-bswap.h"
25 #include "hw/virtio/virtio-gpu-pixman.h"
26 #include "hw/virtio/virtio-bus.h"
27 #include "hw/qdev-properties.h"
28 #include "qemu/log.h"
29 #include "qemu/module.h"
30 #include "qapi/error.h"
31 #include "qemu/error-report.h"
32
33 #define VIRTIO_GPU_VM_VERSION 1
34
35 static struct virtio_gpu_simple_resource*
36 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
37 static struct virtio_gpu_simple_resource *
38 virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
39 bool require_backing,
40 const char *caller, uint32_t *error);
41
42 static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
43 struct virtio_gpu_simple_resource *res);
44
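/*
 * Hardware cursors are fixed-size 64x64 ARGB images (see cursor_alloc(64, 64)
 * in update_cursor() below).  A pixman-backed cursor resource must match that
 * size exactly; a blob resource only needs to provide at least
 * width * height * 4 bytes of data.
 */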
45 void virtio_gpu_update_cursor_data(VirtIOGPU *g,
46 struct virtio_gpu_scanout *s,
47 uint32_t resource_id)
48 {
49 struct virtio_gpu_simple_resource *res;
50 uint32_t pixels;
51 void *data;
52
53 res = virtio_gpu_find_check_resource(g, resource_id, false,
54 __func__, NULL);
55 if (!res) {
56 return;
57 }
58
59 if (res->blob_size) {
60 if (res->blob_size < (s->current_cursor->width *
61 s->current_cursor->height * 4)) {
62 return;
63 }
64 data = res->blob;
65 } else {
66 if (pixman_image_get_width(res->image) != s->current_cursor->width ||
67 pixman_image_get_height(res->image) != s->current_cursor->height) {
68 return;
69 }
70 data = pixman_image_get_data(res->image);
71 }
72
73 pixels = s->current_cursor->width * s->current_cursor->height;
74 memcpy(s->current_cursor->data, data,
75 pixels * sizeof(uint32_t));
76 }
77
78 static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
79 {
80 struct virtio_gpu_scanout *s;
81 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
82 bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
83
84 if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
85 return;
86 }
87 s = &g->parent_obj.scanout[cursor->pos.scanout_id];
88
89 trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
90 cursor->pos.x,
91 cursor->pos.y,
92 move ? "move" : "update",
93 cursor->resource_id);
94
95 if (!move) {
96 if (!s->current_cursor) {
97 s->current_cursor = cursor_alloc(64, 64);
98 }
99
100 s->current_cursor->hot_x = cursor->hot_x;
101 s->current_cursor->hot_y = cursor->hot_y;
102
103 if (cursor->resource_id > 0) {
104 vgc->update_cursor_data(g, s, cursor->resource_id);
105 }
106 dpy_cursor_define(s->con, s->current_cursor);
107
108 s->cursor = *cursor;
109 } else {
110 s->cursor.pos.x = cursor->pos.x;
111 s->cursor.pos.y = cursor->pos.y;
112 }
113 dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
114 cursor->resource_id ? 1 : 0);
115 }
116
117 static struct virtio_gpu_simple_resource *
118 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
119 {
120 struct virtio_gpu_simple_resource *res;
121
122 QTAILQ_FOREACH(res, &g->reslist, next) {
123 if (res->resource_id == resource_id) {
124 return res;
125 }
126 }
127 return NULL;
128 }
129
130 static struct virtio_gpu_simple_resource *
131 virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
132 bool require_backing,
133 const char *caller, uint32_t *error)
134 {
135 struct virtio_gpu_simple_resource *res;
136
137 res = virtio_gpu_find_resource(g, resource_id);
138 if (!res) {
139 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
140 caller, resource_id);
141 if (error) {
142 *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
143 }
144 return NULL;
145 }
146
147 if (require_backing) {
148 if (!res->iov || (!res->image && !res->blob)) {
149 qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
150 caller, resource_id);
151 if (error) {
152 *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
153 }
154 return NULL;
155 }
156 }
157
158 return res;
159 }
160
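/*
 * Send a response back on the command's virtqueue.  If the request carried
 * VIRTIO_GPU_FLAG_FENCE, the fence_id and ctx_id from the request header are
 * echoed in the response so the guest driver can match the completion.
 */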
161 void virtio_gpu_ctrl_response(VirtIOGPU *g,
162 struct virtio_gpu_ctrl_command *cmd,
163 struct virtio_gpu_ctrl_hdr *resp,
164 size_t resp_len)
165 {
166 size_t s;
167
168 if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
169 resp->flags |= VIRTIO_GPU_FLAG_FENCE;
170 resp->fence_id = cmd->cmd_hdr.fence_id;
171 resp->ctx_id = cmd->cmd_hdr.ctx_id;
172 }
173 virtio_gpu_ctrl_hdr_bswap(resp);
174 s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
175 if (s != resp_len) {
176 qemu_log_mask(LOG_GUEST_ERROR,
177 "%s: response size incorrect %zu vs %zu\n",
178 __func__, s, resp_len);
179 }
180 virtqueue_push(cmd->vq, &cmd->elem, s);
181 virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
182 cmd->finished = true;
183 }
184
185 void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
186 struct virtio_gpu_ctrl_command *cmd,
187 enum virtio_gpu_ctrl_type type)
188 {
189 struct virtio_gpu_ctrl_hdr resp;
190
191 memset(&resp, 0, sizeof(resp));
192 resp.type = type;
193 virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
194 }
195
196 void virtio_gpu_get_display_info(VirtIOGPU *g,
197 struct virtio_gpu_ctrl_command *cmd)
198 {
199 struct virtio_gpu_resp_display_info display_info;
200
201 trace_virtio_gpu_cmd_get_display_info();
202 memset(&display_info, 0, sizeof(display_info));
203 display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
204 virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
205 virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
206 sizeof(display_info));
207 }
208
209 void virtio_gpu_get_edid(VirtIOGPU *g,
210 struct virtio_gpu_ctrl_command *cmd)
211 {
212 struct virtio_gpu_resp_edid edid;
213 struct virtio_gpu_cmd_get_edid get_edid;
214 VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
215
216 VIRTIO_GPU_FILL_CMD(get_edid);
217 virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
218
219 if (get_edid.scanout >= b->conf.max_outputs) {
220 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
221 return;
222 }
223
224 trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
225 memset(&edid, 0, sizeof(edid));
226 edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
227 virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
228 virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
229 }
230
231 static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
232 uint32_t width, uint32_t height)
233 {
234 /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow
235 * check; pixman_image_create_bits() will fail if it overflows.
236 */
237
238 int bpp = PIXMAN_FORMAT_BPP(pformat);
239 int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
240 return height * stride;
241 }
242
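/*
 * Worked example of the stride/size computation above: for a 32 bpp format
 * such as PIXMAN_x8r8g8b8 and width 1024, stride = ((1024 * 32 + 0x1f) >> 5)
 * * 4 = 4096 bytes, so a 1024x768 resource accounts for 768 * 4096 = 3 MiB
 * of host memory against the max_hostmem budget.
 */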
243 #ifdef WIN32
244 static void
245 win32_pixman_image_destroy(pixman_image_t *image, void *data)
246 {
247 HANDLE handle = data;
248
249 qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
250 }
251 #endif
252
253 static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
254 struct virtio_gpu_ctrl_command *cmd)
255 {
256 pixman_format_code_t pformat;
257 struct virtio_gpu_simple_resource *res;
258 struct virtio_gpu_resource_create_2d c2d;
259
260 VIRTIO_GPU_FILL_CMD(c2d);
261 virtio_gpu_bswap_32(&c2d, sizeof(c2d));
262 trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
263 c2d.width, c2d.height);
264
265 if (c2d.resource_id == 0) {
266 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
267 __func__);
268 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
269 return;
270 }
271
272 res = virtio_gpu_find_resource(g, c2d.resource_id);
273 if (res) {
274 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
275 __func__, c2d.resource_id);
276 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
277 return;
278 }
279
280 res = g_new0(struct virtio_gpu_simple_resource, 1);
281
282 res->width = c2d.width;
283 res->height = c2d.height;
284 res->format = c2d.format;
285 res->resource_id = c2d.resource_id;
286
287 pformat = virtio_gpu_get_pixman_format(c2d.format);
288 if (!pformat) {
289 qemu_log_mask(LOG_GUEST_ERROR,
290 "%s: host couldn't handle guest format %d\n",
291 __func__, c2d.format);
292 g_free(res);
293 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
294 return;
295 }
296
297 res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
298 if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
299 void *bits = NULL;
300 #ifdef WIN32
301 bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
302 if (!bits) {
303 goto end;
304 }
305 #endif
306 res->image = pixman_image_create_bits(
307 pformat,
308 c2d.width,
309 c2d.height,
310 bits, c2d.height ? res->hostmem / c2d.height : 0);
311 #ifdef WIN32
312 if (res->image) {
313 pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
314 }
315 #endif
316 }
317
318 #ifdef WIN32
319 end:
320 #endif
321 if (!res->image) {
322 qemu_log_mask(LOG_GUEST_ERROR,
323 "%s: resource creation failed %d %d %d\n",
324 __func__, c2d.resource_id, c2d.width, c2d.height);
325 g_free(res);
326 cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
327 return;
328 }
329
330 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
331 g->hostmem += res->hostmem;
332 }
333
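/*
 * Blob resources here are backed directly by guest pages: the guest supplies
 * a list of memory entries which virtio_gpu_create_mapping_iov() turns into
 * an iovec, and virtio_gpu_init_udmabuf() may additionally provide a linear
 * host mapping (res->blob) via udmabuf.  No host-side pixman copy is
 * allocated for them.
 */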
334 static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
335 struct virtio_gpu_ctrl_command *cmd)
336 {
337 struct virtio_gpu_simple_resource *res;
338 struct virtio_gpu_resource_create_blob cblob;
339 int ret;
340
341 VIRTIO_GPU_FILL_CMD(cblob);
342 virtio_gpu_create_blob_bswap(&cblob);
343 trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
344
345 if (cblob.resource_id == 0) {
346 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
347 __func__);
348 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
349 return;
350 }
351
352 if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
353 cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
354 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
355 __func__);
356 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
357 return;
358 }
359
360 if (virtio_gpu_find_resource(g, cblob.resource_id)) {
361 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
362 __func__, cblob.resource_id);
363 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
364 return;
365 }
366
367 res = g_new0(struct virtio_gpu_simple_resource, 1);
368 res->resource_id = cblob.resource_id;
369 res->blob_size = cblob.size;
370
371 ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
372 cmd, &res->addrs, &res->iov,
373 &res->iov_cnt);
374 if (ret != 0) {
375 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
376 g_free(res);
377 return;
378 }
379
380 virtio_gpu_init_udmabuf(res);
381 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
382 }
383
384 static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
385 {
386 struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
387 struct virtio_gpu_simple_resource *res;
388
389 if (scanout->resource_id == 0) {
390 return;
391 }
392
393 res = virtio_gpu_find_resource(g, scanout->resource_id);
394 if (res) {
395 res->scanout_bitmask &= ~(1 << scanout_id);
396 }
397
398 dpy_gfx_replace_surface(scanout->con, NULL);
399 scanout->resource_id = 0;
400 scanout->ds = NULL;
401 scanout->width = 0;
402 scanout->height = 0;
403 }
404
405 static void virtio_gpu_resource_destroy(VirtIOGPU *g,
406 struct virtio_gpu_simple_resource *res)
407 {
408 int i;
409
410 if (res->scanout_bitmask) {
411 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
412 if (res->scanout_bitmask & (1 << i)) {
413 virtio_gpu_disable_scanout(g, i);
414 }
415 }
416 }
417
418 qemu_pixman_image_unref(res->image);
419 virtio_gpu_cleanup_mapping(g, res);
420 QTAILQ_REMOVE(&g->reslist, res, next);
421 g->hostmem -= res->hostmem;
422 g_free(res);
423 }
424
425 static void virtio_gpu_resource_unref(VirtIOGPU *g,
426 struct virtio_gpu_ctrl_command *cmd)
427 {
428 struct virtio_gpu_simple_resource *res;
429 struct virtio_gpu_resource_unref unref;
430
431 VIRTIO_GPU_FILL_CMD(unref);
432 virtio_gpu_bswap_32(&unref, sizeof(unref));
433 trace_virtio_gpu_cmd_res_unref(unref.resource_id);
434
435 res = virtio_gpu_find_resource(g, unref.resource_id);
436 if (!res) {
437 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
438 __func__, unref.resource_id);
439 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
440 return;
441 }
442 virtio_gpu_resource_destroy(g, res);
443 }
444
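/*
 * TRANSFER_TO_HOST_2D copies pixel data from the guest's backing iovec into
 * the host-side pixman image.  When the rectangle spans the full width of the
 * image a single stride * height copy is used; otherwise the copy is done row
 * by row.
 */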
445 static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
446 struct virtio_gpu_ctrl_command *cmd)
447 {
448 struct virtio_gpu_simple_resource *res;
449 int h, bpp;
450 uint32_t src_offset, dst_offset, stride;
451 pixman_format_code_t format;
452 struct virtio_gpu_transfer_to_host_2d t2d;
453 void *img_data;
454
455 VIRTIO_GPU_FILL_CMD(t2d);
456 virtio_gpu_t2d_bswap(&t2d);
457 trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
458
459 res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
460 __func__, &cmd->error);
461 if (!res || res->blob) {
462 return;
463 }
464
465 if (t2d.r.x > res->width ||
466 t2d.r.y > res->height ||
467 t2d.r.width > res->width ||
468 t2d.r.height > res->height ||
469 t2d.r.x + t2d.r.width > res->width ||
470 t2d.r.y + t2d.r.height > res->height) {
471 qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
472 " bounds for resource %d: %d %d %d %d vs %d %d\n",
473 __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
474 t2d.r.width, t2d.r.height, res->width, res->height);
475 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
476 return;
477 }
478
479 format = pixman_image_get_format(res->image);
480 bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
481 stride = pixman_image_get_stride(res->image);
482 img_data = pixman_image_get_data(res->image);
483
484 if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
485 for (h = 0; h < t2d.r.height; h++) {
486 src_offset = t2d.offset + stride * h;
487 dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
488
489 iov_to_buf(res->iov, res->iov_cnt, src_offset,
490 (uint8_t *)img_data + dst_offset,
491 t2d.r.width * bpp);
492 }
493 } else {
494 src_offset = t2d.offset;
495 dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
496 iov_to_buf(res->iov, res->iov_cnt, src_offset,
497 (uint8_t *)img_data + dst_offset,
498 stride * t2d.r.height);
499 }
500 }
501
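/*
 * RESOURCE_FLUSH publishes a damaged rectangle.  For blob resources shown on
 * a GL-capable console the whole scanout is refreshed with dpy_gl_update();
 * otherwise the flush rectangle is intersected with each scanout that
 * references the resource, translated into console coordinates and passed to
 * dpy_gfx_update().
 */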
502 static void virtio_gpu_resource_flush(VirtIOGPU *g,
503 struct virtio_gpu_ctrl_command *cmd)
504 {
505 struct virtio_gpu_simple_resource *res;
506 struct virtio_gpu_resource_flush rf;
507 struct virtio_gpu_scanout *scanout;
508 pixman_region16_t flush_region;
509 bool within_bounds = false;
510 bool update_submitted = false;
511 int i;
512
513 VIRTIO_GPU_FILL_CMD(rf);
514 virtio_gpu_bswap_32(&rf, sizeof(rf));
515 trace_virtio_gpu_cmd_res_flush(rf.resource_id,
516 rf.r.width, rf.r.height, rf.r.x, rf.r.y);
517
518 res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
519 __func__, &cmd->error);
520 if (!res) {
521 return;
522 }
523
524 if (res->blob) {
525 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
526 scanout = &g->parent_obj.scanout[i];
527 if (scanout->resource_id == res->resource_id &&
528 rf.r.x < scanout->x + scanout->width &&
529 rf.r.x + rf.r.width >= scanout->x &&
530 rf.r.y < scanout->y + scanout->height &&
531 rf.r.y + rf.r.height >= scanout->y) {
532 within_bounds = true;
533
534 if (console_has_gl(scanout->con)) {
535 dpy_gl_update(scanout->con, 0, 0, scanout->width,
536 scanout->height);
537 update_submitted = true;
538 }
539 }
540 }
541
542 if (update_submitted) {
543 return;
544 }
545 if (!within_bounds) {
546 qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
547 " bounds for flush %d: %d %d %d %d\n",
548 __func__, rf.resource_id, rf.r.x, rf.r.y,
549 rf.r.width, rf.r.height);
550 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
551 return;
552 }
553 }
554
555 if (!res->blob &&
556 (rf.r.x > res->width ||
557 rf.r.y > res->height ||
558 rf.r.width > res->width ||
559 rf.r.height > res->height ||
560 rf.r.x + rf.r.width > res->width ||
561 rf.r.y + rf.r.height > res->height)) {
562 qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
563 " bounds for resource %d: %d %d %d %d vs %d %d\n",
564 __func__, rf.resource_id, rf.r.x, rf.r.y,
565 rf.r.width, rf.r.height, res->width, res->height);
566 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
567 return;
568 }
569
570 pixman_region_init_rect(&flush_region,
571 rf.r.x, rf.r.y, rf.r.width, rf.r.height);
572 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
573 pixman_region16_t region, finalregion;
574 pixman_box16_t *extents;
575
576 if (!(res->scanout_bitmask & (1 << i))) {
577 continue;
578 }
579 scanout = &g->parent_obj.scanout[i];
580
581 pixman_region_init(&finalregion);
582 pixman_region_init_rect(&region, scanout->x, scanout->y,
583 scanout->width, scanout->height);
584
585 pixman_region_intersect(&finalregion, &flush_region, &region);
586 pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
587 extents = pixman_region_extents(&finalregion);
588 /* work out the area we need to update for each console */
589 dpy_gfx_update(g->parent_obj.scanout[i].con,
590 extents->x1, extents->y1,
591 extents->x2 - extents->x1,
592 extents->y2 - extents->y1);
593
594 pixman_region_fini(&region);
595 pixman_region_fini(&finalregion);
596 }
597 pixman_region_fini(&flush_region);
598 }
599
600 static void virtio_unref_resource(pixman_image_t *image, void *data)
601 {
602 pixman_image_unref(data);
603 }
604
605 static void virtio_gpu_update_scanout(VirtIOGPU *g,
606 uint32_t scanout_id,
607 struct virtio_gpu_simple_resource *res,
608 struct virtio_gpu_rect *r)
609 {
610 struct virtio_gpu_simple_resource *ores;
611 struct virtio_gpu_scanout *scanout;
612
613 scanout = &g->parent_obj.scanout[scanout_id];
614 ores = virtio_gpu_find_resource(g, scanout->resource_id);
615 if (ores) {
616 ores->scanout_bitmask &= ~(1 << scanout_id);
617 }
618
619 res->scanout_bitmask |= (1 << scanout_id);
620 scanout->resource_id = res->resource_id;
621 scanout->x = r->x;
622 scanout->y = r->y;
623 scanout->width = r->width;
624 scanout->height = r->height;
625 }
626
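/*
 * Common tail of SET_SCANOUT and SET_SCANOUT_BLOB: validate the rectangle
 * against the framebuffer, then either hand a dmabuf to a GL console (blob
 * case) or (re)create a DisplaySurface wrapping the resource memory at
 * fb->offset with fb->stride, and finally record the new scanout geometry.
 */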
627 static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
628 uint32_t scanout_id,
629 struct virtio_gpu_framebuffer *fb,
630 struct virtio_gpu_simple_resource *res,
631 struct virtio_gpu_rect *r,
632 uint32_t *error)
633 {
634 struct virtio_gpu_scanout *scanout;
635 uint8_t *data;
636
637 scanout = &g->parent_obj.scanout[scanout_id];
638
639 if (r->x > fb->width ||
640 r->y > fb->height ||
641 r->width < 16 ||
642 r->height < 16 ||
643 r->width > fb->width ||
644 r->height > fb->height ||
645 r->x + r->width > fb->width ||
646 r->y + r->height > fb->height) {
647 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
648 " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
649 __func__, scanout_id, res->resource_id,
650 r->x, r->y, r->width, r->height,
651 fb->width, fb->height);
652 *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
653 return;
654 }
655
656 g->parent_obj.enable = 1;
657
658 if (res->blob) {
659 if (console_has_gl(scanout->con)) {
660 if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
661 virtio_gpu_update_scanout(g, scanout_id, res, r);
662 } else {
663 *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
664 }
665 return;
666 }
667
668 data = res->blob;
669 } else {
670 data = (uint8_t *)pixman_image_get_data(res->image);
671 }
672
673 /* create a surface for this scanout */
674 if ((res->blob && !console_has_gl(scanout->con)) ||
675 !scanout->ds ||
676 surface_data(scanout->ds) != data + fb->offset ||
677 scanout->width != r->width ||
678 scanout->height != r->height) {
679 pixman_image_t *rect;
680 void *ptr = data + fb->offset;
681 rect = pixman_image_create_bits(fb->format, r->width, r->height,
682 ptr, fb->stride);
683
684 if (res->image) {
685 pixman_image_ref(res->image);
686 pixman_image_set_destroy_function(rect, virtio_unref_resource,
687 res->image);
688 }
689
690 /* realloc the surface ptr */
691 scanout->ds = qemu_create_displaysurface_pixman(rect);
692 if (!scanout->ds) {
693 *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
694 return;
695 }
696 #ifdef WIN32
697 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
698 #endif
699
700 pixman_image_unref(rect);
701 dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
702 scanout->ds);
703 }
704
705 virtio_gpu_update_scanout(g, scanout_id, res, r);
706 }
707
708 static void virtio_gpu_set_scanout(VirtIOGPU *g,
709 struct virtio_gpu_ctrl_command *cmd)
710 {
711 struct virtio_gpu_simple_resource *res;
712 struct virtio_gpu_framebuffer fb = { 0 };
713 struct virtio_gpu_set_scanout ss;
714
715 VIRTIO_GPU_FILL_CMD(ss);
716 virtio_gpu_bswap_32(&ss, sizeof(ss));
717 trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
718 ss.r.width, ss.r.height, ss.r.x, ss.r.y);
719
720 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
721 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
722 __func__, ss.scanout_id);
723 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
724 return;
725 }
726
727 if (ss.resource_id == 0) {
728 virtio_gpu_disable_scanout(g, ss.scanout_id);
729 return;
730 }
731
732 res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
733 __func__, &cmd->error);
734 if (!res) {
735 return;
736 }
737
738 fb.format = pixman_image_get_format(res->image);
739 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
740 fb.width = pixman_image_get_width(res->image);
741 fb.height = pixman_image_get_height(res->image);
742 fb.stride = pixman_image_get_stride(res->image);
743 fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
744
745 virtio_gpu_do_set_scanout(g, ss.scanout_id,
746 &fb, res, &ss.r, &cmd->error);
747 }
748
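/*
 * SET_SCANOUT_BLOB carries the framebuffer layout explicitly.  The end of the
 * visible rectangle inside the blob is computed as
 *   fbend = offset + stride * (r.height - 1) + bytes_pp * r.width
 * and must not exceed res->blob_size.
 */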
749 static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
750 struct virtio_gpu_ctrl_command *cmd)
751 {
752 struct virtio_gpu_simple_resource *res;
753 struct virtio_gpu_framebuffer fb = { 0 };
754 struct virtio_gpu_set_scanout_blob ss;
755 uint64_t fbend;
756
757 VIRTIO_GPU_FILL_CMD(ss);
758 virtio_gpu_scanout_blob_bswap(&ss);
759 trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
760 ss.r.width, ss.r.height, ss.r.x,
761 ss.r.y);
762
763 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
764 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
765 __func__, ss.scanout_id);
766 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
767 return;
768 }
769
770 if (ss.resource_id == 0) {
771 virtio_gpu_disable_scanout(g, ss.scanout_id);
772 return;
773 }
774
775 res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
776 __func__, &cmd->error);
777 if (!res) {
778 return;
779 }
780
781 fb.format = virtio_gpu_get_pixman_format(ss.format);
782 if (!fb.format) {
783 qemu_log_mask(LOG_GUEST_ERROR,
784 "%s: host couldn't handle guest format %d\n",
785 __func__, ss.format);
786 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
787 return;
788 }
789
790 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
791 fb.width = ss.width;
792 fb.height = ss.height;
793 fb.stride = ss.strides[0];
794 fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
795
796 fbend = fb.offset;
797 fbend += fb.stride * (ss.r.height - 1);
798 fbend += fb.bytes_pp * ss.r.width;
799 if (fbend > res->blob_size) {
800 qemu_log_mask(LOG_GUEST_ERROR,
801 "%s: fb end out of range\n",
802 __func__);
803 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
804 return;
805 }
806
807 virtio_gpu_do_set_scanout(g, ss.scanout_id,
808 &fb, res, &ss.r, &cmd->error);
809 }
810
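/*
 * Read nr_entries struct virtio_gpu_mem_entry records from the command
 * payload (starting 'offset' bytes in) and dma_memory_map() each guest region
 * into an iovec.  A single entry may be split across several iovec slots if
 * it cannot be mapped contiguously; the arrays grow in chunks of 16.  On
 * failure, everything mapped so far is unwound.
 */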
811 int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
812 uint32_t nr_entries, uint32_t offset,
813 struct virtio_gpu_ctrl_command *cmd,
814 uint64_t **addr, struct iovec **iov,
815 uint32_t *niov)
816 {
817 struct virtio_gpu_mem_entry *ents;
818 size_t esize, s;
819 int e, v;
820
821 if (nr_entries > 16384) {
822 qemu_log_mask(LOG_GUEST_ERROR,
823 "%s: nr_entries is too big (%d > 16384)\n",
824 __func__, nr_entries);
825 return -1;
826 }
827
828 esize = sizeof(*ents) * nr_entries;
829 ents = g_malloc(esize);
830 s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
831 offset, ents, esize);
832 if (s != esize) {
833 qemu_log_mask(LOG_GUEST_ERROR,
834 "%s: command data size incorrect %zu vs %zu\n",
835 __func__, s, esize);
836 g_free(ents);
837 return -1;
838 }
839
840 *iov = NULL;
841 if (addr) {
842 *addr = NULL;
843 }
844 for (e = 0, v = 0; e < nr_entries; e++) {
845 uint64_t a = le64_to_cpu(ents[e].addr);
846 uint32_t l = le32_to_cpu(ents[e].length);
847 hwaddr len;
848 void *map;
849
850 do {
851 len = l;
852 map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
853 DMA_DIRECTION_TO_DEVICE,
854 MEMTXATTRS_UNSPECIFIED);
855 if (!map) {
856 qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
857 " element %d\n", __func__, e);
858 virtio_gpu_cleanup_mapping_iov(g, *iov, v);
859 g_free(ents);
860 *iov = NULL;
861 if (addr) {
862 g_free(*addr);
863 *addr = NULL;
864 }
865 return -1;
866 }
867
868 if (!(v % 16)) {
869 *iov = g_renew(struct iovec, *iov, v + 16);
870 if (addr) {
871 *addr = g_renew(uint64_t, *addr, v + 16);
872 }
873 }
874 (*iov)[v].iov_base = map;
875 (*iov)[v].iov_len = len;
876 if (addr) {
877 (*addr)[v] = a;
878 }
879
880 a += len;
881 l -= len;
882 v += 1;
883 } while (l > 0);
884 }
885 *niov = v;
886
887 g_free(ents);
888 return 0;
889 }
890
891 void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
892 struct iovec *iov, uint32_t count)
893 {
894 int i;
895
896 for (i = 0; i < count; i++) {
897 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
898 iov[i].iov_base, iov[i].iov_len,
899 DMA_DIRECTION_TO_DEVICE,
900 iov[i].iov_len);
901 }
902 g_free(iov);
903 }
904
905 static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
906 struct virtio_gpu_simple_resource *res)
907 {
908 virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
909 res->iov = NULL;
910 res->iov_cnt = 0;
911 g_free(res->addrs);
912 res->addrs = NULL;
913
914 if (res->blob) {
915 virtio_gpu_fini_udmabuf(res);
916 }
917 }
918
919 static void
920 virtio_gpu_resource_attach_backing(VirtIOGPU *g,
921 struct virtio_gpu_ctrl_command *cmd)
922 {
923 struct virtio_gpu_simple_resource *res;
924 struct virtio_gpu_resource_attach_backing ab;
925 int ret;
926
927 VIRTIO_GPU_FILL_CMD(ab);
928 virtio_gpu_bswap_32(&ab, sizeof(ab));
929 trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
930
931 res = virtio_gpu_find_resource(g, ab.resource_id);
932 if (!res) {
933 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
934 __func__, ab.resource_id);
935 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
936 return;
937 }
938
939 if (res->iov) {
940 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
941 return;
942 }
943
944 ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
945 &res->addrs, &res->iov, &res->iov_cnt);
946 if (ret != 0) {
947 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
948 return;
949 }
950 }
951
952 static void
953 virtio_gpu_resource_detach_backing(VirtIOGPU *g,
954 struct virtio_gpu_ctrl_command *cmd)
955 {
956 struct virtio_gpu_simple_resource *res;
957 struct virtio_gpu_resource_detach_backing detach;
958
959 VIRTIO_GPU_FILL_CMD(detach);
960 virtio_gpu_bswap_32(&detach, sizeof(detach));
961 trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);
962
963 res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
964 __func__, &cmd->error);
965 if (!res) {
966 return;
967 }
968 virtio_gpu_cleanup_mapping(g, res);
969 }
970
971 void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
972 struct virtio_gpu_ctrl_command *cmd)
973 {
974 VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
975 virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
976
977 switch (cmd->cmd_hdr.type) {
978 case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
979 virtio_gpu_get_display_info(g, cmd);
980 break;
981 case VIRTIO_GPU_CMD_GET_EDID:
982 virtio_gpu_get_edid(g, cmd);
983 break;
984 case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
985 virtio_gpu_resource_create_2d(g, cmd);
986 break;
987 case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
988 if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
989 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
990 break;
991 }
992 virtio_gpu_resource_create_blob(g, cmd);
993 break;
994 case VIRTIO_GPU_CMD_RESOURCE_UNREF:
995 virtio_gpu_resource_unref(g, cmd);
996 break;
997 case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
998 virtio_gpu_resource_flush(g, cmd);
999 break;
1000 case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
1001 virtio_gpu_transfer_to_host_2d(g, cmd);
1002 break;
1003 case VIRTIO_GPU_CMD_SET_SCANOUT:
1004 virtio_gpu_set_scanout(g, cmd);
1005 break;
1006 case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
1007 if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
1008 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
1009 break;
1010 }
1011 virtio_gpu_set_scanout_blob(g, cmd);
1012 break;
1013 case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
1014 virtio_gpu_resource_attach_backing(g, cmd);
1015 break;
1016 case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
1017 virtio_gpu_resource_detach_backing(g, cmd);
1018 break;
1019 default:
1020 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
1021 break;
1022 }
1023 if (!cmd->finished) {
1024 if (!g->parent_obj.renderer_blocked) {
1025 virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
1026 VIRTIO_GPU_RESP_OK_NODATA);
1027 }
1028 }
1029 }
1030
1031 static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
1032 {
1033 VirtIOGPU *g = VIRTIO_GPU(vdev);
1034 qemu_bh_schedule(g->ctrl_bh);
1035 }
1036
1037 static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
1038 {
1039 VirtIOGPU *g = VIRTIO_GPU(vdev);
1040 qemu_bh_schedule(g->cursor_bh);
1041 }
1042
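/*
 * Control commands are queued on g->cmdq by virtio_gpu_handle_ctrl() and
 * drained here.  A command that is not finished after processing (e.g.
 * because the renderer is blocked) is parked on g->fenceq and answered later
 * from virtio_gpu_process_fenceq(), once the GL flush callback runs.
 */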
1043 void virtio_gpu_process_cmdq(VirtIOGPU *g)
1044 {
1045 struct virtio_gpu_ctrl_command *cmd;
1046 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
1047
1048 if (g->processing_cmdq) {
1049 return;
1050 }
1051 g->processing_cmdq = true;
1052 while (!QTAILQ_EMPTY(&g->cmdq)) {
1053 cmd = QTAILQ_FIRST(&g->cmdq);
1054
1055 if (g->parent_obj.renderer_blocked) {
1056 break;
1057 }
1058
1059 /* process command */
1060 vgc->process_cmd(g, cmd);
1061
1062 QTAILQ_REMOVE(&g->cmdq, cmd, next);
1063 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1064 g->stats.requests++;
1065 }
1066
1067 if (!cmd->finished) {
1068 QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
1069 g->inflight++;
1070 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1071 if (g->stats.max_inflight < g->inflight) {
1072 g->stats.max_inflight = g->inflight;
1073 }
1074 fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
1075 }
1076 } else {
1077 g_free(cmd);
1078 }
1079 }
1080 g->processing_cmdq = false;
1081 }
1082
1083 static void virtio_gpu_process_fenceq(VirtIOGPU *g)
1084 {
1085 struct virtio_gpu_ctrl_command *cmd, *tmp;
1086
1087 QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
1088 trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
1089 virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
1090 QTAILQ_REMOVE(&g->fenceq, cmd, next);
1091 g_free(cmd);
1092 g->inflight--;
1093 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1094 fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
1095 }
1096 }
1097 }
1098
1099 static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
1100 {
1101 VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);
1102
1103 virtio_gpu_process_fenceq(g);
1104 virtio_gpu_process_cmdq(g);
1105 }
1106
1107 static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1108 {
1109 VirtIOGPU *g = VIRTIO_GPU(vdev);
1110 struct virtio_gpu_ctrl_command *cmd;
1111
1112 if (!virtio_queue_ready(vq)) {
1113 return;
1114 }
1115
1116 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
1117 while (cmd) {
1118 cmd->vq = vq;
1119 cmd->error = 0;
1120 cmd->finished = false;
1121 QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
1122 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
1123 }
1124
1125 virtio_gpu_process_cmdq(g);
1126 }
1127
1128 static void virtio_gpu_ctrl_bh(void *opaque)
1129 {
1130 VirtIOGPU *g = opaque;
1131 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
1132
1133 vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
1134 }
1135
1136 static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
1137 {
1138 VirtIOGPU *g = VIRTIO_GPU(vdev);
1139 VirtQueueElement *elem;
1140 size_t s;
1141 struct virtio_gpu_update_cursor cursor_info;
1142
1143 if (!virtio_queue_ready(vq)) {
1144 return;
1145 }
1146 for (;;) {
1147 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1148 if (!elem) {
1149 break;
1150 }
1151
1152 s = iov_to_buf(elem->out_sg, elem->out_num, 0,
1153 &cursor_info, sizeof(cursor_info));
1154 if (s != sizeof(cursor_info)) {
1155 qemu_log_mask(LOG_GUEST_ERROR,
1156 "%s: cursor size incorrect %zu vs %zu\n",
1157 __func__, s, sizeof(cursor_info));
1158 } else {
1159 virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
1160 update_cursor(g, &cursor_info);
1161 }
1162 virtqueue_push(vq, elem, 0);
1163 virtio_notify(vdev, vq);
1164 g_free(elem);
1165 }
1166 }
1167
1168 static void virtio_gpu_cursor_bh(void *opaque)
1169 {
1170 VirtIOGPU *g = opaque;
1171 virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
1172 }
1173
1174 static const VMStateDescription vmstate_virtio_gpu_scanout = {
1175 .name = "virtio-gpu-one-scanout",
1176 .version_id = 1,
1177 .fields = (VMStateField[]) {
1178 VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
1179 VMSTATE_UINT32(width, struct virtio_gpu_scanout),
1180 VMSTATE_UINT32(height, struct virtio_gpu_scanout),
1181 VMSTATE_INT32(x, struct virtio_gpu_scanout),
1182 VMSTATE_INT32(y, struct virtio_gpu_scanout),
1183 VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
1184 VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
1185 VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
1186 VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
1187 VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
1188 VMSTATE_END_OF_LIST()
1189 },
1190 };
1191
1192 static const VMStateDescription vmstate_virtio_gpu_scanouts = {
1193 .name = "virtio-gpu-scanouts",
1194 .version_id = 1,
1195 .fields = (VMStateField[]) {
1196 VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
1197 VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
1198 struct VirtIOGPU, NULL),
1199 VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
1200 parent_obj.conf.max_outputs, 1,
1201 vmstate_virtio_gpu_scanout,
1202 struct virtio_gpu_scanout),
1203 VMSTATE_END_OF_LIST()
1204 },
1205 };
1206
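/*
 * Custom migration stream: for every resource the id, geometry, format,
 * iov_cnt and the (addr, len) pairs of its backing entries are written,
 * followed by the raw pixel data; a resource_id of 0 terminates the list.
 * The scanout configuration follows as a regular vmstate section and is
 * re-applied in virtio_gpu_load().
 */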
1207 static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
1208 const VMStateField *field, JSONWriter *vmdesc)
1209 {
1210 VirtIOGPU *g = opaque;
1211 struct virtio_gpu_simple_resource *res;
1212 int i;
1213
1214 /* in 2d mode we should never find unprocessed commands here */
1215 assert(QTAILQ_EMPTY(&g->cmdq));
1216
1217 QTAILQ_FOREACH(res, &g->reslist, next) {
1218 qemu_put_be32(f, res->resource_id);
1219 qemu_put_be32(f, res->width);
1220 qemu_put_be32(f, res->height);
1221 qemu_put_be32(f, res->format);
1222 qemu_put_be32(f, res->iov_cnt);
1223 for (i = 0; i < res->iov_cnt; i++) {
1224 qemu_put_be64(f, res->addrs[i]);
1225 qemu_put_be32(f, res->iov[i].iov_len);
1226 }
1227 qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
1228 pixman_image_get_stride(res->image) * res->height);
1229 }
1230 qemu_put_be32(f, 0); /* end of list */
1231
1232 return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
1233 }
1234
1235 static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
1236 const VMStateField *field)
1237 {
1238 VirtIOGPU *g = opaque;
1239 struct virtio_gpu_simple_resource *res;
1240 struct virtio_gpu_scanout *scanout;
1241 uint32_t resource_id, pformat;
1242 void *bits = NULL;
1243 int i;
1244
1245 g->hostmem = 0;
1246
1247 resource_id = qemu_get_be32(f);
1248 while (resource_id != 0) {
1249 res = virtio_gpu_find_resource(g, resource_id);
1250 if (res) {
1251 return -EINVAL;
1252 }
1253
1254 res = g_new0(struct virtio_gpu_simple_resource, 1);
1255 res->resource_id = resource_id;
1256 res->width = qemu_get_be32(f);
1257 res->height = qemu_get_be32(f);
1258 res->format = qemu_get_be32(f);
1259 res->iov_cnt = qemu_get_be32(f);
1260
1261 /* allocate */
1262 pformat = virtio_gpu_get_pixman_format(res->format);
1263 if (!pformat) {
1264 g_free(res);
1265 return -EINVAL;
1266 }
1267
1268 res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
1269 #ifdef WIN32
1270 bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
1271 if (!bits) {
1272 g_free(res);
1273 return -EINVAL;
1274 }
1275 #endif
1276 res->image = pixman_image_create_bits(
1277 pformat,
1278 res->width, res->height,
1279 bits, res->height ? res->hostmem / res->height : 0);
1280 if (!res->image) {
1281 g_free(res);
1282 return -EINVAL;
1283 }
1284
1285
1286 res->addrs = g_new(uint64_t, res->iov_cnt);
1287 res->iov = g_new(struct iovec, res->iov_cnt);
1288
1289 /* read data */
1290 for (i = 0; i < res->iov_cnt; i++) {
1291 res->addrs[i] = qemu_get_be64(f);
1292 res->iov[i].iov_len = qemu_get_be32(f);
1293 }
1294 qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
1295 pixman_image_get_stride(res->image) * res->height);
1296
1297 /* restore mapping */
1298 for (i = 0; i < res->iov_cnt; i++) {
1299 hwaddr len = res->iov[i].iov_len;
1300 res->iov[i].iov_base =
1301 dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
1302 DMA_DIRECTION_TO_DEVICE,
1303 MEMTXATTRS_UNSPECIFIED);
1304
1305 if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
1306 /* Clean up the half-a-mapping we just created... */
1307 if (res->iov[i].iov_base) {
1308 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
1309 res->iov[i].iov_base,
1310 len,
1311 DMA_DIRECTION_TO_DEVICE,
1312 0);
1313 }
1314 /* ...and the mappings for previous loop iterations */
1315 res->iov_cnt = i;
1316 virtio_gpu_cleanup_mapping(g, res);
1317 pixman_image_unref(res->image);
1318 g_free(res);
1319 return -EINVAL;
1320 }
1321 }
1322
1323 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
1324 g->hostmem += res->hostmem;
1325
1326 resource_id = qemu_get_be32(f);
1327 }
1328
1329 /* load & apply scanout state */
1330 vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
1331 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
1332 /* FIXME: should take scanout.r.{x,y} into account */
1333 scanout = &g->parent_obj.scanout[i];
1334 if (!scanout->resource_id) {
1335 continue;
1336 }
1337 res = virtio_gpu_find_resource(g, scanout->resource_id);
1338 if (!res) {
1339 return -EINVAL;
1340 }
1341 scanout->ds = qemu_create_displaysurface_pixman(res->image);
1342 if (!scanout->ds) {
1343 return -EINVAL;
1344 }
1345 #ifdef WIN32
1346 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
1347 #endif
1348
1349 dpy_gfx_replace_surface(scanout->con, scanout->ds);
1350 dpy_gfx_update_full(scanout->con);
1351 if (scanout->cursor.resource_id) {
1352 update_cursor(g, &scanout->cursor);
1353 }
1354 res->scanout_bitmask |= (1 << i);
1355 }
1356
1357 return 0;
1358 }
1359
1360 void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
1361 {
1362 VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
1363 VirtIOGPU *g = VIRTIO_GPU(qdev);
1364
1365 if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
1366 if (!virtio_gpu_have_udmabuf()) {
1367 error_setg(errp, "cannot enable blob resources without udmabuf");
1368 return;
1369 }
1370
1371 if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
1372 error_setg(errp, "blobs and virgl are not compatible (yet)");
1373 return;
1374 }
1375 }
1376
1377 if (!virtio_gpu_base_device_realize(qdev,
1378 virtio_gpu_handle_ctrl_cb,
1379 virtio_gpu_handle_cursor_cb,
1380 errp)) {
1381 return;
1382 }
1383
1384 g->ctrl_vq = virtio_get_queue(vdev, 0);
1385 g->cursor_vq = virtio_get_queue(vdev, 1);
1386 g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
1387 &qdev->mem_reentrancy_guard);
1388 g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
1389 &qdev->mem_reentrancy_guard);
1390 QTAILQ_INIT(&g->reslist);
1391 QTAILQ_INIT(&g->cmdq);
1392 QTAILQ_INIT(&g->fenceq);
1393 }
1394
1395 void virtio_gpu_reset(VirtIODevice *vdev)
1396 {
1397 VirtIOGPU *g = VIRTIO_GPU(vdev);
1398 struct virtio_gpu_simple_resource *res, *tmp;
1399 struct virtio_gpu_ctrl_command *cmd;
1400 int i = 0;
1401
1402 QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
1403 virtio_gpu_resource_destroy(g, res);
1404 }
1405
1406 while (!QTAILQ_EMPTY(&g->cmdq)) {
1407 cmd = QTAILQ_FIRST(&g->cmdq);
1408 QTAILQ_REMOVE(&g->cmdq, cmd, next);
1409 g_free(cmd);
1410 }
1411
1412 while (!QTAILQ_EMPTY(&g->fenceq)) {
1413 cmd = QTAILQ_FIRST(&g->fenceq);
1414 QTAILQ_REMOVE(&g->fenceq, cmd, next);
1415 g->inflight--;
1416 g_free(cmd);
1417 }
1418
1419 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
1420 dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
1421 }
1422
1423 virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
1424 }
1425
1426 static void
1427 virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
1428 {
1429 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1430
1431 memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
1432 }
1433
1434 static void
1435 virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
1436 {
1437 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1438 const struct virtio_gpu_config *vgconfig =
1439 (const struct virtio_gpu_config *)config;
1440
1441 if (vgconfig->events_clear) {
1442 g->virtio_config.events_read &= ~vgconfig->events_clear;
1443 }
1444 }
1445
1446 /*
1447 * For historical reasons virtio_gpu does not adhere to the virtio migration
1448 * scheme described in doc/virtio-migration.txt, in the sense that no
1449 * save/load callbacks are provided to the core. Instead the device data
1450 * is saved/loaded after the core data.
1451 *
1452 * Because of this we need a special vmsd.
1453 */
1454 static const VMStateDescription vmstate_virtio_gpu = {
1455 .name = "virtio-gpu",
1456 .minimum_version_id = VIRTIO_GPU_VM_VERSION,
1457 .version_id = VIRTIO_GPU_VM_VERSION,
1458 .fields = (VMStateField[]) {
1459 VMSTATE_VIRTIO_DEVICE /* core */,
1460 {
1461 .name = "virtio-gpu",
1462 .info = &(const VMStateInfo) {
1463 .name = "virtio-gpu",
1464 .get = virtio_gpu_load,
1465 .put = virtio_gpu_save,
1466 },
1467 .flags = VMS_SINGLE,
1468 } /* device */,
1469 VMSTATE_END_OF_LIST()
1470 },
1471 };
1472
1473 static Property virtio_gpu_properties[] = {
1474 VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
1475 DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
1476 256 * MiB),
1477 DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
1478 VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
1479 DEFINE_PROP_END_OF_LIST(),
1480 };
1481
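/*
 * These properties are exposed on the QEMU command line; for example
 * (assuming the usual PCI front end name "virtio-gpu-pci"):
 *
 *   -device virtio-gpu-pci,max_hostmem=512M,blob=on
 *
 * raises the host memory budget for 2D resources and enables blob
 * (udmabuf-backed) resources.
 */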
1482 static void virtio_gpu_class_init(ObjectClass *klass, void *data)
1483 {
1484 DeviceClass *dc = DEVICE_CLASS(klass);
1485 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
1486 VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
1487 VirtIOGPUBaseClass *vgbc = &vgc->parent;
1488
1489 vgc->handle_ctrl = virtio_gpu_handle_ctrl;
1490 vgc->process_cmd = virtio_gpu_simple_process_cmd;
1491 vgc->update_cursor_data = virtio_gpu_update_cursor_data;
1492 vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;
1493
1494 vdc->realize = virtio_gpu_device_realize;
1495 vdc->reset = virtio_gpu_reset;
1496 vdc->get_config = virtio_gpu_get_config;
1497 vdc->set_config = virtio_gpu_set_config;
1498
1499 dc->vmsd = &vmstate_virtio_gpu;
1500 device_class_set_props(dc, virtio_gpu_properties);
1501 }
1502
1503 static const TypeInfo virtio_gpu_info = {
1504 .name = TYPE_VIRTIO_GPU,
1505 .parent = TYPE_VIRTIO_GPU_BASE,
1506 .instance_size = sizeof(VirtIOGPU),
1507 .class_size = sizeof(VirtIOGPUClass),
1508 .class_init = virtio_gpu_class_init,
1509 };
1510 module_obj(TYPE_VIRTIO_GPU);
1511 module_kconfig(VIRTIO_GPU);
1512
1513 static void virtio_register_types(void)
1514 {
1515 type_register_static(&virtio_gpu_info);
1516 }
1517
1518 type_init(virtio_register_types)