mirror_qemu.git: hw/display/virtio-gpu.c
1 /*
2 * Virtio GPU Device
3 *
4 * Copyright Red Hat, Inc. 2013-2014
5 *
6 * Authors:
7 * Dave Airlie <airlied@redhat.com>
8 * Gerd Hoffmann <kraxel@redhat.com>
9 *
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
12 */
13
14 #include "qemu/osdep.h"
15 #include "qemu/units.h"
16 #include "qemu/iov.h"
17 #include "sysemu/cpus.h"
18 #include "ui/console.h"
19 #include "ui/rect.h"
20 #include "trace.h"
21 #include "sysemu/dma.h"
22 #include "sysemu/sysemu.h"
23 #include "hw/virtio/virtio.h"
24 #include "migration/qemu-file-types.h"
25 #include "hw/virtio/virtio-gpu.h"
26 #include "hw/virtio/virtio-gpu-bswap.h"
27 #include "hw/virtio/virtio-gpu-pixman.h"
28 #include "hw/virtio/virtio-bus.h"
29 #include "hw/qdev-properties.h"
30 #include "qemu/log.h"
31 #include "qemu/module.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34
35 #define VIRTIO_GPU_VM_VERSION 1
36
37 static struct virtio_gpu_simple_resource *
38 virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
39 bool require_backing,
40 const char *caller, uint32_t *error);
41
42 static void virtio_gpu_reset_bh(void *opaque);
43
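/*
 * Copy cursor pixel data out of the backing resource into the scanout's
 * current_cursor.  Blob resources are accepted if they hold at least
 * width * height * 4 bytes; pixman-backed resources must match the cursor
 * dimensions exactly.  On a mismatch the previous cursor contents are
 * simply left in place.
 */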
44 void virtio_gpu_update_cursor_data(VirtIOGPU *g,
45 struct virtio_gpu_scanout *s,
46 uint32_t resource_id)
47 {
48 struct virtio_gpu_simple_resource *res;
49 uint32_t pixels;
50 void *data;
51
52 res = virtio_gpu_find_check_resource(g, resource_id, false,
53 __func__, NULL);
54 if (!res) {
55 return;
56 }
57
58 if (res->blob_size) {
59 if (res->blob_size < (s->current_cursor->width *
60 s->current_cursor->height * 4)) {
61 return;
62 }
63 data = res->blob;
64 } else {
65 if (pixman_image_get_width(res->image) != s->current_cursor->width ||
66 pixman_image_get_height(res->image) != s->current_cursor->height) {
67 return;
68 }
69 data = pixman_image_get_data(res->image);
70 }
71
72 pixels = s->current_cursor->width * s->current_cursor->height;
73 memcpy(s->current_cursor->data, data,
74 pixels * sizeof(uint32_t));
75 }
76
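/*
 * VIRTIO_GPU_CMD_UPDATE_CURSOR (re)defines the cursor image and hotspot
 * and then repositions it; VIRTIO_GPU_CMD_MOVE_CURSOR only updates the
 * position.  The cursor is allocated at the 64x64 size used for
 * virtio-gpu cursor resources.
 */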
77 static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
78 {
79 struct virtio_gpu_scanout *s;
80 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
81 bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
82
83 if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
84 return;
85 }
86 s = &g->parent_obj.scanout[cursor->pos.scanout_id];
87
88 trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
89 cursor->pos.x,
90 cursor->pos.y,
91 move ? "move" : "update",
92 cursor->resource_id);
93
94 if (!move) {
95 if (!s->current_cursor) {
96 s->current_cursor = cursor_alloc(64, 64);
97 }
98
99 s->current_cursor->hot_x = cursor->hot_x;
100 s->current_cursor->hot_y = cursor->hot_y;
101
102 if (cursor->resource_id > 0) {
103 vgc->update_cursor_data(g, s, cursor->resource_id);
104 }
105 dpy_cursor_define(s->con, s->current_cursor);
106
107 s->cursor = *cursor;
108 } else {
109 s->cursor.pos.x = cursor->pos.x;
110 s->cursor.pos.y = cursor->pos.y;
111 }
112 dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
113 cursor->resource_id ? 1 : 0);
114 }
115
116 struct virtio_gpu_simple_resource *
117 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
118 {
119 struct virtio_gpu_simple_resource *res;
120
121 QTAILQ_FOREACH(res, &g->reslist, next) {
122 if (res->resource_id == resource_id) {
123 return res;
124 }
125 }
126 return NULL;
127 }
128
129 static struct virtio_gpu_simple_resource *
130 virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
131 bool require_backing,
132 const char *caller, uint32_t *error)
133 {
134 struct virtio_gpu_simple_resource *res;
135
136 res = virtio_gpu_find_resource(g, resource_id);
137 if (!res) {
138 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
139 caller, resource_id);
140 if (error) {
141 *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
142 }
143 return NULL;
144 }
145
146 if (require_backing) {
147 if (!res->iov || (!res->image && !res->blob)) {
148 qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
149 caller, resource_id);
150 if (error) {
151 *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
152 }
153 return NULL;
154 }
155 }
156
157 return res;
158 }
159
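/*
 * Send a response for a control command: if the request carried
 * VIRTIO_GPU_FLAG_FENCE, the fence id and context id are echoed back with
 * the flag set, then the byte-swapped response is copied into the
 * request's in-sg and the element is pushed back onto the virtqueue.
 */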
160 void virtio_gpu_ctrl_response(VirtIOGPU *g,
161 struct virtio_gpu_ctrl_command *cmd,
162 struct virtio_gpu_ctrl_hdr *resp,
163 size_t resp_len)
164 {
165 size_t s;
166
167 if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
168 resp->flags |= VIRTIO_GPU_FLAG_FENCE;
169 resp->fence_id = cmd->cmd_hdr.fence_id;
170 resp->ctx_id = cmd->cmd_hdr.ctx_id;
171 }
172 virtio_gpu_ctrl_hdr_bswap(resp);
173 s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
174 if (s != resp_len) {
175 qemu_log_mask(LOG_GUEST_ERROR,
176 "%s: response size incorrect %zu vs %zu\n",
177 __func__, s, resp_len);
178 }
179 virtqueue_push(cmd->vq, &cmd->elem, s);
180 virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
181 cmd->finished = true;
182 }
183
184 void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
185 struct virtio_gpu_ctrl_command *cmd,
186 enum virtio_gpu_ctrl_type type)
187 {
188 struct virtio_gpu_ctrl_hdr resp;
189
190 memset(&resp, 0, sizeof(resp));
191 resp.type = type;
192 virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
193 }
194
195 void virtio_gpu_get_display_info(VirtIOGPU *g,
196 struct virtio_gpu_ctrl_command *cmd)
197 {
198 struct virtio_gpu_resp_display_info display_info;
199
200 trace_virtio_gpu_cmd_get_display_info();
201 memset(&display_info, 0, sizeof(display_info));
202 display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
203 virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
204 virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
205 sizeof(display_info));
206 }
207
208 void virtio_gpu_get_edid(VirtIOGPU *g,
209 struct virtio_gpu_ctrl_command *cmd)
210 {
211 struct virtio_gpu_resp_edid edid;
212 struct virtio_gpu_cmd_get_edid get_edid;
213 VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
214
215 VIRTIO_GPU_FILL_CMD(get_edid);
216 virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
217
218 if (get_edid.scanout >= b->conf.max_outputs) {
219 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
220 return;
221 }
222
223 trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
224 memset(&edid, 0, sizeof(edid));
225 edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
226 virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
227 virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
228 }
229
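/*
 * Host memory accounting mirrors pixman's own stride computation: the
 * stride is the row width in bits rounded up to a whole number of 32-bit
 * words.  For example, a 1280x800 resource in a 32 bpp format gives
 * stride = ((1280 * 32 + 0x1f) >> 5) * 4 = 5120 bytes and therefore
 * 5120 * 800 = 4096000 bytes of host memory.
 */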
230 static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
231 uint32_t width, uint32_t height)
232 {
233 /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow
234  * check; pixman_image_create_bits() will fail if the size overflows.
235  */
236
237 int bpp = PIXMAN_FORMAT_BPP(pformat);
238 int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
239 return height * stride;
240 }
241
242 #ifdef WIN32
243 static void
244 win32_pixman_image_destroy(pixman_image_t *image, void *data)
245 {
246 HANDLE handle = data;
247
248 qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
249 }
250 #endif
251
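/*
 * RESOURCE_CREATE_2D: allocate a host-side pixman image for the guest
 * resource.  The allocation is only attempted while the accumulated
 * per-resource hostmem stays below the "max_hostmem" property, so a guest
 * cannot grow host memory without bound; a failed or skipped allocation is
 * reported as VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY.
 */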
252 static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
253 struct virtio_gpu_ctrl_command *cmd)
254 {
255 pixman_format_code_t pformat;
256 struct virtio_gpu_simple_resource *res;
257 struct virtio_gpu_resource_create_2d c2d;
258
259 VIRTIO_GPU_FILL_CMD(c2d);
260 virtio_gpu_bswap_32(&c2d, sizeof(c2d));
261 trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
262 c2d.width, c2d.height);
263
264 if (c2d.resource_id == 0) {
265 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
266 __func__);
267 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
268 return;
269 }
270
271 res = virtio_gpu_find_resource(g, c2d.resource_id);
272 if (res) {
273 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
274 __func__, c2d.resource_id);
275 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
276 return;
277 }
278
279 res = g_new0(struct virtio_gpu_simple_resource, 1);
280
281 res->width = c2d.width;
282 res->height = c2d.height;
283 res->format = c2d.format;
284 res->resource_id = c2d.resource_id;
285
286 pformat = virtio_gpu_get_pixman_format(c2d.format);
287 if (!pformat) {
288 qemu_log_mask(LOG_GUEST_ERROR,
289 "%s: host couldn't handle guest format %d\n",
290 __func__, c2d.format);
291 g_free(res);
292 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
293 return;
294 }
295
296 res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
297 if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
298 void *bits = NULL;
299 #ifdef WIN32
300 bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
301 if (!bits) {
302 goto end;
303 }
304 #endif
305 res->image = pixman_image_create_bits(
306 pformat,
307 c2d.width,
308 c2d.height,
309 bits, c2d.height ? res->hostmem / c2d.height : 0);
310 #ifdef WIN32
311 if (res->image) {
312 pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
313 }
314 #endif
315 }
316
317 #ifdef WIN32
318 end:
319 #endif
320 if (!res->image) {
321 qemu_log_mask(LOG_GUEST_ERROR,
322 "%s: resource creation failed %d %d %d\n",
323 __func__, c2d.resource_id, c2d.width, c2d.height);
324 g_free(res);
325 cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
326 return;
327 }
328
329 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
330 g->hostmem += res->hostmem;
331 }
332
333 static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
334 struct virtio_gpu_ctrl_command *cmd)
335 {
336 struct virtio_gpu_simple_resource *res;
337 struct virtio_gpu_resource_create_blob cblob;
338 int ret;
339
340 VIRTIO_GPU_FILL_CMD(cblob);
341 virtio_gpu_create_blob_bswap(&cblob);
342 trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
343
344 if (cblob.resource_id == 0) {
345 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
346 __func__);
347 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
348 return;
349 }
350
351 if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
352 cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
353 qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
354 __func__);
355 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
356 return;
357 }
358
359 if (virtio_gpu_find_resource(g, cblob.resource_id)) {
360 qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
361 __func__, cblob.resource_id);
362 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
363 return;
364 }
365
366 res = g_new0(struct virtio_gpu_simple_resource, 1);
367 res->resource_id = cblob.resource_id;
368 res->blob_size = cblob.size;
369
370 ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
371 cmd, &res->addrs, &res->iov,
372 &res->iov_cnt);
373 if (ret != 0) {
374 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
375 g_free(res);
376 return;
377 }
378
379 virtio_gpu_init_udmabuf(res);
380 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
381 }
382
383 static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
384 {
385 struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
386 struct virtio_gpu_simple_resource *res;
387
388 if (scanout->resource_id == 0) {
389 return;
390 }
391
392 res = virtio_gpu_find_resource(g, scanout->resource_id);
393 if (res) {
394 res->scanout_bitmask &= ~(1 << scanout_id);
395 }
396
397 dpy_gfx_replace_surface(scanout->con, NULL);
398 scanout->resource_id = 0;
399 scanout->ds = NULL;
400 scanout->width = 0;
401 scanout->height = 0;
402 }
403
404 static void virtio_gpu_resource_destroy(VirtIOGPU *g,
405 struct virtio_gpu_simple_resource *res)
406 {
407 int i;
408
409 if (res->scanout_bitmask) {
410 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
411 if (res->scanout_bitmask & (1 << i)) {
412 virtio_gpu_disable_scanout(g, i);
413 }
414 }
415 }
416
417 qemu_pixman_image_unref(res->image);
418 virtio_gpu_cleanup_mapping(g, res);
419 QTAILQ_REMOVE(&g->reslist, res, next);
420 g->hostmem -= res->hostmem;
421 g_free(res);
422 }
423
424 static void virtio_gpu_resource_unref(VirtIOGPU *g,
425 struct virtio_gpu_ctrl_command *cmd)
426 {
427 struct virtio_gpu_simple_resource *res;
428 struct virtio_gpu_resource_unref unref;
429
430 VIRTIO_GPU_FILL_CMD(unref);
431 virtio_gpu_bswap_32(&unref, sizeof(unref));
432 trace_virtio_gpu_cmd_res_unref(unref.resource_id);
433
434 res = virtio_gpu_find_resource(g, unref.resource_id);
435 if (!res) {
436 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
437 __func__, unref.resource_id);
438 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
439 return;
440 }
441 virtio_gpu_resource_destroy(g, res);
442 }
443
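/*
 * TRANSFER_TO_HOST_2D copies guest backing pages into the host pixman
 * image.  A transfer spanning full rows (x == 0 and width equal to the
 * image width) is done with a single iov_to_buf() of height * stride
 * bytes; partial rows fall back to one copy per scanline.  Blob resources
 * are skipped here, since their backing is guest memory already.
 */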
444 static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
445 struct virtio_gpu_ctrl_command *cmd)
446 {
447 struct virtio_gpu_simple_resource *res;
448 int h, bpp;
449 uint32_t src_offset, dst_offset, stride;
450 pixman_format_code_t format;
451 struct virtio_gpu_transfer_to_host_2d t2d;
452 void *img_data;
453
454 VIRTIO_GPU_FILL_CMD(t2d);
455 virtio_gpu_t2d_bswap(&t2d);
456 trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
457
458 res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
459 __func__, &cmd->error);
460 if (!res || res->blob) {
461 return;
462 }
463
464 if (t2d.r.x > res->width ||
465 t2d.r.y > res->height ||
466 t2d.r.width > res->width ||
467 t2d.r.height > res->height ||
468 t2d.r.x + t2d.r.width > res->width ||
469 t2d.r.y + t2d.r.height > res->height) {
470 qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
471 " bounds for resource %d: %d %d %d %d vs %d %d\n",
472 __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
473 t2d.r.width, t2d.r.height, res->width, res->height);
474 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
475 return;
476 }
477
478 format = pixman_image_get_format(res->image);
479 bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
480 stride = pixman_image_get_stride(res->image);
481 img_data = pixman_image_get_data(res->image);
482
483 if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
484 for (h = 0; h < t2d.r.height; h++) {
485 src_offset = t2d.offset + stride * h;
486 dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
487
488 iov_to_buf(res->iov, res->iov_cnt, src_offset,
489 (uint8_t *)img_data + dst_offset,
490 t2d.r.width * bpp);
491 }
492 } else {
493 src_offset = t2d.offset;
494 dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
495 iov_to_buf(res->iov, res->iov_cnt, src_offset,
496 (uint8_t *)img_data + dst_offset,
497 stride * t2d.r.height);
498 }
499 }
500
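/*
 * RESOURCE_FLUSH: for a blob resource currently scanned out on a
 * GL-capable console the whole scanout is refreshed via dpy_gl_update();
 * otherwise the flush rectangle is intersected with every scanout that
 * references the resource and only the overlapping area, translated into
 * that scanout's coordinates, is pushed with dpy_gfx_update().
 */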
501 static void virtio_gpu_resource_flush(VirtIOGPU *g,
502 struct virtio_gpu_ctrl_command *cmd)
503 {
504 struct virtio_gpu_simple_resource *res;
505 struct virtio_gpu_resource_flush rf;
506 struct virtio_gpu_scanout *scanout;
507 QemuRect flush_rect;
508 bool within_bounds = false;
509 bool update_submitted = false;
510 int i;
511
512 VIRTIO_GPU_FILL_CMD(rf);
513 virtio_gpu_bswap_32(&rf, sizeof(rf));
514 trace_virtio_gpu_cmd_res_flush(rf.resource_id,
515 rf.r.width, rf.r.height, rf.r.x, rf.r.y);
516
517 res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
518 __func__, &cmd->error);
519 if (!res) {
520 return;
521 }
522
523 if (res->blob) {
524 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
525 scanout = &g->parent_obj.scanout[i];
526 if (scanout->resource_id == res->resource_id &&
527 rf.r.x < scanout->x + scanout->width &&
528 rf.r.x + rf.r.width >= scanout->x &&
529 rf.r.y < scanout->y + scanout->height &&
530 rf.r.y + rf.r.height >= scanout->y) {
531 within_bounds = true;
532
533 if (console_has_gl(scanout->con)) {
534 dpy_gl_update(scanout->con, 0, 0, scanout->width,
535 scanout->height);
536 update_submitted = true;
537 }
538 }
539 }
540
541 if (update_submitted) {
542 return;
543 }
544 if (!within_bounds) {
545 qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
546 " bounds for flush %d: %d %d %d %d\n",
547 __func__, rf.resource_id, rf.r.x, rf.r.y,
548 rf.r.width, rf.r.height);
549 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
550 return;
551 }
552 }
553
554 if (!res->blob &&
555 (rf.r.x > res->width ||
556 rf.r.y > res->height ||
557 rf.r.width > res->width ||
558 rf.r.height > res->height ||
559 rf.r.x + rf.r.width > res->width ||
560 rf.r.y + rf.r.height > res->height)) {
561 qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
562 " bounds for resource %d: %d %d %d %d vs %d %d\n",
563 __func__, rf.resource_id, rf.r.x, rf.r.y,
564 rf.r.width, rf.r.height, res->width, res->height);
565 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
566 return;
567 }
568
569 qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
570 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
571 QemuRect rect;
572
573 if (!(res->scanout_bitmask & (1 << i))) {
574 continue;
575 }
576 scanout = &g->parent_obj.scanout[i];
577
578 qemu_rect_init(&rect, scanout->x, scanout->y,
579 scanout->width, scanout->height);
580
581 /* work out the area we need to update for each console */
582 if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
583 qemu_rect_translate(&rect, -scanout->x, -scanout->y);
584 dpy_gfx_update(g->parent_obj.scanout[i].con,
585 rect.x, rect.y, rect.width, rect.height);
586 }
587 }
588 }
589
590 static void virtio_unref_resource(pixman_image_t *image, void *data)
591 {
592 pixman_image_unref(data);
593 }
594
595 static void virtio_gpu_update_scanout(VirtIOGPU *g,
596 uint32_t scanout_id,
597 struct virtio_gpu_simple_resource *res,
598 struct virtio_gpu_rect *r)
599 {
600 struct virtio_gpu_simple_resource *ores;
601 struct virtio_gpu_scanout *scanout;
602
603 scanout = &g->parent_obj.scanout[scanout_id];
604 ores = virtio_gpu_find_resource(g, scanout->resource_id);
605 if (ores) {
606 ores->scanout_bitmask &= ~(1 << scanout_id);
607 }
608
609 res->scanout_bitmask |= (1 << scanout_id);
610 scanout->resource_id = res->resource_id;
611 scanout->x = r->x;
612 scanout->y = r->y;
613 scanout->width = r->width;
614 scanout->height = r->height;
615 }
616
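/*
 * Common scanout setup used by SET_SCANOUT and SET_SCANOUT_BLOB.  When a
 * new DisplaySurface has to be created it wraps the resource's pixels
 * directly (no copy); the pixman image is ref'ed and released again from
 * the surface's destroy function, so the pixels stay valid for as long as
 * the surface does.  GL-capable consoles display guest blobs as dmabufs
 * instead of creating a surface.
 */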
617 static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
618 uint32_t scanout_id,
619 struct virtio_gpu_framebuffer *fb,
620 struct virtio_gpu_simple_resource *res,
621 struct virtio_gpu_rect *r,
622 uint32_t *error)
623 {
624 struct virtio_gpu_scanout *scanout;
625 uint8_t *data;
626
627 scanout = &g->parent_obj.scanout[scanout_id];
628
629 if (r->x > fb->width ||
630 r->y > fb->height ||
631 r->width < 16 ||
632 r->height < 16 ||
633 r->width > fb->width ||
634 r->height > fb->height ||
635 r->x + r->width > fb->width ||
636 r->y + r->height > fb->height) {
637 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
638 " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
639 __func__, scanout_id, res->resource_id,
640 r->x, r->y, r->width, r->height,
641 fb->width, fb->height);
642 *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
643 return;
644 }
645
646 g->parent_obj.enable = 1;
647
648 if (res->blob) {
649 if (console_has_gl(scanout->con)) {
650 if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
651 virtio_gpu_update_scanout(g, scanout_id, res, r);
652 } else {
653 *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
654 }
655 return;
656 }
657
658 data = res->blob;
659 } else {
660 data = (uint8_t *)pixman_image_get_data(res->image);
661 }
662
663 /* create a surface for this scanout */
664 if ((res->blob && !console_has_gl(scanout->con)) ||
665 !scanout->ds ||
666 surface_data(scanout->ds) != data + fb->offset ||
667 scanout->width != r->width ||
668 scanout->height != r->height) {
669 pixman_image_t *rect;
670 void *ptr = data + fb->offset;
671 rect = pixman_image_create_bits(fb->format, r->width, r->height,
672 ptr, fb->stride);
673
674 if (res->image) {
675 pixman_image_ref(res->image);
676 pixman_image_set_destroy_function(rect, virtio_unref_resource,
677 res->image);
678 }
679
680 /* realloc the surface ptr */
681 scanout->ds = qemu_create_displaysurface_pixman(rect);
682 if (!scanout->ds) {
683 *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
684 return;
685 }
686 #ifdef WIN32
687 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
688 #endif
689
690 pixman_image_unref(rect);
691 dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
692 scanout->ds);
693 }
694
695 virtio_gpu_update_scanout(g, scanout_id, res, r);
696 }
697
698 static void virtio_gpu_set_scanout(VirtIOGPU *g,
699 struct virtio_gpu_ctrl_command *cmd)
700 {
701 struct virtio_gpu_simple_resource *res;
702 struct virtio_gpu_framebuffer fb = { 0 };
703 struct virtio_gpu_set_scanout ss;
704
705 VIRTIO_GPU_FILL_CMD(ss);
706 virtio_gpu_bswap_32(&ss, sizeof(ss));
707 trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
708 ss.r.width, ss.r.height, ss.r.x, ss.r.y);
709
710 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
711 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
712 __func__, ss.scanout_id);
713 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
714 return;
715 }
716
717 if (ss.resource_id == 0) {
718 virtio_gpu_disable_scanout(g, ss.scanout_id);
719 return;
720 }
721
722 res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
723 __func__, &cmd->error);
724 if (!res) {
725 return;
726 }
727
728 fb.format = pixman_image_get_format(res->image);
729 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
730 fb.width = pixman_image_get_width(res->image);
731 fb.height = pixman_image_get_height(res->image);
732 fb.stride = pixman_image_get_stride(res->image);
733 fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
734
735 virtio_gpu_do_set_scanout(g, ss.scanout_id,
736 &fb, res, &ss.r, &cmd->error);
737 }
738
739 static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
740 struct virtio_gpu_ctrl_command *cmd)
741 {
742 struct virtio_gpu_simple_resource *res;
743 struct virtio_gpu_framebuffer fb = { 0 };
744 struct virtio_gpu_set_scanout_blob ss;
745 uint64_t fbend;
746
747 VIRTIO_GPU_FILL_CMD(ss);
748 virtio_gpu_scanout_blob_bswap(&ss);
749 trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
750 ss.r.width, ss.r.height, ss.r.x,
751 ss.r.y);
752
753 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
754 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
755 __func__, ss.scanout_id);
756 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
757 return;
758 }
759
760 if (ss.resource_id == 0) {
761 virtio_gpu_disable_scanout(g, ss.scanout_id);
762 return;
763 }
764
765 res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
766 __func__, &cmd->error);
767 if (!res) {
768 return;
769 }
770
771 fb.format = virtio_gpu_get_pixman_format(ss.format);
772 if (!fb.format) {
773 qemu_log_mask(LOG_GUEST_ERROR,
774 "%s: host couldn't handle guest format %d\n",
775 __func__, ss.format);
776 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
777 return;
778 }
779
780 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
781 fb.width = ss.width;
782 fb.height = ss.height;
783 fb.stride = ss.strides[0];
784 fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
785
786 fbend = fb.offset;
787 fbend += fb.stride * (ss.r.height - 1);
788 fbend += fb.bytes_pp * ss.r.width;
789 if (fbend > res->blob_size) {
790 qemu_log_mask(LOG_GUEST_ERROR,
791 "%s: fb end out of range\n",
792 __func__);
793 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
794 return;
795 }
796
797 virtio_gpu_do_set_scanout(g, ss.scanout_id,
798 &fb, res, &ss.r, &cmd->error);
799 }
800
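/*
 * Translate the guest-provided array of virtio_gpu_mem_entry structures
 * (at most 16384 of them) into an iovec of host mappings.  A single entry
 * may be split across several iovec slots because dma_memory_map() can
 * map less than the requested length; the inner do/while loop keeps
 * mapping until the whole entry is covered, growing *iov and *addr in
 * chunks of 16.
 */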
801 int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
802 uint32_t nr_entries, uint32_t offset,
803 struct virtio_gpu_ctrl_command *cmd,
804 uint64_t **addr, struct iovec **iov,
805 uint32_t *niov)
806 {
807 struct virtio_gpu_mem_entry *ents;
808 size_t esize, s;
809 int e, v;
810
811 if (nr_entries > 16384) {
812 qemu_log_mask(LOG_GUEST_ERROR,
813 "%s: nr_entries is too big (%d > 16384)\n",
814 __func__, nr_entries);
815 return -1;
816 }
817
818 esize = sizeof(*ents) * nr_entries;
819 ents = g_malloc(esize);
820 s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
821 offset, ents, esize);
822 if (s != esize) {
823 qemu_log_mask(LOG_GUEST_ERROR,
824 "%s: command data size incorrect %zu vs %zu\n",
825 __func__, s, esize);
826 g_free(ents);
827 return -1;
828 }
829
830 *iov = NULL;
831 if (addr) {
832 *addr = NULL;
833 }
834 for (e = 0, v = 0; e < nr_entries; e++) {
835 uint64_t a = le64_to_cpu(ents[e].addr);
836 uint32_t l = le32_to_cpu(ents[e].length);
837 hwaddr len;
838 void *map;
839
840 do {
841 len = l;
842 map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
843 DMA_DIRECTION_TO_DEVICE,
844 MEMTXATTRS_UNSPECIFIED);
845 if (!map) {
846 qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
847 " element %d\n", __func__, e);
848 virtio_gpu_cleanup_mapping_iov(g, *iov, v);
849 g_free(ents);
850 *iov = NULL;
851 if (addr) {
852 g_free(*addr);
853 *addr = NULL;
854 }
855 return -1;
856 }
857
858 if (!(v % 16)) {
859 *iov = g_renew(struct iovec, *iov, v + 16);
860 if (addr) {
861 *addr = g_renew(uint64_t, *addr, v + 16);
862 }
863 }
864 (*iov)[v].iov_base = map;
865 (*iov)[v].iov_len = len;
866 if (addr) {
867 (*addr)[v] = a;
868 }
869
870 a += len;
871 l -= len;
872 v += 1;
873 } while (l > 0);
874 }
875 *niov = v;
876
877 g_free(ents);
878 return 0;
879 }
880
881 void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
882 struct iovec *iov, uint32_t count)
883 {
884 int i;
885
886 for (i = 0; i < count; i++) {
887 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
888 iov[i].iov_base, iov[i].iov_len,
889 DMA_DIRECTION_TO_DEVICE,
890 iov[i].iov_len);
891 }
892 g_free(iov);
893 }
894
895 void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
896 struct virtio_gpu_simple_resource *res)
897 {
898 virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
899 res->iov = NULL;
900 res->iov_cnt = 0;
901 g_free(res->addrs);
902 res->addrs = NULL;
903
904 if (res->blob) {
905 virtio_gpu_fini_udmabuf(res);
906 }
907 }
908
909 static void
910 virtio_gpu_resource_attach_backing(VirtIOGPU *g,
911 struct virtio_gpu_ctrl_command *cmd)
912 {
913 struct virtio_gpu_simple_resource *res;
914 struct virtio_gpu_resource_attach_backing ab;
915 int ret;
916
917 VIRTIO_GPU_FILL_CMD(ab);
918 virtio_gpu_bswap_32(&ab, sizeof(ab));
919 trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
920
921 res = virtio_gpu_find_resource(g, ab.resource_id);
922 if (!res) {
923 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
924 __func__, ab.resource_id);
925 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
926 return;
927 }
928
929 if (res->iov) {
930 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
931 return;
932 }
933
934 ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
935 &res->addrs, &res->iov, &res->iov_cnt);
936 if (ret != 0) {
937 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
938 return;
939 }
940 }
941
942 static void
943 virtio_gpu_resource_detach_backing(VirtIOGPU *g,
944 struct virtio_gpu_ctrl_command *cmd)
945 {
946 struct virtio_gpu_simple_resource *res;
947 struct virtio_gpu_resource_detach_backing detach;
948
949 VIRTIO_GPU_FILL_CMD(detach);
950 virtio_gpu_bswap_32(&detach, sizeof(detach));
951 trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);
952
953 res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
954 __func__, &cmd->error);
955 if (!res) {
956 return;
957 }
958 virtio_gpu_cleanup_mapping(g, res);
959 }
960
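/*
 * Default (2D) control command dispatcher.  Commands that have not
 * already sent a response (cmd->finished still false) get a NODATA reply,
 * or the recorded error, once processing finishes, unless the renderer is
 * currently blocked, in which case the reply is deferred to the fence
 * queue handling.
 */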
961 void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
962 struct virtio_gpu_ctrl_command *cmd)
963 {
964 VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
965 virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
966
967 switch (cmd->cmd_hdr.type) {
968 case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
969 virtio_gpu_get_display_info(g, cmd);
970 break;
971 case VIRTIO_GPU_CMD_GET_EDID:
972 virtio_gpu_get_edid(g, cmd);
973 break;
974 case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
975 virtio_gpu_resource_create_2d(g, cmd);
976 break;
977 case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
978 if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
979 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
980 break;
981 }
982 virtio_gpu_resource_create_blob(g, cmd);
983 break;
984 case VIRTIO_GPU_CMD_RESOURCE_UNREF:
985 virtio_gpu_resource_unref(g, cmd);
986 break;
987 case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
988 virtio_gpu_resource_flush(g, cmd);
989 break;
990 case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
991 virtio_gpu_transfer_to_host_2d(g, cmd);
992 break;
993 case VIRTIO_GPU_CMD_SET_SCANOUT:
994 virtio_gpu_set_scanout(g, cmd);
995 break;
996 case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
997 if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
998 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
999 break;
1000 }
1001 virtio_gpu_set_scanout_blob(g, cmd);
1002 break;
1003 case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
1004 virtio_gpu_resource_attach_backing(g, cmd);
1005 break;
1006 case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
1007 virtio_gpu_resource_detach_backing(g, cmd);
1008 break;
1009 default:
1010 cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
1011 break;
1012 }
1013 if (!cmd->finished) {
1014 if (!g->parent_obj.renderer_blocked) {
1015 virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
1016 VIRTIO_GPU_RESP_OK_NODATA);
1017 }
1018 }
1019 }
1020
1021 static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
1022 {
1023 VirtIOGPU *g = VIRTIO_GPU(vdev);
1024 qemu_bh_schedule(g->ctrl_bh);
1025 }
1026
1027 static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
1028 {
1029 VirtIOGPU *g = VIRTIO_GPU(vdev);
1030 qemu_bh_schedule(g->cursor_bh);
1031 }
1032
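/*
 * Drain the command queue.  Processing stops early when the renderer is
 * blocked (e.g. while the UI side still owns the GL context); commands
 * whose response is still pending are parked on fenceq and completed
 * later from virtio_gpu_handle_gl_flushed().  The processing_cmdq flag
 * prevents re-entering this loop from a command handler.
 */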
1033 void virtio_gpu_process_cmdq(VirtIOGPU *g)
1034 {
1035 struct virtio_gpu_ctrl_command *cmd;
1036 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
1037
1038 if (g->processing_cmdq) {
1039 return;
1040 }
1041 g->processing_cmdq = true;
1042 while (!QTAILQ_EMPTY(&g->cmdq)) {
1043 cmd = QTAILQ_FIRST(&g->cmdq);
1044
1045 if (g->parent_obj.renderer_blocked) {
1046 break;
1047 }
1048
1049 /* process command */
1050 vgc->process_cmd(g, cmd);
1051
1052 QTAILQ_REMOVE(&g->cmdq, cmd, next);
1053 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1054 g->stats.requests++;
1055 }
1056
1057 if (!cmd->finished) {
1058 QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
1059 g->inflight++;
1060 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1061 if (g->stats.max_inflight < g->inflight) {
1062 g->stats.max_inflight = g->inflight;
1063 }
1064 fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
1065 }
1066 } else {
1067 g_free(cmd);
1068 }
1069 }
1070 g->processing_cmdq = false;
1071 }
1072
1073 static void virtio_gpu_process_fenceq(VirtIOGPU *g)
1074 {
1075 struct virtio_gpu_ctrl_command *cmd, *tmp;
1076
1077 QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
1078 trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
1079 virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
1080 QTAILQ_REMOVE(&g->fenceq, cmd, next);
1081 g_free(cmd);
1082 g->inflight--;
1083 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1084 fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
1085 }
1086 }
1087 }
1088
1089 static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
1090 {
1091 VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);
1092
1093 virtio_gpu_process_fenceq(g);
1094 virtio_gpu_process_cmdq(g);
1095 }
1096
1097 static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1098 {
1099 VirtIOGPU *g = VIRTIO_GPU(vdev);
1100 struct virtio_gpu_ctrl_command *cmd;
1101
1102 if (!virtio_queue_ready(vq)) {
1103 return;
1104 }
1105
1106 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
1107 while (cmd) {
1108 cmd->vq = vq;
1109 cmd->error = 0;
1110 cmd->finished = false;
1111 QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
1112 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
1113 }
1114
1115 virtio_gpu_process_cmdq(g);
1116 }
1117
1118 static void virtio_gpu_ctrl_bh(void *opaque)
1119 {
1120 VirtIOGPU *g = opaque;
1121 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
1122
1123 vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
1124 }
1125
1126 static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
1127 {
1128 VirtIOGPU *g = VIRTIO_GPU(vdev);
1129 VirtQueueElement *elem;
1130 size_t s;
1131 struct virtio_gpu_update_cursor cursor_info;
1132
1133 if (!virtio_queue_ready(vq)) {
1134 return;
1135 }
1136 for (;;) {
1137 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1138 if (!elem) {
1139 break;
1140 }
1141
1142 s = iov_to_buf(elem->out_sg, elem->out_num, 0,
1143 &cursor_info, sizeof(cursor_info));
1144 if (s != sizeof(cursor_info)) {
1145 qemu_log_mask(LOG_GUEST_ERROR,
1146 "%s: cursor size incorrect %zu vs %zu\n",
1147 __func__, s, sizeof(cursor_info));
1148 } else {
1149 virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
1150 update_cursor(g, &cursor_info);
1151 }
1152 virtqueue_push(vq, elem, 0);
1153 virtio_notify(vdev, vq);
1154 g_free(elem);
1155 }
1156 }
1157
1158 static void virtio_gpu_cursor_bh(void *opaque)
1159 {
1160 VirtIOGPU *g = opaque;
1161 virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
1162 }
1163
1164 static const VMStateDescription vmstate_virtio_gpu_scanout = {
1165 .name = "virtio-gpu-one-scanout",
1166 .version_id = 1,
1167 .fields = (const VMStateField[]) {
1168 VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
1169 VMSTATE_UINT32(width, struct virtio_gpu_scanout),
1170 VMSTATE_UINT32(height, struct virtio_gpu_scanout),
1171 VMSTATE_INT32(x, struct virtio_gpu_scanout),
1172 VMSTATE_INT32(y, struct virtio_gpu_scanout),
1173 VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
1174 VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
1175 VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
1176 VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
1177 VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
1178 VMSTATE_END_OF_LIST()
1179 },
1180 };
1181
1182 static const VMStateDescription vmstate_virtio_gpu_scanouts = {
1183 .name = "virtio-gpu-scanouts",
1184 .version_id = 1,
1185 .fields = (const VMStateField[]) {
1186 VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
1187 VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
1188 struct VirtIOGPU, NULL),
1189 VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
1190 parent_obj.conf.max_outputs, 1,
1191 vmstate_virtio_gpu_scanout,
1192 struct virtio_gpu_scanout),
1193 VMSTATE_END_OF_LIST()
1194 },
1195 };
1196
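/*
 * Migration stream for 2D resources (blob resources are handled by the
 * "virtio-gpu/blob" subsection): for each resource the id, geometry,
 * format and iov layout are written, followed by the raw pixel data, and
 * the list is terminated by a zero resource id.  virtio_gpu_load() below
 * recreates the pixman images and re-establishes the guest DMA mappings
 * from this information.
 */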
1197 static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
1198 const VMStateField *field, JSONWriter *vmdesc)
1199 {
1200 VirtIOGPU *g = opaque;
1201 struct virtio_gpu_simple_resource *res;
1202 int i;
1203
1204 /* in 2d mode we should never find unprocessed commands here */
1205 assert(QTAILQ_EMPTY(&g->cmdq));
1206
1207 QTAILQ_FOREACH(res, &g->reslist, next) {
1208 if (res->blob_size) {
1209 continue;
1210 }
1211 qemu_put_be32(f, res->resource_id);
1212 qemu_put_be32(f, res->width);
1213 qemu_put_be32(f, res->height);
1214 qemu_put_be32(f, res->format);
1215 qemu_put_be32(f, res->iov_cnt);
1216 for (i = 0; i < res->iov_cnt; i++) {
1217 qemu_put_be64(f, res->addrs[i]);
1218 qemu_put_be32(f, res->iov[i].iov_len);
1219 }
1220 qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
1221 pixman_image_get_stride(res->image) * res->height);
1222 }
1223 qemu_put_be32(f, 0); /* end of list */
1224
1225 return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
1226 }
1227
1228 static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
1229 struct virtio_gpu_simple_resource *res)
1230 {
1231 int i;
1232
1233 for (i = 0; i < res->iov_cnt; i++) {
1234 hwaddr len = res->iov[i].iov_len;
1235 res->iov[i].iov_base =
1236 dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
1237 DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);
1238
1239 if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
1240 /* Clean up the half-a-mapping we just created... */
1241 if (res->iov[i].iov_base) {
1242 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
1243 len, DMA_DIRECTION_TO_DEVICE, 0);
1244 }
1245 /* ...and the mappings for previous loop iterations */
1246 res->iov_cnt = i;
1247 virtio_gpu_cleanup_mapping(g, res);
1248 return false;
1249 }
1250 }
1251
1252 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
1253 g->hostmem += res->hostmem;
1254 return true;
1255 }
1256
1257 static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
1258 const VMStateField *field)
1259 {
1260 VirtIOGPU *g = opaque;
1261 struct virtio_gpu_simple_resource *res;
1262 uint32_t resource_id, pformat;
1263 void *bits = NULL;
1264 int i;
1265
1266 g->hostmem = 0;
1267
1268 resource_id = qemu_get_be32(f);
1269 while (resource_id != 0) {
1270 res = virtio_gpu_find_resource(g, resource_id);
1271 if (res) {
1272 return -EINVAL;
1273 }
1274
1275 res = g_new0(struct virtio_gpu_simple_resource, 1);
1276 res->resource_id = resource_id;
1277 res->width = qemu_get_be32(f);
1278 res->height = qemu_get_be32(f);
1279 res->format = qemu_get_be32(f);
1280 res->iov_cnt = qemu_get_be32(f);
1281
1282 /* allocate */
1283 pformat = virtio_gpu_get_pixman_format(res->format);
1284 if (!pformat) {
1285 g_free(res);
1286 return -EINVAL;
1287 }
1288
1289 res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
1290 #ifdef WIN32
1291 bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
1292 if (!bits) {
1293 g_free(res);
1294 return -EINVAL;
1295 }
1296 #endif
1297 res->image = pixman_image_create_bits(
1298 pformat,
1299 res->width, res->height,
1300 bits, res->height ? res->hostmem / res->height : 0);
1301 if (!res->image) {
1302 g_free(res);
1303 return -EINVAL;
1304 }
1305 #ifdef WIN32
1306 pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
1307 #endif
1308
1309 res->addrs = g_new(uint64_t, res->iov_cnt);
1310 res->iov = g_new(struct iovec, res->iov_cnt);
1311
1312 /* read data */
1313 for (i = 0; i < res->iov_cnt; i++) {
1314 res->addrs[i] = qemu_get_be64(f);
1315 res->iov[i].iov_len = qemu_get_be32(f);
1316 }
1317 qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
1318 pixman_image_get_stride(res->image) * res->height);
1319
1320 if (!virtio_gpu_load_restore_mapping(g, res)) {
1321 pixman_image_unref(res->image);
1322 g_free(res);
1323 return -EINVAL;
1324 }
1325
1326 resource_id = qemu_get_be32(f);
1327 }
1328
1329 /* load & apply scanout state */
1330 vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
1331
1332 return 0;
1333 }
1334
1335 static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
1336 const VMStateField *field, JSONWriter *vmdesc)
1337 {
1338 VirtIOGPU *g = opaque;
1339 struct virtio_gpu_simple_resource *res;
1340 int i;
1341
1342 /* in 2d mode we should never find unprocessed commands here */
1343 assert(QTAILQ_EMPTY(&g->cmdq));
1344
1345 QTAILQ_FOREACH(res, &g->reslist, next) {
1346 if (!res->blob_size) {
1347 continue;
1348 }
1349 qemu_put_be32(f, res->resource_id);
1350 qemu_put_be32(f, res->blob_size);
1351 qemu_put_be32(f, res->iov_cnt);
1352 for (i = 0; i < res->iov_cnt; i++) {
1353 qemu_put_be64(f, res->addrs[i]);
1354 qemu_put_be32(f, res->iov[i].iov_len);
1355 }
1356 }
1357 qemu_put_be32(f, 0); /* end of list */
1358
1359 return 0;
1360 }
1361
1362 static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
1363 const VMStateField *field)
1364 {
1365 VirtIOGPU *g = opaque;
1366 struct virtio_gpu_simple_resource *res;
1367 uint32_t resource_id;
1368 int i;
1369
1370 resource_id = qemu_get_be32(f);
1371 while (resource_id != 0) {
1372 res = virtio_gpu_find_resource(g, resource_id);
1373 if (res) {
1374 return -EINVAL;
1375 }
1376
1377 res = g_new0(struct virtio_gpu_simple_resource, 1);
1378 res->resource_id = resource_id;
1379 res->blob_size = qemu_get_be32(f);
1380 res->iov_cnt = qemu_get_be32(f);
1381 res->addrs = g_new(uint64_t, res->iov_cnt);
1382 res->iov = g_new(struct iovec, res->iov_cnt);
1383
1384 /* read data */
1385 for (i = 0; i < res->iov_cnt; i++) {
1386 res->addrs[i] = qemu_get_be64(f);
1387 res->iov[i].iov_len = qemu_get_be32(f);
1388 }
1389
1390 if (!virtio_gpu_load_restore_mapping(g, res)) {
1391 g_free(res);
1392 return -EINVAL;
1393 }
1394
1395 virtio_gpu_init_udmabuf(res);
1396
1397 resource_id = qemu_get_be32(f);
1398 }
1399
1400 return 0;
1401 }
1402
1403 static int virtio_gpu_post_load(void *opaque, int version_id)
1404 {
1405 VirtIOGPU *g = opaque;
1406 struct virtio_gpu_scanout *scanout;
1407 struct virtio_gpu_simple_resource *res;
1408 int i;
1409
1410 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
1411 /* FIXME: should take scanout.r.{x,y} into account */
1412 scanout = &g->parent_obj.scanout[i];
1413 if (!scanout->resource_id) {
1414 continue;
1415 }
1416 res = virtio_gpu_find_resource(g, scanout->resource_id);
1417 if (!res) {
1418 return -EINVAL;
1419 }
1420 scanout->ds = qemu_create_displaysurface_pixman(res->image);
1421 if (!scanout->ds) {
1422 return -EINVAL;
1423 }
1424 #ifdef WIN32
1425 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
1426 #endif
1427
1428 dpy_gfx_replace_surface(scanout->con, scanout->ds);
1429 dpy_gfx_update_full(scanout->con);
1430 if (scanout->cursor.resource_id) {
1431 update_cursor(g, &scanout->cursor);
1432 }
1433 res->scanout_bitmask |= (1 << i);
1434 }
1435
1436 return 0;
1437 }
1438
1439 void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
1440 {
1441 VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
1442 VirtIOGPU *g = VIRTIO_GPU(qdev);
1443
1444 if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
1445 if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
1446 !virtio_gpu_have_udmabuf()) {
1447 error_setg(errp, "need rutabaga or udmabuf for blob resources");
1448 return;
1449 }
1450
1451 if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
1452 error_setg(errp, "blobs and virgl are not compatible (yet)");
1453 return;
1454 }
1455 }
1456
1457 if (!virtio_gpu_base_device_realize(qdev,
1458 virtio_gpu_handle_ctrl_cb,
1459 virtio_gpu_handle_cursor_cb,
1460 errp)) {
1461 return;
1462 }
1463
1464 g->ctrl_vq = virtio_get_queue(vdev, 0);
1465 g->cursor_vq = virtio_get_queue(vdev, 1);
1466 g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
1467 &qdev->mem_reentrancy_guard);
1468 g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
1469 &qdev->mem_reentrancy_guard);
1470 g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
1471 qemu_cond_init(&g->reset_cond);
1472 QTAILQ_INIT(&g->reslist);
1473 QTAILQ_INIT(&g->cmdq);
1474 QTAILQ_INIT(&g->fenceq);
1475 }
1476
1477 static void virtio_gpu_device_unrealize(DeviceState *qdev)
1478 {
1479 VirtIOGPU *g = VIRTIO_GPU(qdev);
1480
1481 g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
1482 g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
1483 g_clear_pointer(&g->reset_bh, qemu_bh_delete);
1484 qemu_cond_destroy(&g->reset_cond);
1485 virtio_gpu_base_device_unrealize(qdev);
1486 }
1487
1488 static void virtio_gpu_reset_bh(void *opaque)
1489 {
1490 VirtIOGPU *g = VIRTIO_GPU(opaque);
1491 struct virtio_gpu_simple_resource *res, *tmp;
1492 int i = 0;
1493
1494 QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
1495 virtio_gpu_resource_destroy(g, res);
1496 }
1497
1498 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
1499 dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
1500 }
1501
1502 g->reset_finished = true;
1503 qemu_cond_signal(&g->reset_cond);
1504 }
1505
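/*
 * Device reset can be triggered from a vCPU thread.  Resource destruction
 * tears down display surfaces and therefore runs in the main loop: in
 * that case the work is deferred to reset_bh and the vCPU thread waits on
 * reset_cond via qemu_cond_wait_bql() until reset_finished is set.
 */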
1506 void virtio_gpu_reset(VirtIODevice *vdev)
1507 {
1508 VirtIOGPU *g = VIRTIO_GPU(vdev);
1509 struct virtio_gpu_ctrl_command *cmd;
1510
1511 if (qemu_in_vcpu_thread()) {
1512 g->reset_finished = false;
1513 qemu_bh_schedule(g->reset_bh);
1514 while (!g->reset_finished) {
1515 qemu_cond_wait_bql(&g->reset_cond);
1516 }
1517 } else {
1518 virtio_gpu_reset_bh(g);
1519 }
1520
1521 while (!QTAILQ_EMPTY(&g->cmdq)) {
1522 cmd = QTAILQ_FIRST(&g->cmdq);
1523 QTAILQ_REMOVE(&g->cmdq, cmd, next);
1524 g_free(cmd);
1525 }
1526
1527 while (!QTAILQ_EMPTY(&g->fenceq)) {
1528 cmd = QTAILQ_FIRST(&g->fenceq);
1529 QTAILQ_REMOVE(&g->fenceq, cmd, next);
1530 g->inflight--;
1531 g_free(cmd);
1532 }
1533
1534 virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
1535 }
1536
1537 static void
1538 virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
1539 {
1540 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1541
1542 memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
1543 }
1544
1545 static void
1546 virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
1547 {
1548 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1549 const struct virtio_gpu_config *vgconfig =
1550 (const struct virtio_gpu_config *)config;
1551
1552 if (vgconfig->events_clear) {
1553 g->virtio_config.events_read &= ~vgconfig->events_clear;
1554 }
1555 }
1556
1557 static bool virtio_gpu_blob_state_needed(void *opaque)
1558 {
1559 VirtIOGPU *g = VIRTIO_GPU(opaque);
1560
1561 return virtio_gpu_blob_enabled(g->parent_obj.conf);
1562 }
1563
1564 const VMStateDescription vmstate_virtio_gpu_blob_state = {
1565 .name = "virtio-gpu/blob",
1566 .minimum_version_id = VIRTIO_GPU_VM_VERSION,
1567 .version_id = VIRTIO_GPU_VM_VERSION,
1568 .needed = virtio_gpu_blob_state_needed,
1569 .fields = (const VMStateField[]){
1570 {
1571 .name = "virtio-gpu/blob",
1572 .info = &(const VMStateInfo) {
1573 .name = "blob",
1574 .get = virtio_gpu_blob_load,
1575 .put = virtio_gpu_blob_save,
1576 },
1577 .flags = VMS_SINGLE,
1578 } /* device */,
1579 VMSTATE_END_OF_LIST()
1580 },
1581 };
1582
1583 /*
1584 * For historical reasons virtio_gpu does not adhere to the virtio
1585 * migration scheme described in doc/virtio-migration.txt, in the sense
1586 * that no save/load callbacks are provided to the core. Instead the
1587 * device data is saved/loaded after the core data.
1588 *
1589 * Because of this we need a special vmsd.
1590 */
1591 static const VMStateDescription vmstate_virtio_gpu = {
1592 .name = "virtio-gpu",
1593 .minimum_version_id = VIRTIO_GPU_VM_VERSION,
1594 .version_id = VIRTIO_GPU_VM_VERSION,
1595 .fields = (const VMStateField[]) {
1596 VMSTATE_VIRTIO_DEVICE /* core */,
1597 {
1598 .name = "virtio-gpu",
1599 .info = &(const VMStateInfo) {
1600 .name = "virtio-gpu",
1601 .get = virtio_gpu_load,
1602 .put = virtio_gpu_save,
1603 },
1604 .flags = VMS_SINGLE,
1605 } /* device */,
1606 VMSTATE_END_OF_LIST()
1607 },
1608 .subsections = (const VMStateDescription * const []) {
1609 &vmstate_virtio_gpu_blob_state,
1610 NULL
1611 },
1612 .post_load = virtio_gpu_post_load,
1613 };
1614
1615 static Property virtio_gpu_properties[] = {
1616 VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
1617 DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
1618 256 * MiB),
1619 DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
1620 VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
1621 DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
1622 DEFINE_PROP_END_OF_LIST(),
1623 };
1624
1625 static void virtio_gpu_class_init(ObjectClass *klass, void *data)
1626 {
1627 DeviceClass *dc = DEVICE_CLASS(klass);
1628 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
1629 VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
1630 VirtIOGPUBaseClass *vgbc = &vgc->parent;
1631
1632 vgc->handle_ctrl = virtio_gpu_handle_ctrl;
1633 vgc->process_cmd = virtio_gpu_simple_process_cmd;
1634 vgc->update_cursor_data = virtio_gpu_update_cursor_data;
1635 vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;
1636
1637 vdc->realize = virtio_gpu_device_realize;
1638 vdc->unrealize = virtio_gpu_device_unrealize;
1639 vdc->reset = virtio_gpu_reset;
1640 vdc->get_config = virtio_gpu_get_config;
1641 vdc->set_config = virtio_gpu_set_config;
1642
1643 dc->vmsd = &vmstate_virtio_gpu;
1644 device_class_set_props(dc, virtio_gpu_properties);
1645 }
1646
1647 static const TypeInfo virtio_gpu_info = {
1648 .name = TYPE_VIRTIO_GPU,
1649 .parent = TYPE_VIRTIO_GPU_BASE,
1650 .instance_size = sizeof(VirtIOGPU),
1651 .class_size = sizeof(VirtIOGPUClass),
1652 .class_init = virtio_gpu_class_init,
1653 };
1654 module_obj(TYPE_VIRTIO_GPU);
1655 module_kconfig(VIRTIO_GPU);
1656
1657 static void virtio_register_types(void)
1658 {
1659 type_register_static(&virtio_gpu_info);
1660 }
1661
1662 type_init(virtio_register_types)