/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

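/*
 * Dispatch between the virgl (3d accelerated) and the simple (2d)
 * implementation of a command: the virgl variant is used only while the
 * renderer is enabled at runtime, and the macro falls back to the simple
 * variant unconditionally when virglrenderer support is not compiled in.
 */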
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

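/*
 * Process a request from the cursor queue: a move only repositions the
 * pointer, while a full update also refreshes the cursor image from the
 * backing resource and redefines it on the console.
 */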
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    if (virtio_gpu_edid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_EDID);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

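/*
 * Send a response for @cmd on its control queue.  If the guest asked for a
 * fence, the fence id and context are copied from the request into the
 * response header before it is byteswapped and pushed to the virtqueue.
 */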
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    qemu_edid_info info = {
        .prefx = g->req_state[scanout].width,
        .prefy = g->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= g->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow
     * check; pixman_image_create_bits will fail if it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

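/*
 * Create a host-side pixman image for a guest 2d resource.  The allocation
 * is skipped (and the command fails with OUT_OF_MEMORY) when it would push
 * the per-device total above conf.max_hostmem.
 */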
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

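/*
 * Turn off a scanout: detach it from its resource and, for the primary
 * head only, replace the surface with a placeholder message so the
 * console does not keep displaying stale data.
 */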
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

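/*
 * Copy data from the guest-supplied backing iov into the host pixman
 * image.  A transfer that starts at offset zero and covers whole rows is
 * done with a single bulk copy; anything else is copied row by row.
 */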
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

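/*
 * Flush a rectangle of a resource to every scanout that shows it: the
 * flush region is intersected with each scanout's rectangle, translated
 * into scanout coordinates and passed to dpy_gfx_update().
 */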
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

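/*
 * Translate the guest's scatter-gather list of backing pages into an
 * iovec of host pointers via dma_memory_map().  The entry count is capped
 * at 16384 to bound the allocation; on any mapping failure everything
 * mapped so far is torn down again.
 */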
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

874
875static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
876{
877 VirtIOGPU *g = VIRTIO_GPU(vdev);
878 qemu_bh_schedule(g->ctrl_bh);
879}
880
881static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
882{
883 VirtIOGPU *g = VIRTIO_GPU(vdev);
884 qemu_bh_schedule(g->cursor_bh);
885}
886
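/*
 * Drain the queued control commands.  Processing stops early while the
 * renderer is blocked; commands that did not finish synchronously (fenced
 * virgl commands) are moved to the fence queue instead of being freed.
 */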
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

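/*
 * Control queue notification: pop all available elements onto the command
 * queue and process them, lazily initializing the virgl renderer on first
 * use.
 */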
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
#ifdef CONFIG_VIRGL
        if (g->renderer_reset) {
            g->renderer_reset = false;
            virtio_gpu_virgl_reset(g);
        }
#endif
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

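/*
 * Save device state: each resource is written as id, geometry, format,
 * backing entries and raw image contents, terminated by a zero id, then
 * the scanout state follows via its vmstate description.
 */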
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

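/*
 * Load device state: recreate each resource, re-map its guest backing
 * pages through the DMA address space, then restore and re-apply the
 * scanout configuration.
 */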
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     res->iov[i].iov_len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     res->iov[i].iov_len);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

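/*
 * Device reset: destroy all resources, clear the scanouts, drop any
 * queued or in-flight commands and, if virgl was in use, reset the
 * renderer (deferred while it is blocked).
 */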
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        if (g->renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_BIT("edid", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_EDID_ENABLED, false),
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);