/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

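/*
 * Byte-swap a whole command structure on big-endian hosts.  This
 * assumes the command is a virtio_gpu_ctrl_hdr followed purely by
 * 32-bit little-endian fields, which holds for every command it is
 * applied to (transfer_to_host_2d, with its 64-bit offset, has its
 * own swap helper below).
 */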
static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

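/*
 * Dispatch helper: route a call to the virgl (3D) implementation when
 * the virgl renderer is in use, otherwise to the simple 2D one.  With
 * CONFIG_VIRGL unset it always picks the simple variant.
 */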
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

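/*
 * Handle both cursor commands: VIRTIO_GPU_CMD_UPDATE_CURSOR uploads a
 * new 64x64 cursor image from the given resource and redefines the
 * cursor, while VIRTIO_GPU_CMD_MOVE_CURSOR only updates its position.
 */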
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

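/*
 * Write a response into the command's in-buffers and push it onto the
 * virtqueue.  If the request carried VIRTIO_GPU_FLAG_FENCE, the fence
 * id and context id are echoed back so the guest can match the fence.
 */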
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Stride computation copied from pixman/pixman-bits-image.c; the
     * integer overflow check is skipped here because
     * pixman_image_create_bits() will fail anyway if it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

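/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image
 * for the resource, but only while the accumulated pixel memory stays
 * below the max_hostmem limit; otherwise fail with ERR_OUT_OF_MEMORY.
 */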
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

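/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest's
 * backing pages (res->iov) into the host pixman image, after checking
 * that the requested rectangle lies within the resource.
 */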
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

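    /*
     * Fast path: a full-width transfer starting at offset 0 is one
     * contiguous copy.  Anything else is copied line by line, reading
     * each source line at the image stride from t2d.offset onwards.
     */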
    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
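    /*
     * resource_id 0 means "disable this scanout".  Scanout 0 cannot be
     * disabled; for the others, drop the resource binding and replace
     * the display surface with NULL.
     */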
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* the scanout rectangle changed: recreate the display surface */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

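    /* Cap the number of backing entries so a misbehaving guest cannot
     * force an arbitrarily large host allocation.
     */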
    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

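/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: map the guest pages listed in
 * the command as the resource's backing store, kept as an iovec array
 * so later transfers can read straight from guest memory.
 */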
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

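    /*
     * Drain the queued commands in order.  A command still waiting
     * (e.g. on the renderer) stalls the queue; a command that did not
     * finish synchronously is parked on fenceq until its fence
     * completes.
     */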
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

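    /* Move every available element onto cmdq first, then process the
     * whole batch in one go.
     */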
    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

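/*
 * Device save format: for each resource, its metadata and backing-entry
 * list followed by the raw image contents; a resource id of 0 ends the
 * list, then the scanout vmstate follows.
 */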
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        error_setg(errp, "virtio-gpu does not support vIOMMU yet");
        return;
    }

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in docs/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.  Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

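/* Compile-time checks that the command structs match the sizes fixed by
 * the virtio-gpu wire format.
 */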
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);