/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>

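/*
 * Copy the 64x64, 32-bit-per-pixel cursor image for @resource_id into
 * @data; bails out via g_return_if_fail() if the renderer has no cursor
 * data for it or the cursor is not the expected 64x64 size.
 */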
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

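    /*
     * 2D resources use fixed creation parameters; the magic numbers follow
     * Gallium conventions (per the virglrenderer headers, target 2 is
     * PIPE_TEXTURE_2D and bind (1 << 1) is VIRGL_BIND_RENDER_TARGET).
     */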
    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

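/*
 * Detach any guest-memory iovecs from the resource before unref'ing it,
 * so the mappings created at attach-backing time are released rather than
 * leaked.
 */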
static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

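/*
 * Advertise two capsets (VIRGL and VIRGL2) when the renderer knows about
 * VIRGL2, i.e. reports a non-zero max version for it, and one otherwise.
 */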
uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    resp = g_malloc0(sizeof(*resp) + max_size);

    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

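/*
 * Copy the command stream that follows the virtio_gpu_cmd_submit header
 * out of the guest's out_sg and hand it to virglrenderer.  The stream is
 * made of 32-bit words, hence the cs.size / 4 word count.
 */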
static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zu/%u)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

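/*
 * 2D transfers always go through context 0 at mip level 0; passing 0 for
 * the strides presumably lets virglrenderer fall back to the resource's
 * own layout.
 */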
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

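/*
 * Map the guest pages described by the attach_backing command into an
 * iovec array and hand it to virglrenderer; if the renderer rejects the
 * iovecs, unmap them again so nothing leaks.
 */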
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

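/*
 * Query a resource's base info plus its DRM format modifier.  The _ext
 * variant of the query only exists in virglrenderer builds that define
 * VIRGL_RENDERER_RESOURCE_INFO_EXT_VERSION, so fall back to the plain
 * query (and a modifier of 0) on older versions.
 */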
static int
virgl_get_resource_info_modifiers(uint32_t resource_id,
                                  struct virgl_renderer_resource_info *info,
                                  uint64_t *modifiers)
{
    int ret;
#ifdef VIRGL_RENDERER_RESOURCE_INFO_EXT_VERSION
    struct virgl_renderer_resource_info_ext info_ext;
    ret = virgl_renderer_resource_get_info_ext(resource_id, &info_ext);
    if (ret < 0) {
        return ret;
    }

    *info = info_ext.base;
    *modifiers = info_ext.modifiers;
#else
    ret = virgl_renderer_resource_get_info(resource_id, info);
    if (ret < 0) {
        return ret;
    }

    /*
     * Before virgl_renderer_resource_get_info_ext,
     * getting the modifiers was not possible.
     */
    *modifiers = 0;
#endif

    return 0;
}

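/*
 * Scanout changes are forwarded to the vhost-user frontend as dmabuf
 * messages: the scanout resource's texture is exported as an fd and sent
 * with DMABUF_SCANOUT (or DMABUF_SCANOUT2, which appends the DRM format
 * modifier, when the frontend negotiated it).  A zero resource id or rect
 * disables the scanout instead.
 */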
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        uint64_t modifiers = 0;
        ret = virgl_get_resource_info_modifiers(ss.resource_id, &info,
                                                &modifiers);
        if (ret < 0) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };

        if (g->use_modifiers) {
            /*
             * The message uses all the fields set in dmabuf_scanout plus
             * modifiers which is appended after VhostUserGpuDMABUFScanout.
             */
            msg.request = VHOST_USER_GPU_DMABUF_SCANOUT2;
            msg.size = sizeof(VhostUserGpuDMABUFScanout2);
            msg.payload.dmabuf_scanout2.modifier = modifiers;
        } else {
            msg.request = VHOST_USER_GPU_DMABUF_SCANOUT;
            msg.size = sizeof(VhostUserGpuDMABUFScanout);
        }

        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

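/*
 * Flush a resource: after a glFlush(), send a DMABUF_UPDATE for every
 * scanout currently showing this resource and wait for the frontend to
 * acknowledge each update before continuing.
 */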
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        vg_get_edid(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

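    /*
     * A handler may have already replied to or suspended the command; only
     * auto-complete commands that are still in the NEW state.
     */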
    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

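/*
 * Fence callback from virglrenderer: every queued command whose fence_id
 * is <= the signalled fence has completed, so retire it with an OK
 * response.
 */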
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd = virgl_get_drm_fd,
    .version = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}

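/*
 * Bring up virglrenderer with EGL and thread-sync enabled, and hook its
 * poll fd (when it exposes one) into the vhost-user event loop.  A DRM
 * render-node fd can only be honoured with callbacks version >= 2.
 */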
bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}