/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "trace.h"
#include "exec/ramblock.h"
#include "sysemu/hostmem.h"
#include <sys/ioctl.h>
#include <linux/memfd.h>
#include "qemu/memfd.h"
#include "standard-headers/linux/udmabuf.h"

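/*
 * Turn the scattered guest pages backing a blob resource into a single
 * host dma-buf: each iov entry is translated back to its RAMBlock's
 * memfd and offset, and the resulting list is handed to the udmabuf
 * driver via UDMABUF_CREATE_LIST.  On failure res->dmabuf_fd stays
 * negative.
 */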
static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
{
    struct udmabuf_create_list *list;
    RAMBlock *rb;
    ram_addr_t offset;
    int udmabuf, i;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return;
    }

    list = g_malloc0(sizeof(struct udmabuf_create_list) +
                     sizeof(struct udmabuf_create_item) * res->iov_cnt);

    for (i = 0; i < res->iov_cnt; i++) {
        rcu_read_lock();
        rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
        rcu_read_unlock();

        if (!rb || rb->fd < 0) {
            g_free(list);
            return;
        }

        list->list[i].memfd  = rb->fd;
        list->list[i].offset = offset;
        list->list[i].size   = res->iov[i].iov_len;
    }

    list->count = res->iov_cnt;
    list->flags = UDMABUF_FLAGS_CLOEXEC;

    res->dmabuf_fd = ioctl(udmabuf, UDMABUF_CREATE_LIST, list);
    if (res->dmabuf_fd < 0) {
        warn_report("%s: UDMABUF_CREATE_LIST: %s", __func__,
                    strerror(errno));
    }
    g_free(list);
}

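/*
 * Map the created dma-buf read-only into the QEMU process so the
 * (possibly non-contiguous) blob can be accessed through one linear
 * pointer.
 */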
static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res)
{
    res->remapped = mmap(NULL, res->blob_size, PROT_READ,
                         MAP_SHARED, res->dmabuf_fd, 0);
    if (res->remapped == MAP_FAILED) {
        warn_report("%s: dmabuf mmap failed: %s", __func__,
                    strerror(errno));
        res->remapped = NULL;
    }
}

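/* Undo virtio_gpu_remap_udmabuf() and virtio_gpu_create_udmabuf(). */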
static void virtio_gpu_destroy_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        munmap(res->remapped, res->blob_size);
        res->remapped = NULL;
    }
    if (res->dmabuf_fd >= 0) {
        close(res->dmabuf_fd);
        res->dmabuf_fd = -1;
    }
}

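/*
 * object_child_foreach() callback: report whether at least one memory
 * backend's RAM block sits on an fd with file seals applied (i.e. a
 * memfd), which udmabuf creation depends on.
 */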
static int find_memory_backend_type(Object *obj, void *opaque)
{
    bool *memfd_backend = opaque;
    int ret;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        RAMBlock *rb = backend->mr.ram_block;

        if (rb && rb->fd > 0) {
            ret = fcntl(rb->fd, F_GET_SEALS);
            if (ret > 0) {
                *memfd_backend = true;
            }
        }
    }

    return 0;
}

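/*
 * udmabuf support needs both a usable udmabuf device and guest RAM
 * served out of a memfd-backed memory backend; check for both.
 */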
bool virtio_gpu_have_udmabuf(void)
{
    Object *memdev_root;
    int udmabuf;
    bool memfd_backend = false;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return false;
    }

    memdev_root = object_resolve_path("/objects", NULL);
    object_child_foreach(memdev_root, find_memory_backend_type, &memfd_backend);

    return memfd_backend;
}

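/*
 * Set up host-side access to a blob resource.  A small single-iov blob
 * (under 4096 bytes) is used in place; anything else is wrapped in a
 * udmabuf and mapped linearly.
 */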
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res)
{
    void *pdata = NULL;

    res->dmabuf_fd = -1;
    if (res->iov_cnt == 1 &&
        res->iov[0].iov_len < 4096) {
        pdata = res->iov[0].iov_base;
    } else {
        virtio_gpu_create_udmabuf(res);
        if (res->dmabuf_fd < 0) {
            return;
        }
        virtio_gpu_remap_udmabuf(res);
        if (!res->remapped) {
            return;
        }
        pdata = res->remapped;
    }

    res->blob = pdata;
}

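/* Tear down the udmabuf mapping created by virtio_gpu_init_udmabuf(). */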
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        virtio_gpu_destroy_udmabuf(res);
    }
}

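/*
 * Release a scanout dmabuf: let the display backend drop its
 * reference, then unlink and free the bookkeeping structure.
 */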
static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
{
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[dmabuf->scanout_id];
    dpy_gl_release_dmabuf(scanout->con, &dmabuf->buf);
    QTAILQ_REMOVE(&g->dmabuf.bufs, dmabuf, next);
    g_free(dmabuf);
}

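/*
 * Wrap a resource's dma-buf fd in a VGPUDMABuf describing the scanout
 * geometry (position, size, stride, format) for the display backend.
 */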
static VGPUDMABuf
*virtio_gpu_create_dmabuf(VirtIOGPU *g,
                          uint32_t scanout_id,
                          struct virtio_gpu_simple_resource *res,
                          struct virtio_gpu_framebuffer *fb,
                          struct virtio_gpu_rect *r)
{
    VGPUDMABuf *dmabuf;

    if (res->dmabuf_fd < 0) {
        return NULL;
    }

    dmabuf = g_new0(VGPUDMABuf, 1);
    dmabuf->buf.width = r->width;
    dmabuf->buf.height = r->height;
    dmabuf->buf.stride = fb->stride;
    dmabuf->buf.x = r->x;
    dmabuf->buf.y = r->y;
    dmabuf->buf.backing_width = fb->width;
    dmabuf->buf.backing_height = fb->height;
    dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
    dmabuf->buf.fd = res->dmabuf_fd;
    dmabuf->buf.allow_fences = true;
    dmabuf->buf.draw_submitted = false;
    dmabuf->scanout_id = scanout_id;
    QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);

    return dmabuf;
}

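/*
 * Point a scanout at a resource's dma-buf: create the new primary
 * buffer, resize the console, hand the dmabuf to the display backend,
 * and only then release the previous primary.
 */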
int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                             uint32_t scanout_id,
                             struct virtio_gpu_simple_resource *res,
                             struct virtio_gpu_framebuffer *fb,
                             struct virtio_gpu_rect *r)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    VGPUDMABuf *new_primary, *old_primary = NULL;

    new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r);
    if (!new_primary) {
        return -EINVAL;
    }

    if (g->dmabuf.primary[scanout_id]) {
        old_primary = g->dmabuf.primary[scanout_id];
    }

    g->dmabuf.primary[scanout_id] = new_primary;
    qemu_console_resize(scanout->con,
                        new_primary->buf.width,
                        new_primary->buf.height);
    dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);

    if (old_primary) {
        virtio_gpu_free_dmabuf(g, old_primary);
    }

    return 0;
}