/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

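/*
 * Each command is tracked by a struct virtio_gpu_vbuffer allocated from a
 * slab cache.  The allocation is sized so that small command and response
 * buffers live inline, right behind the bookkeeping struct, avoiding extra
 * allocations in the common case.
 */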
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)

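/*
 * Virtqueue interrupt callbacks.  These run in atomic context, so they only
 * schedule the matching dequeue worker, which does the real completion
 * handling in process context.
 */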
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

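/* Create/destroy the slab cache that backs all vbuffer allocations. */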
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}

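/*
 * Allocate a vbuffer.  Commands must fit into the inline buffer; responses
 * use the inline buffer when small enough, otherwise the caller-supplied
 * resp_buf is used.
 */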
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

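/* Pull all completed vbuffers off a virtqueue onto a local reclaim list. */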
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}

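/*
 * Control-queue dequeue worker: drain completed buffers, log unexpected
 * response types, track the highest fence id the host has signalled, run
 * per-buffer response callbacks, then free the buffers and signal fences.
 */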
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

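/*
 * Cursor-queue dequeue worker: cursor commands carry no responses, so
 * completed buffers are simply reclaimed and freed.
 */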
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

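/*
 * Queue a command on the control virtqueue.  Called with ctrlq.qlock held;
 * if the ring is full the lock is dropped while waiting for space, hence
 * the __releases/__acquires annotations.
 */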
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
        __releases(&vgdev->ctrlq.qlock)
        __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we do.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}

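/* Queue a command on the cursor virtqueue; waits for ring space if full. */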
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* Just create gem objects for userspace and long-lived objects;
 * just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

405 | ||
b3f13ec9 | 406 | static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev, |
a3b815f0 GH |
407 | uint32_t resource_id, |
408 | struct virtio_gpu_fence **fence) | |
dc5698e8 DA |
409 | { |
410 | struct virtio_gpu_resource_detach_backing *cmd_p; | |
411 | struct virtio_gpu_vbuffer *vbuf; | |
412 | ||
413 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); | |
414 | memset(cmd_p, 0, sizeof(*cmd_p)); | |
415 | ||
416 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING); | |
417 | cmd_p->resource_id = cpu_to_le32(resource_id); | |
418 | ||
a3b815f0 | 419 | virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); |
dc5698e8 DA |
420 | } |
421 | ||
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

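/*
 * When the transport lacks the IOMMU quirk the DMA API is in use, so the
 * shadow pages must be synced to the device before the host is told to
 * copy from them.
 */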
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

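/*
 * Capability-set responses: the callbacks below stash the host's answers
 * in vgdev->capsets and the capset cache, then wake waiters on resp_wq.
 */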
static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

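/*
 * Attach backing pages to a resource: build a mem-entry array from the
 * object's sg table (DMA-mapped when the DMA API is in use) and hand it
 * to the host.  The entry array is freed once the ring has consumed it.
 */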
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             struct virtio_gpu_fence **fence)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si, nents;

        if (!obj->created)
                return 0;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        if (use_dma_api) {
                obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
                                         obj->pages->sgl, obj->pages->nents,
                                         DMA_TO_DEVICE);
                nents = obj->mapped;
        } else {
                nents = obj->pages->nents;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, nents, si) {
                ents[si].addr = cpu_to_le64(use_dma_api
                                            ? sg_dma_address(sg)
                                            : sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents,
                                               fence);
        return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api && obj->mapped) {
                struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

                /* detach backing and wait for the host to process it ... */
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
                dma_fence_wait(&fence->f, true);
                dma_fence_put(&fence->f);

                /* ... then tear down iommu mappings */
                dma_unmap_sg(vgdev->vdev->dev.parent,
                             obj->pages->sgl, obj->mapped,
                             DMA_TO_DEVICE);
                obj->mapped = 0;
        } else {
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
        }
}

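/* Push the current cursor state for one output through the cursor queue. */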
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}