/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

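/* Convert a drm_virtgpu_3d_box to the little-endian layout the host expects. */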
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

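/*
 * Virtqueue interrupt callbacks.  These run in IRQ context, so they only
 * kick the corresponding dequeue worker and return.
 */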
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

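/*
 * vbuffers come from a slab cache sized so that the inline command and
 * response areas (MAX_INLINE_CMD_SIZE / MAX_INLINE_RESP_SIZE) directly
 * follow the struct virtio_gpu_vbuffer header.
 */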
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

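/*
 * Allocate a vbuffer.  Commands always live in the inline area; responses
 * use the inline area when they fit, otherwise the caller must supply
 * resp_buf.
 */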
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

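/* Collect all completed buffers from the virtqueue onto reclaim_list. */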
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

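/*
 * Work handler for the control queue: reclaims completed buffers, logs
 * error responses, tracks the highest fence id seen, then signals fences
 * and frees the buffers.
 */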
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

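/*
 * Add a prepared sg list to the control virtqueue.  If the ring is full,
 * sleep on ack_queue until enough descriptors have been reclaimed.  With
 * indirect descriptors the whole request needs only one ring slot.
 */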
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf,
				      struct virtio_gpu_fence *fence,
				      int elemcnt,
				      struct scatterlist **sgs,
				      int outcnt,
				      int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
}

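/*
 * Build the scatterlists for a control command: the command itself
 * (device-readable), an optional data payload (device-readable, possibly
 * a vmalloc'd buffer that first has to be turned into an sg_table), and
 * an optional response buffer (device-writable).
 */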
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
				  incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

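/*
 * Notify the host about pending commands.  Queuing and notification are
 * split so that several commands (e.g. the plane updates of a pageflip)
 * can be batched into a single, more expensive host kick.
 */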
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	virtio_gpu_notify(vgdev);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

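/*
 * Transfer commands make the host read from the backing pages, so when
 * the DMA API is in use the sg list must be synced for the device first.
 */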
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	virtio_gpu_notify(vgdev);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

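/*
 * get_edid_block callback for drm_do_get_edid(): copies one EDID block
 * out of the host's response buffer.
 */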
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
	return 0;
}

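/*
 * Fetch a capset from the host, going through the per-device cap cache.
 * A new cache entry is allocated up front and only added if no other
 * task raced us in creating the same entry.
 */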
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
		virtio_gpu_notify(vgdev);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	virtio_gpu_notify(vgdev);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	virtio_gpu_notify(vgdev);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	virtio_gpu_notify(vgdev);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	virtio_gpu_notify(vgdev);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_mem_entry *ents,
			     unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
	return 0;
}

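/*
 * Copy the cached cursor state for one output into a cursor command and
 * queue it on the cursor virtqueue.
 */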
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}