/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

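/* Virtqueue interrupt callbacks.  These run in atomic context, so they
 * only kick the per-queue dequeue work; the actual buffer reclaim
 * happens in process context in the virtio_gpu_dequeue_*_func()
 * workers below.
 */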
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

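/* Allocate a vbuffer from the slab cache.  Each VBUFFER_SIZE allocation
 * holds the vbuffer header followed by an inline command buffer (up to
 * MAX_INLINE_CMD_SIZE bytes) and an inline response buffer (up to
 * MAX_INLINE_RESP_SIZE bytes); larger responses must be passed in by
 * the caller via resp_buf.
 */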
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

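/* Control-queue dequeue worker.  Drains completed buffers with
 * callbacks disabled, re-checking after re-enabling them to close the
 * race with a late interrupt.  Responses are then checked for errors
 * and fences; the highest fence id seen is signalled in one go via
 * virtio_gpu_fence_event_process().
 */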
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

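/* Cursor-queue dequeue worker.  Cursor commands have no response
 * payload to process, so completed buffers are simply freed before
 * waking anyone waiting for free ring space.
 */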
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

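/* Add a command buffer to the control virtqueue.  When the transport
 * supports indirect descriptors (vgdev->has_indirect), all scatterlist
 * entries fit into a single indirect table and only one ring slot is
 * needed.  If the ring lacks space, drop the lock and wait on ack_queue
 * until the dequeue worker frees enough slots.  The fence id is emitted
 * only once the buffer is guaranteed a slot, so fence ids match
 * submission order.
 */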
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf,
				      struct virtio_gpu_fence *fence,
				      int elemcnt,
				      struct scatterlist **sgs,
				      int outcnt,
				      int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	bool notify = false;
	int ret;

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (!vgdev->vqs_ready) {
		spin_unlock(&vgdev->ctrlq.qlock);

		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		return;
	}

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify) {
		if (vgdev->disable_notify)
			vgdev->pending_notify = true;
		else
			virtqueue_notify(vq);
	}
}

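/* Build the scatterlists for a control command: the command header is
 * always the first out-entry, an optional data payload (linear, or
 * vmalloc'd and converted page by page via vmalloc_to_sgt()) follows as
 * a second out-entry, and the response buffer is the single in-entry.
 */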
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
				  incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

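/* Notification batching: while notifications are disabled, kicks are
 * recorded in pending_notify instead of being sent.  Re-enabling sends
 * at most one deferred kick, coalescing the doorbell writes for a burst
 * of commands into a single host notification.
 */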
void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = false;

	if (!vgdev->pending_notify)
		return;
	vgdev->pending_notify = false;
	virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

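/* Queue a command on the cursor virtqueue.  Unlike the control path
 * this takes no fence; if the ring is full, virtqueue_add_sgs() returns
 * -ENOSPC and we wait for the dequeue worker to free a slot before
 * retrying.
 */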
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* Should we create gem objects only for userspace-visible, long-lived
 * objects, and just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

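/* Resource teardown is asynchronous: the object is handed to the
 * vbuffer via resp_cb_data, and virtio_gpu_cleanup_object() runs only
 * from the response callback, i.e. after the host has confirmed the
 * unref.
 */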
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

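/* If the device goes through the DMA API (i.e. it does not have the
 * legacy IOMMU-bypass quirk), the backing pages must be synced for
 * device access before asking the host to read them.
 */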
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

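/* The ents array becomes the vbuffer's data payload and is kvfree'd by
 * free_vbuf() once the command has completed, so callers must not touch
 * it after this call.
 */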
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

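/* Capset response callback: copy the payload into the matching cache
 * entry.  The write barrier orders the copy before the is_valid store,
 * pairing with a read barrier on the reader side before caps_cache is
 * consumed.
 */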
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

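/* Block-fetch helper for drm_do_get_edid(): the device returns the
 * whole EDID blob in one response, so each requested block is simply
 * copied out of the response buffer, bounds-checked against the
 * reported size.
 */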
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

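/* Fetch a capset, with caching: allocate a new cache entry up front,
 * then search the cache under display_info_lock in case another task
 * raced us.  If a matching entry already exists, the freshly allocated
 * one is discarded; otherwise it is inserted and a GET_CAPSET command
 * is sent, whose callback fills the entry and marks it valid.
 */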
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

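/* Submit an execbuffer: the command stream becomes the vbuffer's data
 * payload (and is kvfree'd with it on completion), while the referenced
 * objects are held via vbuf->objs and released after the response is
 * processed.
 */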
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_mem_entry *ents,
			     unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
	return 0;
}

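/* Push the current cursor state for one output.  The cursor command is
 * copied into the vbuffer, so output->cursor may be updated again as
 * soon as this returns.
 */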
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}