/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

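/*
 * Resource id handling: allocate and release host resource handles via
 * an idr.  Ids start at 1, so 0 can serve as an invalid handle.
 */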
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid)
{
	int handle;

	idr_preload(GFP_KERNEL);
	spin_lock(&vgdev->resource_idr_lock);
	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&vgdev->resource_idr_lock);
	idr_preload_end();
	*resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	spin_lock(&vgdev->resource_idr_lock);
	idr_remove(&vgdev->resource_idr, id);
	spin_unlock(&vgdev->resource_idr_lock);
}

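/*
 * Virtqueue callbacks, invoked when the host has processed buffers:
 * all the real work is deferred to the per-queue dequeue work items.
 */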
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	schedule_work(&vgdev->cursorq.dequeue_work);
}

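/*
 * Preallocate one vbuffer per ring slot of both virtqueues, carved out
 * of a single allocation, and put them all on the free list.
 */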
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, size, count = 0;
	void *ptr;

	INIT_LIST_HEAD(&vgdev->free_vbufs);
	spin_lock_init(&vgdev->free_vbufs_lock);
	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
	size = count * VBUFFER_SIZE;
	DRM_INFO("virtio vbuffers: %d bufs, %zdB each, %dkB total.\n",
		 count, VBUFFER_SIZE, size / 1024);

	vgdev->vbufs = kzalloc(size, GFP_KERNEL);
	if (!vgdev->vbufs)
		return -ENOMEM;

	for (i = 0, ptr = vgdev->vbufs;
	     i < count;
	     i++, ptr += VBUFFER_SIZE) {
		vbuf = ptr;
		list_add(&vbuf->list, &vgdev->free_vbufs);
	}
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_vbuffer *vbuf;
	int i, count = 0;

	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
	count += virtqueue_get_vring_size(vgdev->cursorq.vq);

	spin_lock(&vgdev->free_vbufs_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
			/* don't leak the lock on the early-return path */
			spin_unlock(&vgdev->free_vbufs_lock);
			return;
		}
		vbuf = list_first_entry(&vgdev->free_vbufs,
					struct virtio_gpu_vbuffer, list);
		list_del(&vbuf->list);
	}
	spin_unlock(&vgdev->free_vbufs_lock);
	kfree(vgdev->vbufs);
}

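/*
 * Take a vbuffer off the free list and initialize it.  The command
 * buffer always lives inline behind the vbuffer struct; small responses
 * go inline as well, larger ones use the caller-supplied buffer.
 */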
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	spin_lock(&vgdev->free_vbufs_lock);
	BUG_ON(list_empty(&vgdev->free_vbufs));
	vbuf = list_first_entry(&vgdev->free_vbufs,
				struct virtio_gpu_vbuffer, list);
	list_del(&vbuf->list);
	spin_unlock(&vgdev->free_vbufs_lock);
	memset(vbuf, 0, VBUFFER_SIZE);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

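/*
 * Allocation helpers for the common cases: a plain command with the
 * default ctrl-header-only response, a cursor command (no response),
 * and a command with a caller-specified response buffer and callback.
 */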
static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

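/*
 * Return a vbuffer to the free list, releasing any out-of-line data
 * and response buffers that came with it.
 */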
static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	spin_lock(&vgdev->free_vbufs_lock);
	list_add(&vbuf->list, &vgdev->free_vbufs);
	spin_unlock(&vgdev->free_vbufs_lock);
}

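/*
 * Collect all buffers the host has finished with and move them onto
 * @reclaim_list for processing by the caller.
 */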
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

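/*
 * Control queue dequeue work: reclaim completed buffers, log error
 * responses, track the highest fence id seen, run per-buffer response
 * callbacks, then wake waiters and process completed fence events.
 */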
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

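/*
 * Cursor queue dequeue work: cursor commands carry no response, so
 * just recycle the buffers and wake anybody waiting for ring space.
 */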
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

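/*
 * Add a vbuffer to the control virtqueue.  Called with ctrlq.qlock
 * held; drops and re-takes the lock while waiting for ring space.
 * Returns the number of free ring slots on success.
 */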
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

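/*
 * Add a vbuffer to the cursor virtqueue.  Same ENOSPC retry scheme as
 * the control queue, but cursor commands never carry a response.
 */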
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
   just use dma_alloced pages for the queue objects? */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

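/*
 * Transfer a rectangle from the guest backing store into the host
 * resource.  If @fence is non-NULL a fence is emitted with the command
 * so completion can be waited on.
 */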
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	if (fence)
		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

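/*
 * Attach backing pages to a resource.  @ents is handed over to the
 * vbuffer as out-of-line data and freed by free_vbuf() once the host
 * has consumed the command.
 */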
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	if (fence)
		virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

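/*
 * Response callback for GET_DISPLAY_INFO: cache the per-scanout modes,
 * clear the pending flag, wake waiters and kick off hotplug handling.
 */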
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

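/*
 * Query the host for display (scanout) information.  The response is
 * handled asynchronously by virtio_gpu_cmd_get_display_info_cb().
 */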
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

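/*
 * Build a mem entry list from the object's sg table and attach it as
 * backing store for the host resource.
 */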
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(obj->pages->nents,
			     sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
		ents[si].addr = cpu_to_le64(sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
					       ents, obj->pages->nents,
					       fence);
	obj->hw_res_handle = resource_id;
	return 0;
}

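/*
 * Push the current cursor state for @output to the host via the
 * cursor queue.
 */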
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}