/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
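
/*
 * Virtqueue callbacks, typically invoked from the vring interrupt
 * handler. They only schedule the matching dequeue work; the actual
 * buffer reclaim happens in process context in
 * virtio_gpu_dequeue_ctrl_func()/virtio_gpu_dequeue_cursor_func().
 */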
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}
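
/*
 * Each vbuffer is a single slab object: the struct itself, followed by
 * up to MAX_INLINE_CMD_SIZE bytes of command and up to
 * MAX_INLINE_RESP_SIZE bytes of response (see VBUFFER_SIZE above).
 * Larger response buffers must be supplied by the caller via resp_buf.
 */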
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kvfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}
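
/*
 * Collect all buffers the host has finished with and move them onto
 * reclaim_list, so the caller can free them outside the virtqueue lock.
 */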
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}
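
/*
 * Work item for the control queue: reclaim completed buffers, log error
 * responses, track the highest fence id seen, run per-buffer response
 * callbacks, then signal fences and free the buffers.
 */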
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry(entry, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

                trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
                        if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
                                struct virtio_gpu_ctrl_hdr *cmd;
                                cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
                                DRM_ERROR("response 0x%x (command 0x%x)\n",
                                          le32_to_cpu(resp->type),
                                          le32_to_cpu(cmd->type));
                        } else
                                DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                }
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                if (entry->objs)
                        virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
        int ret, s, i;
        struct sg_table *sgt;
        struct scatterlist *sg;
        struct page *pg;

        if (WARN_ON(!PAGE_ALIGNED(data)))
                return NULL;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
        ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return NULL;
        }

        for_each_sg(sgt->sgl, sg, *sg_ents, i) {
                pg = vmalloc_to_page(data);
                if (!pg) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                s = min_t(int, PAGE_SIZE, size);
                sg_set_page(sg, pg, s, 0);

                size -= s;
                data += s;
        }

        return sgt;
}
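
/*
 * Add one command (plus optional data-out and response buffers) to the
 * control virtqueue. Must be called with ctrlq.qlock held; drops and
 * re-acquires the lock while waiting for ring space, as the annotations
 * below indicate. Returns true when the caller should notify the host;
 * the actual notify is done outside the lock.
 */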
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct scatterlist *vout)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vresp;
        int outcnt = 0, incnt = 0;
        bool notify = false;
        int ret;

        if (!vgdev->vqs_ready)
                return notify;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vout) {
                sgs[outcnt + incnt] = vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                        (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

                notify = virtqueue_kick_prepare(vq);
        }
        return notify;
}
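
/*
 * Queue a command that may carry a fence. The fence is emitted and the
 * buffer queued inside the same qlock critical section, so fence ids
 * reach the virtqueue in order. Large vmalloc'ed data buffers are
 * converted to a scatter-gather table first.
 */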
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf,
                                                struct virtio_gpu_ctrl_hdr *hdr,
                                                struct virtio_gpu_fence *fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *vout = NULL, sg;
        struct sg_table *sgt = NULL;
        bool notify;
        int outcnt = 0;

        if (vbuf->data_size) {
                if (is_vmalloc_addr(vbuf->data_buf)) {
                        sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
                                             &outcnt);
                        if (!sgt)
                                return;
                        vout = sgt->sgl;
                } else {
                        sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
                        vout = &sg;
                        outcnt = 1;
                }
        }

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue. If not
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 2 + outcnt) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (hdr && fence) {
                virtio_gpu_fence_emit(vgdev, hdr, fence);
                if (vbuf->objs) {
                        virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
                        virtio_gpu_array_unlock_resv(vbuf->objs);
                }
        }
        notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
        spin_unlock(&vgdev->ctrlq.qlock);
        if (notify)
                virtqueue_notify(vgdev->ctrlq.vq);

        if (sgt) {
                sg_free_table(sgt);
                kfree(sgt);
        }
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                         struct virtio_gpu_vbuffer *vbuf)
{
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}
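
/*
 * Cursor commands bypass the control queue and go out via the dedicated
 * cursor virtqueue, which carries neither data nor response buffers.
 */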
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        bool notify;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                trace_virtio_gpu_cmd_queue(vq,
                        (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

                notify = virtqueue_kick_prepare(vq);
        }

        spin_unlock(&vgdev->cursorq.qlock);

        if (notify)
                virtqueue_notify(vq);
}

/* Just create gem objects for userspace and long-lived objects;
 * should the queue objects just use dma_alloc'ed pages?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
                                    struct virtio_gpu_object_params *params,
                                    struct virtio_gpu_object_array *objs,
                                    struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
        bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                                  uint32_t resource_id,
                                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
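
/*
 * When the device goes through the DMA API (i.e. it does not have the
 * virtio IOMMU-bypass quirk), the object's pages must be synced to the
 * device before the host reads them, hence the dma_sync_sg_for_device()
 * calls in the transfer paths below.
 */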
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
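
/*
 * The ents array is handed over to the vbuffer as data_buf and is
 * freed in free_vbuf() once the host has consumed the command.
 */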
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
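
/*
 * Fill the previously registered cache entry with the capset data
 * returned by the host. The smp_wmb() orders the memcpy() before the
 * is_valid store; the reader side is expected to pair this with a
 * matching read barrier before touching caps_cache.
 */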
static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        /* Copy must occur before is_valid is signalled. */
                        smp_wmb();
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
                                 unsigned int block, size_t len)
{
        struct virtio_gpu_resp_edid *resp = data;
        size_t start = block * EDID_LENGTH;

        if (start + len > le32_to_cpu(resp->size))
                return -1;
        memcpy(buf, resp->edid + start, len);
        return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
                                       struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_cmd_get_edid *cmd =
                (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
        struct virtio_gpu_resp_edid *resp =
                (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
        uint32_t scanout = le32_to_cpu(cmd->scanout);
        struct virtio_gpu_output *output;
        struct edid *new_edid, *old_edid;

        if (scanout >= vgdev->num_scanouts)
                return;
        output = vgdev->outputs + scanout;

        new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
        drm_connector_update_edid_property(&output->conn, new_edid);

        spin_lock(&vgdev->display_info_lock);
        old_edid = output->edid;
        output->edid = new_edid;
        spin_unlock(&vgdev->display_info_lock);

        kfree(old_edid);
        wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}
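
/*
 * Allocate a cache entry and request the capset from the host. The
 * cache list is re-checked under the lock in case another task added
 * the same id/version while we were allocating; if so, the fresh
 * allocation is discarded and the existing entry returned instead.
 */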
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        struct virtio_gpu_drv_cap_cache *search_ent;
        void *resp_buf;

        *cache_p = NULL;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        /* Search while under lock in case it was added by another task. */
        list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
                if (search_ent->id == vgdev->capsets[idx].id &&
                    search_ent->version == version) {
                        *cache_p = search_ent;
                        break;
                }
        }
        if (!*cache_p)
                list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        if (*cache_p) {
                /* Entry was found, so free everything that was just created. */
                kfree(resp_buf);
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return 0;
        }

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_cmd_get_edid *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;
        int scanout;

        if (WARN_ON(!vgdev->has_edid))
                return -EINVAL;

        for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
                resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
                                   GFP_KERNEL);
                if (!resp_buf)
                        return -ENOMEM;

                cmd_p = virtio_gpu_alloc_cmd_resp
                        (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
                         sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
                         resp_buf);
                cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
                cmd_p->scanout = cpu_to_le32(scanout);
                virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        }

        return 0;
}
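
/*
 * Context management. Note the debug name gets explicit NUL
 * termination, since strncpy() does not guarantee it when the source
 * string is at least as long as the destination buffer.
 */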
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            struct virtio_gpu_object_array *objs)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
                                  struct virtio_gpu_object_params *params,
                                  struct virtio_gpu_object_array *objs,
                                  struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->format = cpu_to_le32(params->format);
        cmd_p->width = cpu_to_le32(params->width);
        cmd_p->height = cpu_to_le32(params->height);

        cmd_p->target = cpu_to_le32(params->target);
        cmd_p->bind = cpu_to_le32(params->bind);
        cmd_p->depth = cpu_to_le32(params->depth);
        cmd_p->array_size = cpu_to_le32(params->array_size);
        cmd_p->last_level = cpu_to_le32(params->last_level);
        cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
        cmd_p->flags = cpu_to_le32(params->flags);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
        bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
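
/*
 * Submit a command stream (execbuffer) to the host. The data buffer is
 * owned by the vbuffer from here on and is freed once the host has
 * consumed it.
 */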
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id,
                           struct virtio_gpu_object_array *objs,
                           struct virtio_gpu_fence *fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;
        vbuf->objs = objs;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
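
/*
 * Pin the shmem object, build its sg table, optionally DMA-map it, and
 * attach the resulting page list to the host resource as backing store.
 */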
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             struct virtio_gpu_fence *fence)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si, nents, ret;

        if (WARN_ON_ONCE(!obj->created))
                return -EINVAL;
        if (WARN_ON_ONCE(obj->pages))
                return -EINVAL;

        ret = drm_gem_shmem_pin(&obj->base.base);
        if (ret < 0)
                return -EINVAL;

        obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
        if (obj->pages == NULL) {
                drm_gem_shmem_unpin(&obj->base.base);
                return -EINVAL;
        }

        if (use_dma_api) {
                obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
                                         obj->pages->sgl, obj->pages->nents,
                                         DMA_TO_DEVICE);
                nents = obj->mapped;
        } else {
                nents = obj->pages->nents;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, nents, si) {
                ents[si].addr = cpu_to_le64(use_dma_api
                                            ? sg_dma_address(sg)
                                            : sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
                                               ents, nents,
                                               fence);
        return 0;
}
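
/*
 * Detach backing pages from the host resource. When the pages were
 * DMA-mapped, wait (via a fence) for the host to be done with them
 * before tearing down the mapping.
 */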
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (WARN_ON_ONCE(!obj->pages))
                return;

        if (use_dma_api && obj->mapped) {
                struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

                /* detach backing and wait for the host to process it ... */
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
                dma_fence_wait(&fence->f, true);
                dma_fence_put(&fence->f);

                /* ... then tear down iommu mappings */
                dma_unmap_sg(vgdev->vdev->dev.parent,
                             obj->pages->sgl, obj->mapped,
                             DMA_TO_DEVICE);
                obj->mapped = 0;
        } else {
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
        }

        sg_free_table(obj->pages);
        obj->pages = NULL;

        drm_gem_shmem_unpin(&obj->base.base);
}
1131 | void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, |
1132 | struct virtio_gpu_output *output) | |
1133 | { | |
1134 | struct virtio_gpu_vbuffer *vbuf; | |
1135 | struct virtio_gpu_update_cursor *cur_p; | |
1136 | ||
1137 | output->cursor.pos.scanout_id = cpu_to_le32(output->index); | |
1138 | cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf); | |
1139 | memcpy(cur_p, &output->cursor, sizeof(output->cursor)); | |
1140 | virtio_gpu_queue_cursor(vgdev, vbuf); | |
1141 | } |