/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)
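
/*
 * Each slab object therefore packs the vbuffer bookkeeping struct together
 * with inline space for a small command (up to MAX_INLINE_CMD_SIZE bytes)
 * and a small response (up to MAX_INLINE_RESP_SIZE bytes), so the common
 * case needs a single allocation per command.
 */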

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}
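
/*
 * Both ack callbacks above are typically invoked from the vring interrupt
 * handler; they only schedule the dequeue work, so all actual response
 * processing happens later in process context.
 */
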
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}
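
/*
 * Resulting layout of a vbuffer slab object (sketch):
 *
 *   +----------------------------+ <- vbuf
 *   | struct virtio_gpu_vbuffer  |
 *   +----------------------------+ <- vbuf->buf
 *   | command (size bytes)       |
 *   +----------------------------+ <- vbuf->resp_buf, if the response
 *   | response (resp_size bytes) |    fits inline
 *   +----------------------------+
 *
 * Responses larger than MAX_INLINE_RESP_SIZE must be passed in by the
 * caller via resp_buf.
 */
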
static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* This assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}
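
/*
 * The fence_id scan above relies on the host completing fenced commands in
 * submission order (it warns if an older fence id shows up after a newer
 * one), so only the highest fence id seen in the batch needs to be handed
 * to virtio_gpu_fence_event_process().
 */
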
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}
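
/*
 * vmalloc memory is only virtually contiguous, so unlike kmalloc'ed
 * buffers it cannot be described with a single scatterlist entry; the
 * helper above looks up the backing page for every PAGE_SIZE chunk
 * instead.
 */
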
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct scatterlist *vout)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vresp;
	int outcnt = 0, incnt = 0;
	bool notify = false;
	int ret;

	if (!vgdev->vqs_ready)
		return notify;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vout) {
		sgs[outcnt + incnt] = vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	notify = virtqueue_kick_prepare(vq);

	return notify;
}
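
/*
 * Note that the helper above only *prepares* the kick: the notify decision
 * is returned to the caller, so the (potentially expensive) doorbell write
 * in virtqueue_notify() can be issued after the qlock has been dropped.
 */
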
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *vout = NULL, sg;
	struct sg_table *sgt = NULL;
	bool notify;
	int outcnt = 0;

	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &outcnt);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}
			vout = sgt->sgl;
		} else {
			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
			vout = &sg;
			outcnt = 1;
		}
	}

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 2 + outcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue,
			   vq->num_free >= 2 + outcnt);
		goto again;
	}

	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
	spin_unlock(&vgdev->ctrlq.qlock);
	if (notify) {
		if (vgdev->disable_notify)
			vgdev->pending_notify = true;
		else
			virtqueue_notify(vgdev->ctrlq.vq);
	}

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}
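
/*
 * The "2 + outcnt" reservation above covers the worst case: one descriptor
 * for the inline command, one for the inline response, plus outcnt
 * descriptors for the data scatterlist entries.
 */
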
void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = false;

	if (!vgdev->pending_notify)
		return;
	vgdev->pending_notify = false;
	virtqueue_notify(vgdev->ctrlq.vq);
}
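
/*
 * Illustrative batching pattern (hypothetical call site, arguments
 * elided): a caller emitting several commands back-to-back can coalesce
 * the doorbell into a single notify, roughly:
 *
 *	virtio_gpu_disable_notify(vgdev);
 *	virtio_gpu_cmd_set_scanout(vgdev, ...);
 *	virtio_gpu_cmd_resource_flush(vgdev, ...);
 *	virtio_gpu_enable_notify(vgdev);   <- one notify for the batch
 */
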
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}
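
/*
 * Cursor updates are queued without a response descriptor or fence (see
 * virtio_gpu_alloc_cursor() passing resp_size == 0), so they are
 * effectively fire-and-forget.
 */
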
/* Just create gem objects for userspace and long lived objects,
 * just use dma_alloc'ed pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
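
/*
 * The dma_sync_sg_for_device() above hands ownership of the pages back to
 * the device, so any CPU writes still sitting in caches (or in a bounce
 * buffer, depending on the platform) become visible to the host before
 * the transfer command is queued.
 */
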
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}
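
/*
 * Note: virtio_gpu_cmd_get_capset() only *queues* the request; the caller
 * is expected to wait on vgdev->resp_wq until cache_ent->is_valid is set
 * by virtio_gpu_cmd_capset_cb() above.
 */
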
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}
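
/*
 * Attach flow summary: pin the shmem pages, build an sg_table, map it for
 * DMA when the transport uses the DMA API, translate the entries into a
 * virtio_gpu_mem_entry array, and hand that array to the host via
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING.
 */
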
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (WARN_ON_ONCE(!obj->pages))
		return;

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}

	sg_free_table(obj->pages);
	obj->pages = NULL;

	drm_gem_shmem_unpin(&obj->base.base);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}