/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

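/* Convert a 3d box from the ioctl (cpu byte order) to the little-endian
 * layout the host expects.
 */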
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

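/* Virtqueue event callbacks, typically invoked from the transport's
 * interrupt handling: defer the actual dequeueing to a workqueue.
 */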
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

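/* Allocate a vbuffer from the slab cache.  Commands up to
 * MAX_INLINE_CMD_SIZE and responses up to MAX_INLINE_RESP_SIZE live in
 * the slack after the vbuffer itself; larger responses must be passed
 * in via resp_buf.
 */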
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

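/* Move all buffers the device has finished with from the virtqueue to
 * reclaim_list.  Called with the queue lock held.
 */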
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

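/* Work handler for the control queue: drain completed buffers, log
 * error responses, track the highest fence id seen, run per-buffer
 * response callbacks, then signal fences and free the buffers.
 */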
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

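/* Work handler for the cursor queue: cursor commands carry no fences or
 * response callbacks, so completed buffers are simply freed.
 */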
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

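/* Add a prepared sg list to the control virtqueue.  Waits until the
 * ring has room, emits the fence (if any) once the buffer's place in
 * the queue is fixed, then kicks the host if needed.
 */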
static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf,
				      struct virtio_gpu_fence *fence,
				      int elemcnt,
				      struct scatterlist **sgs,
				      int outcnt,
				      int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	bool notify = false;
	int ret;

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (!vgdev->vqs_ready) {
		spin_unlock(&vgdev->ctrlq.qlock);

		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		return;
	}

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	notify = virtqueue_kick_prepare(vq);

	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify) {
		if (vgdev->disable_notify)
			vgdev->pending_notify = true;
		else
			virtqueue_notify(vq);
	}
}

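/* Build the scatterlists for a control command: the command itself
 * (vcmd), an optional outgoing payload (vout, possibly an sg_table
 * built from a vmalloc'd buffer), and an optional response buffer
 * (vresp).
 */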
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
				  incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

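/* Notification batching: while notifications are disabled, kicks are
 * recorded in pending_notify and issued as a single virtqueue_notify()
 * when they are re-enabled.
 */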
void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = false;

	if (!vgdev->pending_notify)
		return;
	vgdev->pending_notify = false;
	virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* Gem objects are created only for userspace and other long-lived
 * objects; could the queue objects simply use dma_alloc'ed pages?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

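/* The object itself is cleaned up from the response callback above,
 * i.e. only after the host has acknowledged the unref.
 */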
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

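/* Response callbacks: these run from the dequeue work handler once the
 * host has filled in the response buffer.
 */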
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

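/* Query commands: each allocates a dedicated response buffer (these
 * responses are too big for the inline slot) and registers one of the
 * callbacks above.
 */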
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

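/* Fetch a capset into the driver's cache.  A new cache entry is
 * speculatively allocated, then discarded if another task raced us and
 * already added a matching entry under the lock.
 */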
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

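/* Pin the object's pages, build a mem-entry list from its sg table
 * (DMA-mapped if the transport uses the DMA API) and attach it as
 * backing store for the host resource.
 */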
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

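/* Push the current cursor state for one output to the host. */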
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}