/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
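
/* When the hangcheck timer decides the GPU has stopped making progress,
 * vc4_reset_work() snapshots the stuck bin/render jobs into a struct
 * vc4_hang_state (register state plus a reference on every BO involved)
 * before resetting the GPU.  Userspace can retrieve that snapshot once
 * through vc4_get_hang_state_ioctl() below, which hands the BOs back as
 * fresh GEM handles.
 */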
struct vc4_hang_state {
	struct drm_vc4_get_hang_state user_state;

	u32 bo_count;
	struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
	unsigned int i;

	for (i = 0; i < state->user_state.bo_count; i++)
		drm_gem_object_unreference_unlocked(state->bo[i]);

	kfree(state);
}

int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_hang_state *get_state = data;
	struct drm_vc4_get_hang_state_bo *bo_state;
	struct vc4_hang_state *kernel_state;
	struct drm_vc4_get_hang_state *state;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;
	u32 i;
	int ret = 0;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	if (!kernel_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return -ENOENT;
	}
	state = &kernel_state->user_state;

	/* If the user's array isn't big enough, just return the
	 * required array size.
	 */
	if (get_state->bo_count < state->bo_count) {
		get_state->bo_count = state->bo_count;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return 0;
	}

	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
	state->bo = get_state->bo;
	memcpy(get_state, state, sizeof(*state));

	bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
	if (!bo_state) {
		ret = -ENOMEM;
		goto err_free;
	}

	for (i = 0; i < state->bo_count; i++) {
		struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
		u32 handle;

		ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
					    &handle);

		if (ret) {
			state->bo_count = i - 1;
			goto err;
		}
		bo_state[i].handle = handle;
		bo_state[i].paddr = vc4_bo->base.paddr;
		bo_state[i].size = vc4_bo->base.base.size;
	}

	if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
			 bo_state,
			 state->bo_count * sizeof(*bo_state)))
		ret = -EFAULT;

	kfree(bo_state);

err_free:

	vc4_free_hang_state(dev, kernel_state);

err:
	return ret;
}
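
/* A minimal userspace sketch of driving the ioctl above (the fd and variable
 * names are illustrative; the two-call size-query pattern follows directly
 * from the bo_count check in vc4_get_hang_state_ioctl()):
 *
 *	struct drm_vc4_get_hang_state get = { 0 };
 *
 *	// First call with bo_count == 0: the kernel fills in the required count.
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 *
 *	struct drm_vc4_get_hang_state_bo *bos =
 *		calloc(get.bo_count, sizeof(*bos));
 *	get.bo = (uintptr_t)bos;
 *
 *	// Second call copies out the register dump and one entry per BO handle.
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);
 */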

static void
vc4_save_hang_state(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
	unsigned int i, j, unref_list_count, prev_idx;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
		return;

	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	exec[0] = vc4_first_bin_job(vc4);
	exec[1] = vc4_first_render_job(vc4);
	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	/* Get the bos from both binner and renderer into hang state. */
	state->bo_count = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		unref_list_count = 0;
		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
			unref_list_count++;
		state->bo_count += exec[i]->bo_count + unref_list_count;
	}

	kernel_state->bo = kcalloc(state->bo_count,
				   sizeof(*kernel_state->bo), GFP_ATOMIC);

	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	prev_idx = 0;
	for (i = 0; i < 2; i++) {
		if (!exec[i])
			continue;

		for (j = 0; j < exec[i]->bo_count; j++) {
			drm_gem_object_reference(&exec[i]->bo[j]->base);
			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
		}

		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
			drm_gem_object_reference(&bo->base.base);
			kernel_state->bo[j + prev_idx] = &bo->base.base;
			j++;
		}
		prev_idx = j + 1;
	}

	if (exec[0])
		state->start_bin = exec[0]->ct0ca;
	if (exec[1])
		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	state->ct0ca = V3D_READ(V3D_CTNCA(0));
	state->ct0ea = V3D_READ(V3D_CTNEA(0));

	state->ct1ca = V3D_READ(V3D_CTNCA(1));
	state->ct1ea = V3D_READ(V3D_CTNEA(1));

	state->ct0cs = V3D_READ(V3D_CTNCS(0));
	state->ct1cs = V3D_READ(V3D_CTNCS(1));

	state->ct0ra0 = V3D_READ(V3D_CT00RA0);
	state->ct1ra0 = V3D_READ(V3D_CT01RA0);

	state->bpca = V3D_READ(V3D_BPCA);
	state->bpcs = V3D_READ(V3D_BPCS);
	state->bpoa = V3D_READ(V3D_BPOA);
	state->bpos = V3D_READ(V3D_BPOS);

	state->vpmbase = V3D_READ(V3D_VPMBASE);

	state->dbge = V3D_READ(V3D_DBGE);
	state->fdbgo = V3D_READ(V3D_FDBGO);
	state->fdbgb = V3D_READ(V3D_FDBGB);
	state->fdbgr = V3D_READ(V3D_FDBGR);
	state->fdbgs = V3D_READ(V3D_FDBGS);
	state->errstat = V3D_READ(V3D_ERRSTAT);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
}

static void
vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	DRM_INFO("Resetting GPU.\n");

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power the device off and back on by dropping the
		 * reference on runtime PM.
		 */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	vc4_irq_reset(dev);

	/* Rearm the hangcheck -- another job might have been waiting
	 * for our hung one to get kicked off, and vc4_irq_reset()
	 * would have started it.
	 */
	vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, hangcheck.reset_work);

	vc4_save_hang_state(vc4->dev);

	vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	bin_exec = vc4_first_bin_job(vc4);
	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

	ct0ca = V3D_READ(V3D_CTNCA(0));
	ct1ca = V3D_READ(V3D_CTNCA(1));

	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
		if (bin_exec)
			bin_exec->last_ct0ca = ct0ca;
		if (render_exec)
			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	/* We've gone too long with no progress, reset. This has to
	 * be done from a work struct, since resetting can sleep and
	 * this timer hook isn't allowed to.
	 */
	schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_WRITE(V3D_CTNCA(thread), start);
	V3D_WRITE(V3D_CTNEA(thread), end);
}

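/* Waits for the GPU to pass the given seqno, i.e. for vc4->finished_seqno to
 * catch up with it.  A timeout_ns of 0 only polls (returning -ETIME if the
 * seqno hasn't been reached yet), ~0ull waits without a deadline, and any
 * other value bounds the sleep and fails with -ETIME on expiry.
 */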
int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
		   bool interruptible)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long timeout_expire;
	DEFINE_WAIT(wait);

	if (vc4->finished_seqno >= seqno)
		return 0;

	if (timeout_ns == 0)
		return -ETIME;

	timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

	trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE :
				TASK_UNINTERRUPTIBLE);

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (vc4->finished_seqno >= seqno)
			break;

		if (timeout_ns != ~0ull) {
			if (time_after_eq(jiffies, timeout_expire)) {
				ret = -ETIME;
				break;
			}
			schedule_timeout(timeout_expire - jiffies);
		} else {
			schedule();
		}
	}

	finish_wait(&vc4->job_wait_queue, &wait);
	trace_vc4_wait_for_seqno_end(dev, seqno);

	return ret;
}

static void
vc4_flush_caches(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Flush the GPU L2 caches. These caches sit on top of system
	 * L3 (the 128kb or so shared with the CPU), and are
	 * non-allocating in the L3.
	 */
	V3D_WRITE(V3D_L2CACTL,
		  V3D_L2CACTL_L2CCLR);

	V3D_WRITE(V3D_SLCACTL,
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Sets the registers for the next job to actually be executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec;

again:
	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	vc4_flush_caches(dev);

	/* Either put the job in the binner if it uses the binner, or
	 * immediately move it to the to-be-rendered queue.
	 */
	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
	} else {
		vc4_move_job_to_render(dev, exec);
		goto again;
	}
}

void
vc4_submit_next_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

void
vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}

static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
	struct vc4_bo *bo;
	unsigned i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_vc4_bo(&exec->bo[i]->base);
		bo->seqno = seqno;
	}

	list_for_each_entry(bo, &exec->unref_list, unref_head) {
		bo->seqno = seqno;
	}

	for (i = 0; i < exec->rcl_write_bo_count; i++) {
		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
		bo->write_seqno = seqno;
	}
}

/* Queues a struct vc4_exec_info for execution. If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time. To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address. That's a change for a later date,
 * though.
 */
static void
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint64_t seqno;
	unsigned long irqflags;

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;
	vc4_update_bo_seqnos(exec, seqno);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off. Otherwise, it'll
	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
	if (vc4_first_bin_job(vc4) == exec) {
		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

/**
 * Looks up a bunch of GEM handles for BOs and stores the array for
 * use in the command validator that actually writes relocated
 * addresses pointing to them.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	uint32_t *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_ERROR("Rendering requires BOs to validate\n");
		return -EINVAL;
	}

	exec->bo = drm_calloc_large(exec->bo_count,
				    sizeof(struct drm_gem_cma_object *));
	if (!exec->bo) {
		DRM_ERROR("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
	if (!handles) {
		ret = -ENOMEM;
		DRM_ERROR("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(uint32_t))) {
		ret = -EFAULT;
		DRM_ERROR("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_ERROR("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -EINVAL;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_reference(bo);
		exec->bo[i] = (struct drm_gem_cma_object *)bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	drm_free_large(handles);
	return ret;
}

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct drm_vc4_submit_cl *args = exec->args;
	void *temp = NULL;
	void *bin;
	int ret = 0;
	uint32_t bin_offset = 0;
	uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
					     16);
	uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
	uint32_t exec_size = uniforms_offset + args->uniforms_size;
	uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
					  args->shader_rec_count);
	struct vc4_bo *bo;

	if (shader_rec_offset < args->bin_cl_size ||
	    uniforms_offset < shader_rec_offset ||
	    exec_size < uniforms_offset ||
	    args->shader_rec_count >= (UINT_MAX /
					  sizeof(struct vc4_shader_state)) ||
	    temp_size < exec_size) {
		DRM_ERROR("overflow in exec arguments\n");
		ret = -EINVAL;
		goto fail;
	}
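
	/* Layout of the temporary allocation sized above.  All offsets are
	 * computed with 32-bit arithmetic, which is why each check above
	 * guards against wraparound of the running total (and bounds
	 * shader_rec_count so the final multiply can't overflow):
	 *
	 *   [0, bin_cl_size)              copied-in binner command list
	 *   [shader_rec_offset, ...)      copied-in shader records (16-byte
	 *                                 aligned)
	 *   [uniforms_offset, exec_size)  copied-in uniforms
	 *   [exec_size, temp_size)        vc4_shader_state array used during
	 *                                 validation
	 */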

	/* Allocate space where we'll store the copied in user command lists
	 * and shader records.
	 *
	 * We don't just copy directly into the BOs because we need to
	 * read the contents back for validation, and I think the
	 * bo->vaddr is uncached access.
	 */
	temp = drm_malloc_ab(temp_size, 1);
	if (!temp) {
		DRM_ERROR("Failed to allocate storage for copying "
			  "in bin/render CLs.\n");
		ret = -ENOMEM;
		goto fail;
	}
	bin = temp + bin_offset;
	exec->shader_rec_u = temp + shader_rec_offset;
	exec->uniforms_u = temp + uniforms_offset;
	exec->shader_state = temp + exec_size;
	exec->shader_state_size = args->shader_rec_count;

	if (copy_from_user(bin,
			   (void __user *)(uintptr_t)args->bin_cl,
			   args->bin_cl_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->shader_rec_u,
			   (void __user *)(uintptr_t)args->shader_rec,
			   args->shader_rec_size)) {
		ret = -EFAULT;
		goto fail;
	}

	if (copy_from_user(exec->uniforms_u,
			   (void __user *)(uintptr_t)args->uniforms,
			   args->uniforms_size)) {
		ret = -EFAULT;
		goto fail;
	}

	bo = vc4_bo_create(dev, exec_size, true);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate BO for binning\n");
		ret = PTR_ERR(bo);
		goto fail;
	}
	exec->exec_bo = &bo->base;

	list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
		      &exec->unref_list);

	exec->ct0ca = exec->exec_bo->paddr + bin_offset;

	exec->bin_u = bin;

	exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
	exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
	exec->shader_rec_size = args->shader_rec_size;

	exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
	exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
	exec->uniforms_size = args->uniforms_size;

	ret = vc4_validate_bin_cl(dev,
				  exec->exec_bo->vaddr + bin_offset,
				  bin,
				  exec);
	if (ret)
		goto fail;

	ret = vc4_validate_shader_recs(dev, exec);
	if (ret)
		goto fail;

	/* Block waiting on any previous rendering into the CS's VBO,
	 * IB, or textures, so that pixels are actually written by the
	 * time we try to read them.
	 */
	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);

fail:
	drm_free_large(temp);
	return ret;
}

static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned i;

	if (exec->bo) {
		for (i = 0; i < exec->bo_count; i++)
			drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
		drm_free_large(exec->bo);
	}

	while (!list_empty(&exec->unref_list)) {
		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
						     struct vc4_bo, unref_head);
		list_del(&bo->unref_head);
		drm_gem_object_unreference_unlocked(&bo->base.base);
	}

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);

	kfree(exec);
}

void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
	unsigned long irqflags;
	struct vc4_seqno_cb *cb, *cb_temp;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(vc4->dev, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}

	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
		if (cb->seqno <= vc4->finished_seqno) {
			list_del_init(&cb->work.entry);
			schedule_work(&cb->work);
		}
	}

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

	cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb))
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = 0;
	unsigned long irqflags;

	cb->func = func;
	INIT_WORK(&cb->work, vc4_seqno_cb_work);

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return ret;
}
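
/* A minimal in-kernel sketch of using the callback above (the embedding
 * struct and handler names are illustrative, not part of this file):
 *
 *	struct my_flip_state {
 *		struct vc4_seqno_cb cb;
 *		// ... whatever the completion handler needs ...
 *	};
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *		// runs from the workqueue once finished_seqno >= seqno
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */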

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, job_done_work);

	vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
				uint64_t seqno,
				uint64_t *timeout_ns)
{
	unsigned long start = jiffies;
	int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

	if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
		uint64_t delta = jiffies_to_nsecs(jiffies - start);

		if (*timeout_ns >= delta)
			*timeout_ns -= delta;
	}

	return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_vc4_wait_seqno *args = data;

	return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
					       &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_vc4_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_vc4_bo(gem_obj);

	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
					      &args->timeout_ns);

	drm_gem_object_unreference_unlocked(gem_obj);
	return ret;
}

/**
 * Submits a command list to the VC4.
 *
 * This is what is called batchbuffer emitting on other hardware.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_submit_cl *args = data;
	struct vc4_exec_info *exec;
	int ret = 0;

	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec) {
		DRM_ERROR("malloc failure on exec struct\n");
		return -ENOMEM;
	}

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0)
		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	mutex_unlock(&vc4->power_lock);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	exec->args = args;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = vc4_cl_lookup_bos(dev, file_priv, exec);
	if (ret)
		goto fail;

	if (exec->args->bin_cl_size != 0) {
		ret = vc4_get_bcl(dev, exec);
		if (ret)
			goto fail;
	} else {
		exec->ct0ca = 0;
		exec->ct0ea = 0;
	}

	ret = vc4_get_rcl(dev, exec);
	if (ret)
		goto fail;

	/* Clear this out of the struct we'll be putting in the queue,
	 * since it's part of our stack.
	 */
	exec->args = NULL;

	vc4_queue_submit(dev, exec);

	/* Return the seqno for our job. */
	args->seqno = vc4->emit_seqno;

	return 0;

fail:
	vc4_complete_exec(vc4->dev, exec);

	return ret;
}
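
/* A minimal userspace sketch of driving vc4_submit_cl_ioctl() and
 * vc4_wait_seqno_ioctl() (fd and buffer names are illustrative; only the
 * fields this file touches are shown, the render-target fields from the
 * uapi header and all error handling are omitted):
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl,
 *		.bin_cl_size = bin_cl_size,
 *		.shader_rec = (uintptr_t)shader_recs,
 *		.shader_rec_size = shader_rec_size,
 *		.shader_rec_count = shader_rec_count,
 *		.uniforms = (uintptr_t)uniforms,
 *		.uniforms_size = uniforms_size,
 *		.bo_handles = (uintptr_t)bo_handles,
 *		.bo_handle_count = bo_handle_count,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *
 *	// The kernel wrote the job's seqno back into the struct.
 *	struct drm_vc4_wait_seqno wait = {
 *		.seqno = submit.seqno,
 *		.timeout_ns = ~0ull,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 */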

void
vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	INIT_LIST_HEAD(&vc4->bin_job_list);
	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);

	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
	setup_timer(&vc4->hangcheck.timer,
		    vc4_hangcheck_elapsed,
		    (unsigned long)dev);

	INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

	mutex_init(&vc4->power_lock);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Waiting for exec to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

	/* V3D should already have disabled its interrupt and cleared
	 * the overflow allocation registers. Now free the object.
	 */
	if (vc4->overflow_mem) {
		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
		vc4->overflow_mem = NULL;
	}

	if (vc4->hang_state)
		vc4_free_hang_state(dev, vc4->hang_state);

	vc4_bo_cache_destroy(dev);
}