/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/io.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"
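
/* Hang handling overview: every time a job is kicked off, the
 * hangcheck timer below is (re)armed to fire roughly 100ms out.  When
 * it fires, vc4_hangcheck_elapsed() compares the V3D control-list
 * program counters against their last observed values; if neither
 * thread has advanced, the job is presumed hung and reset work is
 * scheduled.
 */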

static void
vc4_queue_hangcheck(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mod_timer(&vc4->hangcheck.timer,
                  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
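
/* A single snapshot of the GPU state at hang time is stashed in
 * vc4->hang_state and kept until userspace collects it through
 * vc4_get_hang_state_ioctl() below; a newer hang does not overwrite
 * an uncollected snapshot (see the end of vc4_save_hang_state()).
 */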

struct vc4_hang_state {
        struct drm_vc4_get_hang_state user_state;

        u32 bo_count;
        struct drm_gem_object **bo;
};

static void
vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
        unsigned int i;

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < state->user_state.bo_count; i++)
                drm_gem_object_unreference(state->bo[i]);
        mutex_unlock(&dev->struct_mutex);

        kfree(state);
}

int
vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vc4_get_hang_state *get_state = data;
        struct drm_vc4_get_hang_state_bo *bo_state;
        struct vc4_hang_state *kernel_state;
        struct drm_vc4_get_hang_state *state;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned long irqflags;
        u32 i;
        int ret = 0;

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        kernel_state = vc4->hang_state;
        if (!kernel_state) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return -ENOENT;
        }

        state = &kernel_state->user_state;

        /* If the user's array isn't big enough, just return the
         * required array size.
         */
        if (get_state->bo_count < state->bo_count) {
                get_state->bo_count = state->bo_count;
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return 0;
        }

        vc4->hang_state = NULL;
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        /* Save the user's BO pointer, so we don't stomp it with the memcpy. */
        state->bo = get_state->bo;
        memcpy(get_state, state, sizeof(*state));

        bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
        if (!bo_state) {
                ret = -ENOMEM;
                goto err_free;
        }

        for (i = 0; i < state->bo_count; i++) {
                struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
                u32 handle;

                ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
                                            &handle);
                if (ret) {
                        state->bo_count = i - 1;
                        goto err;
                }
                bo_state[i].handle = handle;
                bo_state[i].paddr = vc4_bo->base.paddr;
                bo_state[i].size = vc4_bo->base.base.size;
        }

        if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
                         bo_state,
                         state->bo_count * sizeof(*bo_state)))
                ret = -EFAULT;

err:
        kfree(bo_state);

err_free:
        vc4_free_hang_state(dev, kernel_state);

        return ret;
}
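
/* Hypothetical userspace sketch of the two-pass protocol the ioctl
 * above implements (not from this file): pass 1 with bo_count = 0
 * learns the required array size, pass 2 fetches the snapshot.
 *
 *      struct drm_vc4_get_hang_state get = { 0 };
 *      struct drm_vc4_get_hang_state_bo *bos;
 *
 *      ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);  // fills bo_count
 *      bos = calloc(get.bo_count, sizeof(*bos));
 *      get.bo = (uintptr_t)bos;
 *      ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &get);  // registers + BO handles
 */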

static void
vc4_save_hang_state(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_get_hang_state *state;
        struct vc4_hang_state *kernel_state;
        struct vc4_exec_info *exec;
        struct vc4_bo *bo;
        unsigned long irqflags;
        unsigned int i, unref_list_count;

        kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
        if (!kernel_state)
                return;

        state = &kernel_state->user_state;

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        exec = vc4_first_job(vc4);
        if (!exec) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                kfree(kernel_state);
                return;
        }

        unref_list_count = 0;
        list_for_each_entry(bo, &exec->unref_list, unref_head)
                unref_list_count++;

        state->bo_count = exec->bo_count + unref_list_count;
        kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
                                   GFP_ATOMIC);
        if (!kernel_state->bo) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                kfree(kernel_state);
                return;
        }

        for (i = 0; i < exec->bo_count; i++) {
                drm_gem_object_reference(&exec->bo[i]->base);
                kernel_state->bo[i] = &exec->bo[i]->base;
        }

        list_for_each_entry(bo, &exec->unref_list, unref_head) {
                drm_gem_object_reference(&bo->base.base);
                kernel_state->bo[i] = &bo->base.base;
                i++;
        }

        state->start_bin = exec->ct0ca;
        state->start_render = exec->ct1ca;

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        state->ct0ca = V3D_READ(V3D_CTNCA(0));
        state->ct0ea = V3D_READ(V3D_CTNEA(0));

        state->ct1ca = V3D_READ(V3D_CTNCA(1));
        state->ct1ea = V3D_READ(V3D_CTNEA(1));

        state->ct0cs = V3D_READ(V3D_CTNCS(0));
        state->ct1cs = V3D_READ(V3D_CTNCS(1));

        state->ct0ra0 = V3D_READ(V3D_CT00RA0);
        state->ct1ra0 = V3D_READ(V3D_CT01RA0);

        state->bpca = V3D_READ(V3D_BPCA);
        state->bpcs = V3D_READ(V3D_BPCS);
        state->bpoa = V3D_READ(V3D_BPOA);
        state->bpos = V3D_READ(V3D_BPOS);

        state->vpmbase = V3D_READ(V3D_VPMBASE);

        state->dbge = V3D_READ(V3D_DBGE);
        state->fdbgo = V3D_READ(V3D_FDBGO);
        state->fdbgb = V3D_READ(V3D_FDBGB);
        state->fdbgr = V3D_READ(V3D_FDBGR);
        state->fdbgs = V3D_READ(V3D_FDBGS);
        state->errstat = V3D_READ(V3D_ERRSTAT);

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        if (vc4->hang_state) {
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                vc4_free_hang_state(dev, kernel_state);
        } else {
                vc4->hang_state = kernel_state;
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        }
}

static void
vc4_reset(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        DRM_INFO("Resetting GPU.\n");
        vc4_v3d_set_power(vc4, false);
        vc4_v3d_set_power(vc4, true);

        vc4_irq_reset(dev);

        /* Rearm the hangcheck -- another job might have been waiting
         * for our hung one to get kicked off, and vc4_irq_reset()
         * would have started it.
         */
        vc4_queue_hangcheck(dev);
}

static void
vc4_reset_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, hangcheck.reset_work);

        vc4_save_hang_state(vc4->dev);

        vc4_reset(vc4->dev);
}

static void
vc4_hangcheck_elapsed(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint32_t ct0ca, ct1ca;

        /* If idle, we can stop watching for hangs. */
        if (list_empty(&vc4->job_list))
                return;

        ct0ca = V3D_READ(V3D_CTNCA(0));
        ct1ca = V3D_READ(V3D_CTNCA(1));

        /* If we've made any progress in execution, rearm the timer
         * and wait.
         */
        if (ct0ca != vc4->hangcheck.last_ct0ca ||
            ct1ca != vc4->hangcheck.last_ct1ca) {
                vc4->hangcheck.last_ct0ca = ct0ca;
                vc4->hangcheck.last_ct1ca = ct1ca;
                vc4_queue_hangcheck(dev);
                return;
        }

        /* We've gone too long with no progress, reset.  This has to
         * be done from a work struct, since resetting can sleep and
         * this timer hook isn't allowed to.
         */
        schedule_work(&vc4->hangcheck.reset_work);
}

static void
submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        /* Set the current and end address of the control list.
         * Writing the end register is what starts the job.
         */
        V3D_WRITE(V3D_CTNCA(thread), start);
        V3D_WRITE(V3D_CTNEA(thread), end);
}
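
/* In this driver thread 0 runs the binning control list and thread 1
 * runs the rendering control list; vc4_submit_next_job() below submits
 * exec->ct0ca/ct0ea on thread 0 and exec->ct1ca/ct1ea on thread 1.
 */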

int
vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
                   bool interruptible)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret = 0;
        unsigned long timeout_expire;
        DEFINE_WAIT(wait);

        if (vc4->finished_seqno >= seqno)
                return 0;

        if (timeout_ns == 0)
                return -ETIME;

        timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns);

        trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);
        for (;;) {
                prepare_to_wait(&vc4->job_wait_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE :
                                TASK_UNINTERRUPTIBLE);

                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (vc4->finished_seqno >= seqno)
                        break;

                if (timeout_ns != ~0ull) {
                        if (time_after_eq(jiffies, timeout_expire)) {
                                ret = -ETIME;
                                break;
                        }
                        schedule_timeout(timeout_expire - jiffies);
                } else {
                        schedule();
                }
        }

        finish_wait(&vc4->job_wait_queue, &wait);
        trace_vc4_wait_for_seqno_end(dev, seqno);

        if (ret && ret != -ERESTARTSYS) {
                DRM_ERROR("timeout waiting for render thread idle\n");
                return ret;
        }

        return 0;
}
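
/* Seqnos are the driver's fence mechanism: vc4->emit_seqno counts
 * jobs as they are queued, and vc4->finished_seqno records how many
 * have retired.  The waiter above assumes finished_seqno is advanced
 * elsewhere (presumably the V3D frame-done interrupt handler), which
 * also wakes job_wait_queue.
 */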

static void
vc4_flush_caches(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        /* Flush the GPU L2 caches.  These caches sit on top of system
         * L3 (the 128kb or so shared with the CPU), and are
         * non-allocating in the L3.
         */
        V3D_WRITE(V3D_L2CACTL,
                  V3D_L2CACTL_L2CCLR);

        V3D_WRITE(V3D_SLCACTL,
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
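
/* The flush above runs before each job (see vc4_submit_next_job()),
 * presumably so the GPU doesn't serve stale instruction, uniform, or
 * texture cache lines for command lists and shader records the CPU
 * has just written.
 */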

/* Sets the registers for the next job to be actually executed in
 * the hardware.
 *
 * The job_lock should be held during this.
 */
void
vc4_submit_next_job(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_exec_info *exec = vc4_first_job(vc4);

        if (!exec)
                return;

        vc4_flush_caches(dev);

        /* Disable the binner's pre-loaded overflow memory address */
        V3D_WRITE(V3D_BPOA, 0);
        V3D_WRITE(V3D_BPOS, 0);

        if (exec->ct0ca != exec->ct0ea)
                submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
        submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
        struct vc4_bo *bo;
        unsigned i;

        for (i = 0; i < exec->bo_count; i++) {
                bo = to_vc4_bo(&exec->bo[i]->base);
                bo->seqno = seqno;
        }

        list_for_each_entry(bo, &exec->unref_list, unref_head) {
                bo->seqno = seqno;
        }
}
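
/* Stamping each BO with the job's seqno is what lets
 * vc4_wait_bo_ioctl() translate "wait on this buffer" into "wait for
 * the last job that referenced it".
 */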

/* Queues a struct vc4_exec_info for execution.  If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time.  To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address.  That's a change for a later date,
 * though.
 */
static void
vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        uint64_t seqno;
        unsigned long irqflags;

        spin_lock_irqsave(&vc4->job_lock, irqflags);

        seqno = ++vc4->emit_seqno;
        exec->seqno = seqno;
        vc4_update_bo_seqnos(exec, seqno);

        list_add_tail(&exec->head, &vc4->job_list);

        /* If no job was executing, kick ours off.  Otherwise, it'll
         * get started when the previous job's frame done interrupt
         * occurs.
         */
        if (vc4_first_job(vc4) == exec) {
                vc4_submit_next_job(dev);
                vc4_queue_hangcheck(dev);
        }

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

/**
 * Looks up a bunch of GEM handles for BOs and stores the array for
 * use in the command validator that actually writes relocated
 * addresses pointing to them.
 */
static int
vc4_cl_lookup_bos(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct vc4_exec_info *exec)
{
        struct drm_vc4_submit_cl *args = exec->args;
        uint32_t *handles;
        int ret = 0;
        int i;

        exec->bo_count = args->bo_handle_count;

        if (!exec->bo_count) {
                /* See comment on bo_index for why we have to check
                 * this.
                 */
                DRM_ERROR("Rendering requires BOs to validate\n");
                return -EINVAL;
        }

        exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
                           GFP_KERNEL);
        if (!exec->bo) {
                DRM_ERROR("Failed to allocate validated BO pointers\n");
                return -ENOMEM;
        }

        handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
        if (!handles) {
                DRM_ERROR("Failed to allocate incoming GEM handles\n");
                ret = -ENOMEM;
                goto fail;
        }

        ret = copy_from_user(handles,
                             (void __user *)(uintptr_t)args->bo_handles,
                             exec->bo_count * sizeof(uint32_t));
        if (ret) {
                DRM_ERROR("Failed to copy in GEM handles\n");
                ret = -EFAULT;
                goto fail;
        }

        spin_lock(&file_priv->table_lock);
        for (i = 0; i < exec->bo_count; i++) {
                struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
                                                     handles[i]);
                if (!bo) {
                        DRM_ERROR("Failed to look up GEM BO %d: %d\n",
                                  i, handles[i]);
                        ret = -EINVAL;
                        spin_unlock(&file_priv->table_lock);
                        goto fail;
                }
                drm_gem_object_reference(bo);
                exec->bo[i] = (struct drm_gem_cma_object *)bo;
        }
        spin_unlock(&file_priv->table_lock);

fail:
        drm_free_large(handles);
        return ret;
}
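
/* The references taken in vc4_cl_lookup_bos() keep the BOs alive for
 * the lifetime of the job; vc4_complete_exec() drops them once the
 * job has retired.
 */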

static int
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
        struct drm_vc4_submit_cl *args = exec->args;
        void *temp = NULL;
        void *bin;
        int ret = 0;
        uint32_t bin_offset = 0;
        uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size,
                                             16);
        uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size;
        uint32_t exec_size = uniforms_offset + args->uniforms_size;
        uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) *
                                          args->shader_rec_count);
        struct vc4_bo *bo;

        if (uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                       sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_ERROR("overflow in exec arguments\n");
                return -EINVAL;
        }

        /* Allocate space where we'll store the copied in user command lists
         * and shader records.
         *
         * We don't just copy directly into the BOs because we need to
         * read the contents back for validation, and I think the
         * bo->vaddr is uncached access.
         */
        temp = kmalloc(temp_size, GFP_KERNEL);
        if (!temp) {
                DRM_ERROR("Failed to allocate storage for copying "
                          "in bin/render CLs.\n");
                ret = -ENOMEM;
                goto fail;
        }
        bin = temp + bin_offset;
        exec->shader_rec_u = temp + shader_rec_offset;
        exec->uniforms_u = temp + uniforms_offset;
        exec->shader_state = temp + exec_size;
        exec->shader_state_size = args->shader_rec_count;

        if (copy_from_user(bin,
                           (void __user *)(uintptr_t)args->bin_cl,
                           args->bin_cl_size)) {
                ret = -EFAULT;
                goto fail;
        }

        if (copy_from_user(exec->shader_rec_u,
                           (void __user *)(uintptr_t)args->shader_rec,
                           args->shader_rec_size)) {
                ret = -EFAULT;
                goto fail;
        }

        if (copy_from_user(exec->uniforms_u,
                           (void __user *)(uintptr_t)args->uniforms,
                           args->uniforms_size)) {
                ret = -EFAULT;
                goto fail;
        }

        bo = vc4_bo_create(dev, exec_size, true);
        if (!bo) {
                DRM_ERROR("Couldn't allocate BO for binning\n");
                ret = -ENOMEM;
                goto fail;
        }
        exec->exec_bo = &bo->base;

        list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
                      &exec->unref_list);

        exec->ct0ca = exec->exec_bo->paddr + bin_offset;

        exec->bin_u = bin;

        exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
        exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
        exec->shader_rec_size = args->shader_rec_size;

        exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
        exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
        exec->uniforms_size = args->uniforms_size;

        ret = vc4_validate_bin_cl(dev,
                                  exec->exec_bo->vaddr + bin_offset,
                                  bin,
                                  exec);
        if (ret)
                goto fail;

        ret = vc4_validate_shader_recs(dev, exec);

fail:
        kfree(temp);
        return ret;
}
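
/* Layout that vc4_get_bcl() above builds in the single exec BO, using
 * the offsets computed at the top of the function:
 *
 *      [ bin CL | shader records (16-byte aligned) | uniforms ]
 *
 * The kmalloc'd "temp" staging buffer holds the same data plus the
 * vc4_shader_state array past exec_size.
 */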

static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
        unsigned i;

        /* Need the struct lock for drm_gem_object_unreference(). */
        mutex_lock(&dev->struct_mutex);
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++)
                        drm_gem_object_unreference(&exec->bo[i]->base);
                kfree(exec->bo);
        }

        while (!list_empty(&exec->unref_list)) {
                struct vc4_bo *bo = list_first_entry(&exec->unref_list,
                                                     struct vc4_bo, unref_head);
                list_del(&bo->unref_head);
                drm_gem_object_unreference(&bo->base.base);
        }
        mutex_unlock(&dev->struct_mutex);

        kfree(exec);
}

void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
        unsigned long irqflags;
        struct vc4_seqno_cb *cb, *cb_temp;

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        while (!list_empty(&vc4->job_done_list)) {
                struct vc4_exec_info *exec =
                        list_first_entry(&vc4->job_done_list,
                                         struct vc4_exec_info, head);
                list_del(&exec->head);

                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                vc4_complete_exec(vc4->dev, exec);
                spin_lock_irqsave(&vc4->job_lock, irqflags);
        }

        list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
                if (cb->seqno <= vc4->finished_seqno) {
                        list_del_init(&cb->work.entry);
                        schedule_work(&cb->work);
                }
        }

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}

static void vc4_seqno_cb_work(struct work_struct *work)
{
        struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);

        cb->func(cb);
}

int vc4_queue_seqno_cb(struct drm_device *dev,
                       struct vc4_seqno_cb *cb, uint64_t seqno,
                       void (*func)(struct vc4_seqno_cb *cb))
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret = 0;
        unsigned long irqflags;

        cb->func = func;
        INIT_WORK(&cb->work, vc4_seqno_cb_work);

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        if (seqno > vc4->finished_seqno) {
                cb->seqno = seqno;
                list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
        } else {
                schedule_work(&cb->work);
        }
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        return ret;
}
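
/* vc4_queue_seqno_cb() is the async counterpart to
 * vc4_wait_for_seqno(): the callback runs from a workqueue once
 * vc4_job_handle_completed() sees the seqno retire, or is scheduled
 * immediately if the seqno has already passed.
 */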

/* Scheduled when any job has been completed, this walks the list of
 * jobs that had completed and unrefs their BOs and frees their exec
 * structs.
 */
static void
vc4_job_done_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, job_done_work);

        vc4_job_handle_completed(vc4);
}

static int
vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev,
                                uint64_t seqno,
                                uint64_t *timeout_ns)
{
        unsigned long start = jiffies;
        int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true);

        if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) {
                uint64_t delta = jiffies_to_nsecs(jiffies - start);

                if (*timeout_ns >= delta)
                        *timeout_ns -= delta;
        }

        return ret;
}

int
vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_vc4_wait_seqno *args = data;

        return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
                                               &args->timeout_ns);
}

int
vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        int ret;
        struct drm_vc4_wait_bo *args = data;
        struct drm_gem_object *gem_obj;
        struct vc4_bo *bo;

        gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!gem_obj) {
                DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
                return -EINVAL;
        }
        bo = to_vc4_bo(gem_obj);

        ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
                                              &args->timeout_ns);

        drm_gem_object_unreference_unlocked(gem_obj);
        return ret;
}

/**
 * Submits a command list to the VC4.
 *
 * This is what is called batchbuffer emitting on other hardware.
 */
int
vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_submit_cl *args = data;
        struct vc4_exec_info *exec;
        int ret;

        if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
                DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
                return -EINVAL;
        }

        exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
        if (!exec) {
                DRM_ERROR("malloc failure on exec struct\n");
                return -ENOMEM;
        }

        exec->args = args;
        INIT_LIST_HEAD(&exec->unref_list);

        ret = vc4_cl_lookup_bos(dev, file_priv, exec);
        if (ret)
                goto fail;

        if (exec->args->bin_cl_size != 0) {
                ret = vc4_get_bcl(dev, exec);
                if (ret)
                        goto fail;
        } else {
                exec->ct0ca = 0;
                exec->ct0ea = 0;
        }

        ret = vc4_get_rcl(dev, exec);
        if (ret)
                goto fail;

        /* Clear this out of the struct we'll be putting in the queue,
         * since it's part of our stack.
         */
        exec->args = NULL;

        vc4_queue_submit(dev, exec);

        /* Return the seqno for our job. */
        args->seqno = vc4->emit_seqno;

        return 0;

fail:
        vc4_complete_exec(vc4->dev, exec);

        return ret;
}
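
/* Submission in short: vc4_submit_cl_ioctl() looks up the BOs, copies
 * in and validates the binning CL (vc4_get_bcl()), generates the
 * render CL (vc4_get_rcl()), and hands the job to vc4_queue_submit(),
 * which assigns a seqno and kicks the hardware if it was idle.
 * Completed jobs are torn down from vc4_job_done_work().
 */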

void
vc4_gem_init(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        INIT_LIST_HEAD(&vc4->job_list);
        INIT_LIST_HEAD(&vc4->job_done_list);
        INIT_LIST_HEAD(&vc4->seqno_cb_list);
        spin_lock_init(&vc4->job_lock);

        INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
        setup_timer(&vc4->hangcheck.timer,
                    vc4_hangcheck_elapsed,
                    (unsigned long)dev);

        INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
}

void
vc4_gem_destroy(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        /* Waiting for exec to finish would need to be done before
         * unregistering V3D.
         */
        WARN_ON(vc4->emit_seqno != vc4->finished_seqno);

        /* V3D should already have disabled its interrupt and cleared
         * the overflow allocation registers.  Now free the object.
         */
        if (vc4->overflow_mem) {
                drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
                vc4->overflow_mem = NULL;
        }

        vc4_bo_cache_destroy(dev);

        if (vc4->hang_state)
                vc4_free_hang_state(dev, vc4->hang_state);
}