// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware. Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order. The GPU
 * scheduler will round-robin between clients to submit the next job.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
 * v3d_job_dependency() to manage the dependency between bin and
 * render, instead of having the clients submit jobs using the
 * HW's semaphores to interlock between them.
 */

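/*
 * How jobs enter these schedulers, as a hedged sketch rather than the
 * actual submission path (which lives in v3d_gem.c): each client
 * initializes a job against its own per-fd entity and pushes it,
 * assuming the drm_sched API contemporary with this code. "priv" and
 * its sched_entity[] array are hypothetical names here.
 *
 *	ret = drm_sched_job_init(&exec->bin.base,
 *				 &v3d->queue[V3D_BIN].sched,
 *				 &priv->sched_entity[V3D_BIN], priv);
 *	if (ret)
 *		return ret;
 *	drm_sched_entity_push_job(&exec->bin.base,
 *				  &priv->sched_entity[V3D_BIN]);
 */
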
#include <linux/kthread.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

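/* Scheduler hook: the job is complete (or canceled), so drop our
 * reference on the exec it came from.
 */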
static void
v3d_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_exec_put(job->exec);
}

/**
 * Returns the fences that the bin or render job depends on, one by
 * one. v3d_job_run() won't be called until all of them have been
 * signaled.
 */
static struct dma_fence *
v3d_job_dependency(struct drm_sched_job *sched_job,
		   struct drm_sched_entity *s_entity)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	struct dma_fence *fence;

	fence = job->in_fence;
	if (fence) {
		job->in_fence = NULL;
		return fence;
	}

	if (q == V3D_RENDER) {
		/* If we had a bin job, the render job definitely depends on
		 * it. We first have to wait for bin to be scheduled, so that
		 * its done_fence is created.
		 */
		fence = exec->bin_done_fence;
		if (fence) {
			exec->bin_done_fence = NULL;
			return fence;
		}
	}

	/* XXX: Wait on a fence for switching the GMP if necessary,
	 * and then do so.
	 */

	return fence;
}

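/* Scheduler hook: all of the job's dependency fences have signaled,
 * so hand the job to the hardware and return the fence that will
 * signal when it completes (or NULL on error).
 */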
static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
	struct v3d_dev *v3d = exec->v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (q == V3D_BIN) {
		v3d->bin_job = job->exec;

		/* Clear out the overflow allocation, so we don't
		 * reuse the overflow attached to a previous job.
		 */
		V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	} else {
		v3d->render_job = job->exec;
	}
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	/* Can we avoid this flush when q==RENDER? We need to be
	 * careful of scheduling, though -- imagine job0 rendering to
	 * texture and job1 reading, and them being executed as bin0,
	 * bin1, render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, q);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	if (q == V3D_BIN) {
		if (exec->qma) {
			V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma);
			V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms);
		}
		if (exec->qts) {
			V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
				       V3D_CLE_CT0QTS_ENABLE |
				       exec->qts);
		}
	} else {
		/* XXX: Set the QCFG */
	}

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end);

	return fence;
}

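/* Scheduler hook: a job has been running past the hang limit. Park
 * every queue's scheduler thread, reset the GPU, then requeue the
 * jobs that hadn't finished.
 */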
static void
v3d_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_exec_info *exec = job->exec;
	struct v3d_dev *v3d = exec->v3d;
	enum v3d_queue q;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;

		kthread_park(sched->thread);
		drm_sched_hw_job_reset(sched, (sched_job->sched == sched ?
					       sched_job : NULL));
	}

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	/* Unblock schedulers and restart their jobs. */
	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		drm_sched_job_recovery(&v3d->queue[q].sched);
		kthread_unpark(v3d->queue[q].sched.thread);
	}

	mutex_unlock(&v3d->reset_lock);
}

static const struct drm_sched_backend_ops v3d_sched_ops = {
	.dependency = v3d_job_dependency,
	.run_job = v3d_job_run,
	.timedout_job = v3d_job_timedout,
	.free_job = v3d_job_free
};

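/* Create one scheduler per hardware queue. hw_jobs_limit is 1 so
 * that, per the policy described in the DOC comment above, the
 * hardware is only ever handed one job at a time.
 */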
int
v3d_sched_init(struct v3d_dev *v3d)
{
	int hw_jobs_limit = 1;
	int job_hang_limit = 0;
	int hang_limit_ms = 500;
	int ret;

	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
			     &v3d_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_bin");
	if (ret) {
		dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
		return ret;
	}

	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
			     &v3d_sched_ops,
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     "v3d_render");
	if (ret) {
		dev_err(v3d->dev, "Failed to create render scheduler: %d.",
			ret);
		drm_sched_fini(&v3d->queue[V3D_BIN].sched);
		return ret;
	}

	return 0;
}

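/* Tear down the per-queue schedulers created by v3d_sched_init(). */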
void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++)
		drm_sched_fini(&v3d->queue[q].sched);
}