/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"


/*
 * Power Management:
 */

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
        if (gpu->bus_scale_table) {
                gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
                DBG("bus scale client: %08x", gpu->bsc);
        }
}

static void bs_fini(struct msm_gpu *gpu)
{
        if (gpu->bsc) {
                msm_bus_scale_unregister_client(gpu->bsc);
                gpu->bsc = 0;
        }
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
        if (gpu->bsc) {
                DBG("set bus scaling: %d", idx);
                msm_bus_scale_client_update_request(gpu->bsc, idx);
        }
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

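/*
 * The "vdd" (gpu_reg) and "vddcx" (gpu_cx) regulators are both optional:
 * either pointer may be NULL if the regulator was not found at init time,
 * so each one is checked before use.
 */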
static int enable_pwrrail(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret = 0;

        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                if (ret) {
                        dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
                        return ret;
                }
        }

        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                if (ret) {
                        dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
        if (gpu->gpu_cx)
                regulator_disable(gpu->gpu_cx);
        if (gpu->gpu_reg)
                regulator_disable(gpu->gpu_reg);
        return 0;
}

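/*
 * Clock handling: grp_clks[0] is the core clock (scaled between fast_rate
 * and slow_rate) and grp_clks[2] is the rbbmtimer clock (19.2MHz while the
 * GPU is up).  Prepare and enable are done in two separate passes, walking
 * the clock array from the end so that the core clock is handled last.
 */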
static int enable_clk(struct msm_gpu *gpu)
{
        int i;

        if (gpu->grp_clks[0] && gpu->fast_rate)
                clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);

        /* Set the RBBM timer rate to 19.2MHz */
        if (gpu->grp_clks[2])
                clk_set_rate(gpu->grp_clks[2], 19200000);

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_prepare(gpu->grp_clks[i]);

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_enable(gpu->grp_clks[i]);

        return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
        int i;

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_disable(gpu->grp_clks[i]);

        for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_unprepare(gpu->grp_clks[i]);

        if (gpu->grp_clks[0] && gpu->slow_rate)
                clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);

        if (gpu->grp_clks[2])
                clk_set_rate(gpu->grp_clks[2], 0);

        return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
        if (gpu->ebi1_clk)
                clk_prepare_enable(gpu->ebi1_clk);
        if (gpu->bus_freq)
                bs_set(gpu, gpu->bus_freq);
        return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
        if (gpu->ebi1_clk)
                clk_disable_unprepare(gpu->ebi1_clk);
        if (gpu->bus_freq)
                bs_set(gpu, 0);
        return 0;
}

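/*
 * msm_gpu_pm_resume()/msm_gpu_pm_suspend() are refcounted via active_cnt
 * (the caller must hold struct_mutex).  Power-up order is rails, then
 * clocks, then AXI/bus; power-down is the reverse.
 */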
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret;

        DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        if (gpu->active_cnt++ > 0)
                return 0;

        if (WARN_ON(gpu->active_cnt <= 0))
                return -EINVAL;

        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;

        ret = enable_clk(gpu);
        if (ret)
                return ret;

        ret = enable_axi(gpu);
        if (ret)
                return ret;

        return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret;

        DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        if (--gpu->active_cnt > 0)
                return 0;

        if (WARN_ON(gpu->active_cnt < 0))
                return -EINVAL;

        ret = disable_axi(gpu);
        if (ret)
                return ret;

        ret = disable_clk(gpu);
        if (ret)
                return ret;

        ret = disable_pwrrail(gpu);
        if (ret)
                return ret;

        return 0;
}

/*
 * Inactivity detection (for suspend):
 */

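/*
 * When the last submit retires, inactive_start() arms inactive_timer; if
 * nothing new is submitted before it expires, inactive_handler() queues
 * inactive_work and inactive_worker() drops the AXI/bus and GPU clocks
 * (the power rails stay on).  Any new submit goes through
 * inactive_cancel(), which deletes the timer and re-enables the clocks
 * if they had been dropped.
 */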
static void inactive_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
        struct drm_device *dev = gpu->dev;

        if (gpu->inactive)
                return;

        DBG("%s: inactive!\n", gpu->name);
        mutex_lock(&dev->struct_mutex);
        if (!(msm_gpu_active(gpu) || gpu->inactive)) {
                disable_axi(gpu);
                disable_clk(gpu);
                gpu->inactive = true;
        }
        mutex_unlock(&dev->struct_mutex);
}

static void inactive_handler(unsigned long data)
{
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        struct msm_drm_private *priv = gpu->dev->dev_private;

        queue_work(priv->wq, &gpu->inactive_work);
}

/* cancel inactive timer and make sure we are awake: */
static void inactive_cancel(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        del_timer(&gpu->inactive_timer);
        if (gpu->inactive) {
                enable_clk(gpu);
                enable_axi(gpu);
                gpu->inactive = false;
        }
}

static void inactive_start(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        mod_timer(&gpu->inactive_timer,
                        round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
}

/*
 * Hangcheck detection for locked gpu:
 */

static void retire_submits(struct msm_gpu *gpu);

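/*
 * Recovery worker, queued by the hangcheck timer: advance the completed
 * fence past the presumed-hung submit, log the task that submitted it (if
 * it can still be found), retire everything up to and including the hung
 * submit, ask the backend to reset the GPU, and then replay the submits
 * that were still queued behind the hung one.
 */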
static void recover_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
        struct drm_device *dev = gpu->dev;
        struct msm_gem_submit *submit;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        msm_update_fence(gpu->fctx, fence + 1);

        mutex_lock(&dev->struct_mutex);

        dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
        list_for_each_entry(submit, &gpu->submit_list, node) {
                if (submit->fence->seqno == (fence + 1)) {
                        struct task_struct *task;

                        rcu_read_lock();
                        task = pid_task(submit->pid, PIDTYPE_PID);
                        if (task) {
                                dev_err(dev->dev, "%s: offending task: %s\n",
                                                gpu->name, task->comm);
                        }
                        rcu_read_unlock();
                        break;
                }
        }

        if (msm_gpu_active(gpu)) {
                /* retire completed submits, plus the one that hung: */
                retire_submits(gpu);

                inactive_cancel(gpu);
                gpu->funcs->recover(gpu);

                /* replay the remaining submits after the one that hung: */
                list_for_each_entry(submit, &gpu->submit_list, node) {
                        gpu->funcs->submit(gpu, submit, NULL);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

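/*
 * hangcheck_handler() runs from hangcheck_timer, which is re-armed after
 * every submit and after each check while work is still pending.  If the
 * completed fence has not advanced since the previous check but submitted
 * work is still outstanding, the GPU is assumed to be locked up and
 * recover_work is queued.
 */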
static void hangcheck_handler(unsigned long data)
{
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        if (fence != gpu->hangcheck_fence) {
                /* some progress has been made.. ya! */
                gpu->hangcheck_fence = fence;
        } else if (fence < gpu->fctx->last_fence) {
                /* no progress and not done.. hung! */
                gpu->hangcheck_fence = fence;
                dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
                                gpu->name);
                dev_err(dev->dev, "%s:     completed fence: %u\n",
                                gpu->name, fence);
                dev_err(dev->dev, "%s:     submitted fence: %u\n",
                                gpu->name, gpu->fctx->last_fence);
                queue_work(priv->wq, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
        if (gpu->fctx->last_fence > gpu->hangcheck_fence)
                hangcheck_timer_reset(gpu);

        /* workaround for missing irq: */
        queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
        uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
        int i, n = min(ncntrs, gpu->num_perfcntrs);

        /* read current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

        /* update cntrs: */
        for (i = 0; i < n; i++)
                cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

        /* save current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                gpu->last_cntrs[i] = current_cntrs[i];

        return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
        ktime_t time;
        uint32_t elapsed;
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        if (!gpu->perfcntr_active)
                goto out;

        time = ktime_get();
        elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

        gpu->totaltime += elapsed;
        if (gpu->last_sample.active)
                gpu->activetime += elapsed;

        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = time;

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

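/*
 * Counter sampling API: msm_gpu_perfcntr_start() resets the active/total
 * time accounting (in microseconds) and latches the current hw counter
 * values; msm_gpu_perfcntr_sample() then returns the times and the counter
 * deltas accumulated since the previous call.  Illustrative usage (the
 * local variable names below are hypothetical):
 *
 *      uint32_t active, total, cntrs[5];
 *      int n;
 *
 *      msm_gpu_perfcntr_start(gpu);
 *      ...
 *      n = msm_gpu_perfcntr_sample(gpu, &active, &total,
 *                      ARRAY_SIZE(cntrs), cntrs);
 *      if (n >= 0)
 *              DBG("busy %u of %u us, %d counters", active, total, n);
 *      msm_gpu_perfcntr_stop(gpu);
 */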
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        /* we could dynamically enable/disable perfcntr registers too.. */
        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = ktime_get();
        gpu->activetime = gpu->totaltime = 0;
        gpu->perfcntr_active = true;
        update_hw_cntrs(gpu, 0, NULL);
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
        gpu->perfcntr_active = false;
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&gpu->perf_lock, flags);

        if (!gpu->perfcntr_active) {
                ret = -EINVAL;
                goto out;
        }

        *activetime = gpu->activetime;
        *totaltime = gpu->totaltime;

        gpu->activetime = gpu->totaltime = 0;

        ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);

        return ret;
}

/*
 * Cmdstream submission/retirement:
 */

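/*
 * A submit is retired once its fence has signaled: each bo in the submit
 * is moved back to the inactive list and the reference and iova pin taken
 * at submit time are dropped, then the submit itself is freed.  Submits
 * are retired strictly in queue order.
 */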
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        int i;

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                /* move to inactive: */
                msm_gem_move_to_inactive(&msm_obj->base);
                msm_gem_put_iova(&msm_obj->base, gpu->id);
                drm_gem_object_unreference(&msm_obj->base);
        }

        msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        while (!list_empty(&gpu->submit_list)) {
                struct msm_gem_submit *submit;

                submit = list_first_entry(&gpu->submit_list,
                                struct msm_gem_submit, node);

                if (dma_fence_is_signaled(submit->fence)) {
                        retire_submit(gpu, submit);
                } else {
                        break;
                }
        }
}

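/*
 * retire_work is queued from the irq path via msm_gpu_retire() (and from
 * the hangcheck timer, as a workaround for missed irqs).  It updates the
 * fence context with the last completed fence and retires any now-signaled
 * submits under struct_mutex, then starts the inactivity timer if the GPU
 * has gone idle.
 */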
static void retire_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
        struct drm_device *dev = gpu->dev;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        msm_update_fence(gpu->fctx, fence);

        mutex_lock(&dev->struct_mutex);
        retire_submits(gpu);
        mutex_unlock(&dev->struct_mutex);

        if (!msm_gpu_active(gpu))
                inactive_start(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;
        queue_work(priv->wq, &gpu->retire_work);
        update_sw_cntrs(gpu);
}

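/*
 * Submission path, called with struct_mutex held: cancel the inactivity
 * timer (re-enabling clocks if needed), queue the submit, pin each bo's
 * iova and take a reference that is held until retirement, mark the bos
 * active against the submit fence, hand the cmdstream to the backend, and
 * re-arm the hangcheck timer.
 */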
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        int i;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        inactive_cancel(gpu);

        list_add_tail(&submit->node, &gpu->submit_list);

        msm_rd_dump_submit(submit);

        update_sw_cntrs(gpu);

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                uint64_t iova;

                /* can't happen yet.. but when we add 2d support we'll have
                 * to deal w/ cross-ring synchronization:
                 */
                WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

                /* submit takes a reference to the bo and iova until retired: */
                drm_gem_object_reference(&msm_obj->base);
                msm_gem_get_iova_locked(&msm_obj->base,
                                submit->gpu->id, &iova);

                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
                else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
                        msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
        }

        gpu->funcs->submit(gpu, submit, ctx);
        priv->lastctx = ctx;

        hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
        struct msm_gpu *gpu = data;
        return gpu->funcs->irq(gpu);
}

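/*
 * Clock names looked up (by name) from the platform device in
 * msm_gpu_init(); the order matters: enable_clk()/disable_clk() treat
 * index 0 ("core") as the scalable core clock and index 2 ("rbbmtimer")
 * as the fixed-rate timer clock.
 */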
static const char *clk_names[] = {
        "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
};

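/*
 * msm_gpu_init() sets up everything the GPU backends share: fence context,
 * workers and timers, register mapping, irq, clocks, regulators, the
 * (optional) IOMMU-backed address space, and the ringbuffer.
 */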
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, const char *ioname, const char *irqname, int ringsz)
{
        struct iommu_domain *iommu;
        int i, ret;

        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;
        gpu->inactive = true;
        gpu->fctx = msm_fence_context_alloc(drm, name);
        if (IS_ERR(gpu->fctx)) {
                ret = PTR_ERR(gpu->fctx);
                gpu->fctx = NULL;
                goto fail;
        }

        INIT_LIST_HEAD(&gpu->active_list);
        INIT_WORK(&gpu->retire_work, retire_worker);
        INIT_WORK(&gpu->inactive_work, inactive_worker);
        INIT_WORK(&gpu->recover_work, recover_worker);

        INIT_LIST_HEAD(&gpu->submit_list);

        setup_timer(&gpu->inactive_timer, inactive_handler,
                        (unsigned long)gpu);
        setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
                        (unsigned long)gpu);

        spin_lock_init(&gpu->perf_lock);

        BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get Interrupt: */
        gpu->irq = platform_get_irq_byname(pdev, irqname);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                dev_err(drm->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
                dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        /* Acquire clocks: */
        for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
                gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
                DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
                if (IS_ERR(gpu->grp_clks[i]))
                        gpu->grp_clks[i] = NULL;
        }

        gpu->ebi1_clk = msm_clk_get(pdev, "bus");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators: */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        /* Setup IOMMU.. eventually we will (I think) do this once per context
         * and have separate page tables per context.  For now, to keep things
         * simple and to get something working, just use a single address space:
         */
        iommu = iommu_domain_alloc(&platform_bus_type);
        if (iommu) {
                /* TODO 32b vs 64b address space.. */
                iommu->geometry.aperture_start = SZ_16M;
                iommu->geometry.aperture_end = 0xffffffff;

                dev_info(drm->dev, "%s: using IOMMU\n", name);
                gpu->aspace = msm_gem_address_space_create(&pdev->dev,
                                iommu, "gpu");
                if (IS_ERR(gpu->aspace)) {
                        ret = PTR_ERR(gpu->aspace);
                        dev_err(drm->dev, "failed to init iommu: %d\n", ret);
                        gpu->aspace = NULL;
                        iommu_domain_free(iommu);
                        goto fail;
                }

        } else {
                dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        }
        gpu->id = msm_register_address_space(drm, gpu->aspace);


        /* Create ringbuffer: */
        mutex_lock(&drm->struct_mutex);
        gpu->rb = msm_ringbuffer_new(gpu, ringsz);
        mutex_unlock(&drm->struct_mutex);
        if (IS_ERR(gpu->rb)) {
                ret = PTR_ERR(gpu->rb);
                gpu->rb = NULL;
                dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
                goto fail;
        }

        bs_init(gpu);

        return 0;

fail:
        return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);

        WARN_ON(!list_empty(&gpu->active_list));

        bs_fini(gpu);

        if (gpu->rb) {
                if (gpu->rb_iova)
                        msm_gem_put_iova(gpu->rb->bo, gpu->id);
                msm_ringbuffer_destroy(gpu->rb);
        }

        if (gpu->fctx)
                msm_fence_context_free(gpu->fctx);
}