]>
Commit | Line | Data |
---|---|---|
d8f4a9ed TR |
1 | /* |
2 | * Copyright (C) 2012 Avionic Design GmbH | |
ad926015 | 3 | * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved. |
d8f4a9ed TR |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | */ | |
9 | ||
ad926015 | 10 | #include <linux/bitops.h> |
776dc384 | 11 | #include <linux/host1x.h> |
bdd2f9cd | 12 | #include <linux/idr.h> |
df06b759 | 13 | #include <linux/iommu.h> |
776dc384 | 14 | |
1503ca47 | 15 | #include <drm/drm_atomic.h> |
07866963 TR |
16 | #include <drm/drm_atomic_helper.h> |
17 | ||
d8f4a9ed | 18 | #include "drm.h" |
de2ba664 | 19 | #include "gem.h" |
d8f4a9ed TR |
20 | |
21 | #define DRIVER_NAME "tegra" | |
22 | #define DRIVER_DESC "NVIDIA Tegra graphics" | |
23 | #define DRIVER_DATE "20120330" | |
24 | #define DRIVER_MAJOR 0 | |
25 | #define DRIVER_MINOR 0 | |
26 | #define DRIVER_PATCHLEVEL 0 | |
27 | ||
ad926015 | 28 | #define CARVEOUT_SZ SZ_64M |
368f622c | 29 | #define CDMA_GATHER_FETCHES_MAX_NB 16383 |
ad926015 | 30 | |
/*
 * Per-open-file driver state. One instance is allocated in
 * tegra_drm_open() and stored in drm_file::driver_priv.
 */
struct tegra_drm_file {
	struct idr contexts;	/* maps context IDs to struct tegra_drm_context */
	struct mutex lock;	/* protects @contexts */
};
35 | ||
1503ca47 TR |
/*
 * Queue the given atomic state for asynchronous (nonblocking) completion.
 * The state must be stored before the work is scheduled, since the worker
 * reads it from tegra->commit.state.
 */
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}
42 | ||
/*
 * Apply a previously swapped atomic state to the hardware and release the
 * reference on it. Runs either synchronously from tegra_atomic_commit() or
 * asynchronously from the commit worker.
 */
static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	/* drops the reference taken in tegra_atomic_commit() */
	drm_atomic_state_put(state);
}
74 | ||
75 | static void tegra_atomic_work(struct work_struct *work) | |
76 | { | |
77 | struct tegra_drm *tegra = container_of(work, struct tegra_drm, | |
78 | commit.work); | |
79 | ||
80 | tegra_atomic_complete(tegra, tegra->commit.state); | |
81 | } | |
82 | ||
/*
 * Driver implementation of drm_mode_config_funcs::atomic_commit.
 * Prepares plane resources, swaps in the new state and either applies it
 * immediately (blocking) or defers it to the commit worker (nonblocking).
 * Returns 0 on success or a negative error code from plane preparation.
 */
static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	/* reference is released by tegra_atomic_complete() */
	drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}
114 | ||
f9914214 TR |
/* Mode-setting entry points; atomic check uses the generic DRM helper. */
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};
123 | ||
/*
 * drm_driver::load callback. Allocates the per-device tegra_drm state,
 * optionally sets up an IOMMU domain (split into a GEM aperture and a
 * CARVEOUT_SZ carveout managed by an IOVA allocator), initializes KMS,
 * fbdev and vblank support, and brings up the host1x logical device.
 * On failure, resources are unwound in reverse order via the goto chain.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		/*
		 * Carve the top CARVEOUT_SZ bytes of the IOMMU aperture out
		 * for the IOVA carveout; the rest is handed to drm_mm for
		 * GEM allocations.
		 */
		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		/* granule = smallest page size the IOMMU supports */
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto vblank;

	return 0;

vblank:
	drm_vblank_cleanup(drm);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}
241 | ||
/*
 * drm_driver::unload callback. Tears down KMS/fbdev/vblank state, shuts
 * down the host1x logical device and releases the IOMMU resources set up
 * in tegra_drm_load().
 *
 * NOTE(review): if host1x_device_exit() fails we return early and leak
 * tegra and the IOMMU domain — presumably acceptable since there is no way
 * to report the error from unload; confirm against the host1x contract.
 */
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}
266 | ||
267 | static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) | |
268 | { | |
08943e6c | 269 | struct tegra_drm_file *fpriv; |
d43f81cb TB |
270 | |
271 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); | |
272 | if (!fpriv) | |
273 | return -ENOMEM; | |
274 | ||
bdd2f9cd TR |
275 | idr_init(&fpriv->contexts); |
276 | mutex_init(&fpriv->lock); | |
d43f81cb TB |
277 | filp->driver_priv = fpriv; |
278 | ||
d8f4a9ed TR |
279 | return 0; |
280 | } | |
281 | ||
/*
 * Close the channel owned by @context and free it. The caller is expected
 * to have already removed the context from any lookup structure (IDR).
 */
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}
287 | ||
d8f4a9ed TR |
/*
 * drm_driver::lastclose callback: restore the fbdev mode when the last
 * userspace client goes away (no-op without fbdev emulation).
 */
static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}
296 | ||
/*
 * Resolve a GEM handle from @file into the host1x buffer object embedded
 * in the tegra_bo. Returns NULL if the handle does not resolve.
 *
 * NOTE(review): the reference taken by drm_gem_object_lookup() is dropped
 * immediately, so the returned pointer is only kept alive by the handle
 * still being open in @file — confirm callers pin the BO (e.g. via
 * host1x_job_pin()) before the handle can be closed.
 */
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	drm_gem_object_unreference_unlocked(gem);

	bo = to_tegra_bo(gem);
	return &bo->base;
}
312 | ||
961e3bea TR |
/*
 * Copy one relocation descriptor from userspace into @dest, resolving the
 * command-buffer and target GEM handles into host1x BOs. Returns 0 on
 * success, a negative error from get_user(), or -ENOENT if a handle does
 * not resolve.
 */
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}
351 | ||
d0fbbdff DO |
/*
 * Copy one wait-check descriptor from userspace into @dest, resolving its
 * GEM handle into a host1x BO. Returns 0 on success, a negative error from
 * get_user(), or -ENOENT if the handle does not resolve.
 */
static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
					 struct drm_tegra_waitchk __user *src,
					 struct drm_file *file)
{
	u32 cmdbuf;
	int err;

	err = get_user(cmdbuf, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}
381 | ||
c40f0f1a TR |
/*
 * Build and submit a host1x job from a userspace DRM_TEGRA_SUBMIT request.
 * All counts, offsets and handles in @args come from untrusted userspace
 * and are validated here before the job is pinned and queued. On success
 * args->fence receives the syncpoint threshold to wait for.
 *
 * Returns 0 on success or a negative error code; on any failure the job
 * is released via host1x_job_put().
 */
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	int err;

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	/* copy and validate each command buffer from the submit */
	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a higher
		 * value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		/* 64-bit math so the end-of-gather bound cannot overflow */
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);

		/*
		 * Gather buffer base address must be 4-bytes aligned,
		 * unaligned offset is malformed and cause commands stream
		 * corruption on the buffer address relocation.
		 */
		if (offset & 3 || offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);

		/*
		 * The unaligned cmdbuf offset will cause an unaligned write
		 * during of the relocations patching, corrupting the commands
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* copy and resolve waitchks from submit */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(wait,
						    &waitchks[num_waitchks],
						    file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);

		/*
		 * The unaligned offset will cause an unaligned write during
		 * of the waitchks patching, corrupting the commands stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	/* userspace may shorten, but never extend, the 10s default timeout */
	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}
561 | ||
562 | ||
d43f81cb | 563 | #ifdef CONFIG_DRM_TEGRA_STAGING |
bdd2f9cd TR |
564 | static struct tegra_drm_context * |
565 | tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) | |
d43f81cb | 566 | { |
bdd2f9cd | 567 | struct tegra_drm_context *context; |
d43f81cb | 568 | |
bdd2f9cd TR |
569 | mutex_lock(&file->lock); |
570 | context = idr_find(&file->contexts, id); | |
571 | mutex_unlock(&file->lock); | |
d43f81cb | 572 | |
bdd2f9cd | 573 | return context; |
d43f81cb TB |
574 | } |
575 | ||
576 | static int tegra_gem_create(struct drm_device *drm, void *data, | |
577 | struct drm_file *file) | |
578 | { | |
579 | struct drm_tegra_gem_create *args = data; | |
580 | struct tegra_bo *bo; | |
581 | ||
773af77f | 582 | bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags, |
d43f81cb TB |
583 | &args->handle); |
584 | if (IS_ERR(bo)) | |
585 | return PTR_ERR(bo); | |
586 | ||
587 | return 0; | |
588 | } | |
589 | ||
/*
 * DRM_IOCTL_TEGRA_GEM_MMAP: return the fake mmap offset for the buffer
 * object in args->offset, for use with mmap() on the DRM fd.
 *
 * NOTE(review): returns -EINVAL for a bad handle where the other handle
 * lookups in this file return -ENOENT; kept as-is since the errno is
 * userspace-visible ABI.
 */
static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
609 | ||
610 | static int tegra_syncpt_read(struct drm_device *drm, void *data, | |
611 | struct drm_file *file) | |
612 | { | |
776dc384 | 613 | struct host1x *host = dev_get_drvdata(drm->dev->parent); |
d43f81cb | 614 | struct drm_tegra_syncpt_read *args = data; |
776dc384 | 615 | struct host1x_syncpt *sp; |
d43f81cb | 616 | |
776dc384 | 617 | sp = host1x_syncpt_get(host, args->id); |
d43f81cb TB |
618 | if (!sp) |
619 | return -EINVAL; | |
620 | ||
621 | args->value = host1x_syncpt_read_min(sp); | |
622 | return 0; | |
623 | } | |
624 | ||
625 | static int tegra_syncpt_incr(struct drm_device *drm, void *data, | |
626 | struct drm_file *file) | |
627 | { | |
776dc384 | 628 | struct host1x *host1x = dev_get_drvdata(drm->dev->parent); |
d43f81cb | 629 | struct drm_tegra_syncpt_incr *args = data; |
776dc384 | 630 | struct host1x_syncpt *sp; |
d43f81cb | 631 | |
776dc384 | 632 | sp = host1x_syncpt_get(host1x, args->id); |
d43f81cb TB |
633 | if (!sp) |
634 | return -EINVAL; | |
635 | ||
ebae30b1 | 636 | return host1x_syncpt_incr(sp); |
d43f81cb TB |
637 | } |
638 | ||
639 | static int tegra_syncpt_wait(struct drm_device *drm, void *data, | |
640 | struct drm_file *file) | |
641 | { | |
776dc384 | 642 | struct host1x *host1x = dev_get_drvdata(drm->dev->parent); |
d43f81cb | 643 | struct drm_tegra_syncpt_wait *args = data; |
776dc384 | 644 | struct host1x_syncpt *sp; |
d43f81cb | 645 | |
776dc384 | 646 | sp = host1x_syncpt_get(host1x, args->id); |
d43f81cb TB |
647 | if (!sp) |
648 | return -EINVAL; | |
649 | ||
650 | return host1x_syncpt_wait(sp, args->thresh, args->timeout, | |
651 | &args->value); | |
652 | } | |
653 | ||
bdd2f9cd TR |
/*
 * Open a channel on @client for @context and register the context in the
 * per-file IDR. On success the allocated ID is stored in context->id.
 * Caller must hold fpriv->lock (the IDR is not otherwise protected).
 * The channel is closed again if the IDR allocation fails.
 */
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;	/* idr_alloc() returns the new ID on success */

	return 0;
}
675 | ||
d43f81cb TB |
/*
 * DRM_IOCTL_TEGRA_OPEN_CHANNEL: find the client whose class matches
 * args->client, open a channel context on it and return the context ID in
 * args->context. Returns -ENODEV if no client matches (err keeps its
 * initial value when the list walk finds nothing).
 */
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	/* no matching client, or tegra_client_open() failed */
	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}
708 | ||
/*
 * DRM_IOCTL_TEGRA_CLOSE_CHANNEL: remove the context identified by
 * args->context from the per-file IDR and free it (which also closes its
 * channel). Returns -EINVAL for an unknown context ID.
 */
static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
732 | ||
/*
 * DRM_IOCTL_TEGRA_GET_SYNCPT: return in args->id the host1x syncpoint ID
 * at index args->index of the client owning args->context. Returns
 * -ENODEV for an unknown context and -EINVAL for an out-of-range index.
 */
static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
762 | ||
763 | static int tegra_submit(struct drm_device *drm, void *data, | |
764 | struct drm_file *file) | |
765 | { | |
08943e6c | 766 | struct tegra_drm_file *fpriv = file->driver_priv; |
d43f81cb | 767 | struct drm_tegra_submit *args = data; |
c88c3630 | 768 | struct tegra_drm_context *context; |
bdd2f9cd | 769 | int err; |
c88c3630 | 770 | |
bdd2f9cd | 771 | mutex_lock(&fpriv->lock); |
d43f81cb | 772 | |
bdd2f9cd TR |
773 | context = tegra_drm_file_get_context(fpriv, args->context); |
774 | if (!context) { | |
775 | err = -ENODEV; | |
776 | goto unlock; | |
777 | } | |
d43f81cb | 778 | |
bdd2f9cd | 779 | err = context->client->ops->submit(context, args, drm, file); |
d43f81cb | 780 | |
bdd2f9cd TR |
781 | unlock: |
782 | mutex_unlock(&fpriv->lock); | |
783 | return err; | |
d43f81cb | 784 | } |
c54a169b AM |
785 | |
/*
 * DRM_IOCTL_TEGRA_GET_SYNCPT_BASE: return in args->id the wait-base ID of
 * the syncpoint at index args->syncpt of the client owning args->context.
 * Returns -ENODEV for an unknown context, -EINVAL for an out-of-range
 * index and -ENXIO if the syncpoint has no wait base.
 */
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
7678d71f TR |
823 | |
/*
 * DRM_IOCTL_TEGRA_GEM_SET_TILING: set the tiling mode of a buffer object.
 * For PITCH and TILED modes args->value must be zero; for BLOCK mode it is
 * the block-height log2 and must not exceed 5. Returns -EINVAL for an
 * invalid mode/value combination and -ENOENT for a bad handle.
 */
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	/* validate the requested mode/value before touching the object */
	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
876 | ||
/*
 * DRM_IOCTL_TEGRA_GEM_GET_TILING: report a buffer object's tiling mode
 * and value (value is only meaningful for BLOCK mode). Returns -ENOENT
 * for a bad handle and -EINVAL if the stored mode is unrecognized.
 */
static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_unreference_unlocked(gem);

	return err;
}
7b129087 TR |
916 | |
917 | static int tegra_gem_set_flags(struct drm_device *drm, void *data, | |
918 | struct drm_file *file) | |
919 | { | |
920 | struct drm_tegra_gem_set_flags *args = data; | |
921 | struct drm_gem_object *gem; | |
922 | struct tegra_bo *bo; | |
923 | ||
924 | if (args->flags & ~DRM_TEGRA_GEM_FLAGS) | |
925 | return -EINVAL; | |
926 | ||
a8ad0bd8 | 927 | gem = drm_gem_object_lookup(file, args->handle); |
7b129087 TR |
928 | if (!gem) |
929 | return -ENOENT; | |
930 | ||
931 | bo = to_tegra_bo(gem); | |
932 | bo->flags = 0; | |
933 | ||
934 | if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP) | |
935 | bo->flags |= TEGRA_BO_BOTTOM_UP; | |
936 | ||
11533304 | 937 | drm_gem_object_unreference_unlocked(gem); |
7b129087 TR |
938 | |
939 | return 0; | |
940 | } | |
941 | ||
942 | static int tegra_gem_get_flags(struct drm_device *drm, void *data, | |
943 | struct drm_file *file) | |
944 | { | |
945 | struct drm_tegra_gem_get_flags *args = data; | |
946 | struct drm_gem_object *gem; | |
947 | struct tegra_bo *bo; | |
948 | ||
a8ad0bd8 | 949 | gem = drm_gem_object_lookup(file, args->handle); |
7b129087 TR |
950 | if (!gem) |
951 | return -ENOENT; | |
952 | ||
953 | bo = to_tegra_bo(gem); | |
954 | args->flags = 0; | |
955 | ||
956 | if (bo->flags & TEGRA_BO_BOTTOM_UP) | |
957 | args->flags |= DRM_TEGRA_GEM_BOTTOM_UP; | |
958 | ||
11533304 | 959 | drm_gem_object_unreference_unlocked(gem); |
7b129087 TR |
960 | |
961 | return 0; | |
962 | } | |
d43f81cb TB |
963 | #endif |
964 | ||
baa70943 | 965 | static const struct drm_ioctl_desc tegra_drm_ioctls[] = { |
d43f81cb | 966 | #ifdef CONFIG_DRM_TEGRA_STAGING |
f8c47144 DV |
967 | DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0), |
968 | DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0), | |
969 | DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0), | |
970 | DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0), | |
971 | DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0), | |
972 | DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0), | |
973 | DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0), | |
974 | DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0), | |
975 | DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0), | |
976 | DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0), | |
977 | DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0), | |
978 | DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0), | |
979 | DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0), | |
980 | DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0), | |
d43f81cb | 981 | #endif |
d8f4a9ed TR |
982 | }; |
983 | ||
984 | static const struct file_operations tegra_drm_fops = { | |
985 | .owner = THIS_MODULE, | |
986 | .open = drm_open, | |
987 | .release = drm_release, | |
988 | .unlocked_ioctl = drm_ioctl, | |
de2ba664 | 989 | .mmap = tegra_drm_mmap, |
d8f4a9ed | 990 | .poll = drm_poll, |
d8f4a9ed | 991 | .read = drm_read, |
d8f4a9ed | 992 | .compat_ioctl = drm_compat_ioctl, |
d8f4a9ed TR |
993 | .llseek = noop_llseek, |
994 | }; | |
995 | ||
bdd2f9cd TR |
/*
 * idr_for_each() callback: release one per-file channel context.
 * Always returns 0 so that iteration continues over all entries.
 */
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	tegra_drm_context_free(p);

	return 0;
}
1004 | ||
3c03c46a TR |
1005 | static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file) |
1006 | { | |
08943e6c | 1007 | struct tegra_drm_file *fpriv = file->driver_priv; |
3c03c46a | 1008 | |
bdd2f9cd TR |
1009 | mutex_lock(&fpriv->lock); |
1010 | idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL); | |
1011 | mutex_unlock(&fpriv->lock); | |
d43f81cb | 1012 | |
bdd2f9cd TR |
1013 | idr_destroy(&fpriv->contexts); |
1014 | mutex_destroy(&fpriv->lock); | |
d43f81cb | 1015 | kfree(fpriv); |
3c03c46a TR |
1016 | } |
1017 | ||
e450fcc6 TR |
1018 | #ifdef CONFIG_DEBUG_FS |
1019 | static int tegra_debugfs_framebuffers(struct seq_file *s, void *data) | |
1020 | { | |
1021 | struct drm_info_node *node = (struct drm_info_node *)s->private; | |
1022 | struct drm_device *drm = node->minor->dev; | |
1023 | struct drm_framebuffer *fb; | |
1024 | ||
1025 | mutex_lock(&drm->mode_config.fb_lock); | |
1026 | ||
1027 | list_for_each_entry(fb, &drm->mode_config.fb_list, head) { | |
1028 | seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n", | |
b00c600e VS |
1029 | fb->base.id, fb->width, fb->height, |
1030 | fb->format->depth, | |
272725c7 | 1031 | fb->format->cpp[0] * 8, |
747a598f | 1032 | drm_framebuffer_read_refcount(fb)); |
e450fcc6 TR |
1033 | } |
1034 | ||
1035 | mutex_unlock(&drm->mode_config.fb_lock); | |
1036 | ||
1037 | return 0; | |
1038 | } | |
1039 | ||
28c23373 TR |
1040 | static int tegra_debugfs_iova(struct seq_file *s, void *data) |
1041 | { | |
1042 | struct drm_info_node *node = (struct drm_info_node *)s->private; | |
1043 | struct drm_device *drm = node->minor->dev; | |
1044 | struct tegra_drm *tegra = drm->dev_private; | |
b5c3714f | 1045 | struct drm_printer p = drm_seq_file_printer(s); |
28c23373 | 1046 | |
347ad49d | 1047 | mutex_lock(&tegra->mm_lock); |
b5c3714f | 1048 | drm_mm_print(&tegra->mm, &p); |
347ad49d | 1049 | mutex_unlock(&tegra->mm_lock); |
b5c3714f DV |
1050 | |
1051 | return 0; | |
28c23373 TR |
1052 | } |
1053 | ||
e450fcc6 TR |
1054 | static struct drm_info_list tegra_debugfs_list[] = { |
1055 | { "framebuffers", tegra_debugfs_framebuffers, 0 }, | |
28c23373 | 1056 | { "iova", tegra_debugfs_iova, 0 }, |
e450fcc6 TR |
1057 | }; |
1058 | ||
1059 | static int tegra_debugfs_init(struct drm_minor *minor) | |
1060 | { | |
1061 | return drm_debugfs_create_files(tegra_debugfs_list, | |
1062 | ARRAY_SIZE(tegra_debugfs_list), | |
1063 | minor->debugfs_root, minor); | |
1064 | } | |
e450fcc6 TR |
1065 | #endif |
1066 | ||
9b57f5f2 | 1067 | static struct drm_driver tegra_drm_driver = { |
ad906599 TR |
1068 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | |
1069 | DRIVER_ATOMIC, | |
d8f4a9ed TR |
1070 | .load = tegra_drm_load, |
1071 | .unload = tegra_drm_unload, | |
1072 | .open = tegra_drm_open, | |
3c03c46a | 1073 | .preclose = tegra_drm_preclose, |
d8f4a9ed TR |
1074 | .lastclose = tegra_drm_lastclose, |
1075 | ||
e450fcc6 TR |
1076 | #if defined(CONFIG_DEBUG_FS) |
1077 | .debugfs_init = tegra_debugfs_init, | |
e450fcc6 TR |
1078 | #endif |
1079 | ||
1ddbdbd6 | 1080 | .gem_free_object_unlocked = tegra_bo_free_object, |
de2ba664 | 1081 | .gem_vm_ops = &tegra_bo_vm_ops, |
3800391d TR |
1082 | |
1083 | .prime_handle_to_fd = drm_gem_prime_handle_to_fd, | |
1084 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | |
1085 | .gem_prime_export = tegra_gem_prime_export, | |
1086 | .gem_prime_import = tegra_gem_prime_import, | |
1087 | ||
de2ba664 AM |
1088 | .dumb_create = tegra_bo_dumb_create, |
1089 | .dumb_map_offset = tegra_bo_dumb_map_offset, | |
43387b37 | 1090 | .dumb_destroy = drm_gem_dumb_destroy, |
d8f4a9ed TR |
1091 | |
1092 | .ioctls = tegra_drm_ioctls, | |
1093 | .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls), | |
1094 | .fops = &tegra_drm_fops, | |
1095 | ||
1096 | .name = DRIVER_NAME, | |
1097 | .desc = DRIVER_DESC, | |
1098 | .date = DRIVER_DATE, | |
1099 | .major = DRIVER_MAJOR, | |
1100 | .minor = DRIVER_MINOR, | |
1101 | .patchlevel = DRIVER_PATCHLEVEL, | |
1102 | }; | |
776dc384 TR |
1103 | |
1104 | int tegra_drm_register_client(struct tegra_drm *tegra, | |
1105 | struct tegra_drm_client *client) | |
1106 | { | |
1107 | mutex_lock(&tegra->clients_lock); | |
1108 | list_add_tail(&client->list, &tegra->clients); | |
1109 | mutex_unlock(&tegra->clients_lock); | |
1110 | ||
1111 | return 0; | |
1112 | } | |
1113 | ||
1114 | int tegra_drm_unregister_client(struct tegra_drm *tegra, | |
1115 | struct tegra_drm_client *client) | |
1116 | { | |
1117 | mutex_lock(&tegra->clients_lock); | |
1118 | list_del_init(&client->list); | |
1119 | mutex_unlock(&tegra->clients_lock); | |
1120 | ||
1121 | return 0; | |
1122 | } | |
1123 | ||
ad926015 MP |
1124 | void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, |
1125 | dma_addr_t *dma) | |
1126 | { | |
1127 | struct iova *alloc; | |
1128 | void *virt; | |
1129 | gfp_t gfp; | |
1130 | int err; | |
1131 | ||
1132 | if (tegra->domain) | |
1133 | size = iova_align(&tegra->carveout.domain, size); | |
1134 | else | |
1135 | size = PAGE_ALIGN(size); | |
1136 | ||
1137 | gfp = GFP_KERNEL | __GFP_ZERO; | |
1138 | if (!tegra->domain) { | |
1139 | /* | |
1140 | * Many units only support 32-bit addresses, even on 64-bit | |
1141 | * SoCs. If there is no IOMMU to translate into a 32-bit IO | |
1142 | * virtual address space, force allocations to be in the | |
1143 | * lower 32-bit range. | |
1144 | */ | |
1145 | gfp |= GFP_DMA; | |
1146 | } | |
1147 | ||
1148 | virt = (void *)__get_free_pages(gfp, get_order(size)); | |
1149 | if (!virt) | |
1150 | return ERR_PTR(-ENOMEM); | |
1151 | ||
1152 | if (!tegra->domain) { | |
1153 | /* | |
1154 | * If IOMMU is disabled, devices address physical memory | |
1155 | * directly. | |
1156 | */ | |
1157 | *dma = virt_to_phys(virt); | |
1158 | return virt; | |
1159 | } | |
1160 | ||
1161 | alloc = alloc_iova(&tegra->carveout.domain, | |
1162 | size >> tegra->carveout.shift, | |
1163 | tegra->carveout.limit, true); | |
1164 | if (!alloc) { | |
1165 | err = -EBUSY; | |
1166 | goto free_pages; | |
1167 | } | |
1168 | ||
1169 | *dma = iova_dma_addr(&tegra->carveout.domain, alloc); | |
1170 | err = iommu_map(tegra->domain, *dma, virt_to_phys(virt), | |
1171 | size, IOMMU_READ | IOMMU_WRITE); | |
1172 | if (err < 0) | |
1173 | goto free_iova; | |
1174 | ||
1175 | return virt; | |
1176 | ||
1177 | free_iova: | |
1178 | __free_iova(&tegra->carveout.domain, alloc); | |
1179 | free_pages: | |
1180 | free_pages((unsigned long)virt, get_order(size)); | |
1181 | ||
1182 | return ERR_PTR(err); | |
1183 | } | |
1184 | ||
1185 | void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, | |
1186 | dma_addr_t dma) | |
1187 | { | |
1188 | if (tegra->domain) | |
1189 | size = iova_align(&tegra->carveout.domain, size); | |
1190 | else | |
1191 | size = PAGE_ALIGN(size); | |
1192 | ||
1193 | if (tegra->domain) { | |
1194 | iommu_unmap(tegra->domain, dma, size); | |
1195 | free_iova(&tegra->carveout.domain, | |
1196 | iova_pfn(&tegra->carveout.domain, dma)); | |
1197 | } | |
1198 | ||
1199 | free_pages((unsigned long)virt, get_order(size)); | |
1200 | } | |
1201 | ||
9910f5c4 | 1202 | static int host1x_drm_probe(struct host1x_device *dev) |
776dc384 | 1203 | { |
9910f5c4 TR |
1204 | struct drm_driver *driver = &tegra_drm_driver; |
1205 | struct drm_device *drm; | |
1206 | int err; | |
1207 | ||
1208 | drm = drm_dev_alloc(driver, &dev->dev); | |
0f288605 TG |
1209 | if (IS_ERR(drm)) |
1210 | return PTR_ERR(drm); | |
9910f5c4 | 1211 | |
9910f5c4 TR |
1212 | dev_set_drvdata(&dev->dev, drm); |
1213 | ||
1214 | err = drm_dev_register(drm, 0); | |
1215 | if (err < 0) | |
1216 | goto unref; | |
1217 | ||
9910f5c4 TR |
1218 | return 0; |
1219 | ||
1220 | unref: | |
1221 | drm_dev_unref(drm); | |
1222 | return err; | |
776dc384 TR |
1223 | } |
1224 | ||
9910f5c4 | 1225 | static int host1x_drm_remove(struct host1x_device *dev) |
776dc384 | 1226 | { |
9910f5c4 TR |
1227 | struct drm_device *drm = dev_get_drvdata(&dev->dev); |
1228 | ||
1229 | drm_dev_unregister(drm); | |
1230 | drm_dev_unref(drm); | |
776dc384 TR |
1231 | |
1232 | return 0; | |
1233 | } | |
1234 | ||
359ae687 TR |
1235 | #ifdef CONFIG_PM_SLEEP |
1236 | static int host1x_drm_suspend(struct device *dev) | |
1237 | { | |
1238 | struct drm_device *drm = dev_get_drvdata(dev); | |
986c58d1 | 1239 | struct tegra_drm *tegra = drm->dev_private; |
359ae687 TR |
1240 | |
1241 | drm_kms_helper_poll_disable(drm); | |
986c58d1 TR |
1242 | tegra_drm_fb_suspend(drm); |
1243 | ||
1244 | tegra->state = drm_atomic_helper_suspend(drm); | |
1245 | if (IS_ERR(tegra->state)) { | |
1246 | tegra_drm_fb_resume(drm); | |
1247 | drm_kms_helper_poll_enable(drm); | |
1248 | return PTR_ERR(tegra->state); | |
1249 | } | |
359ae687 TR |
1250 | |
1251 | return 0; | |
1252 | } | |
1253 | ||
1254 | static int host1x_drm_resume(struct device *dev) | |
1255 | { | |
1256 | struct drm_device *drm = dev_get_drvdata(dev); | |
986c58d1 | 1257 | struct tegra_drm *tegra = drm->dev_private; |
359ae687 | 1258 | |
986c58d1 TR |
1259 | drm_atomic_helper_resume(drm, tegra->state); |
1260 | tegra_drm_fb_resume(drm); | |
359ae687 TR |
1261 | drm_kms_helper_poll_enable(drm); |
1262 | ||
1263 | return 0; | |
1264 | } | |
1265 | #endif | |
1266 | ||
a13f1dc4 TR |
/* System sleep hooks (no-ops unless CONFIG_PM_SLEEP is enabled). */
static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);
359ae687 | 1269 | |
776dc384 TR |
1270 | static const struct of_device_id host1x_drm_subdevs[] = { |
1271 | { .compatible = "nvidia,tegra20-dc", }, | |
1272 | { .compatible = "nvidia,tegra20-hdmi", }, | |
1273 | { .compatible = "nvidia,tegra20-gr2d", }, | |
5f60ed0d | 1274 | { .compatible = "nvidia,tegra20-gr3d", }, |
776dc384 TR |
1275 | { .compatible = "nvidia,tegra30-dc", }, |
1276 | { .compatible = "nvidia,tegra30-hdmi", }, | |
1277 | { .compatible = "nvidia,tegra30-gr2d", }, | |
5f60ed0d | 1278 | { .compatible = "nvidia,tegra30-gr3d", }, |
dec72739 | 1279 | { .compatible = "nvidia,tegra114-dsi", }, |
7d1d28ac | 1280 | { .compatible = "nvidia,tegra114-hdmi", }, |
5f60ed0d | 1281 | { .compatible = "nvidia,tegra114-gr3d", }, |
8620fc62 | 1282 | { .compatible = "nvidia,tegra124-dc", }, |
6b6b6042 | 1283 | { .compatible = "nvidia,tegra124-sor", }, |
fb7be70e | 1284 | { .compatible = "nvidia,tegra124-hdmi", }, |
7d338587 | 1285 | { .compatible = "nvidia,tegra124-dsi", }, |
0ae797a8 | 1286 | { .compatible = "nvidia,tegra124-vic", }, |
c06c7930 | 1287 | { .compatible = "nvidia,tegra132-dsi", }, |
5b4f516f | 1288 | { .compatible = "nvidia,tegra210-dc", }, |
ddfb406b | 1289 | { .compatible = "nvidia,tegra210-dsi", }, |
3309ac83 | 1290 | { .compatible = "nvidia,tegra210-sor", }, |
459cc2c6 | 1291 | { .compatible = "nvidia,tegra210-sor1", }, |
0ae797a8 | 1292 | { .compatible = "nvidia,tegra210-vic", }, |
776dc384 TR |
1293 | { /* sentinel */ } |
1294 | }; | |
1295 | ||
1296 | static struct host1x_driver host1x_drm_driver = { | |
f4c5cf88 TR |
1297 | .driver = { |
1298 | .name = "drm", | |
359ae687 | 1299 | .pm = &host1x_drm_pm_ops, |
f4c5cf88 | 1300 | }, |
776dc384 TR |
1301 | .probe = host1x_drm_probe, |
1302 | .remove = host1x_drm_remove, | |
1303 | .subdevs = host1x_drm_subdevs, | |
1304 | }; | |
1305 | ||
473112e4 TR |
1306 | static struct platform_driver * const drivers[] = { |
1307 | &tegra_dc_driver, | |
1308 | &tegra_hdmi_driver, | |
1309 | &tegra_dsi_driver, | |
1310 | &tegra_dpaux_driver, | |
1311 | &tegra_sor_driver, | |
1312 | &tegra_gr2d_driver, | |
1313 | &tegra_gr3d_driver, | |
0ae797a8 | 1314 | &tegra_vic_driver, |
473112e4 TR |
1315 | }; |
1316 | ||
776dc384 TR |
1317 | static int __init host1x_drm_init(void) |
1318 | { | |
1319 | int err; | |
1320 | ||
1321 | err = host1x_driver_register(&host1x_drm_driver); | |
1322 | if (err < 0) | |
1323 | return err; | |
1324 | ||
473112e4 | 1325 | err = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); |
776dc384 TR |
1326 | if (err < 0) |
1327 | goto unregister_host1x; | |
1328 | ||
776dc384 TR |
1329 | return 0; |
1330 | ||
776dc384 TR |
1331 | unregister_host1x: |
1332 | host1x_driver_unregister(&host1x_drm_driver); | |
1333 | return err; | |
1334 | } | |
1335 | module_init(host1x_drm_init); | |
1336 | ||
1337 | static void __exit host1x_drm_exit(void) | |
1338 | { | |
473112e4 | 1339 | platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); |
776dc384 TR |
1340 | host1x_driver_unregister(&host1x_drm_driver); |
1341 | } | |
1342 | module_exit(host1x_drm_exit); | |
1343 | ||
1344 | MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); | |
1345 | MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); | |
1346 | MODULE_LICENSE("GPL v2"); |