]>
Commit | Line | Data |
---|---|---|
d8f4a9ed TR |
1 | /* |
2 | * Copyright (C) 2012 Avionic Design GmbH | |
ad926015 | 3 | * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved. |
d8f4a9ed TR |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | */ | |
9 | ||
ad926015 | 10 | #include <linux/bitops.h> |
776dc384 | 11 | #include <linux/host1x.h> |
bdd2f9cd | 12 | #include <linux/idr.h> |
df06b759 | 13 | #include <linux/iommu.h> |
776dc384 | 14 | |
1503ca47 | 15 | #include <drm/drm_atomic.h> |
07866963 TR |
16 | #include <drm/drm_atomic_helper.h> |
17 | ||
d8f4a9ed | 18 | #include "drm.h" |
de2ba664 | 19 | #include "gem.h" |
d8f4a9ed TR |
20 | |
21 | #define DRIVER_NAME "tegra" | |
22 | #define DRIVER_DESC "NVIDIA Tegra graphics" | |
23 | #define DRIVER_DATE "20120330" | |
24 | #define DRIVER_MAJOR 0 | |
25 | #define DRIVER_MINOR 0 | |
26 | #define DRIVER_PATCHLEVEL 0 | |
27 | ||
ad926015 | 28 | #define CARVEOUT_SZ SZ_64M |
368f622c | 29 | #define CDMA_GATHER_FETCHES_MAX_NB 16383 |
ad926015 | 30 | |
/*
 * Per-open-file driver state: tracks the client channel contexts created
 * through one DRM file descriptor.
 */
struct tegra_drm_file {
	struct idr contexts;	/* maps context IDs to struct tegra_drm_context */
	struct mutex lock;	/* serializes access to @contexts */
};
35 | ||
/*
 * Hand an atomic state off to the commit worker for nonblocking completion.
 * Called with tegra->commit.lock held (see tegra_atomic_commit()), so the
 * single commit.state slot cannot be overwritten while a previous commit is
 * still pending.
 */
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}
42 | ||
/*
 * Apply an atomic state to the hardware: program modeset disables/enables
 * and planes, wait for vblanks, clean up, then drop the state reference
 * taken in tegra_atomic_commit(). Runs either synchronously or from the
 * commit worker.
 */
static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_put(state);
}
74 | ||
/*
 * Workqueue callback for nonblocking commits: completes the state that
 * tegra_atomic_schedule() stashed in tegra->commit.state.
 */
static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}
82 | ||
/*
 * Driver ->atomic_commit() implementation. Prepares planes, serializes
 * against any still-running nonblocking commit, swaps in the new software
 * state and then completes it either directly or via the commit worker.
 * Returns 0 on success or a negative errno from prepare/swap.
 */
static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	err = drm_atomic_helper_swap_state(state, true);
	if (err) {
		mutex_unlock(&tegra->commit.lock);
		drm_atomic_helper_cleanup_planes(drm, state);
		return err;
	}

	/* reference dropped by tegra_atomic_complete() when it finishes */
	drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}
119 | ||
/*
 * Mode-setting entry points. Atomic checking is delegated entirely to the
 * generic helper; only the commit path is driver-specific.
 */
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};
128 | ||
/*
 * Driver ->load() callback: allocate per-device state, optionally set up an
 * IOMMU domain (splitting its aperture into a GEM region and a CARVEOUT_SZ
 * carveout managed as an IOVA domain), initialize mode setting, fbdev and
 * vblank support, and bring up the host1x sub-devices.
 *
 * Returns 0 on success or a negative errno; on failure everything set up so
 * far is unwound in reverse order via the labels at the bottom.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		/* the top CARVEOUT_SZ bytes of the aperture become the carveout */
		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		/* IOVA granule = smallest page size the IOMMU supports */
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto device;

	return 0;

device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}
243 | ||
/*
 * Driver ->unload() callback: tear down KMS, host1x sub-devices and the
 * optional IOMMU/carveout state, roughly reversing tegra_drm_load().
 *
 * NOTE(review): if host1x_device_exit() fails, the function bails out and
 * the IOMMU state plus @tegra are deliberately left allocated rather than
 * freed under a half-torn-down device.
 */
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}
268 | ||
269 | static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) | |
270 | { | |
08943e6c | 271 | struct tegra_drm_file *fpriv; |
d43f81cb TB |
272 | |
273 | fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); | |
274 | if (!fpriv) | |
275 | return -ENOMEM; | |
276 | ||
bdd2f9cd TR |
277 | idr_init(&fpriv->contexts); |
278 | mutex_init(&fpriv->lock); | |
d43f81cb TB |
279 | filp->driver_priv = fpriv; |
280 | ||
d8f4a9ed TR |
281 | return 0; |
282 | } | |
283 | ||
/*
 * Destroy a client channel context: let the owning client close its channel
 * first, then release the context memory. Caller must have removed it from
 * any IDR beforehand.
 */
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}
289 | ||
d8f4a9ed TR |
/*
 * Driver ->lastclose() callback: when the last userspace client goes away,
 * restore the fbdev console mode (only when fbdev emulation is built in).
 */
static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}
298 | ||
c40f0f1a | 299 | static struct host1x_bo * |
a8ad0bd8 | 300 | host1x_bo_lookup(struct drm_file *file, u32 handle) |
c40f0f1a TR |
301 | { |
302 | struct drm_gem_object *gem; | |
303 | struct tegra_bo *bo; | |
304 | ||
a8ad0bd8 | 305 | gem = drm_gem_object_lookup(file, handle); |
c40f0f1a TR |
306 | if (!gem) |
307 | return NULL; | |
308 | ||
c40f0f1a TR |
309 | bo = to_tegra_bo(gem); |
310 | return &bo->base; | |
311 | } | |
312 | ||
961e3bea TR |
313 | static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, |
314 | struct drm_tegra_reloc __user *src, | |
315 | struct drm_device *drm, | |
316 | struct drm_file *file) | |
317 | { | |
318 | u32 cmdbuf, target; | |
319 | int err; | |
320 | ||
321 | err = get_user(cmdbuf, &src->cmdbuf.handle); | |
322 | if (err < 0) | |
323 | return err; | |
324 | ||
325 | err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset); | |
326 | if (err < 0) | |
327 | return err; | |
328 | ||
329 | err = get_user(target, &src->target.handle); | |
330 | if (err < 0) | |
331 | return err; | |
332 | ||
31f40f86 | 333 | err = get_user(dest->target.offset, &src->target.offset); |
961e3bea TR |
334 | if (err < 0) |
335 | return err; | |
336 | ||
337 | err = get_user(dest->shift, &src->shift); | |
338 | if (err < 0) | |
339 | return err; | |
340 | ||
a8ad0bd8 | 341 | dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf); |
961e3bea TR |
342 | if (!dest->cmdbuf.bo) |
343 | return -ENOENT; | |
344 | ||
a8ad0bd8 | 345 | dest->target.bo = host1x_bo_lookup(file, target); |
961e3bea TR |
346 | if (!dest->target.bo) |
347 | return -ENOENT; | |
348 | ||
349 | return 0; | |
350 | } | |
351 | ||
/*
 * Copy a single wait-check entry from userspace into @dest and resolve its
 * GEM handle to a host1x buffer object (taking a reference the caller must
 * drop). Returns 0 on success or a negative errno.
 *
 * NOTE(review): tegra_drm_submit() currently rejects submissions with any
 * waitchks, so this path appears unused for now — confirm before relying
 * on it.
 */
static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
					 struct drm_tegra_waitchk __user *src,
					 struct drm_file *file)
{
	u32 cmdbuf;
	int err;

	err = get_user(cmdbuf, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}
381 | ||
/*
 * Validate a userspace job submission and hand it to host1x.
 *
 * Copies command buffers, relocations and wait-checks from userspace,
 * validates offsets/sizes so userspace cannot corrupt the command stream,
 * pins the referenced buffers and submits the job on the context's channel.
 * On success args->fence receives the syncpoint threshold to wait for.
 *
 * Every GEM object resolved along the way is recorded in refs[] and
 * unreferenced at the end — on success too (note the deliberate
 * fall-through into the "fail" label with err == 0): once the job is
 * submitted, host1x_job_pin() holds its own references, so the lookup
 * references taken here are no longer needed.
 */
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_waitchk __user *user_waitchks;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_waitchks = u64_to_user_ptr(args->waitchks);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a higher
		 * value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		/* 64-bit math so offset + length cannot wrap the check below */
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * Gather buffer base address must be 4-bytes aligned,
		 * unaligned offset is malformed and cause commands stream
		 * corruption on the buffer address relocation.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The unaligned cmdbuf offset will cause an unaligned write
		 * during of the relocations patching, corrupting the commands
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* copy and resolve waitchks from submit */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(
			wait, &user_waitchks[num_waitchks], file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The unaligned offset will cause an unaligned write during
		 * of the waitchks patching, corrupting the commands stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;	/* cap the job timeout at 10 seconds */

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	/* drop the lookup references; on success err is still 0 here */
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}
587 | ||
588 | ||
d43f81cb | 589 | #ifdef CONFIG_DRM_TEGRA_STAGING |
d43f81cb TB |
590 | static int tegra_gem_create(struct drm_device *drm, void *data, |
591 | struct drm_file *file) | |
592 | { | |
593 | struct drm_tegra_gem_create *args = data; | |
594 | struct tegra_bo *bo; | |
595 | ||
773af77f | 596 | bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags, |
d43f81cb TB |
597 | &args->handle); |
598 | if (IS_ERR(bo)) | |
599 | return PTR_ERR(bo); | |
600 | ||
601 | return 0; | |
602 | } | |
603 | ||
/*
 * DRM_TEGRA_GEM_MMAP ioctl: report the fake mmap offset of a GEM object so
 * userspace can map it by calling mmap() on the DRM file descriptor with
 * that offset. Returns -EINVAL for an unknown handle.
 */
static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	/* drop the reference taken by the lookup */
	drm_gem_object_put_unlocked(gem);

	return 0;
}
623 | ||
624 | static int tegra_syncpt_read(struct drm_device *drm, void *data, | |
625 | struct drm_file *file) | |
626 | { | |
776dc384 | 627 | struct host1x *host = dev_get_drvdata(drm->dev->parent); |
d43f81cb | 628 | struct drm_tegra_syncpt_read *args = data; |
776dc384 | 629 | struct host1x_syncpt *sp; |
d43f81cb | 630 | |
776dc384 | 631 | sp = host1x_syncpt_get(host, args->id); |
d43f81cb TB |
632 | if (!sp) |
633 | return -EINVAL; | |
634 | ||
635 | args->value = host1x_syncpt_read_min(sp); | |
636 | return 0; | |
637 | } | |
638 | ||
639 | static int tegra_syncpt_incr(struct drm_device *drm, void *data, | |
640 | struct drm_file *file) | |
641 | { | |
776dc384 | 642 | struct host1x *host1x = dev_get_drvdata(drm->dev->parent); |
d43f81cb | 643 | struct drm_tegra_syncpt_incr *args = data; |
776dc384 | 644 | struct host1x_syncpt *sp; |
d43f81cb | 645 | |
776dc384 | 646 | sp = host1x_syncpt_get(host1x, args->id); |
d43f81cb TB |
647 | if (!sp) |
648 | return -EINVAL; | |
649 | ||
ebae30b1 | 650 | return host1x_syncpt_incr(sp); |
d43f81cb TB |
651 | } |
652 | ||
653 | static int tegra_syncpt_wait(struct drm_device *drm, void *data, | |
654 | struct drm_file *file) | |
655 | { | |
776dc384 | 656 | struct host1x *host1x = dev_get_drvdata(drm->dev->parent); |
d43f81cb | 657 | struct drm_tegra_syncpt_wait *args = data; |
776dc384 | 658 | struct host1x_syncpt *sp; |
d43f81cb | 659 | |
776dc384 | 660 | sp = host1x_syncpt_get(host1x, args->id); |
d43f81cb TB |
661 | if (!sp) |
662 | return -EINVAL; | |
663 | ||
664 | return host1x_syncpt_wait(sp, args->thresh, args->timeout, | |
665 | &args->value); | |
666 | } | |
667 | ||
/*
 * Open a channel on @client and register @context in the per-file IDR so
 * userspace can refer to it by ID (IDs start at 1). Called with
 * fpriv->lock held. On IDR failure the channel is closed again before
 * returning the error.
 */
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;	/* idr_alloc() returns the allocated ID */

	return 0;
}
689 | ||
/*
 * DRM_TEGRA_OPEN_CHANNEL ioctl: allocate a context and bind it to the
 * registered client whose class matches args->client, returning the new
 * context ID in args->context. Returns -ENODEV when no client with that
 * class is registered.
 */
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	/* no matching client, or tegra_client_open() failed */
	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}
722 | ||
/*
 * DRM_TEGRA_CLOSE_CHANNEL ioctl: look up the context by its ID, remove it
 * from the per-file IDR and destroy it. Returns -EINVAL for an unknown
 * context ID.
 */
static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
746 | ||
/*
 * DRM_TEGRA_GET_SYNCPT ioctl: return in args->id the hardware ID of the
 * context client's syncpoint at index args->index. Returns -ENODEV for an
 * unknown context and -EINVAL for an out-of-range index.
 */
static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
776 | ||
/*
 * DRM_TEGRA_SUBMIT ioctl: look up the context by ID and dispatch the job
 * through the client's ->submit() implementation (typically
 * tegra_drm_submit()). Returns -ENODEV for an unknown context.
 */
static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
c54a169b AM |
799 | |
/*
 * DRM_TEGRA_GET_SYNCPT_BASE ioctl: return in args->id the wait-base ID
 * associated with the context client's syncpoint args->syncpt. Returns
 * -ENODEV for an unknown context, -EINVAL for an out-of-range syncpoint
 * index and -ENXIO when the syncpoint has no wait base.
 */
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
7678d71f TR |
837 | |
/*
 * DRM_TEGRA_GEM_SET_TILING ioctl: validate and store the tiling mode of a
 * GEM object. Only BLOCK mode takes a value (the block height log2, at
 * most 5); PITCH and TILED require value == 0. Returns -EINVAL for a bad
 * mode/value combination and -ENOENT for an unknown handle.
 */
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
890 | ||
/*
 * DRM_TEGRA_GEM_GET_TILING ioctl: report a GEM object's tiling mode and
 * (for BLOCK mode) its value. Returns -ENOENT for an unknown handle and
 * -EINVAL if the stored mode is unrecognized.
 */
static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}
7b129087 TR |
930 | |
931 | static int tegra_gem_set_flags(struct drm_device *drm, void *data, | |
932 | struct drm_file *file) | |
933 | { | |
934 | struct drm_tegra_gem_set_flags *args = data; | |
935 | struct drm_gem_object *gem; | |
936 | struct tegra_bo *bo; | |
937 | ||
938 | if (args->flags & ~DRM_TEGRA_GEM_FLAGS) | |
939 | return -EINVAL; | |
940 | ||
a8ad0bd8 | 941 | gem = drm_gem_object_lookup(file, args->handle); |
7b129087 TR |
942 | if (!gem) |
943 | return -ENOENT; | |
944 | ||
945 | bo = to_tegra_bo(gem); | |
946 | bo->flags = 0; | |
947 | ||
948 | if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP) | |
949 | bo->flags |= TEGRA_BO_BOTTOM_UP; | |
950 | ||
7664b2fa | 951 | drm_gem_object_put_unlocked(gem); |
7b129087 TR |
952 | |
953 | return 0; | |
954 | } | |
955 | ||
956 | static int tegra_gem_get_flags(struct drm_device *drm, void *data, | |
957 | struct drm_file *file) | |
958 | { | |
959 | struct drm_tegra_gem_get_flags *args = data; | |
960 | struct drm_gem_object *gem; | |
961 | struct tegra_bo *bo; | |
962 | ||
a8ad0bd8 | 963 | gem = drm_gem_object_lookup(file, args->handle); |
7b129087 TR |
964 | if (!gem) |
965 | return -ENOENT; | |
966 | ||
967 | bo = to_tegra_bo(gem); | |
968 | args->flags = 0; | |
969 | ||
970 | if (bo->flags & TEGRA_BO_BOTTOM_UP) | |
971 | args->flags |= DRM_TEGRA_GEM_BOTTOM_UP; | |
972 | ||
7664b2fa | 973 | drm_gem_object_put_unlocked(gem); |
7b129087 TR |
974 | |
975 | return 0; | |
976 | } | |
d43f81cb TB |
977 | #endif |
978 | ||
/*
 * Tegra-specific ioctls. All of them are staging-only (compiled in only
 * with CONFIG_DRM_TEGRA_STAGING) and are allowed on render nodes.
 */
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};
1011 | ||
/*
 * File operations for the DRM device nodes; DRM core boilerplate except
 * for the driver-specific mmap used to map GEM buffer objects.
 */
static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};
1023 | ||
bdd2f9cd TR |
/*
 * IDR iteration callback: free a single channel context. Always returns
 * 0 so that idr_for_each() visits every entry.
 */
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	tegra_drm_context_free(p);

	return 0;
}
1032 | ||
/*
 * Per-file teardown: free every channel context still registered in the
 * file's IDR (under the file-private lock), then destroy the IDR and the
 * lock before releasing the private data itself.
 */
static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}
1045 | ||
e450fcc6 TR |
1046 | #ifdef CONFIG_DEBUG_FS |
/*
 * debugfs: list all framebuffers currently registered with the device,
 * including their dimensions, format and reference count.
 */
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	/* hold fb_lock so the list cannot change while we walk it */
	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}
1067 | ||
28c23373 TR |
1068 | static int tegra_debugfs_iova(struct seq_file *s, void *data) |
1069 | { | |
1070 | struct drm_info_node *node = (struct drm_info_node *)s->private; | |
1071 | struct drm_device *drm = node->minor->dev; | |
1072 | struct tegra_drm *tegra = drm->dev_private; | |
b5c3714f | 1073 | struct drm_printer p = drm_seq_file_printer(s); |
28c23373 | 1074 | |
68d890a3 MM |
1075 | if (tegra->domain) { |
1076 | mutex_lock(&tegra->mm_lock); | |
1077 | drm_mm_print(&tegra->mm, &p); | |
1078 | mutex_unlock(&tegra->mm_lock); | |
1079 | } | |
b5c3714f DV |
1080 | |
1081 | return 0; | |
28c23373 TR |
1082 | } |
1083 | ||
e450fcc6 TR |
/* Table of debugfs files exposed by the driver. */
static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};
1088 | ||
/* Register the driver's debugfs files with the given DRM minor. */
static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
e450fcc6 TR |
1095 | #endif |
1096 | ||
/*
 * Main DRM driver description: mode-setting, GEM, PRIME import/export,
 * atomic modesetting and render-node support. Tegra-specific entry
 * points are the load/unload/open/postclose/lastclose hooks, the GEM
 * object callbacks and the staging ioctl table above.
 */
static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	/* PRIME buffer sharing */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
776dc384 TR |
1131 | |
/*
 * Add a client (engine) to the DRM device's list of clients. Always
 * succeeds; the int return exists for interface symmetry with
 * tegra_drm_unregister_client().
 */
int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
1141 | ||
/*
 * Remove a client (engine) from the DRM device's list of clients.
 * list_del_init() is used so the client's list head remains valid for
 * re-registration. Always succeeds.
 */
int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
1151 | ||
ad926015 MP |
/*
 * tegra_drm_alloc - allocate memory that the engines can access
 * @tegra: per-device DRM context
 * @size: number of bytes to allocate (rounded up to the IOVA granule or
 *        page size as appropriate)
 * @dma: receives the device-visible (DMA/IOVA or physical) address
 *
 * Allocates zeroed pages and, when an IOMMU domain is in use, carves an
 * IO virtual address range out of the carveout region and maps the pages
 * into it. Without an IOMMU, devices address physical memory directly.
 *
 * Return: kernel virtual address on success, ERR_PTR()-encoded negative
 * error code on failure.
 */
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
		      dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	/* round up to the granularity of the address space in use */
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	/* reserve an IO virtual address range from the carveout ... */
	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	/* ... and map the pages into it, readable and writable by devices */
	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}
1212 | ||
1213 | void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, | |
1214 | dma_addr_t dma) | |
1215 | { | |
1216 | if (tegra->domain) | |
1217 | size = iova_align(&tegra->carveout.domain, size); | |
1218 | else | |
1219 | size = PAGE_ALIGN(size); | |
1220 | ||
1221 | if (tegra->domain) { | |
1222 | iommu_unmap(tegra->domain, dma, size); | |
1223 | free_iova(&tegra->carveout.domain, | |
1224 | iova_pfn(&tegra->carveout.domain, dma)); | |
1225 | } | |
1226 | ||
1227 | free_pages((unsigned long)virt, get_order(size)); | |
1228 | } | |
1229 | ||
9910f5c4 | 1230 | static int host1x_drm_probe(struct host1x_device *dev) |
776dc384 | 1231 | { |
9910f5c4 TR |
1232 | struct drm_driver *driver = &tegra_drm_driver; |
1233 | struct drm_device *drm; | |
1234 | int err; | |
1235 | ||
1236 | drm = drm_dev_alloc(driver, &dev->dev); | |
0f288605 TG |
1237 | if (IS_ERR(drm)) |
1238 | return PTR_ERR(drm); | |
9910f5c4 | 1239 | |
9910f5c4 TR |
1240 | dev_set_drvdata(&dev->dev, drm); |
1241 | ||
1242 | err = drm_dev_register(drm, 0); | |
1243 | if (err < 0) | |
1244 | goto unref; | |
1245 | ||
9910f5c4 TR |
1246 | return 0; |
1247 | ||
1248 | unref: | |
1249 | drm_dev_unref(drm); | |
1250 | return err; | |
776dc384 TR |
1251 | } |
1252 | ||
/*
 * host1x bus remove: unregister the DRM device and drop the final
 * reference taken at probe time.
 */
static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}
1262 | ||
359ae687 TR |
1263 | #ifdef CONFIG_PM_SLEEP |
/*
 * System sleep: stop output polling and suspend the fbdev console, then
 * save the atomic display state in tegra->state for the resume path.
 * On failure the earlier steps are unwound in reverse order.
 */
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}
1281 | ||
/*
 * System resume: restore the atomic state saved by host1x_drm_suspend(),
 * bring the fbdev console back and re-enable output polling.
 *
 * NOTE(review): the return value of drm_atomic_helper_resume() is
 * ignored here — consider propagating a failure to the PM core.
 */
static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
1293 | #endif | |
1294 | ||
a13f1dc4 TR |
/* System sleep PM ops; no-ops unless CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);
359ae687 | 1297 | |
776dc384 TR |
1298 | static const struct of_device_id host1x_drm_subdevs[] = { |
1299 | { .compatible = "nvidia,tegra20-dc", }, | |
1300 | { .compatible = "nvidia,tegra20-hdmi", }, | |
1301 | { .compatible = "nvidia,tegra20-gr2d", }, | |
5f60ed0d | 1302 | { .compatible = "nvidia,tegra20-gr3d", }, |
776dc384 TR |
1303 | { .compatible = "nvidia,tegra30-dc", }, |
1304 | { .compatible = "nvidia,tegra30-hdmi", }, | |
1305 | { .compatible = "nvidia,tegra30-gr2d", }, | |
5f60ed0d | 1306 | { .compatible = "nvidia,tegra30-gr3d", }, |
dec72739 | 1307 | { .compatible = "nvidia,tegra114-dsi", }, |
7d1d28ac | 1308 | { .compatible = "nvidia,tegra114-hdmi", }, |
5f60ed0d | 1309 | { .compatible = "nvidia,tegra114-gr3d", }, |
8620fc62 | 1310 | { .compatible = "nvidia,tegra124-dc", }, |
6b6b6042 | 1311 | { .compatible = "nvidia,tegra124-sor", }, |
fb7be70e | 1312 | { .compatible = "nvidia,tegra124-hdmi", }, |
7d338587 | 1313 | { .compatible = "nvidia,tegra124-dsi", }, |
0ae797a8 | 1314 | { .compatible = "nvidia,tegra124-vic", }, |
c06c7930 | 1315 | { .compatible = "nvidia,tegra132-dsi", }, |
5b4f516f | 1316 | { .compatible = "nvidia,tegra210-dc", }, |
ddfb406b | 1317 | { .compatible = "nvidia,tegra210-dsi", }, |
3309ac83 | 1318 | { .compatible = "nvidia,tegra210-sor", }, |
459cc2c6 | 1319 | { .compatible = "nvidia,tegra210-sor1", }, |
0ae797a8 | 1320 | { .compatible = "nvidia,tegra210-vic", }, |
6e44b9ad | 1321 | { .compatible = "nvidia,tegra186-vic", }, |
776dc384 TR |
1322 | { /* sentinel */ } |
1323 | }; | |
1324 | ||
/* host1x bus driver binding the logical DRM device to its sub-devices. */
static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};
1334 | ||
473112e4 TR |
/* Platform drivers for the individual hardware blocks. */
static struct platform_driver * const drivers[] = {
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};
1345 | ||
776dc384 TR |
1346 | static int __init host1x_drm_init(void) |
1347 | { | |
1348 | int err; | |
1349 | ||
1350 | err = host1x_driver_register(&host1x_drm_driver); | |
1351 | if (err < 0) | |
1352 | return err; | |
1353 | ||
473112e4 | 1354 | err = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); |
776dc384 TR |
1355 | if (err < 0) |
1356 | goto unregister_host1x; | |
1357 | ||
776dc384 TR |
1358 | return 0; |
1359 | ||
776dc384 TR |
1360 | unregister_host1x: |
1361 | host1x_driver_unregister(&host1x_drm_driver); | |
1362 | return err; | |
1363 | } | |
1364 | module_init(host1x_drm_init); | |
1365 | ||
/* Module exit: unregister in the reverse order of host1x_drm_init(). */
static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
1371 | module_exit(host1x_drm_exit); | |
1372 | ||
1373 | MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); | |
1374 | MODULE_DESCRIPTION("NVIDIA Tegra DRM driver"); | |
1375 | MODULE_LICENSE("GPL v2"); |