]>
Commit | Line | Data |
---|---|---|
f556cb0c CZ |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
f556cb0c | 22 | */ |
1b1f42d8 | 23 | |
f556cb0c CZ |
24 | #include <linux/kthread.h> |
25 | #include <linux/wait.h> | |
26 | #include <linux/sched.h> | |
27 | #include <drm/drmP.h> | |
1b1f42d8 | 28 | #include <drm/gpu_scheduler.h> |
f556cb0c | 29 | |
/* Slab cache backing every struct drm_sched_fence allocation in this file. */
static struct kmem_cache *sched_fence_slab;
4983e48c | 32 | static int __init drm_sched_fence_slab_init(void) |
c24784f0 CK |
33 | { |
34 | sched_fence_slab = kmem_cache_create( | |
1b1f42d8 | 35 | "drm_sched_fence", sizeof(struct drm_sched_fence), 0, |
c24784f0 CK |
36 | SLAB_HWCACHE_ALIGN, NULL); |
37 | if (!sched_fence_slab) | |
38 | return -ENOMEM; | |
39 | ||
40 | return 0; | |
41 | } | |
42 | ||
/*
 * Tear down the fence slab at module unload.  The rcu_barrier() waits for
 * all pending drm_sched_fence_free() RCU callbacks (queued via call_rcu in
 * drm_sched_fence_release_scheduled()) so none frees into a destroyed cache.
 */
static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}
1b1f42d8 | 49 | void drm_sched_fence_scheduled(struct drm_sched_fence *fence) |
f556cb0c | 50 | { |
f54d1867 | 51 | int ret = dma_fence_signal(&fence->scheduled); |
6fc13675 | 52 | |
2983e5ce | 53 | if (!ret) |
f54d1867 CW |
54 | DMA_FENCE_TRACE(&fence->scheduled, |
55 | "signaled from irq context\n"); | |
2983e5ce | 56 | else |
f54d1867 CW |
57 | DMA_FENCE_TRACE(&fence->scheduled, |
58 | "was already signaled\n"); | |
f556cb0c CZ |
59 | } |
60 | ||
1b1f42d8 | 61 | void drm_sched_fence_finished(struct drm_sched_fence *fence) |
393a0bd4 | 62 | { |
f54d1867 | 63 | int ret = dma_fence_signal(&fence->finished); |
393a0bd4 | 64 | |
6fc13675 | 65 | if (!ret) |
f54d1867 CW |
66 | DMA_FENCE_TRACE(&fence->finished, |
67 | "signaled from irq context\n"); | |
6fc13675 | 68 | else |
f54d1867 CW |
69 | DMA_FENCE_TRACE(&fence->finished, |
70 | "was already signaled\n"); | |
393a0bd4 CK |
71 | } |
72 | ||
/* dma_fence_ops->get_driver_name: all scheduler fences share one driver name. */
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}
1b1f42d8 | 78 | static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f) |
f556cb0c | 79 | { |
1b1f42d8 | 80 | struct drm_sched_fence *fence = to_drm_sched_fence(f); |
9b398fa5 | 81 | return (const char *)fence->sched->name; |
f556cb0c CZ |
82 | } |
83 | ||
/*
 * dma_fence_ops->enable_signaling: scheduler fences are signaled explicitly
 * through drm_sched_fence_scheduled()/drm_sched_fence_finished(), so there is
 * nothing to arm here; just report that signaling will happen.
 */
static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
{
	return true;
}
/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	/* Drop the reference on the parent (hw) fence; presumably may be NULL
	 * if no parent was ever attached — dma_fence_put() tolerates that. */
	dma_fence_put(fence->parent);
	kmem_cache_free(sched_fence_slab, fence);
}
/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	/* Defer the actual free past an RCU grace period; the finished
	 * fence's rcu head is reused as the callback anchor. */
	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}
/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}
/*
 * Ops for the embedded "scheduled" fence.  Its release path RCU-frees the
 * whole drm_sched_fence, so this fence owns the containing object's lifetime.
 */
const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.enable_signaling = drm_sched_fence_enable_signaling,
	.signaled = NULL,
	.wait = dma_fence_default_wait,
	.release = drm_sched_fence_release_scheduled,
};
/*
 * Ops for the embedded "finished" fence.  Its release path only drops the
 * extra reference held on the "scheduled" fence, which does the real free.
 */
const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.enable_signaling = drm_sched_fence_enable_signaling,
	.signaled = NULL,
	.wait = dma_fence_default_wait,
	.release = drm_sched_fence_release_finished,
};
1b1f42d8 LS |
151 | |
152 | struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) | |
153 | { | |
154 | if (f->ops == &drm_sched_fence_ops_scheduled) | |
155 | return container_of(f, struct drm_sched_fence, scheduled); | |
156 | ||
157 | if (f->ops == &drm_sched_fence_ops_finished) | |
158 | return container_of(f, struct drm_sched_fence, finished); | |
159 | ||
160 | return NULL; | |
161 | } | |
162 | EXPORT_SYMBOL(to_drm_sched_fence); | |
163 | ||
164 | struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity, | |
165 | void *owner) | |
166 | { | |
167 | struct drm_sched_fence *fence = NULL; | |
168 | unsigned seq; | |
169 | ||
170 | fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); | |
171 | if (fence == NULL) | |
172 | return NULL; | |
173 | ||
174 | fence->owner = owner; | |
175 | fence->sched = entity->sched; | |
176 | spin_lock_init(&fence->lock); | |
177 | ||
178 | seq = atomic_inc_return(&entity->fence_seq); | |
179 | dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, | |
180 | &fence->lock, entity->fence_context, seq); | |
181 | dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished, | |
182 | &fence->lock, entity->fence_context + 1, seq); | |
183 | ||
184 | return fence; | |
185 | } | |
/* Tie slab creation/destruction to module load and unload. */
module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");