/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", we always try to allocate
 * after the last allocated bo. Principle is that in a linear GPU ring
 * progression what is after last is the oldest bo we allocated and thus
 * the first one that should no longer be in use by the GPU.
 *
 * If it's not the case we skip over the bo after last to the closest
 * done bo if such one exists. If none exists and we are not asked to
 * block we report failure to allocate.
 *
 * If we are asked to block we wait on all the oldest fences of all
 * rings. We just wait for any of those fences to complete.
 */
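/* For example (offsets are illustrative only): with allocations A, B and
 * C in flight in a 0x1000 byte buffer we could have
 *
 *   olist: head -> A [0x0000,0x0400) -> B [0x0400,0x0a00) -> C [0x0a00,0x0e00)
 *   hole:  &C->olist, so new allocations are tried in [0x0e00, 0x1000)
 *   flist: one list per ring holding the bos still protected by an
 *          unsignaled fence, oldest first
 *
 * When the hole is too small we either wrap around to the list head or
 * skip to the closest signaled bo, see radeon_sa_bo_next_hole().
 */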
#include <drm/drmP.h>
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

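/**
 * radeon_sa_bo_manager_init - initialize the suballocator manager
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 * @size: size of the backing buffer in bytes
 * @domain: memory domain the backing bo is pinned to on start
 *
 * Set up the hole pointer and the per-ring fence lists, then
 * allocate the backing bo in the CPU domain.
 */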
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

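/**
 * radeon_sa_bo_manager_fini - tear down the suballocator manager
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Free whatever is still on the offset list (warning if entries are
 * still in use) and drop the reference on the backing bo.
 */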
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

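/**
 * radeon_sa_bo_manager_start - make the suballocator usable
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Pin the backing bo into the configured domain and map it into
 * kernel address space.
 */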
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

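/**
 * radeon_sa_bo_manager_suspend - make the suballocator inaccessible
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Unmap and unpin the backing bo in preparation for suspend.
 */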
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

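/**
 * radeon_sa_bo_remove_locked - remove a suballocation
 *
 * @sa_bo: suballocation to remove
 *
 * Unlink @sa_bo from the offset and fence lists, drop its fence
 * reference and free it. Normally called with sa_manager->wq.lock
 * held.
 */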
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

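/**
 * radeon_sa_bo_try_free - free finished suballocations after the hole
 *
 * @sa_manager: pointer to the sa_manager
 *
 * Starting right after the hole, remove suballocations whose fence
 * has already signaled, stopping at the first one still in use.
 * Normally called with sa_manager->wq.lock held.
 */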
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

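/**
 * radeon_sa_bo_hole_soffset - start offset of the current hole
 *
 * @sa_manager: pointer to the sa_manager
 *
 * The hole starts where the suballocation right before it ends, or
 * at offset zero when the hole sits at the list head.
 */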
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

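/**
 * radeon_sa_bo_hole_eoffset - end offset of the current hole
 *
 * @sa_manager: pointer to the sa_manager
 *
 * The hole ends where the next suballocation starts, or at the end
 * of the buffer when there is none.
 */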
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

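/**
 * radeon_sa_bo_try_alloc - try to suballocate from the current hole
 *
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: suballocation to fill in on success
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * If the hole is big enough for the aligned request, initialize
 * @sa_bo, link it in right after the hole and move the hole behind
 * it. Returns true on success, false if the hole is too small.
 */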
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

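/**
 * radeon_sa_bo_next_hole - move the hole to the next spot worth trying
 *
 * @sa_manager: pointer to the sa_manager
 * @fences: array, one entry per ring, filled with fences we may wait for
 * @tries: array counting how often each ring was already skipped to
 *
 * If the hole reached the end of the buffer, wrap around to the
 * beginning. Otherwise pick the closest already signaled
 * suballocation after the hole (in ring order), free it and move
 * the hole there. Returns true if the caller should retry the
 * allocation.
 */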
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		   so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

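/**
 * radeon_sa_bo_new - allocate a new suballocation
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: pointer used to return the new suballocation
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 * @block: whether to sleep on fences when the buffer is full
 *
 * Try to fit the request into the current hole, skipping over
 * signaled suballocations, and if @block is true wait on the oldest
 * fences until space becomes available. Returns 0 on success,
 * negative error code on failure.
 */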
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r = -ENOMEM;

	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	while (1) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		if (!block) {
			break;
		}

		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block on the wait queue */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				radeon_sa_event(sa_manager, size, align)
			);
		}
		if (r) {
			goto out_err;
		}
	}

out_err:
	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

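/**
 * radeon_sa_bo_free - free a suballocation
 *
 * @rdev: radeon_device pointer
 * @sa_bo: suballocation to free
 * @fence: fence protecting the suballocation, or NULL
 *
 * If @fence is not yet signaled the suballocation is queued on the
 * fence's ring list and freed later, otherwise it is removed right
 * away. Wakes up anyone waiting for space.
 */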
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
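/**
 * radeon_sa_bo_dump_debug_info - dump the manager state via debugfs
 *
 * @sa_manager: pointer to the sa_manager
 * @m: seq_file to print into
 *
 * Print every suballocation with its offsets, size and protecting
 * fence; the current hole position is marked with '>'.
 */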
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif