2 * Copyright 2011 Christian König.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
28 * Authors: Christian König <deathsimple@vodafone.de>
32 #include "radeon_trace.h"
34 int radeon_semaphore_create(struct radeon_device
*rdev
,
35 struct radeon_semaphore
**semaphore
)
40 *semaphore
= kmalloc(sizeof(struct radeon_semaphore
), GFP_KERNEL
);
41 if (*semaphore
== NULL
) {
44 r
= radeon_sa_bo_new(rdev
, &rdev
->ring_tmp_bo
, &(*semaphore
)->sa_bo
,
45 8 * RADEON_NUM_SYNCS
, 8);
51 (*semaphore
)->waiters
= 0;
52 (*semaphore
)->gpu_addr
= radeon_sa_bo_gpu_addr((*semaphore
)->sa_bo
);
54 cpu_addr
= radeon_sa_bo_cpu_addr((*semaphore
)->sa_bo
);
55 for (i
= 0; i
< RADEON_NUM_SYNCS
; ++i
)
58 for (i
= 0; i
< RADEON_NUM_RINGS
; ++i
)
59 (*semaphore
)->sync_to
[i
] = NULL
;
64 bool radeon_semaphore_emit_signal(struct radeon_device
*rdev
, int ridx
,
65 struct radeon_semaphore
*semaphore
)
67 struct radeon_ring
*ring
= &rdev
->ring
[ridx
];
69 trace_radeon_semaphore_signale(ridx
, semaphore
);
71 if (radeon_semaphore_ring_emit(rdev
, ridx
, ring
, semaphore
, false)) {
74 /* for debugging lockup only, used by sysfs debug files */
75 ring
->last_semaphore_signal_addr
= semaphore
->gpu_addr
;
81 bool radeon_semaphore_emit_wait(struct radeon_device
*rdev
, int ridx
,
82 struct radeon_semaphore
*semaphore
)
84 struct radeon_ring
*ring
= &rdev
->ring
[ridx
];
86 trace_radeon_semaphore_wait(ridx
, semaphore
);
88 if (radeon_semaphore_ring_emit(rdev
, ridx
, ring
, semaphore
, true)) {
91 /* for debugging lockup only, used by sysfs debug files */
92 ring
->last_semaphore_wait_addr
= semaphore
->gpu_addr
;
99 * radeon_semaphore_sync_to - use the semaphore to sync to a fence
101 * @semaphore: semaphore object to add fence to
102 * @fence: fence to sync to
104 * Sync to the fence using this semaphore object
106 void radeon_semaphore_sync_to(struct radeon_semaphore
*semaphore
,
107 struct radeon_fence
*fence
)
109 struct radeon_fence
*other
;
114 other
= semaphore
->sync_to
[fence
->ring
];
115 semaphore
->sync_to
[fence
->ring
] = radeon_fence_later(fence
, other
);
119 * radeon_semaphore_sync_rings - sync ring to all registered fences
121 * @rdev: radeon_device pointer
122 * @semaphore: semaphore object to use for sync
123 * @ring: ring that needs sync
125 * Ensure that all registered fences are signaled before letting
126 * the ring continue. The caller must hold the ring lock.
128 int radeon_semaphore_sync_rings(struct radeon_device
*rdev
,
129 struct radeon_semaphore
*semaphore
,
135 for (i
= 0; i
< RADEON_NUM_RINGS
; ++i
) {
136 struct radeon_fence
*fence
= semaphore
->sync_to
[i
];
138 /* check if we really need to sync */
139 if (!radeon_fence_need_sync(fence
, ring
))
142 /* prevent GPU deadlocks */
143 if (!rdev
->ring
[i
].ready
) {
144 dev_err(rdev
->dev
, "Syncing to a disabled ring!");
148 if (++count
> RADEON_NUM_SYNCS
) {
149 /* not enough room, wait manually */
150 r
= radeon_fence_wait(fence
, false);
156 /* allocate enough space for sync command */
157 r
= radeon_ring_alloc(rdev
, &rdev
->ring
[i
], 16);
162 /* emit the signal semaphore */
163 if (!radeon_semaphore_emit_signal(rdev
, i
, semaphore
)) {
164 /* signaling wasn't successful wait manually */
165 radeon_ring_undo(&rdev
->ring
[i
]);
166 r
= radeon_fence_wait(fence
, false);
172 /* we assume caller has already allocated space on waiters ring */
173 if (!radeon_semaphore_emit_wait(rdev
, ring
, semaphore
)) {
174 /* waiting wasn't successful wait manually */
175 radeon_ring_undo(&rdev
->ring
[i
]);
176 r
= radeon_fence_wait(fence
, false);
182 radeon_ring_commit(rdev
, &rdev
->ring
[i
]);
183 radeon_fence_note_sync(fence
, ring
);
185 semaphore
->gpu_addr
+= 8;
191 void radeon_semaphore_free(struct radeon_device
*rdev
,
192 struct radeon_semaphore
**semaphore
,
193 struct radeon_fence
*fence
)
195 if (semaphore
== NULL
|| *semaphore
== NULL
) {
198 if ((*semaphore
)->waiters
> 0) {
199 dev_err(rdev
->dev
, "semaphore %p has more waiters than signalers,"
200 " hardware lockup imminent!\n", *semaphore
);
202 radeon_sa_bo_free(rdev
, &(*semaphore
)->sa_bo
, fence
);