]>
Commit | Line | Data |
---|---|---|
786d7257 | 1 | /* |
04a5faa8 | 2 | * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst) |
786d7257 ML |
3 | * |
4 | * Based on bo.c which bears the following copyright notice, | |
5 | * but is dual licensed: | |
6 | * | |
7 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | |
8 | * All Rights Reserved. | |
9 | * | |
10 | * Permission is hereby granted, free of charge, to any person obtaining a | |
11 | * copy of this software and associated documentation files (the | |
12 | * "Software"), to deal in the Software without restriction, including | |
13 | * without limitation the rights to use, copy, modify, merge, publish, | |
14 | * distribute, sub license, and/or sell copies of the Software, and to | |
15 | * permit persons to whom the Software is furnished to do so, subject to | |
16 | * the following conditions: | |
17 | * | |
18 | * The above copyright notice and this permission notice (including the | |
19 | * next paragraph) shall be included in all copies or substantial portions | |
20 | * of the Software. | |
21 | * | |
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
25 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
26 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
27 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
28 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
29 | * | |
30 | **************************************************************************/ | |
31 | /* | |
32 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | |
33 | */ | |
34 | ||
35 | #include <linux/reservation.h> | |
36 | #include <linux/export.h> | |
37 | ||
/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */

/*
 * Single ww_mutex class shared by all reservation objects so that
 * multiple buffers can be locked in any order without deadlocking.
 */
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/*
 * Shared lockdep class key and name for the seqcount embedded in every
 * reservation object; used by the write_seqcount_begin/end sections and
 * read_seqcount_begin/retry loops below.
 */
struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
dad6c394 RC |
57 | |
58 | /** | |
59 | * reservation_object_reserve_shared - Reserve space to add a shared | |
60 | * fence to a reservation_object. | |
61 | * @obj: reservation object | |
62 | * | |
63 | * Should be called before reservation_object_add_shared_fence(). Must | |
64 | * be called with obj->lock held. | |
65 | * | |
66 | * RETURNS | |
67 | * Zero for success, or -errno | |
04a5faa8 ML |
68 | */ |
69 | int reservation_object_reserve_shared(struct reservation_object *obj) | |
70 | { | |
71 | struct reservation_object_list *fobj, *old; | |
72 | u32 max; | |
73 | ||
74 | old = reservation_object_get_list(obj); | |
75 | ||
76 | if (old && old->shared_max) { | |
77 | if (old->shared_count < old->shared_max) { | |
78 | /* perform an in-place update */ | |
79 | kfree(obj->staged); | |
80 | obj->staged = NULL; | |
81 | return 0; | |
82 | } else | |
83 | max = old->shared_max * 2; | |
84 | } else | |
85 | max = 4; | |
86 | ||
87 | /* | |
88 | * resize obj->staged or allocate if it doesn't exist, | |
89 | * noop if already correct size | |
90 | */ | |
91 | fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]), | |
92 | GFP_KERNEL); | |
93 | if (!fobj) | |
94 | return -ENOMEM; | |
95 | ||
96 | obj->staged = fobj; | |
97 | fobj->shared_max = max; | |
98 | return 0; | |
99 | } | |
100 | EXPORT_SYMBOL(reservation_object_reserve_shared); | |
101 | ||
/*
 * Add @fence to @fobj without reallocating: either replace an existing
 * shared fence from the same fence context, or append to the next free
 * slot.  Caller holds obj->lock and has guaranteed a free slot via
 * reservation_object_reserve_shared().
 */
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	/* Take a reference for the slot before publishing the fence. */
	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		/* Same context: replace the older fence in place. */
		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			/* drop the reference held by the replaced slot */
			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}
141 | ||
/*
 * Publish the preallocated list @fobj as obj->fence, carrying over the
 * fences from @old (if any) and adding @fence.  If @old already holds a
 * fence from the same context, @fence replaces it in the new list and
 * the displaced fence is released.  Caller holds obj->lock.
 */
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		/* First shared fence ever: new list contains just @fence. */
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			/* same context: @fence takes over this slot */
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		/* no same-context slot found: append in the extra slot */
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* old list may still be in use by RCU readers; free after grace period */
	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}
201 | ||
dad6c394 RC |
202 | /** |
203 | * reservation_object_add_shared_fence - Add a fence to a shared slot | |
204 | * @obj: the reservation object | |
205 | * @fence: the shared fence to add | |
206 | * | |
04a5faa8 | 207 | * Add a fence to a shared slot, obj->lock must be held, and |
f5bef0b8 | 208 | * reservation_object_reserve_shared() has been called. |
04a5faa8 ML |
209 | */ |
210 | void reservation_object_add_shared_fence(struct reservation_object *obj, | |
211 | struct fence *fence) | |
212 | { | |
213 | struct reservation_object_list *old, *fobj = obj->staged; | |
214 | ||
215 | old = reservation_object_get_list(obj); | |
216 | obj->staged = NULL; | |
217 | ||
218 | if (!fobj) { | |
3c3b177a | 219 | BUG_ON(old->shared_count >= old->shared_max); |
04a5faa8 ML |
220 | reservation_object_add_shared_inplace(obj, old, fence); |
221 | } else | |
222 | reservation_object_add_shared_replace(obj, old, fobj, fence); | |
223 | } | |
224 | EXPORT_SYMBOL(reservation_object_add_shared_fence); | |
225 | ||
/**
 * reservation_object_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add (may be NULL to clear the slot)
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 * All shared fences are dropped as a side effect.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	/* drop the reference to the previous exclusive fence, if any */
	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
3c3b177a | 265 | |
/**
 * reservation_object_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * References are taken on every returned fence; the caller owns them and
 * must fence_put() each one, then kfree() *pshared.
 *
 * RETURNS
 * Zero or -errno
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	struct fence **shared = NULL;
	struct fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;	/* nonzero keeps the retry loop going */

	do {
		struct reservation_object_list *fobj;
		unsigned seq;
		unsigned int i;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		/* fence being destroyed concurrently: retry the snapshot */
		if (fence_excl && !fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			/* try a non-sleeping allocation inside the RCU section */
			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				/*
				 * Drop the RCU lock so we may sleep, then
				 * retry the whole snapshot with the bigger
				 * buffer already in hand.
				 */
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj->shared_count;

			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				/* a dying fence forces a retry below */
				if (!fence_get_rcu(shared[i]))
					break;
			}
		}

		/*
		 * A partially grabbed list or a concurrent write-side
		 * update invalidates the snapshot: drop all references
		 * taken so far and start over.
		 */
		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				fence_put(shared[i]);
			fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
354 | ||
/**
 * reservation_object_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	/* zero timeout: just poll the current signaled state */
	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		/* find the first shared fence that is not yet signaled */
		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			/* fence is being freed: restart the snapshot */
			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	/* no shared fences (or not waiting on them): check the exclusive one */
	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		/* snapshot raced with a write-side update: try again */
		if (read_seqcount_retry(&obj->seq, seq)) {
			fence_put(fence);
			goto retry;
		}

		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		/* more shared fences may remain: wait on the rest too */
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
444 | ||
445 | ||
446 | static inline int | |
447 | reservation_object_test_signaled_single(struct fence *passed_fence) | |
448 | { | |
449 | struct fence *fence, *lfence = passed_fence; | |
450 | int ret = 1; | |
451 | ||
452 | if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { | |
3c3b177a ML |
453 | fence = fence_get_rcu(lfence); |
454 | if (!fence) | |
455 | return -1; | |
456 | ||
457 | ret = !!fence_is_signaled(fence); | |
458 | fence_put(fence); | |
459 | } | |
460 | return ret; | |
461 | } | |
462 | ||
/**
 * reservation_object_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		/* list pointer/count raced with a writer: resample */
		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				/* fence being freed: restart the snapshot */
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);