/*
 * Copyright (c) 2013, 2014, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef OVS_ATOMIC_H
#define OVS_ATOMIC_H 1

/* Atomic operations.
 *
 * This library implements atomic operations with an API based on the one
 * defined in C11.  It includes multiple implementations for compilers and
 * libraries with varying degrees of built-in support for C11, including a
 * fallback implementation for systems that have pthreads but no other support
 * for atomics.
 *
 * This comment describes the common features of all the implementations.
 *
 *
 * Types
 * =====
 *
 * The following atomic types are supported as typedefs for atomic versions of
 * the listed ordinary types:
 *
 *     ordinary type            atomic version
 *     -------------------      ----------------------
 *     bool                     atomic_bool
 *
 *     char                     atomic_char
 *     signed char              atomic_schar
 *     unsigned char            atomic_uchar
 *
 *     short                    atomic_short
 *     unsigned short           atomic_ushort
 *
 *     int                      atomic_int
 *     unsigned int             atomic_uint
 *
 *     long                     atomic_long
 *     unsigned long            atomic_ulong
 *
 *     long long                atomic_llong
 *     unsigned long long       atomic_ullong
 *
 *     size_t                   atomic_size_t
 *     ptrdiff_t                atomic_ptrdiff_t
 *
 *     intmax_t                 atomic_intmax_t
 *     uintmax_t                atomic_uintmax_t
 *
 *     intptr_t                 atomic_intptr_t
 *     uintptr_t                atomic_uintptr_t
 *
 *     uint8_t                  atomic_uint8_t     (*)
 *     uint16_t                 atomic_uint16_t    (*)
 *     uint32_t                 atomic_uint32_t    (*)
 *     int8_t                   atomic_int8_t      (*)
 *     int16_t                  atomic_int16_t     (*)
 *     int32_t                  atomic_int32_t     (*)
 *     uint64_t                 atomic_uint64_t    (*)
 *     int64_t                  atomic_int64_t     (*)
 *
 *     (*) Not specified by C11.
 *
 * Atomic types may also be obtained via ATOMIC(TYPE), e.g. ATOMIC(void *).
 * Only basic integer types and pointer types can be made atomic this way,
 * e.g. atomic structs are not supported.
 *
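 * For illustration only (a sketch; 'struct frob' is hypothetical):
 *
 *     ATOMIC(void *) ptr_var;          (ok: pointer type)
 *     ATOMIC(uint32_t) int_var;        (ok: basic integer type)
 *     ATOMIC(struct frob) not_ok;      (not supported: struct type)
 *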
 * The atomic version of a type doesn't necessarily have the same size or
 * representation as the ordinary version; for example, atomic_int might be a
 * typedef for a struct.  The range of an atomic type does match the range of
 * the corresponding ordinary type.
 *
 * C11 says that one may use the _Atomic keyword in place of the typedef name,
 * e.g. "_Atomic int" instead of "atomic_int".  This library doesn't support
 * that.
 *
 *
 * Life Cycle
 * ==========
 *
 * To initialize an atomic variable at its point of definition, use
 * ATOMIC_VAR_INIT:
 *
 *     static atomic_int ai = ATOMIC_VAR_INIT(123);
 *
 * To initialize an atomic variable in code, use atomic_init():
 *
 *     static atomic_int ai;
 *     ...
 *     atomic_init(&ai, 123);
 *
 *
 * Barriers
 * ========
 *
 * enum memory_order specifies the strictness of a memory barrier.  It has the
 * following values:
 *
 *    memory_order_relaxed:
 *
 *        Only atomicity is provided; this does not imply any memory ordering
 *        with respect to any other variable (atomic or not).  Relaxed
 *        accesses to the same atomic variable will be performed in the
 *        program order.  The compiler and CPU are free to move memory
 *        accesses to other variables past the atomic operation.
 *
 *    memory_order_consume:
 *
 *        Memory accesses with a data dependency on the result of the consume
 *        operation (atomic_read_explicit, or a load operation preceding an
 *        atomic_thread_fence) will not be moved prior to the consume
 *        barrier.  Non-data-dependent loads and stores can be reordered to
 *        happen before the consume barrier.
 *
 *        RCU is the prime example of the use of the consume barrier: The
 *        consume barrier guarantees that reads from an RCU protected object
 *        are performed after the RCU protected pointer is read.  A
 *        corresponding release barrier is used to store the modified RCU
 *        protected pointer after the RCU protected object has been fully
 *        constructed.  The synchronization between these barriers prevents
 *        the RCU "consumer" from seeing uninitialized data.
 *
 *        May not be used with atomic_store_explicit(), as consume semantics
 *        applies only to atomic loads.
 *
 *    memory_order_acquire:
 *
 *        Memory accesses after an acquire barrier cannot be moved before the
 *        barrier.  Memory accesses before an acquire barrier *can* be moved
 *        after it.
 *
 *        An atomic_thread_fence with memory_order_acquire does not have a
 *        load operation by itself; it prevents all following memory accesses
 *        from moving prior to preceding loads.
 *
 *        May not be used with atomic_store_explicit(), as acquire semantics
 *        applies only to atomic loads.
 *
 *    memory_order_release:
 *
 *        Memory accesses before a release barrier cannot be moved after the
 *        barrier.  Memory accesses after a release barrier *can* be moved
 *        before it.
 *
 *        An atomic_thread_fence with memory_order_release does not have a
 *        store operation by itself; it prevents all preceding memory accesses
 *        from moving past subsequent stores.
 *
 *        May not be used with atomic_read_explicit(), as release semantics
 *        applies only to atomic stores.
 *
 *    memory_order_acq_rel:
 *
 *        Memory accesses cannot be moved across an acquire-release barrier in
 *        either direction.
 *
 *        May only be used with atomic read-modify-write operations, as both
 *        load and store operations are required for acquire-release
 *        semantics.
 *
 *        An atomic_thread_fence with memory_order_acq_rel does not have
 *        either a load or a store operation by itself; it prevents all
 *        following memory accesses from moving prior to preceding loads and
 *        all preceding memory accesses from moving past subsequent stores.
 *
 *    memory_order_seq_cst:
 *
 *        Prevents movement of memory accesses like an acquire-release
 *        barrier, but whereas acquire-release synchronizes cooperating
 *        threads (using the same atomic variable), sequential-consistency
 *        synchronizes the whole system, providing a total order for stores
 *        on all atomic variables.
 *
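 * As an illustrative sketch only (not additional API; 'data' is an ordinary
 * int and 'ready' a hypothetical atomic_bool, initially false), a release
 * store pairs with an acquire read to publish data from one thread to
 * another:
 *
 *     Writer thread:
 *         data = 42;
 *         atomic_store_explicit(&ready, true, memory_order_release);
 *
 *     Reader thread:
 *         bool ok;
 *         atomic_read_explicit(&ready, &ok, memory_order_acquire);
 *         if (ok) {
 *             (here 'data' is guaranteed to read as 42)
 *         }
 *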
 * OVS atomics require the memory_order to be passed as a compile-time constant
 * value, as some compiler implementations may perform poorly if the memory
 * order parameter is passed in as a run-time value.
 *
 * The following functions insert explicit barriers.  Most of the other atomic
 * functions also include barriers.
 *
 *     void atomic_thread_fence(memory_order order);
 *
 *         Inserts a barrier of the specified type.
 *
 *         For memory_order_relaxed, this is a no-op.
 *
 *     void atomic_signal_fence(memory_order order);
 *
 *         Inserts a barrier of the specified type, but only with respect to
 *         signal handlers in the same thread as the barrier.  This is
 *         basically a compiler optimization barrier, except for
 *         memory_order_relaxed, which is a no-op.
 *
 *
 * Atomic Operations
 * =================
 *
 * In this section, A is an atomic type and C is the corresponding non-atomic
 * type.
 *
 * The "store" and "compare_exchange" primitives match C11:
 *
 *     void atomic_store(A *object, C value);
 *     void atomic_store_explicit(A *object, C value, memory_order);
 *
 *         Atomically stores 'value' into '*object', respecting the given
 *         memory order (or memory_order_seq_cst for atomic_store()).
 *
 *     bool atomic_compare_exchange_strong(A *object, C *expected, C desired);
 *     bool atomic_compare_exchange_weak(A *object, C *expected, C desired);
 *     bool atomic_compare_exchange_strong_explicit(A *object, C *expected,
 *                                                  C desired,
 *                                                  memory_order success,
 *                                                  memory_order failure);
 *     bool atomic_compare_exchange_weak_explicit(A *object, C *expected,
 *                                                C desired,
 *                                                memory_order success,
 *                                                memory_order failure);
 *
 *         Atomically loads '*object' and compares it with '*expected'; if
 *         they are equal, stores 'desired' into '*object' (an atomic
 *         read-modify-write operation) and returns true; if they are not
 *         equal, stores the actual value of '*object' into '*expected' (an
 *         atomic load operation) and returns false.  The memory order for
 *         the successful case (atomic read-modify-write operation) is
 *         'success', and for the unsuccessful case (atomic load operation)
 *         'failure'.  'failure' may not be stronger than 'success'.
 *
 *         The weak forms may fail (returning false) even when '*object'
 *         equals '*expected'.  The strong form can be implemented by the weak
 *         form in a loop.  Some platforms can implement the weak form more
 *         efficiently, so it should be used if the application will need to
 *         loop anyway.
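 *
 *         As an illustrative sketch only (not additional API; 'limit_cnt' is
 *         a hypothetical atomic_uint, and atomic_read() is described below),
 *         a saturating increment can be written as a weak compare-exchange
 *         loop:
 *
 *             unsigned int cur;
 *
 *             atomic_read(&limit_cnt, &cur);
 *             do {
 *                 if (cur == UINT_MAX) {
 *                     break;
 *                 }
 *             } while (!atomic_compare_exchange_weak(&limit_cnt, &cur,
 *                                                    cur + 1));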
 *
 * The following primitives differ from the C11 ones (and have different names)
 * because there does not appear to be a way to implement the standard
 * primitives in standard C:
 *
 *     void atomic_read(A *src, C *dst);
 *     void atomic_read_explicit(A *src, C *dst, memory_order);
 *
 *         Atomically loads a value from 'src', writing the value read into
 *         '*dst', respecting the given memory order (or memory_order_seq_cst
 *         for atomic_read()).
 *
 *     void atomic_add(A *rmw, C arg, C *orig);
 *     void atomic_sub(A *rmw, C arg, C *orig);
 *     void atomic_or(A *rmw, C arg, C *orig);
 *     void atomic_xor(A *rmw, C arg, C *orig);
 *     void atomic_and(A *rmw, C arg, C *orig);
 *     void atomic_add_explicit(A *rmw, C arg, C *orig, memory_order);
 *     void atomic_sub_explicit(A *rmw, C arg, C *orig, memory_order);
 *     void atomic_or_explicit(A *rmw, C arg, C *orig, memory_order);
 *     void atomic_xor_explicit(A *rmw, C arg, C *orig, memory_order);
 *     void atomic_and_explicit(A *rmw, C arg, C *orig, memory_order);
 *
 *         Atomically applies the given operation, with 'arg' as the second
 *         operand, to '*rmw', and stores the original value of '*rmw' into
 *         '*orig', respecting the given memory order (or memory_order_seq_cst
 *         if none is specified).
 *
 *         The results are similar to those that would be obtained with +=, -=,
 *         |=, ^=, or &= on non-atomic types.
 *
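 *         As an illustrative sketch only (not additional API; 'n_hits' is a
 *         hypothetical atomic_uint), counting events with atomic_add:
 *
 *             unsigned int old;
 *
 *             atomic_add(&n_hits, 1, &old);
 *
 *         Afterward 'n_hits' has grown by 1 and 'old' holds the value it had
 *         just before the addition.
 *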
 *
 * atomic_flag
 * ===========
 *
 * atomic_flag is a typedef for a type with two states, set and clear, that
 * provides atomic test-and-set functionality.
 *
 *
 * Life Cycle
 * ----------
 *
 * ATOMIC_FLAG_INIT is an initializer for atomic_flag.  The initial state is
 * "clear".
 *
 * An atomic_flag may also be initialized at runtime with atomic_flag_clear().
 *
 *
 * Operations
 * ----------
 *
 * The following functions are available.
 *
 *     bool atomic_flag_test_and_set(atomic_flag *object)
 *     bool atomic_flag_test_and_set_explicit(atomic_flag *object,
 *                                            memory_order);
 *
 *         Atomically sets '*object', respecting the given memory order (or
 *         memory_order_seq_cst for atomic_flag_test_and_set()).  Returns the
 *         previous value of the flag (false for clear, true for set).
 *
 *     void atomic_flag_clear(atomic_flag *object);
 *     void atomic_flag_clear_explicit(atomic_flag *object, memory_order);
 *
 *         Atomically clears '*object', respecting the given memory order (or
 *         memory_order_seq_cst for atomic_flag_clear()).
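 *
 *         As an illustrative sketch only (not additional API; 'flag' is a
 *         hypothetical static atomic_flag), a flag can serve as a simple
 *         spinlock:
 *
 *             static atomic_flag flag = ATOMIC_FLAG_INIT;
 *
 *             while (atomic_flag_test_and_set(&flag)) {
 *                 (spin: another thread holds the flag)
 *             }
 *             (...critical section...)
 *             atomic_flag_clear(&flag);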
 */

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "compiler.h"
#include "util.h"

#define IN_OVS_ATOMIC_H
    #if __CHECKER__
        /* sparse doesn't understand some GCC extensions we use. */
        #include "ovs-atomic-pthreads.h"
    #elif __has_extension(c_atomic)
        #include "ovs-atomic-clang.h"
    #elif HAVE_ATOMIC && __cplusplus >= 201103L
        #include "ovs-atomic-c++.h"
    #elif HAVE_STDATOMIC_H && !defined(__cplusplus)
        #include "ovs-atomic-c11.h"
    #elif __GNUC__ >= 5 && !defined(__cplusplus)
        #error "GCC 5+ should have <stdatomic.h>"
    #elif __GNUC__ >= 5 || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 7)
        #include "ovs-atomic-gcc4.7+.h"
    #elif __GNUC__ && defined(__x86_64__)
        #include "ovs-atomic-x86_64.h"
    #elif __GNUC__ && defined(__i386__)
        #include "ovs-atomic-i586.h"
    #elif HAVE_GCC4_ATOMICS
        #include "ovs-atomic-gcc4+.h"
    #elif _MSC_VER
        #include "ovs-atomic-msvc.h"
    #else
        /* ovs-atomic-pthreads implementation is provided for portability.
         * It might be too slow for real use because Open vSwitch is
         * optimized for platforms where real atomic ops are available. */
        #include "ovs-atomic-pthreads.h"
    #endif
#undef IN_OVS_ATOMIC_H

#ifndef OMIT_STANDARD_ATOMIC_TYPES
typedef ATOMIC(bool)               atomic_bool;

typedef ATOMIC(char)               atomic_char;
typedef ATOMIC(signed char)        atomic_schar;
typedef ATOMIC(unsigned char)      atomic_uchar;

typedef ATOMIC(short)              atomic_short;
typedef ATOMIC(unsigned short)     atomic_ushort;

typedef ATOMIC(int)                atomic_int;
typedef ATOMIC(unsigned int)       atomic_uint;

typedef ATOMIC(long)               atomic_long;
typedef ATOMIC(unsigned long)      atomic_ulong;

typedef ATOMIC(long long)          atomic_llong;
typedef ATOMIC(unsigned long long) atomic_ullong;

typedef ATOMIC(size_t)             atomic_size_t;
typedef ATOMIC(ptrdiff_t)          atomic_ptrdiff_t;

typedef ATOMIC(intmax_t)           atomic_intmax_t;
typedef ATOMIC(uintmax_t)          atomic_uintmax_t;

typedef ATOMIC(intptr_t)           atomic_intptr_t;
typedef ATOMIC(uintptr_t)          atomic_uintptr_t;
#endif  /* !OMIT_STANDARD_ATOMIC_TYPES */

/* Nonstandard atomic types. */
typedef ATOMIC(uint8_t)   atomic_uint8_t;
typedef ATOMIC(uint16_t)  atomic_uint16_t;
typedef ATOMIC(uint32_t)  atomic_uint32_t;
typedef ATOMIC(uint64_t)  atomic_uint64_t;

typedef ATOMIC(int8_t)    atomic_int8_t;
typedef ATOMIC(int16_t)   atomic_int16_t;
typedef ATOMIC(int32_t)   atomic_int32_t;
typedef ATOMIC(int64_t)   atomic_int64_t;

/* Relaxed atomic operations.
 *
 * When an operation on an atomic variable is not expected to synchronize
 * with operations on other (atomic or non-atomic) variables, no memory
 * barriers are needed and the relaxed memory ordering can be used.  These
 * macros make such uses less daunting, but not invisible. */
#define atomic_store_relaxed(VAR, VALUE)                                \
    atomic_store_explicit(VAR, VALUE, memory_order_relaxed)
#define atomic_read_relaxed(VAR, DST)                                   \
    atomic_read_explicit(VAR, DST, memory_order_relaxed)
#define atomic_compare_exchange_strong_relaxed(DST, EXP, SRC)           \
    atomic_compare_exchange_strong_explicit(DST, EXP, SRC,              \
                                            memory_order_relaxed,       \
                                            memory_order_relaxed)
#define atomic_compare_exchange_weak_relaxed(DST, EXP, SRC)             \
    atomic_compare_exchange_weak_explicit(DST, EXP, SRC,                \
                                          memory_order_relaxed,         \
                                          memory_order_relaxed)
#define atomic_add_relaxed(RMW, ARG, ORIG)                              \
    atomic_add_explicit(RMW, ARG, ORIG, memory_order_relaxed)
#define atomic_sub_relaxed(RMW, ARG, ORIG)                              \
    atomic_sub_explicit(RMW, ARG, ORIG, memory_order_relaxed)
#define atomic_or_relaxed(RMW, ARG, ORIG)                               \
    atomic_or_explicit(RMW, ARG, ORIG, memory_order_relaxed)
#define atomic_xor_relaxed(RMW, ARG, ORIG)                              \
    atomic_xor_explicit(RMW, ARG, ORIG, memory_order_relaxed)
#define atomic_and_relaxed(RMW, ARG, ORIG)                              \
    atomic_and_explicit(RMW, ARG, ORIG, memory_order_relaxed)
#define atomic_flag_test_and_set_relaxed(FLAG)                          \
    atomic_flag_test_and_set_explicit(FLAG, memory_order_relaxed)
#define atomic_flag_clear_relaxed(FLAG)                                 \
    atomic_flag_clear_explicit(FLAG, memory_order_relaxed)
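
/* For illustration only (a sketch; 'n_bytes' is a hypothetical
 * atomic_uint64_t): when a value does not synchronize any other data, a
 * relaxed read suffices.
 *
 *     uint64_t bytes;
 *
 *     atomic_read_relaxed(&n_bytes, &bytes);
 */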

/* A simplified atomic count.  Does not provide any synchronization with any
 * other variables.
 *
 * Typically a counter is not used to synchronize the state of any other
 * variables (with the notable exception of reference counts, below).
 * This abstraction relieves the user from memory order considerations,
 * and may make the code easier to read.
 *
 * We only support unsigned int counters, as those are the most common. */
typedef struct atomic_count {
    atomic_uint count;
} atomic_count;

#define ATOMIC_COUNT_INIT(VALUE) { VALUE }

static inline void
atomic_count_init(atomic_count *count, unsigned int value)
{
    atomic_init(&count->count, value);
}

static inline unsigned int
atomic_count_inc(atomic_count *count)
{
    unsigned int old;

    atomic_add_relaxed(&count->count, 1u, &old);

    return old;
}

static inline unsigned int
atomic_count_dec(atomic_count *count)
{
    unsigned int old;

    atomic_sub_relaxed(&count->count, 1u, &old);

    return old;
}

static inline unsigned int
atomic_count_get(atomic_count *count)
{
    unsigned int value;

    atomic_read_relaxed(&count->count, &value);

    return value;
}

static inline void
atomic_count_set(atomic_count *count, unsigned int value)
{
    atomic_store_relaxed(&count->count, value);
}
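
/* For illustration only (a sketch; 'n_revalidations' is hypothetical):
 *
 *     static atomic_count n_revalidations = ATOMIC_COUNT_INIT(0);
 *     ...
 *     atomic_count_inc(&n_revalidations);            (in a worker thread)
 *     ...
 *     unsigned int n = atomic_count_get(&n_revalidations);
 */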

static inline uint64_t
atomic_count_inc64(atomic_uint64_t *counter)
{
    uint64_t old;

    atomic_add_relaxed(counter, 1ull, &old);

    return old;
}

static inline uint64_t
atomic_count_dec64(atomic_uint64_t *counter)
{
    uint64_t old;

    atomic_sub_relaxed(counter, 1ull, &old);

    return old;
}

static inline uint64_t
atomic_count_get64(atomic_uint64_t *counter)
{
    uint64_t value;

    atomic_read_relaxed(counter, &value);

    return value;
}

static inline void
atomic_count_set64(atomic_uint64_t *counter, uint64_t value)
{
    atomic_store_relaxed(counter, value);
}
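
/* The atomic_count_*64() functions above provide the same relaxed counter
 * operations directly on an atomic_uint64_t.  For illustration only (a
 * sketch; 'n_events' is hypothetical):
 *
 *     static atomic_uint64_t n_events = ATOMIC_VAR_INIT(0);
 *     ...
 *     atomic_count_inc64(&n_events);
 *     ...
 *     uint64_t total = atomic_count_get64(&n_events);
 */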

/* Reference count. */
struct ovs_refcount {
    atomic_uint count;
};

/* Initializes 'refcount'.  The reference count is initially 1. */
static inline void
ovs_refcount_init(struct ovs_refcount *refcount)
{
    atomic_init(&refcount->count, 1u);
}

/* Increments 'refcount'.
 *
 * Does not provide a memory barrier, as the calling thread must have
 * protected access to the object already. */
static inline void
ovs_refcount_ref(struct ovs_refcount *refcount)
{
    unsigned int old_refcount;

    atomic_add_explicit(&refcount->count, 1u, &old_refcount,
                        memory_order_relaxed);
    ovs_assert(old_refcount > 0);
}

/* Decrements 'refcount' and returns the previous reference count.  Often used
 * in this form:
 *
 * if (ovs_refcount_unref(&object->ref_cnt) == 1) {
 *     ...uninitialize object...
 *     free(object);
 * }
 *
 * Provides a release barrier so that the preceding loads and stores are not
 * reordered after the unref, and in the case of the last reference also
 * provides an acquire barrier to keep all the following uninitialization from
 * being reordered before the atomic decrement operation.  Together these
 * synchronize any concurrent unref operations between each other. */
static inline unsigned int
ovs_refcount_unref(struct ovs_refcount *refcount)
{
    unsigned int old_refcount;

    atomic_sub_explicit(&refcount->count, 1u, &old_refcount,
                        memory_order_release);
    ovs_assert(old_refcount > 0);
    if (old_refcount == 1) {
        /* 'memory_order_release' above means that there are no (reordered)
         * accesses to the protected object from any thread at this point.
         * An acquire barrier is needed to keep all subsequent access to the
         * object's memory from being reordered before the atomic operation
         * above. */
        atomic_thread_fence(memory_order_acquire);
    }
    return old_refcount;
}
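
/* For illustration only (a sketch; 'struct frob' and frob_create() are
 * hypothetical): a typical ovs_refcount life cycle.
 *
 *     struct frob *frob = frob_create();     (ovs_refcount_init() inside,
 *                                             so the count starts at 1)
 *     ovs_refcount_ref(&frob->ref_cnt);      (a second user takes a
 *                                             reference: count becomes 2)
 *     ...
 *     if (ovs_refcount_unref(&frob->ref_cnt) == 1) {
 *         free(frob);                        (last reference was dropped)
 *     }
 */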

/* Reads and returns 'refcount_''s current reference count.
 *
 * Does not provide a memory barrier.
 *
 * Rarely useful. */
static inline unsigned int
ovs_refcount_read(const struct ovs_refcount *refcount_)
{
    struct ovs_refcount *refcount
        = CONST_CAST(struct ovs_refcount *, refcount_);
    unsigned int count;

    atomic_read_explicit(&refcount->count, &count, memory_order_relaxed);
    return count;
}

/* Increments 'refcount', but only if it is non-zero.
 *
 * This may only be called for an object which is RCU protected during
 * this call.  This implies that its possible destruction is postponed
 * until all current RCU threads quiesce.
 *
 * Returns false if the refcount was zero.  In this case the object may
 * be safely accessed until the current thread quiesces, but no additional
 * references to the object may be taken.
 *
 * Does not provide a memory barrier, as the calling thread must have
 * RCU protected access to the object already.
 *
 * It is critical that we never increment a zero refcount to a
 * non-zero value, as whenever a refcount reaches the zero value, the
 * protected object may be irrevocably scheduled for deletion. */
static inline bool
ovs_refcount_try_ref_rcu(struct ovs_refcount *refcount)
{
    unsigned int count;

    atomic_read_explicit(&refcount->count, &count, memory_order_relaxed);
    do {
        if (count == 0) {
            return false;
        }
    } while (!atomic_compare_exchange_weak_explicit(&refcount->count, &count,
                                                    count + 1,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed));
    return true;
}
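
/* For illustration only (a sketch; 'table', 'find', and 'node' are
 * hypothetical): taking a new reference to an RCU protected object found by
 * lookup.
 *
 *     node = find(table, key);                  (RCU protected lookup)
 *     if (node && !ovs_refcount_try_ref_rcu(&node->ref_cnt)) {
 *         node = NULL;     (already dying; usable only until this thread
 *                           quiesces, and no new reference may be taken)
 *     }
 */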

/* Decrements 'refcount' and returns the previous reference count.  To
 * be used only when a memory barrier is already provided for the
 * protected object independently.
 *
 * For example:
 *
 * if (ovs_refcount_unref_relaxed(&object->ref_cnt) == 1) {
 *     ovsrcu_postpone(destructor_function, object);
 * }
 *
 * Here RCU quiescing already provides a full memory barrier.  No additional
 * barriers are needed here.
 *
 * Or:
 *
 * if (stp && ovs_refcount_unref_relaxed(&stp->ref_cnt) == 1) {
 *     ovs_mutex_lock(&mutex);
 *     ovs_list_remove(&stp->node);
 *     ovs_mutex_unlock(&mutex);
 *     free(stp->name);
 *     free(stp);
 * }
 *
 * Here a mutex is used to guard access to all of 'stp' apart from
 * 'ref_cnt'.  Hence all changes to 'stp' by other threads must be
 * visible when we get the mutex, and no access after the unlock can
 * be reordered to happen prior to the lock operation.  No additional
 * barriers are needed here.
 */
static inline unsigned int
ovs_refcount_unref_relaxed(struct ovs_refcount *refcount)
{
    unsigned int old_refcount;

    atomic_sub_explicit(&refcount->count, 1u, &old_refcount,
                        memory_order_relaxed);
    ovs_assert(old_refcount > 0);
    return old_refcount;
}

#endif /* ovs-atomic.h */