/** \file boost/atomic.hpp */

// Copyright (c) 2009 Helge Bahmann
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

/* this is just a pseudo-header file fed to doxygen
to more easily generate the class documentation; will
be replaced by proper documentation down the road */

namespace boost {

/**
\brief Memory ordering constraints

This defines the relative order of one atomic operation
and other memory operations (loads, stores, other atomic operations)
executed by the same thread.

The order of operations specified by the programmer in the
source code ("program order") does not necessarily match
the order in which they are actually executed on the target system:
both the compiler and the processor may reorder operations
quite arbitrarily. <B>Specifying the wrong ordering
constraint will therefore generally result in an incorrect program.</B>
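
For example, the common "publish data, then set a flag" pattern relies on
\c memory_order_release for the flag store and \c memory_order_acquire for
the flag load; a minimal sketch (the names used here are illustrative only):

\code
#include <boost/atomic.hpp>
#include <cassert>

boost::atomic<int>  payload(0);
boost::atomic<bool> ready(false);

void producer()
{
    payload.store(42, boost::memory_order_relaxed); // write the data
    ready.store(true, boost::memory_order_release); // publish it
}

void consumer()
{
    while (!ready.load(boost::memory_order_acquire)) // synchronizes with the producer
        ;
    assert(payload.load(boost::memory_order_relaxed) == 42); // guaranteed to observe 42
}
\endcode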
*/
enum memory_order {
/**
\brief No constraint
Atomic operation and other memory operations may be reordered freely.
*/
memory_order_relaxed,
/**
\brief Data dependence constraint
Atomic operation must strictly precede any memory operation that
computationally depends on the outcome of the atomic operation.
*/
memory_order_consume,
/**
\brief Acquire memory
Atomic operation must strictly precede all memory operations that
follow in program order.
*/
memory_order_acquire,
/**
\brief Release memory
Atomic operation must strictly follow all memory operations that precede
in program order.
*/
memory_order_release,
/**
\brief Acquire and release memory
Combines the effects of \ref memory_order_acquire and \ref memory_order_release.
*/
memory_order_acq_rel,
/**
\brief Sequentially consistent
Produces the same result as \ref memory_order_acq_rel, but additionally
enforces globally sequentially consistent execution.
*/
memory_order_seq_cst
};

/**
\brief Atomic datatype

An atomic variable. Provides methods to modify this variable atomically.
Valid template parameters are:

- integral data types (char, short, int, ...)
- pointer data types
- any other data type that has a non-throwing default
constructor and that can be copied via <TT>memcpy</TT>

Unless specified otherwise, any memory ordering constraint can be used
with any of the atomic operations.
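
A minimal usage sketch with the default (sequentially consistent) ordering
(the names used here are illustrative only):

\code
#include <boost/atomic.hpp>

boost::atomic<int> shared_value(0);

void writer()
{
    shared_value.store(42);      // defaults to memory_order_seq_cst
}

int reader()
{
    return shared_value.load();  // defaults to memory_order_seq_cst
}
\endcode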
*/
template<typename Type>
class atomic {
public:
/**
\brief The constant is \c true if the atomic type's operations
are always lock-free and \c false otherwise.

If this constant is \c true then \c is_lock_free() shall always
return \c true for any object of this atomic type.
*/
static constexpr bool is_always_lock_free;

/**
\brief Create uninitialized atomic variable
Creates an atomic variable. Its initial value is undefined.
*/
atomic();
/**
\brief Create and initialize atomic variable
\param value Initial value
Creates and initializes an atomic variable.
*/
explicit atomic(Type value);

/**
\brief Returns \c true if the object's operations are lock-free
and \c false otherwise.
*/
bool is_lock_free() const noexcept;


/**
\brief Read the current value of the atomic variable
\param order Memory ordering constraint, see \ref memory_order
\return Current value of the variable

Valid memory ordering constraints are:
- @c memory_order_relaxed
- @c memory_order_consume
- @c memory_order_acquire
- @c memory_order_seq_cst
*/
Type load(memory_order order=memory_order_seq_cst) const;

/**
\brief Write new value to atomic variable
\param value New value
\param order Memory ordering constraint, see \ref memory_order

Valid memory ordering constraints are:
- @c memory_order_relaxed
- @c memory_order_release
- @c memory_order_seq_cst
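
For example, a cooperative stop flag through which no other data is
published can use relaxed ordering (illustrative sketch):

\code
boost::atomic<bool> stop_requested(false);

void worker()
{
    while (!stop_requested.load(boost::memory_order_relaxed))
    {
        // do work that does not depend on other shared state
    }
}

void request_stop()
{
    stop_requested.store(true, boost::memory_order_relaxed);
}
\endcode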
*/
void store(Type value, memory_order order=memory_order_seq_cst);

/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param order Memory ordering constraint, see \ref memory_order
\return @c true if value was changed

Atomically performs the following operation

\code
if (variable==expected) {
    variable=desired;
    return true;
} else {
    expected=variable;
    return false;
}
\endcode

This operation may fail "spuriously", i.e. the state of the variable
is unchanged even though the expected value was found (this is the
case on architectures using "load-linked"/"store-conditional" to
implement the operation).

The established memory order will be @c order if the operation
is successful. If the operation is unsuccessful, the
memory order will be

- @c memory_order_relaxed if @c order is @c memory_order_acquire,
@c memory_order_relaxed or @c memory_order_consume
- @c memory_order_release if @c order is @c memory_order_acq_rel
or @c memory_order_release
- @c memory_order_seq_cst if @c order is @c memory_order_seq_cst
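
A typical retry loop, here computing an atomic maximum (illustrative sketch):

\code
void update_maximum(boost::atomic<int> &maximum, int value)
{
    int previous = maximum.load(boost::memory_order_relaxed);
    while (previous < value &&
           !maximum.compare_exchange_weak(previous, value, boost::memory_order_relaxed))
    {
        // on failure "previous" has been reloaded; retry while it is still smaller
    }
}
\endcode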
*/
bool compare_exchange_weak(
    Type &expected,
    Type desired,
    memory_order order=memory_order_seq_cst);

/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param success_order Memory ordering constraint if operation
is successful
\param failure_order Memory ordering constraint if operation is unsuccessful
\return @c true if value was changed

Atomically performs the following operation

\code
if (variable==expected) {
    variable=desired;
    return true;
} else {
    expected=variable;
    return false;
}
\endcode

This operation may fail "spuriously", i.e. the state of the variable
is unchanged even though the expected value was found (this is the
case on architectures using "load-linked"/"store-conditional" to
implement the operation).

The constraint imposed by @c success_order may not be
weaker than the constraint imposed by @c failure_order.
*/
bool compare_exchange_weak(
    Type &expected,
    Type desired,
    memory_order success_order,
    memory_order failure_order);
/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param order Memory ordering constraint, see \ref memory_order
\return @c true if value was changed

Atomically performs the following operation

\code
if (variable==expected) {
    variable=desired;
    return true;
} else {
    expected=variable;
    return false;
}
\endcode

In contrast to \ref compare_exchange_weak, this operation will never
fail spuriously. Since compare-and-swap must generally be retried
in a loop, implementors are advised to prefer \ref compare_exchange_weak
where feasible.

The established memory order will be @c order if the operation
is successful. If the operation is unsuccessful, the
memory order will be

- @c memory_order_relaxed if @c order is @c memory_order_acquire,
@c memory_order_relaxed or @c memory_order_consume
- @c memory_order_release if @c order is @c memory_order_acq_rel
or @c memory_order_release
- @c memory_order_seq_cst if @c order is @c memory_order_seq_cst
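
When the operation is not retried in a loop, the strong variant avoids
acting on a spurious failure. For example, claiming a one-shot slot
(illustrative sketch):

\code
boost::atomic<int> owner(0); // 0 means "unclaimed"

bool try_claim(int my_id)
{
    int expected = 0;
    // succeeds for exactly one caller; no retry loop is needed
    return owner.compare_exchange_strong(expected, my_id);
}
\endcode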
*/
bool compare_exchange_strong(
    Type &expected,
    Type desired,
    memory_order order=memory_order_seq_cst);

/**
\brief Atomically compare and exchange variable
\param expected Expected old value
\param desired Desired new value
\param success_order Memory ordering constraint if operation
is successful
\param failure_order Memory ordering constraint if operation is unsuccessful
\return @c true if value was changed

Atomically performs the following operation

\code
if (variable==expected) {
    variable=desired;
    return true;
} else {
    expected=variable;
    return false;
}
\endcode

In contrast to \ref compare_exchange_weak, this operation will never
fail spuriously. Since compare-and-swap must generally be retried
in a loop, implementors are advised to prefer \ref compare_exchange_weak
where feasible.

The constraint imposed by @c success_order may not be
weaker than the constraint imposed by @c failure_order.
*/
bool compare_exchange_strong(
    Type &expected,
    Type desired,
    memory_order success_order,
    memory_order failure_order);

/**
\brief Atomically exchange variable
\param value New value
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable

Atomically exchanges the value of the variable with the new
value and returns its old value.
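
For example, \c exchange can implement a simple test-and-set spinlock
(illustrative sketch):

\code
boost::atomic<bool> lock_flag(false);

void lock()
{
    // exchange returns the previous value; spin until it was "false"
    while (lock_flag.exchange(true, boost::memory_order_acquire))
        ;
}

void unlock()
{
    lock_flag.store(false, boost::memory_order_release);
}
\endcode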
*/
Type exchange(Type value, memory_order order=memory_order_seq_cst);

/**
\brief Atomically add and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable

Atomically adds operand to the variable and returns its
old value.
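
For example, an event counter that is only inspected after all worker
threads have finished can be incremented with relaxed ordering
(illustrative sketch):

\code
boost::atomic<unsigned long> events(0);

void count_event()
{
    events.fetch_add(1, boost::memory_order_relaxed); // returns the old count
}
\endcode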
*/
Type fetch_add(Type operand, memory_order order=memory_order_seq_cst);

/**
\brief Atomically subtract and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable

Atomically subtracts operand from the variable and returns its
old value.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
@c operand is of type @c ptrdiff_t and the operation
is performed following the rules for pointer arithmetic
in C++.
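
A classic use is reference counting, where each decrement is a release
operation and the thread that drops the last reference acquires before
destroying the object (illustrative sketch):

\code
struct refcounted
{
    boost::atomic<int> refs;
    // ... payload ...
};

void release(refcounted *p)
{
    // fetch_sub returns the old count; 1 means the last reference was dropped
    if (p->refs.fetch_sub(1, boost::memory_order_release) == 1)
    {
        boost::atomic_thread_fence(boost::memory_order_acquire);
        delete p;
    }
}
\endcode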
*/
Type fetch_sub(Type operand, memory_order order=memory_order_seq_cst);

/**
\brief Atomically perform bitwise "AND" and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable

Atomically performs bitwise "AND" with the variable and returns its
old value.

This method is available only if \c Type is an integral type.
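
Together with \ref fetch_or, this allows maintaining a set of flags in a
single atomic word (illustrative sketch):

\code
enum { FLAG_READY = 1u << 0, FLAG_ERROR = 1u << 1 };

boost::atomic<unsigned int> flags(0);

void set_error()
{
    flags.fetch_or(FLAG_ERROR, boost::memory_order_relaxed);
}

void clear_error()
{
    // fetch_and with the complement clears the bit and returns the old flags
    flags.fetch_and(~static_cast<unsigned int>(FLAG_ERROR), boost::memory_order_relaxed);
}
\endcode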
*/
Type fetch_and(Type operand, memory_order order=memory_order_seq_cst);

/**
\brief Atomically perform bitwise "OR" and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable

Atomically performs bitwise "OR" with the variable and returns its
old value.

This method is available only if \c Type is an integral type.
*/
Type fetch_or(Type operand, memory_order order=memory_order_seq_cst);

/**
\brief Atomically perform bitwise "XOR" and return old value
\param operand Operand
\param order Memory ordering constraint, see \ref memory_order
\return Old value of the variable

Atomically performs bitwise "XOR" with the variable and returns its
old value.

This method is available only if \c Type is an integral type.
*/
Type fetch_xor(Type operand, memory_order order=memory_order_seq_cst);

/**
\brief Implicit load
\return Current value of the variable

The same as <tt>load(memory_order_seq_cst)</tt>. Avoid using
the implicit conversion operator; use \ref load with
an explicit memory ordering constraint.
*/
operator Type(void) const;

/**
\brief Implicit store
\param value New value
\return Copy of @c value

The same as <tt>store(value, memory_order_seq_cst)</tt>. Avoid using
the implicit assignment operator; use \ref store with
an explicit memory ordering constraint.
*/
Type operator=(Type v);

/**
\brief Atomically perform bitwise "AND" and return new value
\param operand Operand
\return New value of the variable

The same as <tt>fetch_and(operand, memory_order_seq_cst)&operand</tt>.
Avoid using the implicit bitwise "AND" operator; use \ref fetch_and
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type.
*/
Type operator&=(Type operand);

/**
\brief Atomically perform bitwise "OR" and return new value
\param operand Operand
\return New value of the variable

The same as <tt>fetch_or(operand, memory_order_seq_cst)|operand</tt>.
Avoid using the implicit bitwise "OR" operator; use \ref fetch_or
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type.
*/
Type operator|=(Type operand);

/**
\brief Atomically perform bitwise "XOR" and return new value
\param operand Operand
\return New value of the variable

The same as <tt>fetch_xor(operand, memory_order_seq_cst)^operand</tt>.
Avoid using the implicit bitwise "XOR" operator; use \ref fetch_xor
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type.
*/
Type operator^=(Type operand);

/**
\brief Atomically add and return new value
\param operand Operand
\return New value of the variable

The same as <tt>fetch_add(operand, memory_order_seq_cst)+operand</tt>.
Avoid using the implicit add operator; use \ref fetch_add
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
@c operand is of type @c ptrdiff_t and the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator+=(Type operand);

/**
\brief Atomically subtract and return new value
\param operand Operand
\return New value of the variable

The same as <tt>fetch_sub(operand, memory_order_seq_cst)-operand</tt>.
Avoid using the implicit subtract operator; use \ref fetch_sub
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
@c operand is of type @c ptrdiff_t and the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator-=(Type operand);

/**
\brief Atomically increment and return new value
\return New value of the variable

The same as <tt>fetch_add(1, memory_order_seq_cst)+1</tt>.
Avoid using the implicit increment operator; use \ref fetch_add
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator++(void);

/**
\brief Atomically increment and return old value
\return Old value of the variable

The same as <tt>fetch_add(1, memory_order_seq_cst)</tt>.
Avoid using the implicit increment operator; use \ref fetch_add
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator++(int);

/**
\brief Atomically decrement and return new value
\return New value of the variable

The same as <tt>fetch_sub(1, memory_order_seq_cst)-1</tt>.
Avoid using the implicit decrement operator; use \ref fetch_sub
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator--(void);

/**
\brief Atomically decrement and return old value
\return Old value of the variable

The same as <tt>fetch_sub(1, memory_order_seq_cst)</tt>.
Avoid using the implicit decrement operator; use \ref fetch_sub
with an explicit memory ordering constraint.

This method is available only if \c Type is an integral type
or a non-void pointer type. If it is a pointer type,
the operation
is performed following the rules for pointer arithmetic
in C++.
*/
Type operator--(int);

/** \brief Deleted copy constructor */
atomic(const atomic &) = delete;
/** \brief Deleted copy assignment */
const atomic & operator=(const atomic &) = delete;
};

/**
\brief Insert explicit fence for thread synchronization
\param order Memory ordering constraint

Inserts an explicit fence. The exact semantics depend on the
type of fence inserted:

- \c memory_order_relaxed: No operation
- \c memory_order_release: Performs a "release" operation
- \c memory_order_acquire or \c memory_order_consume: Performs an
"acquire" operation
- \c memory_order_acq_rel: Performs both an "acquire" and a "release"
operation
- \c memory_order_seq_cst: Performs both an "acquire" and a "release"
operation and in addition there exists a global total order of
all \c memory_order_seq_cst operations

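An illustrative sketch using fences together with relaxed atomic accesses
to obtain release/acquire publication (the names are illustrative only):

\code
boost::atomic<int>  fence_payload(0);
boost::atomic<bool> fence_ready(false);

void fence_producer()
{
    fence_payload.store(42, boost::memory_order_relaxed);
    boost::atomic_thread_fence(boost::memory_order_release); // release fence
    fence_ready.store(true, boost::memory_order_relaxed);
}

void fence_consumer()
{
    while (!fence_ready.load(boost::memory_order_relaxed))
        ;
    boost::atomic_thread_fence(boost::memory_order_acquire); // acquire fence
    // fence_payload is now guaranteed to read 42
}
\endcode
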
*/
void atomic_thread_fence(memory_order order);

/**
\brief Insert explicit fence for synchronization with a signal handler
\param order Memory ordering constraint

Inserts an explicit fence to synchronize with a signal handler called within
the context of the same thread. The fence ensures that the corresponding operations
around it are complete and/or not started. The exact semantics depend on the
type of fence inserted:

- \c memory_order_relaxed: No operation
- \c memory_order_release: Ensures the operations before the fence are complete
- \c memory_order_acquire or \c memory_order_consume: Ensures the operations
after the fence are not started.
- \c memory_order_acq_rel or \c memory_order_seq_cst: Ensures the operations
around the fence do not cross it.

Note that this call does not affect the visibility order of memory operations
to other threads. It is functionally similar to \c atomic_thread_fence, except that
it does not generate any instructions to synchronize hardware threads.
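
An illustrative sketch, assuming the handler runs in the same thread that
sets the flag (the names are illustrative only):

\code
int pending_data;                  // ordinary, non-atomic variable
boost::atomic<int> data_ready(0);  // flag examined by the signal handler

void prepare(int value)
{
    pending_data = value;
    // compiler-only fence: keeps the write above before the flag store
    boost::atomic_signal_fence(boost::memory_order_release);
    data_ready.store(1, boost::memory_order_relaxed);
}

extern "C" void handler(int)
{
    if (data_ready.load(boost::memory_order_relaxed))
    {
        boost::atomic_signal_fence(boost::memory_order_acquire);
        // pending_data may now be read safely in this handler
    }
}
\endcode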
*/
void atomic_signal_fence(memory_order order);

}