]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /* |
2 | * Distributed under the Boost Software License, Version 1.0. | |
3 | * (See accompanying file LICENSE_1_0.txt or copy at | |
4 | * http://www.boost.org/LICENSE_1_0.txt) | |
5 | * | |
6 | * Copyright (c) 2009 Helge Bahmann | |
7 | * Copyright (c) 2012 Tim Blechmann | |
8 | * Copyright (c) 2014 Andrey Semashev | |
9 | */ | |
10 | /*! | |
11 | * \file atomic/detail/ops_gcc_x86.hpp | |
12 | * | |
13 | * This header contains implementation of the \c operations template. | |
14 | */ | |
15 | ||
16 | #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_ | |
17 | #define BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_ | |
18 | ||
19 | #include <boost/memory_order.hpp> | |
20 | #include <boost/atomic/detail/config.hpp> | |
21 | #include <boost/atomic/detail/storage_type.hpp> | |
22 | #include <boost/atomic/detail/operations_fwd.hpp> | |
23 | #include <boost/atomic/capabilities.hpp> | |
24 | #if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B) | |
25 | #include <boost/atomic/detail/ops_gcc_x86_dcas.hpp> | |
26 | #include <boost/atomic/detail/ops_cas_based.hpp> | |
27 | #endif | |
28 | ||
29 | #ifdef BOOST_HAS_PRAGMA_ONCE | |
30 | #pragma once | |
31 | #endif | |
32 | ||
33 | #if defined(__x86_64__) | |
34 | #define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "rdx" | |
35 | #else | |
36 | #define BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "edx" | |
37 | #endif | |
38 | ||
39 | namespace boost { | |
40 | namespace atomics { | |
41 | namespace detail { | |
42 | ||
//! Base class shared by all x86 operation sets. On x86 the hardware memory
//! model (TSO) is strong enough that acquire/release semantics on plain
//! loads and stores need only *compiler* barriers — no CPU instruction.
struct gcc_x86_operations_base
{
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    //! Compiler-only barrier issued before a store when the requested
    //! ordering includes release semantics. The empty asm with a "memory"
    //! clobber prevents the compiler from reordering across it; no code
    //! is emitted.
    static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
    {
        if ((order & memory_order_release) != 0)
            __asm__ __volatile__ ("" ::: "memory");
    }

    //! Compiler-only barrier issued after a load when the requested
    //! ordering includes acquire semantics.
    static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
    {
        if ((order & memory_order_acquire) != 0)
            __asm__ __volatile__ ("" ::: "memory");
    }
};
59 | ||
//! Common implementation shared by the size-specific specializations below.
//! \tparam T        storage type for the atomic value
//! \tparam Derived  the size-specific operations class (CRTP); must supply
//!                  fetch_add, exchange and compare_exchange_strong.
template< typename T, typename Derived >
struct gcc_x86_operations :
    public gcc_x86_operations_base
{
    typedef T storage_type;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        if (order != memory_order_seq_cst)
        {
            // On x86 a plain aligned store is atomic; release ordering
            // only requires compiler fences around it.
            fence_before(order);
            storage = v;
            fence_after(order);
        }
        else
        {
            // A seq_cst store needs a full hardware barrier. Derived's
            // exchange uses xchg, which carries an implicit lock prefix
            // and serves as that barrier — cheaper than store + mfence.
            Derived::exchange(storage, v, order);
        }
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        // A plain aligned load is atomic on x86; acquire ordering needs
        // only the trailing compiler barrier.
        storage_type v = storage;
        fence_after(order);
        return v;
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        // Subtraction is addition of the two's-complement negation.
        return Derived::fetch_add(storage, -v, order);
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        // cmpxchg never fails spuriously on x86, so weak CAS can simply
        // forward to the strong version.
        return Derived::compare_exchange_strong(storage, expected, desired, success_order, failure_order);
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        // Returns true if the flag was already set (previous value non-zero).
        return !!Derived::exchange(storage, (storage_type)1, order);
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        store(storage, (storage_type)0, order);
    }

    static BOOST_FORCEINLINE bool is_lock_free(storage_type const volatile&) BOOST_NOEXCEPT
    {
        // All operations in this header are implemented with lock-free
        // instructions, regardless of the object's address.
        return true;
    }
};
113 | ||
//! 8-bit atomic operations.
template< bool Signed >
struct operations< 1u, Signed > :
    public gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > >
{
    typedef gcc_x86_operations< typename make_storage_type< 1u, Signed >::type, operations< 1u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;
    typedef typename make_storage_type< 1u, Signed >::aligned aligned_storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // lock xadd atomically adds v to storage and leaves the previous
        // value in v. The "q" constraint restricts v to registers that
        // have an 8-bit form (required on 32-bit x86).
        __asm__ __volatile__
        (
            "lock; xaddb %0, %1"
            : "+q" (v), "+m" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // xchg with a memory operand has an implicit lock prefix, so no
        // explicit "lock;" is needed; it is also a full memory barrier.
        __asm__ __volatile__
        (
            "xchgb %0, %1"
            : "+q" (v), "+m" (storage)
            :
            : "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        // cmpxchg compares al (expected) with storage; on match it stores
        // desired, otherwise it loads the current value into al. sete
        // captures ZF as the success flag, and the (possibly updated)
        // previous value is written back to expected either way.
        storage_type previous = expected;
        bool success;
        __asm__ __volatile__
        (
            "lock; cmpxchgb %3, %1\n\t"
            "sete %2"
            : "+a" (previous), "+m" (storage), "=q" (success)
            : "q" (desired)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        expected = previous;
        return success;
    }

// CAS loop for bitwise ops that have no fetch-variant instruction: compute
// (old op arg) into dl and try to publish with lock cmpxchgb until no other
// thread has modified storage in between. On failure cmpxchg reloads al with
// the current value, so the loop needs no explicit re-read. `result` must be
// pre-loaded with the current value of storage before invoking the macro.
// NOTE(review): the leading xor clears the whole edx/rdx temp register,
// presumably to avoid a partial-register stall on dl — confirm.
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
    __asm__ __volatile__\
    (\
        "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
        ".align 16\n\t"\
        "1: movb %[arg], %%dl\n\t"\
        op " %%al, %%dl\n\t"\
        "lock; cmpxchgb %%dl, %[storage]\n\t"\
        "jne 1b"\
        : [res] "+a" (result), [storage] "+m" (storage)\
        : [arg] "q" (argument)\
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
    )

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("andb", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("orb", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("xorb", v, res);
        return res;
    }

#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
200 | ||
//! 16-bit atomic operations; mirrors the 1-byte specialization with the
//! w-suffixed (word) instruction forms and the dx temp register.
template< bool Signed >
struct operations< 2u, Signed > :
    public gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > >
{
    typedef gcc_x86_operations< typename make_storage_type< 2u, Signed >::type, operations< 2u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;
    typedef typename make_storage_type< 2u, Signed >::aligned aligned_storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // lock xadd: atomically add v, previous value returned in v.
        __asm__ __volatile__
        (
            "lock; xaddw %0, %1"
            : "+q" (v), "+m" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // xchg with a memory operand locks implicitly; acts as a full barrier.
        __asm__ __volatile__
        (
            "xchgw %0, %1"
            : "+q" (v), "+m" (storage)
            :
            : "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        // cmpxchg against ax; sete captures ZF as success, and the value
        // observed in storage is reported back through expected.
        storage_type previous = expected;
        bool success;
        __asm__ __volatile__
        (
            "lock; cmpxchgw %3, %1\n\t"
            "sete %2"
            : "+a" (previous), "+m" (storage), "=q" (success)
            : "q" (desired)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        expected = previous;
        return success;
    }

// CAS loop for the bitwise fetch-ops (no fetch-variant instruction exists):
// compute (old op arg) into dx and retry lock cmpxchgw until it succeeds.
// cmpxchg refreshes ax with the current value on failure. `result` must be
// pre-loaded with the current value of storage.
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
    __asm__ __volatile__\
    (\
        "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
        ".align 16\n\t"\
        "1: movw %[arg], %%dx\n\t"\
        op " %%ax, %%dx\n\t"\
        "lock; cmpxchgw %%dx, %[storage]\n\t"\
        "jne 1b"\
        : [res] "+a" (result), [storage] "+m" (storage)\
        : [arg] "q" (argument)\
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
    )

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("andw", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("orw", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("xorw", v, res);
        return res;
    }

#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
287 | ||
//! 32-bit atomic operations. Uses the "r" constraint instead of "q": every
//! general register has a 32-bit form, so no byte-register restriction applies.
template< bool Signed >
struct operations< 4u, Signed > :
    public gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > >
{
    typedef gcc_x86_operations< typename make_storage_type< 4u, Signed >::type, operations< 4u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;
    typedef typename make_storage_type< 4u, Signed >::aligned aligned_storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // lock xadd: atomically add v, previous value returned in v.
        __asm__ __volatile__
        (
            "lock; xaddl %0, %1"
            : "+r" (v), "+m" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // xchg with a memory operand locks implicitly; acts as a full barrier.
        __asm__ __volatile__
        (
            "xchgl %0, %1"
            : "+r" (v), "+m" (storage)
            :
            : "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        // cmpxchg against eax; sete captures ZF as success, and the value
        // observed in storage is reported back through expected.
        storage_type previous = expected;
        bool success;
        __asm__ __volatile__
        (
            "lock; cmpxchgl %3, %1\n\t"
            "sete %2"
            : "+a" (previous), "+m" (storage), "=q" (success)
            : "r" (desired)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        expected = previous;
        return success;
    }

// CAS loop for the bitwise fetch-ops: compute (old op arg) into edx and
// retry lock cmpxchgl until it succeeds; cmpxchg refreshes eax with the
// current value on failure. `result` must be pre-loaded from storage.
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
    __asm__ __volatile__\
    (\
        "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
        ".align 16\n\t"\
        "1: movl %[arg], %%edx\n\t"\
        op " %%eax, %%edx\n\t"\
        "lock; cmpxchgl %%edx, %[storage]\n\t"\
        "jne 1b"\
        : [res] "+a" (result), [storage] "+m" (storage)\
        : [arg] "r" (argument)\
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
    )

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("andl", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("orl", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("xorl", v, res);
        return res;
    }

#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};
374 | ||
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)

//! 8-byte operations on 32-bit x86: built on the double-width CAS primitive
//! (cmpxchg8b) provided by ops_gcc_x86_dcas.hpp via cas_based_operations.
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< gcc_dcas_x86< Signed > >
{
};

#elif defined(__x86_64__)

//! 8-byte operations on x86-64: native 64-bit instructions, mirroring the
//! 4-byte specialization with q-suffixed forms and the rdx temp register.
template< bool Signed >
struct operations< 8u, Signed > :
    public gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > >
{
    typedef gcc_x86_operations< typename make_storage_type< 8u, Signed >::type, operations< 8u, Signed > > base_type;
    typedef typename base_type::storage_type storage_type;
    typedef typename make_storage_type< 8u, Signed >::aligned aligned_storage_type;

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // lock xadd: atomically add v, previous value returned in v.
        __asm__ __volatile__
        (
            "lock; xaddq %0, %1"
            : "+r" (v), "+m" (storage)
            :
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        // xchg with a memory operand locks implicitly; acts as a full barrier.
        __asm__ __volatile__
        (
            "xchgq %0, %1"
            : "+r" (v), "+m" (storage)
            :
            : "memory"
        );
        return v;
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order, memory_order) BOOST_NOEXCEPT
    {
        // cmpxchg against rax; sete captures ZF as success, and the value
        // observed in storage is reported back through expected.
        storage_type previous = expected;
        bool success;
        __asm__ __volatile__
        (
            "lock; cmpxchgq %3, %1\n\t"
            "sete %2"
            : "+a" (previous), "+m" (storage), "=q" (success)
            : "r" (desired)
            : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA "memory"
        );
        expected = previous;
        return success;
    }

// CAS loop for the bitwise fetch-ops: compute (old op arg) into rdx and
// retry lock cmpxchgq until it succeeds; cmpxchg refreshes rax with the
// current value on failure. `result` must be pre-loaded from storage.
#define BOOST_ATOMIC_DETAIL_CAS_LOOP(op, argument, result)\
    __asm__ __volatile__\
    (\
        "xor %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER ", %%" BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER "\n\t"\
        ".align 16\n\t"\
        "1: movq %[arg], %%rdx\n\t"\
        op " %%rax, %%rdx\n\t"\
        "lock; cmpxchgq %%rdx, %[storage]\n\t"\
        "jne 1b"\
        : [res] "+a" (result), [storage] "+m" (storage)\
        : [arg] "r" (argument)\
        : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC_COMMA BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER, "memory"\
    )

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("andq", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("orq", v, res);
        return res;
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order) BOOST_NOEXCEPT
    {
        storage_type res = storage;
        BOOST_ATOMIC_DETAIL_CAS_LOOP("xorq", v, res);
        return res;
    }

#undef BOOST_ATOMIC_DETAIL_CAS_LOOP
};

#endif

#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)

//! 16-byte operations on x86-64: built on the cmpxchg16b double-width CAS
//! primitive from ops_gcc_x86_dcas.hpp via cas_based_operations.
template< bool Signed >
struct operations< 16u, Signed > :
    public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
};

#endif // defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)
483 | ||
//! Inter-thread fence. seq_cst requires a real hardware barrier: mfence
//! where the target supports it, otherwise a lock-prefixed add of zero to
//! the top of the stack, which is fully serializing on x86. Any weaker
//! non-relaxed ordering needs only a compiler barrier under x86's TSO model.
BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    if (order == memory_order_seq_cst)
    {
        __asm__ __volatile__
        (
#if defined(BOOST_ATOMIC_DETAIL_X86_HAS_MFENCE)
            "mfence\n"
#else
            // Locked RMW on the stack: a no-op data-wise (adds 0), but the
            // lock prefix makes it a full memory barrier.
            "lock; addl $0, (%%esp)\n"
#endif
            ::: "memory"
        );
    }
    else if ((order & (memory_order_acquire | memory_order_release)) != 0)
    {
        __asm__ __volatile__ ("" ::: "memory");
    }
}
503 | ||
504 | BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT | |
505 | { | |
506 | if (order != memory_order_relaxed) | |
507 | __asm__ __volatile__ ("" ::: "memory"); | |
508 | } | |
509 | ||
510 | } // namespace detail | |
511 | } // namespace atomics | |
512 | } // namespace boost | |
513 | ||
514 | #undef BOOST_ATOMIC_DETAIL_TEMP_CAS_REGISTER | |
515 | ||
516 | #endif // BOOST_ATOMIC_DETAIL_OPS_GCC_X86_HPP_INCLUDED_ |