/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_ATOMIC_X86_H_
#define _RTE_ATOMIC_X86_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <rte_common.h>
#include <emmintrin.h>
#include "generic/rte_atomic.h"

#if RTE_MAX_LCORE == 1
#define MPLOCKED                  /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED "lock ; "        /**< Insert MP lock prefix. */
#endif
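
/*
 * The "lock" prefix makes the following read-modify-write instruction
 * atomic with respect to other cores and also acts as a full memory
 * barrier on x86. When RTE_MAX_LCORE == 1 there is no other core to
 * race with, so the prefix is dropped; the instruction remains atomic
 * with respect to interrupts on the local core.
 */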

#define rte_mb() _mm_mfence()

#define rte_wmb() _mm_sfence()

#define rte_rmb() _mm_lfence()

#define rte_smp_mb() rte_mb()

#define rte_smp_wmb() rte_compiler_barrier()

#define rte_smp_rmb() rte_compiler_barrier()

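/*
 * Under the x86 TSO memory model, loads are not reordered with other
 * loads and stores are not reordered with other stores; only a store
 * may be reordered after a later load. The SMP write and read barriers
 * therefore only need to stop compiler reordering, while the full SMP
 * barrier still requires mfence.
 */
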
/*------------------------- 16 bit atomic operations -------------------------*/

#ifndef RTE_FORCE_INTRINSICS
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgw %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* clobber list */
	return res;
}
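
/*
 * Illustrative usage sketch (not part of this header): cmpset returns
 * non-zero when *dst matched exp and was replaced by src, so it is
 * typically called in a retry loop. A hypothetical helper adding a
 * delta to a 16-bit counter could look like:
 *
 *	static inline void
 *	my_atomic16_add(rte_atomic16_t *v, int16_t delta)
 *	{
 *		uint16_t old;
 *
 *		do {
 *			old = (uint16_t)v->cnt;
 *		} while (!rte_atomic16_cmpset((volatile uint16_t *)&v->cnt,
 *				old, (uint16_t)(old + delta)));
 *	}
 *
 * (DPDK's generic/rte_atomic.h already provides rte_atomic16_add; this
 * sketch only shows the cmpset calling convention.)
 */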

static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	asm volatile(
			MPLOCKED
			"incw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	asm volatile(
			MPLOCKED
			"decw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incw %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}

static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED
			"decw %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
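
/*
 * The 32-bit operations below mirror the 16-bit ones exactly; only the
 * operand width changes (cmpxchgl/incl/decl instead of the "w"-suffixed
 * forms).
 */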

/*------------------------- 32 bit atomic operations -------------------------*/

static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgl %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* clobber list */
	return res;
}

static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
}

static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	asm volatile(
			MPLOCKED
			"incl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	asm volatile(
			MPLOCKED
			"decl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incl %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}

static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED
			"decl %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
#endif
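
/*
 * When RTE_FORCE_INTRINSICS is defined, the inline-asm versions above
 * are skipped and generic/rte_atomic.h supplies the implementations,
 * built on compiler atomic builtins instead.
 */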

#ifdef RTE_ARCH_I686
#include "rte_atomic_32.h"
#else
#include "rte_atomic_64.h"
#endif
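
/*
 * The width-specific headers above provide the 64-bit atomics: on
 * 64-bit x86 they map onto native 64-bit instructions, while the i686
 * version falls back to cmpxchg8b-based sequences.
 */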

#ifdef __cplusplus
}
#endif

#endif /* _RTE_ATOMIC_X86_H_ */