]> git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/lib/librte_eal/common/include/arch/x86/rte_atomic.h
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / seastar / dpdk / lib / librte_eal / common / include / arch / x86 / rte_atomic.h
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #ifndef _RTE_ATOMIC_X86_H_
35 #define _RTE_ATOMIC_X86_H_
36
37 #ifdef __cplusplus
38 extern "C" {
39 #endif
40
41 #include <stdint.h>
42 #include <rte_common.h>
43 #include <emmintrin.h>
44 #include "generic/rte_atomic.h"
45
/*
 * The "lock" prefix makes the following read-modify-write instruction
 * atomic with respect to other processors.  On a single-lcore build it
 * is omitted, since there is no other agent to race with.
 */
#if RTE_MAX_LCORE == 1
#define MPLOCKED                        /**< No need to insert MP lock prefix. */
#else
#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
#endif

/** Full hardware memory barrier: orders both loads and stores. */
#define	rte_mb() _mm_mfence()

/** Hardware store fence: orders store-store (used for e.g. WC memory). */
#define	rte_wmb() _mm_sfence()

/** Hardware load fence: orders load-load. */
#define	rte_rmb() _mm_lfence()

/** SMP full barrier: on x86 a real mfence is still required for store-load. */
#define rte_smp_mb() rte_mb()

/*
 * x86 (TSO) never reorders store-store or load-load between CPUs, so the
 * SMP write/read barriers only need to stop compiler reordering.
 */
#define rte_smp_wmb() rte_compiler_barrier()

#define rte_smp_rmb() rte_compiler_barrier()

/** I/O barriers: same strength as the SMP barriers on x86. */
#define rte_io_mb() rte_mb()

#define rte_io_wmb() rte_compiler_barrier()

#define rte_io_rmb() rte_compiler_barrier()
69
70 /*------------------------- 16 bit atomic operations -------------------------*/
71
72 #ifndef RTE_FORCE_INTRINSICS
/**
 * Atomic 16-bit compare-and-set.
 *
 * If *dst equals exp, atomically store src into *dst.
 *
 * @param dst Location to update.
 * @param exp Value expected at *dst (loaded into %ax for cmpxchg).
 * @param src Value written on a successful comparison.
 * @return Non-zero if the swap happened, 0 otherwise (sete captures ZF).
 */
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgw %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* no-clobber list */
	return res;
}
90
91 static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
92 {
93 return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
94 }
95
/**
 * Atomically increment the 16-bit counter by one.
 *
 * @param v Counter to increment.
 */
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
	asm volatile(
			MPLOCKED
			"incw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
106
/**
 * Atomically decrement the 16-bit counter by one.
 *
 * @param v Counter to decrement.
 */
static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
	asm volatile(
			MPLOCKED
			"decw %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
117
/**
 * Atomically increment the 16-bit counter and test for zero.
 *
 * sete stores the ZF produced by the locked incw, so the result reflects
 * the post-increment value atomically.
 *
 * @param v Counter to increment.
 * @return Non-zero if the counter became 0 after the increment.
 */
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incw %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* read-write output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
131
/**
 * Atomically decrement the 16-bit counter and test for zero.
 *
 * @param v Counter to decrement.
 * @return Non-zero if the counter became 0 after the decrement.
 */
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED
			"decw %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* read-write output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
144
145 /*------------------------- 32 bit atomic operations -------------------------*/
146
/**
 * Atomic 32-bit compare-and-set.
 *
 * If *dst equals exp, atomically store src into *dst.
 *
 * @param dst Location to update.
 * @param exp Value expected at *dst (loaded into %eax for cmpxchg).
 * @param src Value written on a successful comparison.
 * @return Non-zero if the swap happened, 0 otherwise (sete captures ZF).
 */
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgl %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* no-clobber list */
	return res;
}
164
165 static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
166 {
167 return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
168 }
169
/**
 * Atomically increment the 32-bit counter by one.
 *
 * @param v Counter to increment.
 */
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
	asm volatile(
			MPLOCKED
			"incl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
180
/**
 * Atomically decrement the 32-bit counter by one.
 *
 * @param v Counter to decrement.
 */
static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
	asm volatile(
			MPLOCKED
			"decl %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}
191
/**
 * Atomically increment the 32-bit counter and test for zero.
 *
 * sete stores the ZF produced by the locked incl, so the result reflects
 * the post-increment value atomically.
 *
 * @param v Counter to increment.
 * @return Non-zero if the counter became 0 after the increment.
 */
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incl %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* read-write output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
205
/**
 * Atomically decrement the 32-bit counter and test for zero.
 *
 * @param v Counter to decrement.
 * @return Non-zero if the counter became 0 after the decrement.
 */
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
	uint8_t ret;

	asm volatile(MPLOCKED
			"decl %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* read-write output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}
218 #endif
219
220 #ifdef RTE_ARCH_I686
221 #include "rte_atomic_32.h"
222 #else
223 #include "rte_atomic_64.h"
224 #endif
225
226 #ifdef __cplusplus
227 }
228 #endif
229
230 #endif /* _RTE_ATOMIC_X86_H_ */