/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_PAUSE_ARM64_H_
#define _RTE_PAUSE_ARM64_H_

#ifdef __cplusplus
extern "C" {
#endif
#include <assert.h>
#include <stdint.h>

#include <rte_common.h>

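/*
 * When RTE_ARM_USE_WFE is enabled, defining RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
 * before including generic/rte_pause.h tells the generic header to skip its
 * rte_pause()-based fallback implementations of rte_wait_until_equal_*() and
 * use the WFE-based versions defined below instead.
 */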
#ifdef RTE_ARM_USE_WFE
#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
#endif

#include "generic/rte_pause.h"

static inline void rte_pause(void)
{
        asm volatile("yield" ::: "memory");
}
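
/*
 * Usage sketch: rte_pause() issues the aarch64 YIELD instruction, a hint
 * to the CPU that this thread is in a spin-wait loop. A caller might poll
 * a flag like this ('done' is a hypothetical variable set by another
 * lcore):
 *
 *      while (!done)
 *              rte_pause();
 */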

#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED

/* Set the local event register so that the next WFE does not sleep. */
#define __SEVL() { asm volatile("sevl" : : : "memory"); }

/* Put the processor into the low-power WFE (Wait For Event) state. */
#define __WFE() { asm volatile("wfe" : : : "memory"); }
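
/*
 * The wait loops below use the standard arm64 idiom: a load-exclusive arms
 * the exclusive monitor for the polled address, SEVL sets the local event
 * register so the first WFE falls through immediately, and each subsequent
 * WFE sleeps until a write to the monitored address (or any other event)
 * wakes the core, after which the value is re-checked.
 */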

static __rte_always_inline void
rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
                int memorder)
{
        uint16_t value;

        assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);

        /*
         * Atomic exclusive load from addr: it returns the 16-bit content of
         * *addr while making it 'monitored'. When it is written by someone
         * else, the 'monitored' state is cleared and an event is generated
         * implicitly to exit WFE.
         */
#define __LOAD_EXC_16(src, dst, memorder) { \
        if (memorder == __ATOMIC_RELAXED) { \
                asm volatile("ldxrh %w[tmp], [%x[addr]]" \
                        : [tmp] "=&r" (dst) \
                        : [addr] "r"(src) \
                        : "memory"); \
        } else { \
                asm volatile("ldaxrh %w[tmp], [%x[addr]]" \
                        : [tmp] "=&r" (dst) \
                        : [addr] "r"(src) \
                        : "memory"); \
        } }

        __LOAD_EXC_16(addr, value, memorder)
        if (value != expected) {
                __SEVL()
                do {
                        __WFE()
                        __LOAD_EXC_16(addr, value, memorder)
                } while (value != expected);
        }
#undef __LOAD_EXC_16
}
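
/*
 * Usage sketch ('ready' is a hypothetical flag that another lcore is
 * expected to set to 1):
 *
 *      static volatile uint16_t ready;
 *      ...
 *      rte_wait_until_equal_16(&ready, 1, __ATOMIC_ACQUIRE);
 *
 * With __ATOMIC_ACQUIRE, memory accesses issued after the call cannot be
 * reordered before the flag is observed equal to the expected value.
 */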

static __rte_always_inline void
rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
                int memorder)
{
        uint32_t value;

        assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);

        /*
         * Atomic exclusive load from addr: it returns the 32-bit content of
         * *addr while making it 'monitored'. When it is written by someone
         * else, the 'monitored' state is cleared and an event is generated
         * implicitly to exit WFE.
         */
#define __LOAD_EXC_32(src, dst, memorder) { \
        if (memorder == __ATOMIC_RELAXED) { \
                asm volatile("ldxr %w[tmp], [%x[addr]]" \
                        : [tmp] "=&r" (dst) \
                        : [addr] "r"(src) \
                        : "memory"); \
        } else { \
                asm volatile("ldaxr %w[tmp], [%x[addr]]" \
                        : [tmp] "=&r" (dst) \
                        : [addr] "r"(src) \
                        : "memory"); \
        } }

        __LOAD_EXC_32(addr, value, memorder)
        if (value != expected) {
                __SEVL()
                do {
                        __WFE()
                        __LOAD_EXC_32(addr, value, memorder)
                } while (value != expected);
        }
#undef __LOAD_EXC_32
}

static __rte_always_inline void
rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
                int memorder)
{
        uint64_t value;

        assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);

        /*
         * Atomic exclusive load from addr: it returns the 64-bit content of
         * *addr while making it 'monitored'. When it is written by someone
         * else, the 'monitored' state is cleared and an event is generated
         * implicitly to exit WFE.
         */
#define __LOAD_EXC_64(src, dst, memorder) { \
        if (memorder == __ATOMIC_RELAXED) { \
                asm volatile("ldxr %x[tmp], [%x[addr]]" \
                        : [tmp] "=&r" (dst) \
                        : [addr] "r"(src) \
                        : "memory"); \
        } else { \
                asm volatile("ldaxr %x[tmp], [%x[addr]]" \
                        : [tmp] "=&r" (dst) \
                        : [addr] "r"(src) \
                        : "memory"); \
        } }

        __LOAD_EXC_64(addr, value, memorder)
        if (value != expected) {
                __SEVL()
                do {
                        __WFE()
                        __LOAD_EXC_64(addr, value, memorder)
                } while (value != expected);
        }
#undef __LOAD_EXC_64
}

#undef __SEVL
#undef __WFE

#endif /* RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_PAUSE_ARM64_H_ */