/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#ifndef _RTE_MCSLOCK_H_
#define _RTE_MCSLOCK_H_

/**
 * @file
 *
 * RTE MCS lock
 *
 * This file defines the main data structure and APIs for the MCS queued lock.
 *
 * The MCS lock (proposed by John M. Mellor-Crummey and Michael L. Scott)
 * provides scalability by spinning on a CPU/thread-local variable, which
 * avoids expensive cache-line bouncing. It provides fairness by maintaining
 * a queue of acquirers and passing the lock to each CPU/thread in the order
 * in which they requested it.
 */

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_pause.h>

/**
 * The rte_mcslock_t type.
 */
typedef struct rte_mcslock {
	struct rte_mcslock *next;
	int locked; /* 1 if the queue is locked, 0 otherwise */
} rte_mcslock_t;
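
/* Usage sketch: the lock itself is just the tail pointer of the waiter
 * queue, so an unlocked MCS lock is simply a NULL pointer. A minimal,
 * hypothetical declaration (the name p_ml is illustrative only):
 *
 *	rte_mcslock_t *p_ml = NULL;
 */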

/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice
 *
 * Take the MCS lock.
 *
 * @param msl
 *   A pointer to the pointer of an MCS lock.
 *   When the lock is initialized or declared, the msl pointer should be
 *   set to NULL.
 * @param me
 *   A pointer to a new node of the MCS lock. Each CPU/thread acquiring the
 *   lock should use its own node.
 */
__rte_experimental
static inline void
rte_mcslock_lock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	rte_mcslock_t *prev;

	/* Init me node */
	__atomic_store_n(&me->locked, 1, __ATOMIC_RELAXED);
	__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

	/* If the queue is empty, the exchange operation is enough to acquire
	 * the lock. Hence, the exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node is
	 * visible to other CPUs/threads. Hence, the exchange operation requires
	 * release semantics as well.
	 */
	prev = __atomic_exchange_n(msl, me, __ATOMIC_ACQ_REL);
	if (likely(prev == NULL)) {
		/* Queue was empty, no further action required,
		 * proceed with lock taken.
		 */
		return;
	}
	__atomic_store_n(&prev->next, me, __ATOMIC_RELAXED);

	/* The while-load of me->locked should not move above the previous
	 * store to prev->next. Otherwise it will cause a deadlock. Need a
	 * store-load barrier.
	 */
	__atomic_thread_fence(__ATOMIC_ACQ_REL);
	/* If the lock has already been acquired, the exchange above has
	 * placed this node at the end of the queue; spin on me->locked
	 * until the previous lock holder clears it in rte_mcslock_unlock().
	 */
	while (__atomic_load_n(&me->locked, __ATOMIC_ACQUIRE))
		rte_pause();
}

/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice
 *
 * Release the MCS lock.
 *
 * @param msl
 *   A pointer to the pointer of an MCS lock.
 * @param me
 *   A pointer to the node of the MCS lock passed to rte_mcslock_lock().
 */
__rte_experimental
static inline void
rte_mcslock_unlock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	/* Check if there are more nodes in the queue. */
	if (likely(__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)) {
		/* No, last member in the queue. */
		rte_mcslock_t *save_me = __atomic_load_n(&me, __ATOMIC_RELAXED);

		/* Release the lock by setting it to NULL */
		if (likely(__atomic_compare_exchange_n(msl, &save_me, NULL, 0,
				__ATOMIC_RELEASE, __ATOMIC_RELAXED)))
			return;

		/* Speculative execution would be allowed to read me->next
		 * in the while-loop below ahead of the compare-exchange
		 * above. This has the potential to cause a deadlock. Need
		 * a load barrier.
		 */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		/* More nodes added to the queue by other CPUs.
		 * Wait until the next pointer is set.
		 */
		while (__atomic_load_n(&me->next, __ATOMIC_RELAXED) == NULL)
			rte_pause();
	}

	/* Pass lock to next waiter. */
	__atomic_store_n(&me->next->locked, 0, __ATOMIC_RELEASE);
}
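
/* Usage sketch: rte_mcslock_lock() and rte_mcslock_unlock() take the address
 * of the lock pointer plus a queue node owned by the calling CPU/thread, and
 * the same node must be passed to the matching unlock. A minimal, hypothetical
 * example (the names p_ml, ml_me and do_work are illustrative only):
 *
 *	static rte_mcslock_t *p_ml = NULL;
 *
 *	static void do_work(void)
 *	{
 *		rte_mcslock_t ml_me;
 *
 *		rte_mcslock_lock(&p_ml, &ml_me);
 *		... critical section ...
 *		rte_mcslock_unlock(&p_ml, &ml_me);
 *	}
 */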

/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice
 *
 * Try to take the lock.
 *
 * @param msl
 *   A pointer to the pointer of an MCS lock.
 * @param me
 *   A pointer to a new node of the MCS lock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
__rte_experimental
static inline int
rte_mcslock_trylock(rte_mcslock_t **msl, rte_mcslock_t *me)
{
	/* Init me node */
	__atomic_store_n(&me->next, NULL, __ATOMIC_RELAXED);

	/* Try to lock */
	rte_mcslock_t *expected = NULL;

	/* The lock can be taken only when the queue is empty. Hence,
	 * the compare-exchange operation requires acquire semantics.
	 * The store to me->next above should complete before the node
	 * is visible to other CPUs/threads. Hence, the compare-exchange
	 * operation requires release semantics as well.
	 */
	return __atomic_compare_exchange_n(msl, &expected, me, 0,
			__ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}
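
/* Usage sketch: rte_mcslock_trylock() returns immediately instead of
 * queueing, so a caller can fall back to other work when the lock is
 * contended. A minimal, hypothetical example reusing the illustrative
 * p_ml/ml_me names from the sketch above:
 *
 *	if (rte_mcslock_trylock(&p_ml, &ml_me)) {
 *		... critical section ...
 *		rte_mcslock_unlock(&p_ml, &ml_me);
 *	} else {
 *		... lock is busy, do something else ...
 *	}
 */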

/**
 * @warning
 * @b EXPERIMENTAL: This API may change without prior notice
 *
 * Test if the lock is taken.
 *
 * @param msl
 *   A pointer to an MCS lock node.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
__rte_experimental
static inline int
rte_mcslock_is_locked(rte_mcslock_t *msl)
{
	return (__atomic_load_n(&msl, __ATOMIC_RELAXED) != NULL);
}

#endif /* _RTE_MCSLOCK_H_ */