/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"

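/*
 * Overview (summarising the code below): this is a cooperative mutex
 * for lthreads. Ownership is claimed with an atomic compare-and-set
 * on m->owner; contenders that lose park themselves on the per-mutex
 * blocked queue and yield back to the scheduler instead of busy-waiting.
 */
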
/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
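
/*
 * A minimal lifecycle sketch: create, lock, unlock, and destroy a
 * mutex from inside an lthread. The entry-point signature follows
 * lthread_api.h; the guard macro and function name here are
 * illustrative only, not part of the API.
 */
#ifdef LTHREAD_MUTEX_EXAMPLE
static void example_mutex_user(__rte_unused void *arg)
{
	struct lthread_mutex *m;

	/* attr is unused by this implementation, so NULL is fine */
	if (lthread_mutex_init("example", &m, NULL) != 0)
		return;

	if (lthread_mutex_lock(m) == 0) {
		/* ... critical section ... */
		lthread_mutex_unlock(m);
	}

	/* succeeds only while the mutex is unowned with no waiters */
	lthread_mutex_destroy(m);
}
#endif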

/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
				   m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex back to the cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, it's still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}

/*
 * Obtain a mutex, blocking until it is acquired
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* recursive locking is not allowed */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

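	/* Note on m->count: it approximates the number of contenders
	 * (the owner plus queued or arriving waiters). Every pass
	 * through the acquisition loop below takes a reference by
	 * incrementing it; unlock drops one for the releasing owner
	 * and one more for the waiter it wakes, which re-increments
	 * when it competes again.
	 */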
	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to a race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* queue the current thread on the blocked queue.
		 * This is deferred until after we return to the
		 * scheduler, so that the current context is saved
		 * before an unlock can dequeue and resume this
		 * thread.
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish the cpu */
		_suspend();
		/* resumed; loop and compete for the lock again */
	}
	return 0;
}

/*
 * Try to lock a mutex but don't block
 */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* recursive locking is not allowed */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed, so undo our count and return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
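
/*
 * A minimal non-blocking usage sketch (names illustrative):
 *
 *	if (lthread_mutex_trylock(m) == 0) {
 *		... critical section ...
 *		lthread_mutex_unlock(m);
 *	} else {
 *		... do other work and try again later ...
 *	}
 */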

/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the caller does not own it */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
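	/* ownership is not handed directly to the woken lthread:
	 * the owner field is cleared below and the waiter competes
	 * for the lock again when rescheduled (see the retry loop
	 * in lthread_mutex_lock()).
	 */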
	/* release the lock */
	m->owner = NULL;
	return 0;
}

/*
 * Return the diagnostic ref val stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}