/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "fat-rwlock.h"

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "openvswitch/hmap.h"
#include "openvswitch/list.h"
#include "ovs-thread.h"
28 struct fat_rwlock_slot
{
29 /* Membership in rwlock's list of "struct fat_rwlock_slot"s.
31 * fat_rwlock_destroy() sets 'rwlock' to NULL to indicate that this
32 * slot may be destroyed. */
33 struct ovs_list list_node
; /* In struct rwlock's 'threads' list. */
34 struct fat_rwlock
*rwlock
; /* Owner. */
38 * A thread holding the read-lock holds its own mutex.
40 * A thread holding the write-lock holds every thread's mutex, plus
42 struct ovs_mutex mutex
;
44 /* This thread's locking status for 'rwlock':
46 * - 0: This thread does not have any lock on 'rwlock'. This thread
47 * does not have 'mutex' locked.
49 * - 1: This thread has a read-lock on 'rwlock' and holds 'mutex'.
51 * - 2...UINT_MAX-1: This thread has recursively taken the read-lock on
52 * 'rwlock' to the level of 'depth'. This thread holds 'mutex'.
54 * - UINT_MAX: This thread has the write-lock on 'rwlock' and holds
55 * 'mutex' (plus the 'mutex' of all of 'rwlock''s other slots).
57 * Accessed only by the slot's own thread, so no synchronization is
63 free_slot(struct fat_rwlock_slot
*slot
)
69 ovs_list_remove(&slot
->list_node
);
74 slot_destructor(void *slot_
)
76 struct fat_rwlock_slot
*slot
= slot_
;
77 struct fat_rwlock
*rwlock
= slot
->rwlock
;
79 ovs_mutex_lock(&rwlock
->mutex
);
81 ovs_mutex_unlock(&rwlock
->mutex
);
84 /* Initialize 'rwlock' as a new fat_rwlock. */
86 fat_rwlock_init(struct fat_rwlock
*rwlock
)
88 ovsthread_key_create(&rwlock
->key
, slot_destructor
);
89 ovs_mutex_init(&rwlock
->mutex
);
90 ovs_mutex_lock(&rwlock
->mutex
);
91 ovs_list_init(&rwlock
->threads
);
92 ovs_mutex_unlock(&rwlock
->mutex
);
95 /* Destroys 'rwlock', which must not be locked or otherwise in use by any
98 fat_rwlock_destroy(struct fat_rwlock
*rwlock
)
100 struct fat_rwlock_slot
*slot
, *next
;
102 /* Order is important here. By destroying the thread-specific data first,
103 * before we destroy the slots, we ensure that the thread-specific
104 * data destructor can't race with our loop below. */
105 ovsthread_key_delete(rwlock
->key
);
107 LIST_FOR_EACH_SAFE (slot
, next
, list_node
, &rwlock
->threads
) {
110 ovs_mutex_destroy(&rwlock
->mutex
);
113 static struct fat_rwlock_slot
*
114 fat_rwlock_get_slot__(struct fat_rwlock
*rwlock
)
116 struct fat_rwlock_slot
*slot
;
119 slot
= ovsthread_getspecific(rwlock
->key
);
124 /* Slow path: create a new slot for 'rwlock' in this thread. */
126 slot
= xmalloc_cacheline(sizeof *slot
);
127 slot
->rwlock
= rwlock
;
128 ovs_mutex_init(&slot
->mutex
);
131 ovs_mutex_lock(&rwlock
->mutex
);
132 ovs_list_push_back(&rwlock
->threads
, &slot
->list_node
);
133 ovs_mutex_unlock(&rwlock
->mutex
);
135 ovsthread_setspecific(rwlock
->key
, slot
);
140 /* Locks 'rwlock' for reading. The read-lock is recursive: it may be acquired
141 * any number of times by a single thread (which must then release it the same
142 * number of times for it to truly be released). */
144 fat_rwlock_rdlock(const struct fat_rwlock
*rwlock_
)
145 OVS_ACQ_RDLOCK(rwlock_
)
146 OVS_NO_THREAD_SAFETY_ANALYSIS
148 struct fat_rwlock
*rwlock
= CONST_CAST(struct fat_rwlock
*, rwlock_
);
149 struct fat_rwlock_slot
*this = fat_rwlock_get_slot__(rwlock
);
151 switch (this->depth
) {
153 /* This thread already holds the write-lock. */
157 ovs_mutex_lock(&this->mutex
);
165 static struct fat_rwlock_slot
*
166 fat_rwlock_try_get_slot__(struct fat_rwlock
*rwlock
)
168 struct fat_rwlock_slot
*slot
;
171 slot
= ovsthread_getspecific(rwlock
->key
);
176 /* Slow path: create a new slot for 'rwlock' in this thread. */
178 if (!ovs_mutex_trylock(&rwlock
->mutex
)) {
179 slot
= xmalloc_cacheline(sizeof *slot
);
180 slot
->rwlock
= rwlock
;
181 ovs_mutex_init(&slot
->mutex
);
184 ovs_list_push_back(&rwlock
->threads
, &slot
->list_node
);
185 ovs_mutex_unlock(&rwlock
->mutex
);
186 ovsthread_setspecific(rwlock
->key
, slot
);
192 /* Tries to lock 'rwlock' for reading. If successful, returns 0. If taking
193 * the lock would require blocking, returns EBUSY (without blocking). */
195 fat_rwlock_tryrdlock(const struct fat_rwlock
*rwlock_
)
196 OVS_TRY_RDLOCK(0, rwlock_
)
197 OVS_NO_THREAD_SAFETY_ANALYSIS
199 struct fat_rwlock
*rwlock
= CONST_CAST(struct fat_rwlock
*, rwlock_
);
200 struct fat_rwlock_slot
*this = fat_rwlock_try_get_slot__(rwlock
);
207 switch (this->depth
) {
212 error
= ovs_mutex_trylock(&this->mutex
);
225 /* Locks 'rwlock' for writing.
227 * The write lock is not recursive. */
229 fat_rwlock_wrlock(const struct fat_rwlock
*rwlock_
)
230 OVS_ACQ_WRLOCK(rwlock_
)
231 OVS_NO_THREAD_SAFETY_ANALYSIS
233 struct fat_rwlock
*rwlock
= CONST_CAST(struct fat_rwlock
*, rwlock_
);
234 struct fat_rwlock_slot
*this = fat_rwlock_get_slot__(rwlock
);
235 struct fat_rwlock_slot
*slot
;
237 ovs_assert(!this->depth
);
238 this->depth
= UINT_MAX
;
240 ovs_mutex_lock(&rwlock
->mutex
);
241 LIST_FOR_EACH (slot
, list_node
, &rwlock
->threads
) {
242 ovs_mutex_lock(&slot
->mutex
);
246 /* Unlocks 'rwlock', which the current thread must have locked for reading or
247 * for writing. If the read lock has been taken recursively, it must be
248 * released the same number of times to be truly released. */
250 fat_rwlock_unlock(const struct fat_rwlock
*rwlock_
)
251 OVS_RELEASES(rwlock_
)
252 OVS_NO_THREAD_SAFETY_ANALYSIS
254 struct fat_rwlock
*rwlock
= CONST_CAST(struct fat_rwlock
*, rwlock_
);
255 struct fat_rwlock_slot
*this = fat_rwlock_get_slot__(rwlock
);
256 struct fat_rwlock_slot
*slot
;
258 switch (this->depth
) {
260 LIST_FOR_EACH (slot
, list_node
, &rwlock
->threads
) {
261 ovs_mutex_unlock(&slot
->mutex
);
263 ovs_mutex_unlock(&rwlock
->mutex
);
268 /* This thread doesn't hold any lock. */
272 ovs_mutex_unlock(&this->mutex
);