]>
Commit | Line | Data |
---|---|---|
b8e67377 BP |
1 | /* |
2 | * Copyright (c) 2013, 2014 Nicira, Inc. | |
3 | * | |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | * you may not use this file except in compliance with the License. | |
6 | * You may obtain a copy of the License at: | |
7 | * | |
8 | * http://www.apache.org/licenses/LICENSE-2.0 | |
9 | * | |
10 | * Unless required by applicable law or agreed to in writing, software | |
11 | * distributed under the License is distributed on an "AS IS" BASIS, | |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | * See the License for the specific language governing permissions and | |
14 | * limitations under the License. | |
15 | */ | |
16 | ||
17 | #include <config.h> | |
18 | ||
19 | #include "fat-rwlock.h" | |
20 | ||
21 | #include <errno.h> | |
22 | ||
ee89ea7b | 23 | #include "openvswitch/hmap.h" |
b19bab5b | 24 | #include "openvswitch/list.h" |
b8e67377 BP |
25 | #include "ovs-thread.h" |
26 | #include "random.h" | |
27 | ||
/* Per-thread bookkeeping for one fat_rwlock.  Each thread that has ever
 * taken the lock owns exactly one slot per rwlock, found via the rwlock's
 * thread-specific key. */
struct fat_rwlock_slot {
    /* Membership in the owning rwlock's list of "struct fat_rwlock_slot"s.
     * The list is walked by writers (to collect every reader mutex) and by
     * fat_rwlock_destroy() (to free all remaining slots). */
    struct ovs_list list_node;  /* In struct fat_rwlock's 'threads' list. */
    struct fat_rwlock *rwlock;  /* Owner. */

    /* Mutex.
     *
     * A thread holding the read-lock holds its own mutex.
     *
     * A thread holding the write-lock holds every thread's mutex, plus
     * 'rwlock->mutex'. */
    struct ovs_mutex mutex;

    /* This thread's locking status for 'rwlock':
     *
     *     - 0: This thread does not have any lock on 'rwlock'.  This thread
     *       does not have 'mutex' locked.
     *
     *     - 1: This thread has a read-lock on 'rwlock' and holds 'mutex'.
     *
     *     - 2...UINT_MAX-1: This thread has recursively taken the read-lock
     *       on 'rwlock' to the level of 'depth'.  This thread holds 'mutex'.
     *
     *     - UINT_MAX: This thread has the write-lock on 'rwlock' and holds
     *       'mutex' (plus the 'mutex' of all of 'rwlock''s other slots).
     *
     * Accessed only by the slot's own thread, so no synchronization is
     * needed. */
    unsigned int depth;
};
61 | ||
62 | static void | |
63 | free_slot(struct fat_rwlock_slot *slot) | |
64 | { | |
65 | if (slot->depth) { | |
66 | abort(); | |
67 | } | |
68 | ||
417e7e66 | 69 | ovs_list_remove(&slot->list_node); |
2fec66db | 70 | free_cacheline(slot); |
b8e67377 BP |
71 | } |
72 | ||
73 | static void | |
74 | slot_destructor(void *slot_) | |
75 | { | |
76 | struct fat_rwlock_slot *slot = slot_; | |
77 | struct fat_rwlock *rwlock = slot->rwlock; | |
78 | ||
79 | ovs_mutex_lock(&rwlock->mutex); | |
80 | free_slot(slot); | |
81 | ovs_mutex_unlock(&rwlock->mutex); | |
82 | } | |
83 | ||
84 | /* Initialize 'rwlock' as a new fat_rwlock. */ | |
85 | void | |
86 | fat_rwlock_init(struct fat_rwlock *rwlock) | |
87 | { | |
88 | ovsthread_key_create(&rwlock->key, slot_destructor); | |
89 | ovs_mutex_init(&rwlock->mutex); | |
90 | ovs_mutex_lock(&rwlock->mutex); | |
417e7e66 | 91 | ovs_list_init(&rwlock->threads); |
b8e67377 BP |
92 | ovs_mutex_unlock(&rwlock->mutex); |
93 | } | |
94 | ||
95 | /* Destroys 'rwlock', which must not be locked or otherwise in use by any | |
96 | * thread. */ | |
97 | void | |
98 | fat_rwlock_destroy(struct fat_rwlock *rwlock) | |
99 | { | |
100 | struct fat_rwlock_slot *slot, *next; | |
101 | ||
102 | /* Order is important here. By destroying the thread-specific data first, | |
103 | * before we destroy the slots, we ensure that the thread-specific | |
104 | * data destructor can't race with our loop below. */ | |
105 | ovsthread_key_delete(rwlock->key); | |
106 | ||
107 | LIST_FOR_EACH_SAFE (slot, next, list_node, &rwlock->threads) { | |
108 | free_slot(slot); | |
109 | } | |
7156f7b9 | 110 | ovs_mutex_destroy(&rwlock->mutex); |
b8e67377 BP |
111 | } |
112 | ||
113 | static struct fat_rwlock_slot * | |
114 | fat_rwlock_get_slot__(struct fat_rwlock *rwlock) | |
115 | { | |
116 | struct fat_rwlock_slot *slot; | |
b8e67377 BP |
117 | |
118 | /* Fast path. */ | |
119 | slot = ovsthread_getspecific(rwlock->key); | |
120 | if (slot) { | |
121 | return slot; | |
122 | } | |
123 | ||
124 | /* Slow path: create a new slot for 'rwlock' in this thread. */ | |
125 | ||
2fec66db | 126 | slot = xmalloc_cacheline(sizeof *slot); |
b8e67377 BP |
127 | slot->rwlock = rwlock; |
128 | ovs_mutex_init(&slot->mutex); | |
129 | slot->depth = 0; | |
130 | ||
131 | ovs_mutex_lock(&rwlock->mutex); | |
417e7e66 | 132 | ovs_list_push_back(&rwlock->threads, &slot->list_node); |
b8e67377 BP |
133 | ovs_mutex_unlock(&rwlock->mutex); |
134 | ||
135 | ovsthread_setspecific(rwlock->key, slot); | |
136 | ||
137 | return slot; | |
138 | } | |
139 | ||
140 | /* Locks 'rwlock' for reading. The read-lock is recursive: it may be acquired | |
141 | * any number of times by a single thread (which must then release it the same | |
142 | * number of times for it to truly be released). */ | |
143 | void | |
144 | fat_rwlock_rdlock(const struct fat_rwlock *rwlock_) | |
145 | OVS_ACQ_RDLOCK(rwlock_) | |
146 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
147 | { | |
148 | struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_); | |
149 | struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock); | |
150 | ||
151 | switch (this->depth) { | |
152 | case UINT_MAX: | |
153 | /* This thread already holds the write-lock. */ | |
154 | abort(); | |
155 | ||
156 | case 0: | |
157 | ovs_mutex_lock(&this->mutex); | |
158 | /* fall through */ | |
159 | default: | |
160 | this->depth++; | |
161 | break; | |
162 | } | |
163 | } | |
164 | ||
1ef49150 DDP |
165 | static struct fat_rwlock_slot * |
166 | fat_rwlock_try_get_slot__(struct fat_rwlock *rwlock) | |
167 | { | |
168 | struct fat_rwlock_slot *slot; | |
169 | ||
170 | /* Fast path. */ | |
171 | slot = ovsthread_getspecific(rwlock->key); | |
172 | if (slot) { | |
173 | return slot; | |
174 | } | |
175 | ||
176 | /* Slow path: create a new slot for 'rwlock' in this thread. */ | |
177 | ||
178 | if (!ovs_mutex_trylock(&rwlock->mutex)) { | |
179 | slot = xmalloc_cacheline(sizeof *slot); | |
180 | slot->rwlock = rwlock; | |
181 | ovs_mutex_init(&slot->mutex); | |
182 | slot->depth = 0; | |
183 | ||
417e7e66 | 184 | ovs_list_push_back(&rwlock->threads, &slot->list_node); |
1ef49150 DDP |
185 | ovs_mutex_unlock(&rwlock->mutex); |
186 | ovsthread_setspecific(rwlock->key, slot); | |
187 | } | |
188 | ||
189 | return slot; | |
190 | } | |
191 | ||
b8e67377 BP |
192 | /* Tries to lock 'rwlock' for reading. If successful, returns 0. If taking |
193 | * the lock would require blocking, returns EBUSY (without blocking). */ | |
194 | int | |
195 | fat_rwlock_tryrdlock(const struct fat_rwlock *rwlock_) | |
196 | OVS_TRY_RDLOCK(0, rwlock_) | |
197 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
198 | { | |
199 | struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_); | |
1ef49150 | 200 | struct fat_rwlock_slot *this = fat_rwlock_try_get_slot__(rwlock); |
b8e67377 BP |
201 | int error; |
202 | ||
1ef49150 DDP |
203 | if (!this) { |
204 | return EBUSY; | |
205 | } | |
206 | ||
b8e67377 BP |
207 | switch (this->depth) { |
208 | case UINT_MAX: | |
209 | return EBUSY; | |
210 | ||
211 | case 0: | |
212 | error = ovs_mutex_trylock(&this->mutex); | |
213 | if (error) { | |
214 | return error; | |
215 | } | |
216 | /* fall through */ | |
217 | default: | |
218 | this->depth++; | |
219 | break; | |
220 | } | |
221 | ||
222 | return 0; | |
223 | } | |
224 | ||
225 | /* Locks 'rwlock' for writing. | |
226 | * | |
227 | * The write lock is not recursive. */ | |
228 | void | |
229 | fat_rwlock_wrlock(const struct fat_rwlock *rwlock_) | |
230 | OVS_ACQ_WRLOCK(rwlock_) | |
231 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
232 | { | |
233 | struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_); | |
234 | struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock); | |
235 | struct fat_rwlock_slot *slot; | |
236 | ||
237 | ovs_assert(!this->depth); | |
238 | this->depth = UINT_MAX; | |
239 | ||
240 | ovs_mutex_lock(&rwlock->mutex); | |
241 | LIST_FOR_EACH (slot, list_node, &rwlock->threads) { | |
242 | ovs_mutex_lock(&slot->mutex); | |
243 | } | |
244 | } | |
245 | ||
246 | /* Unlocks 'rwlock', which the current thread must have locked for reading or | |
247 | * for writing. If the read lock has been taken recursively, it must be | |
248 | * released the same number of times to be truly released. */ | |
249 | void | |
250 | fat_rwlock_unlock(const struct fat_rwlock *rwlock_) | |
251 | OVS_RELEASES(rwlock_) | |
252 | OVS_NO_THREAD_SAFETY_ANALYSIS | |
253 | { | |
254 | struct fat_rwlock *rwlock = CONST_CAST(struct fat_rwlock *, rwlock_); | |
255 | struct fat_rwlock_slot *this = fat_rwlock_get_slot__(rwlock); | |
256 | struct fat_rwlock_slot *slot; | |
257 | ||
258 | switch (this->depth) { | |
259 | case UINT_MAX: | |
260 | LIST_FOR_EACH (slot, list_node, &rwlock->threads) { | |
261 | ovs_mutex_unlock(&slot->mutex); | |
262 | } | |
263 | ovs_mutex_unlock(&rwlock->mutex); | |
264 | this->depth = 0; | |
265 | break; | |
266 | ||
267 | case 0: | |
268 | /* This thread doesn't hold any lock. */ | |
269 | abort(); | |
270 | ||
271 | case 1: | |
272 | ovs_mutex_unlock(&this->mutex); | |
273 | default: | |
274 | this->depth--; | |
275 | break; | |
276 | } | |
277 | } |