/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "guarded-list.h"
21 #include "ovs-thread.h"
22 #include "poll-loop.h"
25 #include "openvswitch/vlog.h"
27 VLOG_DEFINE_THIS_MODULE(ovs_rcu
);
30 void (*function
)(void *aux
);
35 struct ovs_list list_node
;
36 struct ovsrcu_cb cbs
[16];
40 struct ovsrcu_perthread
{
41 struct ovs_list list_node
; /* In global list. */
43 struct ovs_mutex mutex
;
45 struct ovsrcu_cbset
*cbset
;
46 char name
[16]; /* This thread's name. */
/* Incremented each time any thread quiesces; waiting for it to move past a
 * target value is how ovsrcu_synchronize() detects a grace period. */
static struct seq *global_seqno;

/* Thread-specific 'struct ovsrcu_perthread *'; NULL means quiescent. */
static pthread_key_t perthread_key;
static struct ovs_list ovsrcu_threads;         /* All registered threads. */
static struct ovs_mutex ovsrcu_threads_mutex;  /* Guards 'ovsrcu_threads'. */

/* Callback sets handed off by quiescing threads, awaiting a grace period. */
static struct guarded_list flushed_cbsets;
static struct seq *flushed_cbsets_seq;  /* Wakes the "urcu" thread. */
/* Forward declarations for this module's internal helpers. */
static void ovsrcu_init_module(void);
static void ovsrcu_flush_cbset(struct ovsrcu_perthread *);
static void ovsrcu_unregister__(struct ovsrcu_perthread *);
static bool ovsrcu_call_postponed(void);
static void *ovsrcu_postpone_thread(void *arg OVS_UNUSED);
64 static struct ovsrcu_perthread
*
65 ovsrcu_perthread_get(void)
67 struct ovsrcu_perthread
*perthread
;
71 perthread
= pthread_getspecific(perthread_key
);
73 const char *name
= get_subprogram_name();
75 perthread
= xmalloc(sizeof *perthread
);
76 ovs_mutex_init(&perthread
->mutex
);
77 perthread
->seqno
= seq_read(global_seqno
);
78 perthread
->cbset
= NULL
;
79 ovs_strlcpy(perthread
->name
, name
[0] ? name
: "main",
80 sizeof perthread
->name
);
82 ovs_mutex_lock(&ovsrcu_threads_mutex
);
83 list_push_back(&ovsrcu_threads
, &perthread
->list_node
);
84 ovs_mutex_unlock(&ovsrcu_threads_mutex
);
86 pthread_setspecific(perthread_key
, perthread
);
/* Indicates the end of a quiescent state.  See "Details" near the top of
 * ovs-rcu.h.
 *
 * Quiescent states don't stack or nest, so this always ends a quiescent state
 * even if ovsrcu_quiesce_start() was called multiple times in a row. */
void
ovsrcu_quiesce_end(void)
{
    /* Registering the perthread structure is what marks the thread as
     * non-quiescent. */
    ovsrcu_perthread_get();
}
103 ovsrcu_quiesced(void)
105 if (single_threaded()) {
106 ovsrcu_call_postponed();
108 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
109 if (ovsthread_once_start(&once
)) {
110 ovs_thread_create("urcu", ovsrcu_postpone_thread
, NULL
);
111 ovsthread_once_done(&once
);
116 /* Indicates the beginning of a quiescent state. See "Details" near the top of
119 ovsrcu_quiesce_start(void)
121 struct ovsrcu_perthread
*perthread
;
123 ovsrcu_init_module();
124 perthread
= pthread_getspecific(perthread_key
);
126 pthread_setspecific(perthread_key
, NULL
);
127 ovsrcu_unregister__(perthread
);
133 /* Indicates a momentary quiescent state. See "Details" near the top of
136 * Provides a full memory barrier via seq_change().
141 struct ovsrcu_perthread
*perthread
;
143 perthread
= ovsrcu_perthread_get();
144 perthread
->seqno
= seq_read(global_seqno
);
145 if (perthread
->cbset
) {
146 ovsrcu_flush_cbset(perthread
);
148 seq_change(global_seqno
);
154 ovsrcu_is_quiescent(void)
156 ovsrcu_init_module();
157 return pthread_getspecific(perthread_key
) == NULL
;
161 ovsrcu_synchronize(void)
163 unsigned int warning_threshold
= 1000;
164 uint64_t target_seqno
;
167 if (single_threaded()) {
171 target_seqno
= seq_read(global_seqno
);
172 ovsrcu_quiesce_start();
176 uint64_t cur_seqno
= seq_read(global_seqno
);
177 struct ovsrcu_perthread
*perthread
;
178 char stalled_thread
[16];
179 unsigned int elapsed
;
182 ovs_mutex_lock(&ovsrcu_threads_mutex
);
183 LIST_FOR_EACH (perthread
, list_node
, &ovsrcu_threads
) {
184 if (perthread
->seqno
<= target_seqno
) {
185 ovs_strlcpy(stalled_thread
, perthread
->name
,
186 sizeof stalled_thread
);
191 ovs_mutex_unlock(&ovsrcu_threads_mutex
);
197 elapsed
= time_msec() - start
;
198 if (elapsed
>= warning_threshold
) {
199 VLOG_WARN("blocked %u ms waiting for %s to quiesce",
200 elapsed
, stalled_thread
);
201 warning_threshold
*= 2;
203 poll_timer_wait_until(start
+ warning_threshold
);
205 seq_wait(global_seqno
, cur_seqno
);
208 ovsrcu_quiesce_end();
211 /* Registers 'function' to be called, passing 'aux' as argument, after the
214 * This function is more conveniently called through the ovsrcu_postpone()
215 * macro, which provides a type-safe way to allow 'function''s parameter to be
216 * any pointer type. */
218 ovsrcu_postpone__(void (*function
)(void *aux
), void *aux
)
220 struct ovsrcu_perthread
*perthread
= ovsrcu_perthread_get();
221 struct ovsrcu_cbset
*cbset
;
222 struct ovsrcu_cb
*cb
;
224 cbset
= perthread
->cbset
;
226 cbset
= perthread
->cbset
= xmalloc(sizeof *perthread
->cbset
);
230 cb
= &cbset
->cbs
[cbset
->n_cbs
++];
231 cb
->function
= function
;
234 if (cbset
->n_cbs
>= ARRAY_SIZE(cbset
->cbs
)) {
235 ovsrcu_flush_cbset(perthread
);
240 ovsrcu_call_postponed(void)
242 struct ovsrcu_cbset
*cbset
;
243 struct ovs_list cbsets
;
245 guarded_list_pop_all(&flushed_cbsets
, &cbsets
);
246 if (list_is_empty(&cbsets
)) {
250 ovsrcu_synchronize();
252 LIST_FOR_EACH_POP (cbset
, list_node
, &cbsets
) {
253 struct ovsrcu_cb
*cb
;
255 for (cb
= cbset
->cbs
; cb
< &cbset
->cbs
[cbset
->n_cbs
]; cb
++) {
256 cb
->function(cb
->aux
);
265 ovsrcu_postpone_thread(void *arg OVS_UNUSED
)
267 pthread_detach(pthread_self());
270 uint64_t seqno
= seq_read(flushed_cbsets_seq
);
271 if (!ovsrcu_call_postponed()) {
272 seq_wait(flushed_cbsets_seq
, seqno
);
281 ovsrcu_flush_cbset(struct ovsrcu_perthread
*perthread
)
283 struct ovsrcu_cbset
*cbset
= perthread
->cbset
;
286 guarded_list_push_back(&flushed_cbsets
, &cbset
->list_node
, SIZE_MAX
);
287 perthread
->cbset
= NULL
;
289 seq_change(flushed_cbsets_seq
);
294 ovsrcu_unregister__(struct ovsrcu_perthread
*perthread
)
296 if (perthread
->cbset
) {
297 ovsrcu_flush_cbset(perthread
);
300 ovs_mutex_lock(&ovsrcu_threads_mutex
);
301 list_remove(&perthread
->list_node
);
302 ovs_mutex_unlock(&ovsrcu_threads_mutex
);
304 ovs_mutex_destroy(&perthread
->mutex
);
307 seq_change(global_seqno
);
/* pthread TSD destructor for 'perthread_key': unregisters a thread that
 * exits without quiescing first. */
static void
ovsrcu_thread_exit_cb(void *perthread)
{
    ovsrcu_unregister__(perthread);
}
317 ovsrcu_init_module(void)
319 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
320 if (ovsthread_once_start(&once
)) {
321 global_seqno
= seq_create();
322 xpthread_key_create(&perthread_key
, ovsrcu_thread_exit_cb
);
323 list_init(&ovsrcu_threads
);
324 ovs_mutex_init(&ovsrcu_threads_mutex
);
326 guarded_list_init(&flushed_cbsets
);
327 flushed_cbsets_seq
= seq_create();
329 ovsthread_once_done(&once
);