/*
- * Copyright (c) 2014 Nicira, Inc.
+ * Copyright (c) 2014, 2017 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
#include <config.h>
+#include <errno.h>
#include "ovs-rcu.h"
+#include "fatal-signal.h"
#include "guarded-list.h"
-#include "list.h"
+#include "openvswitch/list.h"
#include "ovs-thread.h"
#include "poll-loop.h"
#include "seq.h"
#include "timeval.h"
+#include "util.h"
#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ovs_rcu);
static struct seq *flushed_cbsets_seq;
static void ovsrcu_init_module(void);
+static void ovsrcu_flush_cbset__(struct ovsrcu_perthread *, bool);
static void ovsrcu_flush_cbset(struct ovsrcu_perthread *);
static void ovsrcu_unregister__(struct ovsrcu_perthread *);
static bool ovsrcu_call_postponed(void);
sizeof perthread->name);
ovs_mutex_lock(&ovsrcu_threads_mutex);
- list_push_back(&ovsrcu_threads, &perthread->list_node);
+ ovs_list_push_back(&ovsrcu_threads, &perthread->list_node);
ovs_mutex_unlock(&ovsrcu_threads_mutex);
pthread_setspecific(perthread_key, perthread);
ovsrcu_quiesced();
}
+/* Attempts to quiesce the current thread without blocking.
+ *
+ * Returns 0 if the thread quiesced: the global seqno lock was acquired,
+ * any postponed callbacks were flushed to the global list, and the global
+ * sequence number was advanced.  Returns EBUSY if the lock was not
+ * immediately available, in which case nothing happens.
+ *
+ * Must not be called in a single-threaded process (asserted below).
+ * NOTE(review): assumes seq_try_lock() returns 0 on success and nonzero
+ * on contention — the success path runs only when it returns zero. */
+int
+ovsrcu_try_quiesce(void)
+{
+ struct ovsrcu_perthread *perthread;
+ int ret = EBUSY;
+
+ ovs_assert(!single_threaded());
+ perthread = ovsrcu_perthread_get();
+ if (!seq_try_lock()) {
+ /* Lock held: record this thread's view of the global sequence and
+ * flush any postponed callbacks, using the "_protected" variants
+ * that require the seq lock to already be held. */
+ perthread->seqno = seq_read_protected(global_seqno);
+ if (perthread->cbset) {
+ ovsrcu_flush_cbset__(perthread, true);
+ }
+ seq_change_protected(global_seqno);
+ seq_unlock();
+ ovsrcu_quiesced();
+ ret = 0;
+ }
+ return ret;
+}
+
bool
ovsrcu_is_quiescent(void)
{
ovs_mutex_lock(&ovsrcu_threads_mutex);
LIST_FOR_EACH (perthread, list_node, &ovsrcu_threads) {
if (perthread->seqno <= target_seqno) {
- ovs_strlcpy(stalled_thread, perthread->name,
- sizeof stalled_thread);
+ ovs_strlcpy_arrays(stalled_thread, perthread->name);
done = false;
break;
}
/* Registers 'function' to be called, passing 'aux' as argument, after the
* next grace period.
*
+ * The call is guaranteed to happen after the next time all participating
+ * threads have quiesced at least once, but there is no guarantee that all
+ * registered functions are called as early as possible, or that the functions
+ * registered by different threads would be called in the order the
+ * registrations took place. In particular, even if two threads provably
+ * register a function each in a specific order, the functions may still be
+ * called in the opposite order, depending on the timing of when the threads
+ * call ovsrcu_quiesce(), how many functions they postpone, and when the
+ * ovs-rcu thread happens to grab the functions to be called.
+ *
+ * All functions registered by a single thread are guaranteed to execute in the
+ * registering order, however.
+ *
* This function is more conveniently called through the ovsrcu_postpone()
* macro, which provides a type-safe way to allow 'function''s parameter to be
* any pointer type. */
struct ovs_list cbsets;
guarded_list_pop_all(&flushed_cbsets, &cbsets);
- if (list_is_empty(&cbsets)) {
+ if (ovs_list_is_empty(&cbsets)) {
return false;
}
}
static void
-ovsrcu_flush_cbset(struct ovsrcu_perthread *perthread)
+ovsrcu_flush_cbset__(struct ovsrcu_perthread *perthread, bool protected)
{
struct ovsrcu_cbset *cbset = perthread->cbset;
guarded_list_push_back(&flushed_cbsets, &cbset->list_node, SIZE_MAX);
perthread->cbset = NULL;
- seq_change(flushed_cbsets_seq);
+ if (protected) {
+ seq_change_protected(flushed_cbsets_seq);
+ } else {
+ seq_change(flushed_cbsets_seq);
+ }
}
}
+/* Flushes 'perthread''s set of postponed callbacks to the global
+ * 'flushed_cbsets' list, notifying via the unprotected seq_change() path
+ * (for callers that do not hold the seq lock).  Thin wrapper around
+ * ovsrcu_flush_cbset__(). */
+static void
+ovsrcu_flush_cbset(struct ovsrcu_perthread *perthread)
+{
+ ovsrcu_flush_cbset__(perthread, false);
+}
+
static void
ovsrcu_unregister__(struct ovsrcu_perthread *perthread)
{
}
ovs_mutex_lock(&ovsrcu_threads_mutex);
- list_remove(&perthread->list_node);
+ ovs_list_remove(&perthread->list_node);
ovs_mutex_unlock(&ovsrcu_threads_mutex);
ovs_mutex_destroy(&perthread->mutex);
ovsrcu_unregister__(perthread);
}
+/* Cancels the callback to ovsrcu_thread_exit_cb().
+ *
+ * Cancelling the call to the destructor during the main thread exit
+ * is needed while using pthreads-win32 library in Windows. It has been
+ * observed that in pthreads-win32, a call to the destructor during
+ * main thread exit causes undefined behavior. */
+/* Registered via fatal_signal_add_hook() in ovsrcu_init_module(), so it
+ * runs as part of process termination handling. */
+static void
+ovsrcu_cancel_thread_exit_cb(void *aux OVS_UNUSED)
+{
+ /* Clearing the thread-specific value means pthreads will not invoke the
+ * ovsrcu_thread_exit_cb destructor for this thread on exit. */
+ pthread_setspecific(perthread_key, NULL);
+}
+
static void
ovsrcu_init_module(void)
{
if (ovsthread_once_start(&once)) {
global_seqno = seq_create();
xpthread_key_create(&perthread_key, ovsrcu_thread_exit_cb);
- list_init(&ovsrcu_threads);
+ fatal_signal_add_hook(ovsrcu_cancel_thread_exit_cb, NULL, NULL, true);
+ ovs_list_init(&ovsrcu_threads);
ovs_mutex_init(&ovsrcu_threads_mutex);
guarded_list_init(&flushed_cbsets);