+# -*- sh -*-
+# vi:syntax=sh
# This is a shell function library sourced by some Open vSwitch scripts.
# It is not intended to be invoked on its own.
[ "$1" = "`cat /proc/$2/comm`" ]
}
# version_geq version_a version_b
#
# Compare (dot separated) version numbers. Returns true (exit code 0) if
# version_a is greater than or equal to version_b, otherwise false (exit
# code 1).  Components are compared numerically; a missing component
# compares as 0, so "2.5" is equal to "2.5.0".
version_geq() {
    # Feed the versions to awk via -v rather than "echo $1 $2": with the
    # old unquoted echo, an empty version_a shifted version_b into awk's
    # $1 and made the comparison succeed spuriously.
    awk -v va="$1" -v vb="$2" 'BEGIN {
        n1 = split(va, a, ".");
        n2 = split(vb, b, ".");
        n = (n1 > n2) ? n1 : n2;
        for (i = 1; i <= n; i++) {
            if (a[i]+0 < b[i]+0) exit 1
            if (a[i]+0 > b[i]+0) exit 0
        }
    }'
}
+
# install_dir dir [mode]
#
# Create directory "dir", if it does not already exist, with octal "mode"
# (default 755).  Ownership is root:root unless OVS_USER ("user:group",
# either half optional) is set.  SELinux context is restored best-effort.
install_dir () {
    DIR="$1"
    INSTALL_MODE="${2:-755}"
    INSTALL_USER="root"
    INSTALL_GROUP="root"
    # ${OVS_USER:-} keeps this safe if the sourcing script uses "set -u".
    [ "${OVS_USER:-}" != "" ] && INSTALL_USER="${OVS_USER%:*}"
    [ "${OVS_USER:-}" != "" ] && [ "${OVS_USER##*:}" != "" ] && \
        INSTALL_GROUP="${OVS_USER##*:}"

    if test ! -d "$DIR"; then
        install -d -m "$INSTALL_MODE" -o "$INSTALL_USER" -g "$INSTALL_GROUP" "$DIR"
        # restorecon is absent on non-SELinux systems; without "|| true"
        # its exit status (127) would become this function's status even
        # though the directory was created successfully.
        restorecon "$DIR" >/dev/null 2>&1 || true
    fi
}
+
start_daemon () {
priority=$1
wrapper=$2
strace=""
# drop core files in a sensible place
- test -d "$DAEMON_CWD" || install -d -m 755 -o root -g root "$DAEMON_CWD"
+ install_dir "$DAEMON_CWD"
set "$@" --no-chdir
cd "$DAEMON_CWD"
# log file
- test -d "$logdir" || install -d -m 755 -o root -g root "$logdir"
+ install_dir "$logdir" "750"
set "$@" --log-file="$logdir/$daemon.log"
# pidfile and monitoring
- test -d "$rundir" || install -d -m 755 -o root -g root "$rundir"
+ install_dir "$rundir"
set "$@" --pidfile="$rundir/$daemon.pid"
- set "$@" --detach --monitor
+ set "$@" --detach
+ test X"$MONITOR" = Xno || set "$@" --monitor
# wrapper
case $wrapper in
set nice -n "$priority" "$@"
fi
- action "Starting $daemon" "$@"
+ action "Starting $daemon" "$@" || return 1
if test X"$strace" != X; then
# Strace doesn't have the -D option so we attach after the fact.
stop_daemon () {
if test -e "$rundir/$1.pid"; then
if pid=`cat "$rundir/$1.pid"`; then
- for action in TERM .1 .25 .65 1 1 1 1 KILL 1 1 1 2 10 15 30 FAIL; do
+ if pid_exists "$pid" >/dev/null 2>&1; then :; else
+ rm -f $rundir/$1.$pid.ctl $rundir/$1.$pid
+ return 0
+ fi
+
+ graceful="EXIT .1 .25 .65 1"
+ actions="TERM .1 .25 .65 1 1 1 1 \
+ KILL 1 1 1 2 10 15 30 \
+ FAIL"
+ version=`ovs-appctl -T 1 -t $rundir/$1.$pid.ctl version \
+ | awk 'NR==1{print $NF}'`
+
+ # Use `ovs-appctl exit` only if the running daemon version
+ # is >= 2.5.90. This script might be used during upgrade to
+ # stop older versions of daemons which do not behave correctly
+ # with `ovs-appctl exit` (e.g. ovs-vswitchd <= 2.5.0 deletes
+ # internal ports).
+ if version_geq "$version" "2.5.90"; then
+ actions="$graceful $actions"
+ fi
+ for action in $actions; do
if pid_exists "$pid" >/dev/null 2>&1; then :; else
return 0
fi
case $action in
+ EXIT)
+ action "Exiting $1 ($pid)" \
+ ${bindir}/ovs-appctl -T 1 -t $rundir/$1.$pid.ctl exit $2
+ ;;
TERM)
action "Killing $1 ($pid)" kill $pid
;;
while test $# != 0; do
case $1 in
dynamic)
            # XXX: According to 'man ip-address', "dynamic" is only
            # used for ipv6 addresses.  But, at least on RHEL 7.4
            # (iproute-3.10.0-87), it is being used for ipv4
            # addresses assigned with dhcp.
+ if [ "$family" = "inet" ]; then
+ shift
+ continue
+ fi
# Omit kernel-maintained route.
continue 2
;;
done
}
# run_as_ovsuser command [args]
#
# Execute "command" as the user named in OVS_USER ("user:group"), dropping
# privileges with setpriv; with OVS_USER unset or empty, run it unchanged.
run_as_ovsuser() {
    if [ "$OVS_USER" = "" ]; then
        "$@"
    else
        local user="${OVS_USER%:*}"
        local uid gid groups
        uid=$(id -u "$user")
        gid=$(id -g "$user")
        groups=$(id -G "$user" | tr ' ' ',')
        setpriv --reuid "$uid" --regid "$gid" --groups "$groups" "$@"
    fi
}
+
ovsdb_tool () {
- ovsdb-tool -vconsole:off "$@"
+ run_as_ovsuser ovsdb-tool -vconsole:off "$@"
}
# create_db
#
# Create a new, empty standalone database $DB_FILE from schema $DB_SCHEMA.
# Note: operates on the caller's DB_FILE/DB_SCHEMA variables; positional
# arguments passed by some callers (see upgrade_db) are ignored.
create_db () {
    action "Creating empty database $DB_FILE" ovsdb_tool create "$DB_FILE" "$DB_SCHEMA"
}
# backup_db
#
# Copy $DB_FILE aside to "$DB_FILE.backup<version>-<cksum>" before it is
# converted or replaced.  Side effect: sets the global "backup" to the
# backup's path; create_cluster and join_cluster reuse it as a source file.
backup_db () {
    # Back up the old version.
    version=`ovsdb_tool db-version "$DB_FILE"`
    cksum=`ovsdb_tool db-cksum "$DB_FILE" | awk '{print $1}'`
    backup=$DB_FILE.backup$version-$cksum
    action "Backing up database to $backup" cp "$DB_FILE" "$backup" || return 1
}
+
upgrade_db () {
DB_FILE="$1"
DB_SCHEMA="$2"
schemaver=`ovsdb_tool schema-version "$DB_SCHEMA"`
if test ! -e "$DB_FILE"; then
log_warning_msg "$DB_FILE does not exist"
- install -d -m 755 -o root -g root `dirname $DB_FILE`
+ install_dir `dirname $DB_FILE`
create_db "$DB_FILE" "$DB_SCHEMA"
- elif test X"`ovsdb_tool needs-conversion "$DB_FILE" "$DB_SCHEMA"`" != Xno; then
- # Back up the old version.
- version=`ovsdb_tool db-version "$DB_FILE"`
- cksum=`ovsdb_tool db-cksum "$DB_FILE" | awk '{print $1}'`
- backup=$DB_FILE.backup$version-$cksum
- action "Backing up database to $backup" cp "$DB_FILE" "$backup" || return 1
+ elif test X"`ovsdb_tool needs-conversion "$DB_FILE" "$DB_SCHEMA"`" = Xyes; then
+ backup_db || return 1
# Compact database. This is important if the old schema did not enable
# garbage collection (i.e. if it did not have any tables with "isRoot":
fi
fi
}
+
# upgrade_cluster schema-file server
#
# Upgrade the clustered database served at "server" to the schema in
# "schema-file", if its current schema version is older.  Warns (but does
# not fail) when the server already runs a newer schema.
upgrade_cluster () {
    local DB_SCHEMA=$1 DB_SERVER=$2
    local schema_name db_version target_version

    # Assign separately from "local": "local var=$(cmd) || return" can
    # never fire, because "local" itself succeeds and masks the command's
    # exit status (ShellCheck SC2155).
    schema_name=$(ovsdb-tool schema-name "$DB_SCHEMA") || return 1

    action "Waiting for $schema_name to come up" ovsdb-client -t 30 wait "$DB_SERVER" "$schema_name" connected || return $?
    db_version=$(ovsdb-client -t 10 get-schema-version "$DB_SERVER" "$schema_name") || return $?
    target_version=$(ovsdb-tool schema-version "$DB_SCHEMA") || return $?

    if ovsdb-tool compare-versions "$db_version" == "$target_version"; then
        :
    elif ovsdb-tool compare-versions "$db_version" ">" "$target_version"; then
        log_warning_msg "Database $schema_name has newer schema version ($db_version) than our local schema ($target_version), possibly an upgrade is partially complete?"
    else
        action "Upgrading database $schema_name from schema version $db_version to $target_version" ovsdb-client -t 30 convert "$DB_SERVER" "$DB_SCHEMA"
    fi
}
+
# create_cluster db schema local-addr
#
# Create the clustered database "db" listening on "local-addr": from
# scratch using "schema" when no database exists, or by converting an
# existing standalone database (backed up first).
create_cluster () {
    DB_FILE="$1"
    DB_SCHEMA="$2"
    LOCAL_ADDR="$3"

    if [ ! -e "$DB_FILE" ]; then
        action "Creating cluster database $DB_FILE" \
            ovsdb_tool create-cluster "$DB_FILE" "$DB_SCHEMA" "$LOCAL_ADDR"
    elif ovsdb_tool db-is-standalone "$DB_FILE"; then
        # Convert standalone database to clustered.  backup_db sets the
        # global "backup", which serves as the conversion source below.
        backup_db || return 1
        action "Creating cluster database $DB_FILE from existing one" \
            ovsdb_tool create-cluster "$DB_FILE" "$backup" "$LOCAL_ADDR"
    fi
}
+
# join_cluster db schema-name local-addr remote-addr
#
# Join this node's database "db" to an existing cluster reachable at
# "remote-addr", listening locally on "local-addr".  An existing
# standalone database is backed up before joining.
join_cluster() {
    DB_FILE="$1"
    SCHEMA_NAME="$2"
    LOCAL_ADDR="$3"
    REMOTE_ADDR="$4"

    if test ! -e "$DB_FILE"; then
        ovsdb_tool join-cluster "$DB_FILE" "$SCHEMA_NAME" "$LOCAL_ADDR" "$REMOTE_ADDR"
    elif ovsdb_tool db-is-standalone "$DB_FILE"; then
        # Backup standalone database and join cluster.
        backup_db || return 1
        # Fix: "ovsdb-tool join-cluster" requires at least one remote
        # address; it was omitted here, so this branch could not succeed.
        # NOTE(review): join-cluster may also refuse to overwrite the
        # existing standalone $DB_FILE -- confirm whether it must be
        # removed after the backup.
        action "Joining $DB_FILE to cluster" \
            ovsdb_tool join-cluster "$DB_FILE" "$SCHEMA_NAME" "$LOCAL_ADDR" \
                "$REMOTE_ADDR"
    fi
}
+
# ovs_vsctl [args]
#
# Run ovs-vsctl with --no-wait so the command does not block waiting for
# ovs-vswitchd to apply the change (needed while the daemon is stopped
# or still starting).
ovs_vsctl () {
    ovs-vsctl --no-wait "$@"
}
+
+## ----------------- ##
+## force-reload-kmod ##
+## ----------------- ##
+
# ovs_kmod_ctl [args]
#
# Forward to the ovs-kmod-ctl helper script.  $dir0 is presumably set by
# the sourcing script to this library's directory -- TODO confirm.
ovs_kmod_ctl () {
    "$dir0/ovs-kmod-ctl" "$@"
}
+
# internal_interfaces
#
# Print, space-separated and deduplicated, the internal interfaces that
# actually exist:
#
#   - one implicit internal interface per bridge, whether or not it has a
#     properly-typed Interface record, and
#   - every Interface record whose 'type' is 'internal'.
#
# Names with no entry under /sys/class/net are skipped.
internal_interfaces () {
    for netdev in $( (ovs_vsctl --bare \
                      -- --columns=name find Interface type=internal \
                      -- list-br) | sort -u)
    do
        if test -e "/sys/class/net/$netdev"; then
            printf "%s " "$netdev"
        fi
    done
}
+
# ovs_save mode outfile
#
# Run the ovs-save helper with "mode" (e.g. save-flows) over all real
# bridges, writing an executable restore script to "outfile".
#
# Returns 0 when there are no bridges (nothing to save) or the script was
# written successfully; non-zero when bridges exist but ovs-save failed.
# The fall-through ordering below encodes that contract.
ovs_save () {
    bridges=`ovs_vsctl -- --real list-br`
    # ${bridges} is intentionally unquoted: each bridge name must become
    # a separate argument to ovs-save.
    if [ -n "${bridges}" ] && \
       "$datadir/scripts/ovs-save" "$1" ${bridges} > "$2"; then
        chmod +x "$2"
        return 0
    fi
    # Reached either with no bridges (this test succeeds -> return 0) or
    # after an ovs-save failure (test fails -> function returns non-zero).
    [ -z "${bridges}" ] && return 0
}
+
# save_flows_if_required
#
# Write a flow-restore script to $script_flows, unless the bridges are
# being deleted anyway (DELETE_BRIDGES=yes), in which case the flows are
# intentionally discarded.
save_flows_if_required () {
    case X"$DELETE_BRIDGES" in
        Xyes)
            ;;
        *)
            action "Saving flows" ovs_save save-flows "${script_flows}"
            ;;
    esac
}
+
# save_interfaces
#
# Write an interface-restore script for the interfaces listed in the
# global "ifaces" into "$script_interfaces".  ${ifaces} is intentionally
# unquoted so each space-separated name becomes a separate argument.
save_interfaces () {
    "$datadir/scripts/ovs-save" save-interfaces ${ifaces} \
        > "${script_interfaces}"
}
+
# flow_restore_wait
#
# Tell ovs-vswitchd (when it is managed by this script) to hold off
# normal flow processing until the saved flows have been restored.
flow_restore_wait () {
    case X"${OVS_VSWITCHD:-yes}" in
        Xyes)
            ovs_vsctl set open_vswitch . \
                other_config:flow-restore-wait="true"
            ;;
    esac
}
+
# flow_restore_complete
#
# Clear the flow-restore-wait flag set by flow_restore_wait, letting
# ovs-vswitchd resume normal flow processing.
flow_restore_complete () {
    case X"${OVS_VSWITCHD:-yes}" in
        Xyes)
            ovs_vsctl --if-exists remove open_vswitch . other_config \
                flow-restore-wait="true"
            ;;
    esac
}
+
# restore_flows
#
# Run the flow-restore script saved by save_flows_if_required, if any.
# As with restore_interfaces, having nothing to restore is not an error:
# return 0 when the script is missing or not executable (the old
# "[ -x ] && action" form leaked exit status 1 in that case).
restore_flows () {
    [ ! -x "${script_flows}" ] && return 0
    action "Restoring saved flows" "${script_flows}"
}
+
# restore_interfaces
#
# Run the interface-restore script written by save_interfaces, then log
# the script and its exit status to syslog.  Nothing to restore is not
# an error.
restore_interfaces () {
    [ ! -x "${script_interfaces}" ] && return 0
    action "Restoring interface configuration" "${script_interfaces}"
    rc=$?
    # Log as harmless debug noise on success, as an error on failure.
    if test $rc = 0; then
        level=debug
    else
        level=err
    fi
    log="logger -p daemon.$level -t ovs-save"
    $log "interface restore script exited with status $rc:"
    # Also log the script's contents for post-mortem inspection.
    $log -f "$script_interfaces"
}
+
# init_restore_scripts
#
# Create the temporary files that hold the interface- and flow-restore
# scripts (globals script_interfaces / script_flows) and arrange for them
# to be deleted when the shell exits.
init_restore_scripts () {
    # Fail early if mktemp cannot create a file; otherwise later
    # redirections would silently write to an empty path.
    script_interfaces=`mktemp` || return 1
    script_flows=`mktemp` || return 1
    trap 'rm -f "${script_interfaces}" "${script_flows}"' 0
}
+
# force_reload_kmod
#
# Reload the Open vSwitch kernel module with as little forwarding
# disruption as practical: save flows and interface configuration,
# restart ovsdb-server and ovs-vswitchd, remove all datapaths and kernel
# modules, then restore the saved state.  Requires ovs-vswitchd to be
# managed by this script (OVS_VSWITCHD=yes, the default).
force_reload_kmod () {

    if test X"${OVS_VSWITCHD:-yes}" != Xyes; then
        log_failure_msg "Reloading of kmod without ovs-vswitchd is an error"
        exit 1
    fi

    ifaces=`internal_interfaces`
    action "Detected internal interfaces: $ifaces" true

    init_restore_scripts
    save_flows_if_required

    # Restart the database first, since a large database may take a
    # while to load, and we want to minimize forwarding disruption.
    stop_ovsdb
    start_ovsdb || return 1

    stop_forwarding

    # Without a saved interface configuration we could not restore
    # connectivity after the reload, so bail out and restart instead.
    if action "Saving interface configuration" save_interfaces; then
        :
    else
        log_warning_msg "Failed to save configuration, not replacing kernel module"
        start_forwarding
        add_managers
        exit 1
    fi
    chmod +x "$script_interfaces"

    for dp in `ovs-dpctl dump-dps`; do
        action "Removing datapath: $dp" ovs-dpctl del-dp "$dp"
    done

    # NOTE(review): the GRE modules are removed before unloading the OVS
    # modules -- presumably they would otherwise hold references that keep
    # the datapath module pinned; confirm against ovs-kmod-ctl.
    if test -e /sys/module/ip_gre; then
        action "Forcing removal of ip_gre module" rmmod ip_gre
    fi

    if test -e /sys/module/gre; then
        action "Forcing removal of gre module" rmmod gre
    fi

    ovs_kmod_ctl remove

    # Start vswitchd by asking it to wait till flow restore is finished.
    flow_restore_wait
    start_forwarding || return 1

    # Restore saved flows and inform vswitchd that we are done.
    restore_flows
    flow_restore_complete
    add_managers

    restore_interfaces

    "$datadir/scripts/ovs-check-dead-ifs"
}
+
+## ------- ##
+## restart ##
+## ------- ##
+
# restart
#
# Restart ovsdb-server and ovs-vswitchd, preserving installed flows
# across the restart when both daemons are currently running.
restart () {
    # Only save state when there is a running instance to save it from.
    if daemon_is_running ovsdb-server && daemon_is_running ovs-vswitchd; then
        init_restore_scripts
        if test X"${OVS_VSWITCHD:-yes}" = Xyes; then
            save_flows_if_required
        fi
    fi

    # Restart the database first, since a large database may take a
    # while to load, and we want to minimize forwarding disruption.
    stop_ovsdb
    start_ovsdb || return 1

    stop_forwarding

    # Start vswitchd by asking it to wait till flow restore is finished.
    flow_restore_wait
    start_forwarding || return 1

    # Restore saved flows and inform vswitchd that we are done.
    restore_flows
    flow_restore_complete
    add_managers
}