#include "connmgr.h"
#include "coverage.h"
#include "cmap.h"
+#include "lib/dpif-provider.h"
#include "dpif.h"
#include "openvswitch/dynamic-string.h"
#include "fail-open.h"
#include "tunnel.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"
+#include "lib/netdev-provider.h"
-#define MAX_QUEUE_LENGTH 512
#define UPCALL_MAX_BATCH 64
#define REVALIDATE_MAX_BATCH 50
COVERAGE_DEFINE(upcall_ukey_contention);
COVERAGE_DEFINE(upcall_ukey_replace);
COVERAGE_DEFINE(revalidate_missed_dp_flow);
+COVERAGE_DEFINE(upcall_flow_limit_hit);
+COVERAGE_DEFINE(upcall_flow_limit_kill);
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
* and possibly sets up a kernel flow as a cache. */
uint64_t conn_seq; /* Corresponds to 'dump_seq' when
conns[n_conns-1] was stored. */
size_t n_conns; /* Number of connections waiting. */
+
+ long long int offload_rebalance_time; /* Time of last offload rebalance */
};
enum upcall_type {
ofp_port_t ofp_in_port; /* OpenFlow in port, or OFPP_NONE. */
uint16_t mru; /* If !0, Maximum receive unit of
fragmented IP packet */
+ uint64_t hash; /* Packet hash as reported by the datapath, or 0 if none. */
enum upcall_type type; /* Type of the upcall. */
const struct nlattr *actions; /* Flow actions in DPIF_UC_ACTION Upcalls. */
uint32_t key_recirc_id; /* Non-zero if reference is held by the ukey. */
struct recirc_refs recircs; /* Action recirc IDs with references held. */
+
+#define OFFL_REBAL_INTVL_MSEC 3000 /* Dynamic offload rebalance interval. */
+ struct netdev *in_netdev; /* in_odp_port's netdev */
+ bool offloaded; /* True if flow is offloaded */
+ uint64_t flow_pps_rate; /* Packets-Per-Second rate */
+ long long int flow_time; /* last pps update time */
+ uint64_t flow_packets; /* #pkts seen in interval */
+ uint64_t flow_backlog_packets; /* #pkts carried over from the previous mode (offloaded or kernel) */
};
/* Datapath operation with optional ukey attached. */
static int process_upcall(struct udpif *, struct upcall *,
struct ofpbuf *odp_actions, struct flow_wildcards *);
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
-static void udpif_stop_threads(struct udpif *);
+static void udpif_stop_threads(struct udpif *, bool delete_flows);
static void udpif_start_threads(struct udpif *, size_t n_handlers,
size_t n_revalidators);
static void udpif_pause_revalidators(struct udpif *);
const ovs_u128 *ufid, const unsigned pmd_id);
static void upcall_uninit(struct upcall *);
+static void udpif_flow_rebalance(struct udpif *udpif);
+static int udpif_flow_program(struct udpif *udpif, struct udpif_key *ukey,
+ enum dpif_offload_type offload_type);
+static int udpif_flow_unprogram(struct udpif *udpif, struct udpif_key *ukey,
+ enum dpif_offload_type offload_type);
+
static upcall_callback upcall_cb;
static dp_purge_callback dp_purge_cb;
void
udpif_destroy(struct udpif *udpif)
{
- udpif_stop_threads(udpif);
+ udpif_stop_threads(udpif, false);
dpif_register_dp_purge_cb(udpif->dpif, NULL, udpif);
dpif_register_upcall_cb(udpif->dpif, NULL, udpif);
free(udpif);
}
-/* Stops the handler and revalidator threads, must be enclosed in
- * ovsrcu quiescent state unless when destroying udpif. */
+/* Stops the handler and revalidator threads.
+ *
+ * If 'delete_flows' is true, we delete ukeys and delete all flows from the
+ * datapath. This prevents double-counting stats for flows that would
+ * otherwise remain in the datapath when the threads are restarted later.
+ * If 'delete_flows' is false, we skip this step; that is appropriate if
+ * OVS is about to exit anyway and it is desirable to let existing network
+ * connections continue being forwarded afterward. */
static void
-udpif_stop_threads(struct udpif *udpif)
+udpif_stop_threads(struct udpif *udpif, bool delete_flows)
{
if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
size_t i;
+ /* Tell the threads to exit. */
latch_set(&udpif->exit_latch);
+ /* Wait for the threads to exit. Quiesce because this can take a long
+ * time. */
+ ovsrcu_quiesce_start();
for (i = 0; i < udpif->n_handlers; i++) {
- struct handler *handler = &udpif->handlers[i];
-
- xpthread_join(handler->thread, NULL);
+ xpthread_join(udpif->handlers[i].thread, NULL);
}
-
for (i = 0; i < udpif->n_revalidators; i++) {
xpthread_join(udpif->revalidators[i].thread, NULL);
}
-
dpif_disable_upcall(udpif->dpif);
+ ovsrcu_quiesce_end();
- for (i = 0; i < udpif->n_revalidators; i++) {
- struct revalidator *revalidator = &udpif->revalidators[i];
-
- /* Delete ukeys, and delete all flows from the datapath to prevent
- * double-counting stats. */
- revalidator_purge(revalidator);
+ if (delete_flows) {
+ for (i = 0; i < udpif->n_revalidators; i++) {
+ revalidator_purge(&udpif->revalidators[i]);
+ }
}
latch_poll(&udpif->exit_latch);
}
}
-/* Starts the handler and revalidator threads, must be enclosed in
- * ovsrcu quiescent state. */
+/* Starts the handler and revalidator threads. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers_,
size_t n_revalidators_)
{
if (udpif && n_handlers_ && n_revalidators_) {
+ /* Creating a thread can take a significant amount of time on some
+ * systems, even hundreds of milliseconds, so quiesce around it. */
+ ovsrcu_quiesce_start();
+
udpif->n_handlers = n_handlers_;
udpif->n_revalidators = n_revalidators_;
ovs_barrier_init(&udpif->pause_barrier, udpif->n_revalidators + 1);
udpif->reval_exit = false;
udpif->pause = false;
+ udpif->offload_rebalance_time = time_msec();
udpif->revalidators = xzalloc(udpif->n_revalidators
* sizeof *udpif->revalidators);
for (size_t i = 0; i < udpif->n_revalidators; i++) {
revalidator->thread = ovs_thread_create(
"revalidator", udpif_revalidator, revalidator);
}
+ ovsrcu_quiesce_end();
}
}
ovs_assert(udpif);
ovs_assert(n_handlers_ && n_revalidators_);
- ovsrcu_quiesce_start();
if (udpif->n_handlers != n_handlers_
|| udpif->n_revalidators != n_revalidators_) {
- udpif_stop_threads(udpif);
+ udpif_stop_threads(udpif, true);
}
if (!udpif->handlers && !udpif->revalidators) {
udpif_start_threads(udpif, n_handlers_, n_revalidators_);
}
- ovsrcu_quiesce_end();
-}
-
-/* Waits for all ongoing upcall translations to complete. This ensures that
- * there are no transient references to any removed ofprotos (or other
- * objects). In particular, this should be called after an ofproto is removed
- * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
-void
-udpif_synchronize(struct udpif *udpif)
-{
- /* This is stronger than necessary. It would be sufficient to ensure
- * (somehow) that each handler and revalidator thread had passed through
- * its main loop once. */
- size_t n_handlers_ = udpif->n_handlers;
- size_t n_revalidators_ = udpif->n_revalidators;
-
- ovsrcu_quiesce_start();
- udpif_stop_threads(udpif);
- udpif_start_threads(udpif, n_handlers_, n_revalidators_);
- ovsrcu_quiesce_end();
}
/* Notifies 'udpif' that something changed which may render previous
size_t n_handlers_ = udpif->n_handlers;
size_t n_revalidators_ = udpif->n_revalidators;
- ovsrcu_quiesce_start();
-
- udpif_stop_threads(udpif);
+ udpif_stop_threads(udpif, true);
dpif_flow_flush(udpif->dpif);
udpif_start_threads(udpif, n_handlers_, n_revalidators_);
-
- ovsrcu_quiesce_end();
}
/* Removes all flows from all datapaths. */
struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
struct upcall *upcall = &upcalls[n_upcalls];
struct flow *flow = &flows[n_upcalls];
- unsigned int mru;
+ unsigned int mru = 0;
+ uint64_t hash = 0;
int error;
ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
}
upcall->fitness = odp_flow_key_to_flow(dupcall->key, dupcall->key_len,
- flow);
+ flow, NULL);
if (upcall->fitness == ODP_FIT_ERROR) {
goto free_dupcall;
}
if (dupcall->mru) {
mru = nl_attr_get_u16(dupcall->mru);
- } else {
- mru = 0;
+ }
+
+ if (dupcall->hash) {
+ hash = nl_attr_get_u64(dupcall->hash);
}
error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
upcall->key = dupcall->key;
upcall->key_len = dupcall->key_len;
upcall->ufid = &dupcall->ufid;
+ upcall->hash = hash;
upcall->out_tun_key = dupcall->out_tun_key;
upcall->actions = dupcall->actions;
return n_upcalls;
}
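+/* Called from the revalidator's main loop. Kicks off a flow rebalance if
+ * at least OFFL_REBAL_INTVL_MSEC has elapsed since the previous rebalance
+ * and some netdev is out of offload resources (OOR). */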
+static void
+udpif_run_flow_rebalance(struct udpif *udpif)
+{
+ long long int now = time_msec();
+
+ /* Don't rebalance if OFFL_REBAL_INTVL_MSEC has not elapsed. */
+ if (now < udpif->offload_rebalance_time + OFFL_REBAL_INTVL_MSEC) {
+ return;
+ }
+
+ if (!netdev_any_oor()) {
+ return;
+ }
+
+ VLOG_DBG("Offload rebalance: Found OOR netdevs");
+ udpif->offload_rebalance_time = now;
+ udpif_flow_rebalance(udpif);
+}
+
static void *
udpif_revalidator(void *arg)
{
dpif_flow_dump_destroy(udpif->dump);
seq_change(udpif->dump_seq);
+ if (netdev_is_offload_rebalance_policy_enabled()) {
+ udpif_run_flow_rebalance(udpif);
+ }
duration = MAX(time_msec() - start_time, 1);
udpif->dump_duration = duration;
flow_limit /= duration / 1000;
} else if (duration > 1300) {
flow_limit = flow_limit * 3 / 4;
- } else if (duration < 1000 && n_flows > 2000
- && flow_limit < n_flows * 1000 / duration) {
+ } else if (duration < 1000 &&
+ flow_limit < n_flows * 1000 / duration) {
flow_limit += 1000;
}
flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
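+ /* For illustration: a dump that took 2600 ms divides the limit by
+ * 2600 / 1000 = 2, a 1500 ms dump trims it to 3/4, and a dump faster
+ * than 1 s grows it by 1000; the result is then clamped to
+ * [1000, ofproto_flow_limit]. */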
duration);
}
- poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
+ poll_timer_wait_until(start_time + MIN(ofproto_max_idle,
+ ofproto_max_revalidator));
seq_wait(udpif->reval_seq, last_reval_seq);
latch_wait(&udpif->exit_latch);
latch_wait(&udpif->pause_latch);
* initialized with at least 128 bytes of space. */
static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
- const struct flow *flow,
odp_port_t odp_in_port, ofp_port_t ofp_in_port,
struct ofpbuf *buf, uint32_t meter_id,
struct uuid *ofproto_uuid)
odp_port_t port;
uint32_t pid;
+ memset(&cookie, 0, sizeof cookie);
cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
cookie.ofp_in_port = ofp_in_port;
cookie.ofproto_uuid = *ofproto_uuid;
port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
? ODPP_NONE
: odp_in_port;
- pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
+ pid = dpif_port_get_pid(udpif->dpif, port);
size_t offset;
size_t ac_offset;
}
odp_put_userspace_action(pid, &cookie, sizeof cookie,
- ODPP_NONE, false, buf);
+ ODPP_NONE, false, buf, NULL);
if (meter_id != UINT32_MAX) {
nl_msg_end_nested(buf, ac_offset);
odp_actions->data, odp_actions->size);
} else {
/* upcall->put_actions already initialized by upcall_receive(). */
- compose_slow_path(udpif, &upcall->xout, upcall->flow,
+ compose_slow_path(udpif, &upcall->xout,
upcall->flow->in_port.odp_port, upcall->ofp_in_port,
&upcall->put_actions,
upcall->ofproto->up.slowpath_meter_id,
atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
if (udpif_get_n_flows(udpif) >= flow_limit) {
- VLOG_WARN_RL(&rl, "upcall: datapath flow limit reached");
+ COVERAGE_INC(upcall_flow_limit_hit);
+ VLOG_WARN_RL(&rl,
+ "upcall: datapath reached the dynamic limit of %u flows.",
+ flow_limit);
return false;
}
memset(&ipfix_actions, 0, sizeof ipfix_actions);
if (upcall->out_tun_key) {
- odp_tun_key_from_attr(upcall->out_tun_key, &output_tunnel_key);
+ odp_tun_key_from_attr(upcall->out_tun_key, &output_tunnel_key,
+ NULL);
}
actions_len = dpif_read_actions(udpif, upcall, flow,
: NULL),
am->pin.up.action_set_len = state->action_set_len,
am->pin.up.bridge = upcall->ofproto->uuid;
+ am->pin.up.odp_port = upcall->packet->md.in_port.odp_port;
}
/* We don't want to use the upcall 'flow', since it may be
flow_clear_conntrack(&frozen_flow);
}
- frozen_metadata_to_flow(&state->metadata, &frozen_flow);
+ frozen_metadata_to_flow(&upcall->ofproto->up, &state->metadata,
+ &frozen_flow);
flow_get_metadata(&frozen_flow, &am->pin.up.base.flow_metadata);
ofproto_dpif_send_async_msg(upcall->ofproto, am);
op->dop.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
op->dop.execute.probe = false;
op->dop.execute.mtu = upcall->mru;
+ op->dop.execute.hash = upcall->hash;
}
}
for (i = 0; i < n_ops; i++) {
opsp[n_opsp++] = &ops[i].dop;
}
- dpif_operate(udpif->dpif, opsp, n_opsp);
+ dpif_operate(udpif->dpif, opsp, n_opsp, DPIF_OFFLOAD_AUTO);
for (i = 0; i < n_ops; i++) {
struct udpif_key *ukey = ops[i].ukey;
ukey->state = UKEY_CREATED;
ukey->state_thread = ovsthread_id_self();
ukey->state_where = OVS_SOURCE_LOCATOR;
- ukey->created = time_msec();
+ ukey->created = ukey->flow_time = time_msec();
memset(&ukey->stats, 0, sizeof ukey->stats);
ukey->stats.used = used;
ukey->xcache = NULL;
+ ukey->offloaded = false;
+ ukey->in_netdev = NULL;
+ ukey->flow_packets = ukey->flow_backlog_packets = 0;
+
ukey->key_recirc_id = key_recirc_id;
recirc_refs_init(&ukey->recircs);
if (xout) {
}
reval_seq = seq_read(udpif->reval_seq) - 1; /* Ensure revalidation. */
- ofpbuf_use_const(&actions, &flow->actions, flow->actions_len);
+ ofpbuf_use_const(&actions, flow->actions, flow->actions_len);
*ukey = ukey_create__(flow->key, flow->key_len,
flow->mask, flow->mask_len, flow->ufid_present,
&flow->ufid, flow->pmd_id, &actions,
return true;
}
- if (udpif->dump_duration < 200) {
+ if (udpif->dump_duration < ofproto_max_revalidator / 2) {
/* We are likely to handle full revalidation for the flows. */
return true;
}
duration = now - used;
metric = duration / packets;
- if (metric < 200) {
- /* The flow is receiving more than ~5pps, so keep it. */
+ if (metric < 1000 / ofproto_min_revalidate_pps) {
+ /* The flow is receiving more than min-revalidate-pps, so keep it. */
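+ /* For example, with the default min-revalidate-pps of 5 (matching the
+ * old fixed 200 ms metric above), a flow must average at least one
+ * packet per 200 ms to be kept. */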
return true;
}
return false;
struct xlate_in xin;
int error;
- fitness = odp_flow_key_to_flow(key, len, &ctx->flow);
+ fitness = odp_flow_key_to_flow(key, len, &ctx->flow, NULL);
if (fitness == ODP_FIT_ERROR) {
return EINVAL;
}
struct ofproto_dpif *ofproto;
ofp_port_t ofp_in_port;
- ofproto = xlate_lookup_ofproto(udpif->backer, &ctx.flow, &ofp_in_port);
+ ofproto = xlate_lookup_ofproto(udpif->backer, &ctx.flow, &ofp_in_port,
+ NULL);
ofpbuf_clear(odp_actions);
goto exit;
}
- compose_slow_path(udpif, xoutp, &ctx.flow, ctx.flow.in_port.odp_port,
+ compose_slow_path(udpif, xoutp, ctx.flow.in_port.odp_port,
ofp_in_port, odp_actions,
ofproto->up.slowpath_meter_id, &ofproto->uuid);
}
- if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &ctx.flow)
+ if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &ctx.flow,
+ NULL)
== ODP_FIT_ERROR) {
goto exit;
}
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
const struct dpif_flow_stats *stats,
struct ofpbuf *odp_actions, uint64_t reval_seq,
- struct recirc_refs *recircs)
+ struct recirc_refs *recircs, bool offloaded)
OVS_REQUIRES(ukey->mutex)
{
bool need_revalidate = ukey->reval_seq != reval_seq;
/* Stats for deleted flows will be attributed upon flow deletion. Skip. */
if (result != UKEY_DELETE) {
- xlate_push_stats(ukey->xcache, &push);
+ xlate_push_stats(ukey->xcache, &push, offloaded);
ukey->stats = *stats;
ukey->reval_seq = reval_seq;
}
for (i = 0; i < n_ops; i++) {
opsp[i] = &ops[i].dop;
}
- dpif_operate(udpif->dpif, opsp, n_ops);
+ dpif_operate(udpif->dpif, opsp, n_ops, DPIF_OFFLOAD_AUTO);
for (i = 0; i < n_ops; i++) {
struct ukey_op *op = &ops[i];
if (op->ukey) {
ovs_mutex_lock(&op->ukey->mutex);
if (op->ukey->xcache) {
- xlate_push_stats(op->ukey->xcache, push);
+ xlate_push_stats(op->ukey->xcache, push, false);
ovs_mutex_unlock(&op->ukey->mutex);
continue;
}
}
}
+static void
+ukey_netdev_unref(struct udpif_key *ukey)
+{
+ if (!ukey->in_netdev) {
+ return;
+ }
+ netdev_close(ukey->in_netdev);
+ ukey->in_netdev = NULL;
+}
+
+/*
+ * Given a udpif_key, get its input port (netdev) by parsing the flow key
+ * attributes. A flow from a terse dump may not carry these attributes, so
+ * read them from the ukey, where they are always stored, and parse them to
+ * get the port info. Save the netdev reference in the udpif_key.
+ */
+static void
+ukey_to_flow_netdev(struct udpif *udpif, struct udpif_key *ukey)
+{
+ const char *dpif_type_str = dpif_normalize_type(dpif_type(udpif->dpif));
+ const struct nlattr *k;
+ unsigned int left;
+
+ /* Remove existing references to netdev */
+ ukey_netdev_unref(ukey);
+
+ /* Find the input port and get a reference to its netdev */
+ NL_ATTR_FOR_EACH (k, left, ukey->key, ukey->key_len) {
+ enum ovs_key_attr type = nl_attr_type(k);
+
+ if (type == OVS_KEY_ATTR_IN_PORT) {
+ ukey->in_netdev = netdev_ports_get(nl_attr_get_odp_port(k),
+ dpif_type_str);
+ } else if (type == OVS_KEY_ATTR_TUNNEL) {
+ struct flow_tnl tnl;
+ enum odp_key_fitness res;
+
+ if (ukey->in_netdev) {
+ netdev_close(ukey->in_netdev);
+ ukey->in_netdev = NULL;
+ }
+ res = odp_tun_key_from_attr(k, &tnl, NULL);
+ if (res != ODP_FIT_ERROR) {
+ ukey->in_netdev = flow_get_tunnel_netdev(&tnl);
+ break;
+ }
+ }
+ }
+}
+
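+/* Returns the number of packets 'ukey' has seen since its last pps update,
+ * counting the packets carried over (backlog) from the previous datapath
+ * mode (offloaded or kernel). */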
+static uint64_t
+udpif_flow_packet_delta(struct udpif_key *ukey, const struct dpif_flow *f)
+{
+ return f->stats.n_packets + ukey->flow_backlog_packets -
+ ukey->flow_packets;
+}
+
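+/* Returns the time, in whole seconds, since 'ukey' last updated its
+ * pps statistics. */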
+static long long int
+udpif_flow_time_delta(struct udpif *udpif, struct udpif_key *ukey)
+{
+ return (udpif->dpif->current_ms - ukey->flow_time) / 1000;
+}
+
+/*
+ * Save backlog packet count while switching modes
+ * between offloaded and kernel datapaths.
+ */
+static void
+udpif_set_ukey_backlog_packets(struct udpif_key *ukey)
+{
+ ukey->flow_backlog_packets = ukey->flow_packets;
+}
+
+/* Gather the pps-rate for the given dpif_flow and save it in its ukey. */
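+/* For illustration: with the 3000 ms rebalance interval, a flow that saw
+ * 6000 new packets (including backlog) over the last 3 seconds gets a
+ * flow_pps_rate of 6000 / 3 = 2000. The interval check below also
+ * guarantees a nonzero divisor in udpif_flow_time_delta(). */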
+static void
+udpif_update_flow_pps(struct udpif *udpif, struct udpif_key *ukey,
+ const struct dpif_flow *f)
+{
+ uint64_t pps;
+
+ /* Update pps-rate only when we are close to rebalance interval */
+ if (udpif->dpif->current_ms - ukey->flow_time < OFFL_REBAL_INTVL_MSEC) {
+ return;
+ }
+
+ ukey->offloaded = f->attrs.offloaded;
+ pps = udpif_flow_packet_delta(ukey, f) /
+ udpif_flow_time_delta(udpif, ukey);
+ ukey->flow_pps_rate = pps;
+ ukey->flow_packets = ukey->flow_backlog_packets + f->stats.n_packets;
+ ukey->flow_time = udpif->dpif->current_ms;
+}
+
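+/* Returns an estimate of the 'used' time for a dumped flow that did not
+ * report one. For non-terse dumps, fall back to the flow's creation time.
+ * For terse dumps, also store the estimate in 'stats': if the packet count
+ * advanced since the last revalidation, assume the flow was used just now;
+ * otherwise reuse the previous estimate, or the creation time if there is
+ * none. */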
+static long long int
+udpif_update_used(struct udpif *udpif, struct udpif_key *ukey,
+ struct dpif_flow_stats *stats)
+ OVS_REQUIRES(ukey->mutex)
+{
+ if (!udpif->dump->terse) {
+ return ukey->created;
+ }
+
+ if (stats->n_packets > ukey->stats.n_packets) {
+ stats->used = udpif->dpif->current_ms;
+ } else if (ukey->stats.used) {
+ stats->used = ukey->stats.used;
+ } else {
+ stats->used = ukey->created;
+ }
+ return stats->used;
+}
+
static void
revalidate(struct revalidator *revalidator)
{
struct udpif *udpif = revalidator->udpif;
struct dpif_flow_dump_thread *dump_thread;
uint64_t dump_seq, reval_seq;
+ bool kill_warn_print = true;
unsigned int flow_limit;
dump_seq = seq_read(udpif->dump_seq);
long long int max_idle;
long long int now;
+ size_t kill_all_limit;
size_t n_dp_flows;
bool kill_them_all;
* datapath flows, so we will recover before all the flows are
* gone.) */
n_dp_flows = udpif_get_n_flows(udpif);
- kill_them_all = n_dp_flows > flow_limit * 2;
+ if (n_dp_flows >= flow_limit) {
+ COVERAGE_INC(upcall_flow_limit_hit);
+ }
+
+ kill_them_all = false;
+ kill_all_limit = flow_limit * 2;
+ if (OVS_UNLIKELY(n_dp_flows > kill_all_limit)) {
+ static struct vlog_rate_limit rlem = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ kill_them_all = true;
+ COVERAGE_INC(upcall_flow_limit_kill);
+ if (kill_warn_print) {
+ kill_warn_print = false;
+ VLOG_WARN_RL(&rlem,
+ "Number of datapath flows (%"PRIuSIZE") exceeds twice the "
+ "current dynamic flow limit (emergency threshold %"PRIuSIZE"). "
+ "Starting to delete flows unconditionally "
+ "as an emergency measure.", n_dp_flows, kill_all_limit);
+ }
+ }
+
max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
+ udpif->dpif->current_ms = time_msec();
for (f = flows; f < &flows[n_dumped]; f++) {
long long int used = f->stats.used;
struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
+ struct dpif_flow_stats stats = f->stats;
enum reval_result result;
struct udpif_key *ukey;
bool already_dumped;
}
if (!used) {
- used = ukey->created;
+ used = udpif_update_used(udpif, ukey, &stats);
}
if (kill_them_all || (used && used < now - max_idle)) {
result = UKEY_DELETE;
} else {
- result = revalidate_ukey(udpif, ukey, &f->stats, &odp_actions,
- reval_seq, &recircs);
+ result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
+ reval_seq, &recircs,
+ f->attrs.offloaded);
}
ukey->dump_seq = dump_seq;
+ if (netdev_is_offload_rebalance_policy_enabled() &&
+ result != UKEY_DELETE) {
+ udpif_update_flow_pps(udpif, ukey, f);
+ }
+
if (result != UKEY_KEEP) {
/* Takes ownership of 'recircs'. */
reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
COVERAGE_INC(revalidate_missed_dp_flow);
memset(&stats, 0, sizeof stats);
result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
- reval_seq, &recircs);
+ reval_seq, &recircs, false);
}
if (result != UKEY_KEEP) {
/* Clears 'recircs' if filled by revalidate_ukey(). */
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
struct ds ds = DS_EMPTY_INITIALIZER;
+ uint64_t n_offloaded_flows;
struct udpif *udpif;
LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
ds_put_format(&ds, " flows : (current %lu)"
" (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
+ if (!dpif_get_n_offloaded_flows(udpif->dpif, &n_offloaded_flows)) {
+ ds_put_format(&ds, " offloaded flows : %"PRIu64"\n",
+ n_offloaded_flows);
+ }
ds_put_format(&ds, " dump duration : %lldms\n", udpif->dump_duration);
ds_put_format(&ds, " ufid enabled : ");
if (ufid_enabled) {
}
unixctl_command_reply(conn, "");
}
+
+/* Flows are sorted in the following order:
+ * netdev, flow state (offloaded flows before pending ones), and
+ * flow_pps_rate: ascending for offloaded flows, descending for pending
+ * flows, so that the least-used offloaded flow lines up with the busiest
+ * pending flow on each netdev.
+ */
+static int
+flow_compare_rebalance(const void *elem1, const void *elem2)
+{
+ const struct udpif_key *f1 = *(struct udpif_key **)elem1;
+ const struct udpif_key *f2 = *(struct udpif_key **)elem2;
+ int64_t diff;
+
+ if (f1->in_netdev < f2->in_netdev) {
+ return -1;
+ } else if (f1->in_netdev > f2->in_netdev) {
+ return 1;
+ }
+
+ if (f1->offloaded != f2->offloaded) {
+ return f2->offloaded - f1->offloaded;
+ }
+
+ diff = (f1->offloaded == true) ?
+ f1->flow_pps_rate - f2->flow_pps_rate :
+ f2->flow_pps_rate - f1->flow_pps_rate;
+
+ return (diff < 0) ? -1 : 1;
+}
+
+/* Insert flows from pending array during rebalancing */
+static int
+rebalance_insert_pending(struct udpif *udpif, struct udpif_key **pending_flows,
+ int pending_count, int insert_count,
+ uint64_t rate_threshold)
+{
+ int count = 0;
+
+ for (int i = 0; i < pending_count; i++) {
+ struct udpif_key *flow = pending_flows[i];
+ int err;
+
+ /* Stop offloading pending flows if the insert count is
+ * reached and the flow rate is less than the threshold. */
+ if (count >= insert_count && flow->flow_pps_rate < rate_threshold) {
+ break;
+ }
+
+ /* Offload the flow to netdev */
+ err = udpif_flow_program(udpif, flow, DPIF_OFFLOAD_ALWAYS);
+
+ if (err == ENOSPC) {
+ /* Stop if we are out of resources */
+ break;
+ }
+
+ if (err) {
+ continue;
+ }
+
+ /* Offload succeeded; delete it from the kernel datapath */
+ udpif_flow_unprogram(udpif, flow, DPIF_OFFLOAD_NEVER);
+
+ /* Change the state of the flow, adjust dpif counters */
+ flow->offloaded = true;
+
+ udpif_set_ukey_backlog_packets(flow);
+ count++;
+ }
+
+ return count;
+}
+
+/* Remove flows from offloaded array during rebalancing */
+static void
+rebalance_remove_offloaded(struct udpif *udpif,
+ struct udpif_key **offloaded_flows,
+ int offload_count)
+{
+ for (int i = 0; i < offload_count; i++) {
+ struct udpif_key *flow = offloaded_flows[i];
+ int err;
+
+ /* Install the flow into kernel path first */
+ err = udpif_flow_program(udpif, flow, DPIF_OFFLOAD_NEVER);
+ if (err) {
+ continue;
+ }
+
+ /* Success; now remove offloaded flow from netdev */
+ err = udpif_flow_unprogram(udpif, flow, DPIF_OFFLOAD_ALWAYS);
+ if (err) {
+ udpif_flow_unprogram(udpif, flow, DPIF_OFFLOAD_NEVER);
+ continue;
+ }
+ udpif_set_ukey_backlog_packets(flow);
+ flow->offloaded = false;
+ }
+}
+
+/*
+ * Rebalance offloaded flows on a netdev that's in OOR state.
+ *
+ * The rebalancing is done in two phases. In the first phase, we check if
+ * the pending flows can be offloaded (if some resources became available
+ * in the meantime) by trying to offload each pending flow. If all pending
+ * flows get successfully offloaded, the OOR state is cleared on the netdev
+ * and there's nothing to rebalance.
+ *
+ * If some of the pending flows could not be offloaded, i.e., we still see
+ * the OOR error, then we move to the second phase of rebalancing. In this
+ * phase, the rebalancer compares the pps-rate of the offloaded flow with
+ * the least pps-rate against that of the pending flow with the highest
+ * pps-rate from their respective sorted arrays. If the pps-rate of the
+ * offloaded flow is less than the pps-rate of the pending flow, it deletes
+ * the offloaded flow from the HW/netdev, adds it to the kernel datapath,
+ * and then offloads the pending flow to the HW/netdev. This process is
+ * repeated for every pair of offloaded and pending flows in the ordered
+ * lists. The process stops when we encounter an offloaded flow that has a
+ * higher pps-rate than the corresponding pending flow. The entire
+ * rebalancing process is repeated in the next iteration.
+ */
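+/*
+ * Worked example with hypothetical pps-rates: offloaded flows sorted
+ * ascending {10, 50, 90} and pending flows sorted descending {100, 40}.
+ * Phase 2 pairs (10, 100): since 100 > 10, that pair is churned; the next
+ * pair (50, 40) stops the scan because 40 <= 50. The 10-pps flow moves
+ * back to the kernel datapath and the 100-pps flow is offloaded in its
+ * place.
+ */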
+static bool
+rebalance_device(struct udpif *udpif, struct udpif_key **offloaded_flows,
+ int offload_count, struct udpif_key **pending_flows,
+ int pending_count)
+{
+ /* Phase 1: try to offload all pending flows. */
+ int num_inserted = rebalance_insert_pending(udpif, pending_flows,
+ pending_count, pending_count,
+ 0);
+ if (num_inserted) {
+ VLOG_DBG("Offload rebalance: Phase1: inserted %d pending flows",
+ num_inserted);
+ }
+
+ /* Adjust pending array */
+ pending_flows = &pending_flows[num_inserted];
+ pending_count -= num_inserted;
+
+ if (!pending_count) {
+ /*
+ * Successfully offloaded all pending flows. The device
+ * is no longer in OOR state; done rebalancing this device.
+ */
+ return false;
+ }
+
+ /*
+ * Phase 2; determine how many offloaded flows to churn.
+ */
+#define OFFL_REBAL_MAX_CHURN 1024
+ int churn_count = 0;
+ while (churn_count < OFFL_REBAL_MAX_CHURN && churn_count < offload_count
+ && churn_count < pending_count) {
+ if (pending_flows[churn_count]->flow_pps_rate <=
+ offloaded_flows[churn_count]->flow_pps_rate) {
+ break;
+ }
+ churn_count++;
+ }
+
+ /* Bail early if nothing to churn; the device stays in OOR state. */
+ if (!churn_count) {
+ return true;
+ }
+
+ VLOG_DBG("Offload rebalance: Phase2: removing %d offloaded flows",
+ churn_count);
+
+ /* Remove offloaded flows */
+ rebalance_remove_offloaded(udpif, offloaded_flows, churn_count);
+
+ /* Adjust offloaded array */
+ offloaded_flows = &offloaded_flows[churn_count];
+ offload_count -= churn_count;
+
+ /* Replace offloaded flows with pending flows */
+ num_inserted = rebalance_insert_pending(udpif, pending_flows,
+ pending_count, churn_count,
+ offload_count ?
+ offloaded_flows[0]->flow_pps_rate :
+ 0);
+ if (num_inserted) {
+ VLOG_DBG("Offload rebalance: Phase2: inserted %d pending flows",
+ num_inserted);
+ }
+
+ return true;
+}
+
+static struct udpif_key **
+udpif_add_oor_flows(struct udpif_key **sort_flows, size_t *total_flow_count,
+ size_t *alloc_flow_count, struct udpif_key *ukey)
+{
+ if (*total_flow_count >= *alloc_flow_count) {
+ sort_flows = x2nrealloc(sort_flows, alloc_flow_count, sizeof ukey);
+ }
+ sort_flows[(*total_flow_count)++] = ukey;
+ return sort_flows;
+}
+
+/*
+ * Build sort_flows[] initially with flows that
+ * reference an 'OOR' netdev as their input port.
+ */
+static struct udpif_key **
+udpif_build_oor_flows(struct udpif_key **sort_flows, size_t *total_flow_count,
+ size_t *alloc_flow_count, struct udpif_key *ukey,
+ int *oor_netdev_count)
+{
+ struct netdev *netdev;
+ int count;
+
+ /* Input netdev must be available for the flow */
+ netdev = ukey->in_netdev;
+ if (!netdev) {
+ return sort_flows;
+ }
+
+ /* Is the in-netdev for this flow in OOR state? */
+ if (!netdev_get_hw_info(netdev, HW_INFO_TYPE_OOR)) {
+ ukey_netdev_unref(ukey);
+ return sort_flows;
+ }
+
+ /* Add the flow to sort_flows[] */
+ sort_flows = udpif_add_oor_flows(sort_flows, total_flow_count,
+ alloc_flow_count, ukey);
+ if (ukey->offloaded) {
+ count = netdev_get_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT);
+ ovs_assert(count >= 0);
+ if (count++ == 0) {
+ (*oor_netdev_count)++;
+ }
+ netdev_set_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT, count);
+ } else {
+ count = netdev_get_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT);
+ ovs_assert(count >= 0);
+ netdev_set_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT, ++count);
+ }
+
+ return sort_flows;
+}
+
+/*
+ * Rebalance offloaded flows on HW netdevs that are in OOR state.
+ */
+static void
+udpif_flow_rebalance(struct udpif *udpif)
+{
+ struct udpif_key **sort_flows = NULL;
+ size_t alloc_flow_count = 0;
+ size_t total_flow_count = 0;
+ int oor_netdev_count = 0;
+ int offload_index = 0;
+ int pending_index;
+
+ /* Collect flows (offloaded and pending) that reference OOR netdevs */
+ for (size_t i = 0; i < N_UMAPS; i++) {
+ struct udpif_key *ukey;
+ struct umap *umap = &udpif->ukeys[i];
+
+ CMAP_FOR_EACH (ukey, cmap_node, &umap->cmap) {
+ ukey_to_flow_netdev(udpif, ukey);
+ sort_flows = udpif_build_oor_flows(sort_flows, &total_flow_count,
+ &alloc_flow_count, ukey,
+ &oor_netdev_count);
+ }
+ }
+
+ /* Sort flows by OOR netdevs, state (offloaded/pending) and pps-rate */
+ qsort(sort_flows, total_flow_count, sizeof(struct udpif_key *),
+ flow_compare_rebalance);
+
+ /*
+ * We now have flows referencing OOR netdevs, that are sorted. We also
+ * have a count of offloaded and pending flows on each of the netdevs
+ * that are in OOR state. Now rebalance each oor-netdev.
+ */
+ while (oor_netdev_count) {
+ struct netdev *netdev;
+ int offload_count;
+ int pending_count;
+ bool oor;
+
+ netdev = sort_flows[offload_index]->in_netdev;
+ ovs_assert(netdev_get_hw_info(netdev, HW_INFO_TYPE_OOR) == true);
+ VLOG_DBG("Offload rebalance: netdev: %s is OOR", netdev->name);
+
+ offload_count = netdev_get_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT);
+ pending_count = netdev_get_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT);
+ pending_index = offload_index + offload_count;
+
+ oor = rebalance_device(udpif,
+ &sort_flows[offload_index], offload_count,
+ &sort_flows[pending_index], pending_count);
+ netdev_set_hw_info(netdev, HW_INFO_TYPE_OOR, oor);
+
+ offload_index = pending_index + pending_count;
+ netdev_set_hw_info(netdev, HW_INFO_TYPE_OFFL_COUNT, 0);
+ netdev_set_hw_info(netdev, HW_INFO_TYPE_PEND_COUNT, 0);
+ oor_netdev_count--;
+ }
+
+ for (size_t i = 0; i < total_flow_count; i++) {
+ struct udpif_key *ukey = sort_flows[i];
+ ukey_netdev_unref(ukey);
+ }
+ free(sort_flows);
+}
+
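+/* Installs the datapath flow for 'ukey', honoring 'offload_type'
+ * (e.g. DPIF_OFFLOAD_ALWAYS to offload to the HW/netdev only,
+ * DPIF_OFFLOAD_NEVER to install in the kernel datapath only). Returns 0
+ * on success, otherwise a positive errno value. */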
+static int
+udpif_flow_program(struct udpif *udpif, struct udpif_key *ukey,
+ enum dpif_offload_type offload_type)
+{
+ struct dpif_op *opsp;
+ struct ukey_op uop;
+
+ opsp = &uop.dop;
+ put_op_init(&uop, ukey, DPIF_FP_CREATE);
+ dpif_operate(udpif->dpif, &opsp, 1, offload_type);
+
+ return opsp->error;
+}
+
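+/* Removes the datapath flow for 'ukey', honoring 'offload_type'. Returns 0
+ * on success, otherwise a positive errno value. */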
+static int
+udpif_flow_unprogram(struct udpif *udpif, struct udpif_key *ukey,
+ enum dpif_offload_type offload_type)
+{
+ struct dpif_op *opsp;
+ struct ukey_op uop;
+
+ opsp = &uop.dop;
+ delete_op_init(udpif, &uop, ukey);
+ dpif_operate(udpif->dpif, &opsp, 1, offload_type);
+
+ return opsp->error;
+}