/*
- * Copyright (c) 2008-2017 Nicira, Inc.
+ * Copyright (c) 2008-2018 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "fat-rwlock.h"
#include "flow.h"
#include "netdev-linux.h"
+#include "netdev-offload.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "netdev.h"
#include "odp-util.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/flow.h"
+#include "openvswitch/hmap.h"
#include "openvswitch/match.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/poll-loop.h"
#include "openvswitch/shash.h"
+#include "openvswitch/thread.h"
#include "openvswitch/vlog.h"
#include "packets.h"
#include "random.h"
#define FLOW_DUMP_MAX_BATCH 50
#define OPERATE_MAX_OPS 50
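+/* EPOLLEXCLUSIVE appeared in Linux 4.5; define it here so the code also
+ * builds against older kernel headers.  On kernels that predate the flag,
+ * the unknown bit is expected to be ignored, so epoll simply wakes every
+ * waiter as before. */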
+#ifndef EPOLLEXCLUSIVE
+#define EPOLLEXCLUSIVE (1u << 28)
+#endif
+
struct dpif_netlink_dp {
/* Generic Netlink header. */
uint8_t cmd;
static int dpif_netlink_dp_get(const struct dpif *,
struct dpif_netlink_dp *reply,
struct ofpbuf **bufp);
+static int
+dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features);
struct dpif_netlink_flow {
/* Generic Netlink header. */
struct ofpbuf **bufp);
static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
struct dpif_flow_stats *);
-static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
+static void dpif_netlink_flow_to_dpif_flow(struct dpif_flow *,
const struct dpif_netlink_flow *);
/* One of the dpif channels between the kernel and userspace. */
#endif
struct dpif_handler {
- struct dpif_channel *channels;/* Array of channels for each handler. */
struct epoll_event *epoll_events;
int epoll_fd; /* epoll fd that includes channel socks. */
int n_events; /* Num events returned by epoll_wait(). */
struct dpif_netlink {
struct dpif dpif;
int dp_ifindex;
+ uint32_t user_features;
/* Upcall messages. */
struct fat_rwlock upcall_lock;
struct dpif_handler *handlers;
uint32_t n_handlers; /* Num of upcall handlers. */
+ struct dpif_channel *channels; /* Array of channels for each port. */
-    int uc_array_size;           /* Size of 'handler->channels' and */
-                                 /* 'handler->epoll_events'. */
+    int uc_array_size;           /* Size of 'channels' and */
+                                 /* 'handler->epoll_events'. */
static int ovs_vport_family;
static int ovs_flow_family;
static int ovs_packet_family;
+static int ovs_meter_family;
+static int ovs_ct_limit_family;
/* Generic Netlink multicast groups for OVS.
*
static int dpif_netlink_init(void);
static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
- odp_port_t port_no, uint32_t hash);
+ odp_port_t port_no);
static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
static int dpif_netlink_refresh_channels(struct dpif_netlink *,
uint32_t n_handlers);
odp_port_t port_no, const char *port_name,
struct dpif_port *dpif_port);
+static int
+create_nl_sock(struct dpif_netlink *dpif OVS_UNUSED, struct nl_sock **sockp)
+ OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+#ifndef _WIN32
+ return nl_sock_create(NETLINK_GENERIC, sockp);
+#else
+    /* Pick a netlink socket to use in a round-robin fashion from the
+     * first handler's pool of sockets. */
+ struct dpif_handler *handler = &dpif->handlers[0];
+ struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
+ size_t index = handler->last_used_pool_idx;
+
+ /* A pool of sockets is allocated when the handler is initialized. */
+ if (sock_pool == NULL) {
+ *sockp = NULL;
+ return EINVAL;
+ }
+
+ ovs_assert(index < VPORT_SOCK_POOL_SIZE);
+ *sockp = sock_pool[index].nl_sock;
+ ovs_assert(*sockp);
+ index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
+ handler->last_used_pool_idx = index;
+ return 0;
+#endif
+}
+
+static void
+close_nl_sock(struct nl_sock *sock)
+{
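+    /* On Windows the socket is borrowed from the per-handler pool and is
+     * reused, so there is nothing to destroy here. */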
+#ifndef _WIN32
+ nl_sock_destroy(sock);
+#endif
+}
+
static struct dpif_netlink *
dpif_netlink_cast(const struct dpif *dpif)
{
/* Create or look up datapath. */
dpif_netlink_dp_init(&dp_request);
+ upcall_pid = 0;
+ dp_request.upcall_pid = &upcall_pid;
+ dp_request.name = name;
+
if (create) {
dp_request.cmd = OVS_DP_CMD_NEW;
- upcall_pid = 0;
- dp_request.upcall_pid = &upcall_pid;
} else {
+ dp_request.cmd = OVS_DP_CMD_GET;
+
+ error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
+ if (error) {
+ return error;
+ }
+ dp_request.user_features = dp.user_features;
+ ofpbuf_delete(buf);
+
/* Use OVS_DP_CMD_SET to report user features */
dp_request.cmd = OVS_DP_CMD_SET;
}
- dp_request.name = name;
+
dp_request.user_features |= OVS_DP_F_UNALIGNED;
dp_request.user_features |= OVS_DP_F_VPORT_PIDS;
error = dpif_netlink_dp_transact(&dp_request, &dp, &buf);
}
error = open_dpif(&dp, dpifp);
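+    /* Best effort: ask the kernel to share recirculation ids with TC.  The
+     * return value is deliberately ignored; older kernels lack the feature
+     * and the corresponding user_features bit simply stays clear. */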
+ dpif_netlink_set_features(*dpifp, OVS_DP_F_TC_RECIRC_SHARING);
ofpbuf_delete(buf);
+
return error;
}
dp->dp_ifindex, dp->dp_ifindex);
dpif->dp_ifindex = dp->dp_ifindex;
+ dpif->user_features = dp->user_features;
*dpifp = &dpif->dpif;
return 0;
}
-/* Destroys the netlink sockets pointed by the elements in 'socksp'
- * and frees the 'socksp'. */
-static void
-vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
-{
- size_t i;
-
- for (i = 0; i < n_socks; i++) {
- nl_sock_destroy(socksp[i]);
- }
-
- free(socksp);
-}
-
-/* Creates an array of netlink sockets. Returns an array of the
- * corresponding pointers. Records the error in 'error'. */
-static struct nl_sock **
-vport_create_socksp__(uint32_t n_socks, int *error)
-{
- struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
- size_t i;
-
- for (i = 0; i < n_socks; i++) {
- *error = nl_sock_create(NETLINK_GENERIC, &socksp[i]);
- if (*error) {
- goto error;
- }
- }
-
- return socksp;
-
-error:
- vport_del_socksp__(socksp, n_socks);
-
- return NULL;
-}
-
#ifdef _WIN32
static void
vport_delete_sock_pool(struct dpif_handler *handler)
vport_delete_sock_pool(handler);
return error;
}
-
-/* Returns an array pointers to netlink sockets. The sockets are picked from a
- * pool. Records the error in 'error'. */
-static struct nl_sock **
-vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
- OVS_REQ_WRLOCK(dpif->upcall_lock)
-{
- uint32_t n_socks = dpif->n_handlers;
- struct nl_sock **socksp;
- size_t i;
-
- ovs_assert(n_socks <= 1);
- socksp = xzalloc(n_socks * sizeof *socksp);
-
- /* Pick netlink sockets to use in a round-robin fashion from each
- * handler's pool of sockets. */
- for (i = 0; i < n_socks; i++) {
- struct dpif_handler *handler = &dpif->handlers[i];
- struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
- size_t index = handler->last_used_pool_idx;
-
- /* A pool of sockets is allocated when the handler is initialized. */
- if (sock_pool == NULL) {
- free(socksp);
- *error = EINVAL;
- return NULL;
- }
-
- ovs_assert(index < VPORT_SOCK_POOL_SIZE);
- socksp[i] = sock_pool[index].nl_sock;
- ovs_assert(socksp[i]);
- index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
- handler->last_used_pool_idx = index;
- }
-
- return socksp;
-}
-
-static void
-vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
-{
- free(socksp);
-}
#endif /* _WIN32 */
-static struct nl_sock **
-vport_create_socksp(struct dpif_netlink *dpif, int *error)
-{
-#ifdef _WIN32
- return vport_create_socksp_windows(dpif, error);
-#else
- return vport_create_socksp__(dpif->n_handlers, error);
-#endif
-}
-
-static void
-vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
-{
-#ifdef _WIN32
- vport_del_socksp_windows(dpif, socksp);
-#else
- vport_del_socksp__(socksp, dpif->n_handlers);
-#endif
-}
-
-/* Given the array of pointers to netlink sockets 'socksp', returns
- * the array of corresponding pids. If the 'socksp' is NULL, returns
- * a single-element array of value 0. */
-static uint32_t *
-vport_socksp_to_pids(struct nl_sock **socksp, uint32_t n_socks)
-{
- uint32_t *pids;
-
- if (!socksp) {
- pids = xzalloc(sizeof *pids);
- } else {
- size_t i;
-
- pids = xzalloc(n_socks * sizeof *pids);
- for (i = 0; i < n_socks; i++) {
- pids[i] = nl_sock_pid(socksp[i]);
- }
- }
-
- return pids;
-}
-
-/* Given the port number 'port_idx', extracts the pids of netlink sockets
- * associated to the port and assigns it to 'upcall_pids'. */
+/* Given the port number 'port_idx', extracts the pid of the netlink socket
+ * associated with the port and assigns it to 'upcall_pid'. */
static bool
-vport_get_pids(struct dpif_netlink *dpif, uint32_t port_idx,
- uint32_t **upcall_pids)
+vport_get_pid(struct dpif_netlink *dpif, uint32_t port_idx,
+ uint32_t *upcall_pid)
{
- uint32_t *pids;
- size_t i;
-
/* Since the nl_sock can only be assigned in either all
- * or none "dpif->handlers" channels, the following check
+ * or none "dpif" channels, the following check
* would suffice. */
- if (!dpif->handlers[0].channels[port_idx].sock) {
+ if (!dpif->channels[port_idx].sock) {
return false;
}
ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
- pids = xzalloc(dpif->n_handlers * sizeof *pids);
-
- for (i = 0; i < dpif->n_handlers; i++) {
- pids[i] = nl_sock_pid(dpif->handlers[i].channels[port_idx].sock);
- }
-
- *upcall_pids = pids;
+ *upcall_pid = nl_sock_pid(dpif->channels[port_idx].sock);
return true;
}
static int
-vport_add_channels(struct dpif_netlink *dpif, odp_port_t port_no,
- struct nl_sock **socksp)
+vport_add_channel(struct dpif_netlink *dpif, odp_port_t port_no,
+ struct nl_sock *sock)
{
struct epoll_event event;
uint32_t port_idx = odp_to_u32(port_no);
- size_t i, j;
+ size_t i;
int error;
if (dpif->handlers == NULL) {
+ close_nl_sock(sock);
return 0;
}
/* We assume that the datapath densely chooses port numbers, which can
* therefore be used as an index into 'channels' and 'epoll_events' of
- * 'dpif->handler'. */
+ * 'dpif'. */
if (port_idx >= dpif->uc_array_size) {
uint32_t new_size = port_idx + 1;
return EFBIG;
}
- for (i = 0; i < dpif->n_handlers; i++) {
- struct dpif_handler *handler = &dpif->handlers[i];
+ dpif->channels = xrealloc(dpif->channels,
+ new_size * sizeof *dpif->channels);
- handler->channels = xrealloc(handler->channels,
- new_size * sizeof *handler->channels);
+ for (i = dpif->uc_array_size; i < new_size; i++) {
+ dpif->channels[i].sock = NULL;
+ }
- for (j = dpif->uc_array_size; j < new_size; j++) {
- handler->channels[j].sock = NULL;
- }
+ for (i = 0; i < dpif->n_handlers; i++) {
+ struct dpif_handler *handler = &dpif->handlers[i];
handler->epoll_events = xrealloc(handler->epoll_events,
new_size * sizeof *handler->epoll_events);
}
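+    /* One socket per port is now shared by all handler threads; with
+     * EPOLLEXCLUSIVE (where available) each upcall wakes up only one of the
+     * epoll sets instead of all of them. */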
memset(&event, 0, sizeof event);
- event.events = EPOLLIN;
+ event.events = EPOLLIN | EPOLLEXCLUSIVE;
event.data.u32 = port_idx;
for (i = 0; i < dpif->n_handlers; i++) {
struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
- if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
+ if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(sock),
&event) < 0) {
error = errno;
goto error;
}
#endif
- dpif->handlers[i].channels[port_idx].sock = socksp[i];
- dpif->handlers[i].channels[port_idx].last_poll = LLONG_MIN;
}
+ dpif->channels[port_idx].sock = sock;
+ dpif->channels[port_idx].last_poll = LLONG_MIN;
return 0;
error:
- for (j = 0; j < i; j++) {
#ifndef _WIN32
- epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
- nl_sock_fd(socksp[j]), NULL);
-#endif
- dpif->handlers[j].channels[port_idx].sock = NULL;
+ while (i--) {
+ epoll_ctl(dpif->handlers[i].epoll_fd, EPOLL_CTL_DEL,
+ nl_sock_fd(sock), NULL);
}
+#endif
+ dpif->channels[port_idx].sock = NULL;
return error;
}
uint32_t port_idx = odp_to_u32(port_no);
size_t i;
- if (!dpif->handlers || port_idx >= dpif->uc_array_size) {
- return;
- }
-
- /* Since the sock can only be assigned in either all or none
- * of "dpif->handlers" channels, the following check would
- * suffice. */
- if (!dpif->handlers[0].channels[port_idx].sock) {
+ if (!dpif->handlers || port_idx >= dpif->uc_array_size
+ || !dpif->channels[port_idx].sock) {
return;
}
struct dpif_handler *handler = &dpif->handlers[i];
#ifndef _WIN32
epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
- nl_sock_fd(handler->channels[port_idx].sock), NULL);
- nl_sock_destroy(handler->channels[port_idx].sock);
+ nl_sock_fd(dpif->channels[port_idx].sock), NULL);
#endif
- handler->channels[port_idx].sock = NULL;
handler->event_offset = handler->n_events = 0;
}
+#ifndef _WIN32
+ nl_sock_destroy(dpif->channels[port_idx].sock);
+#endif
+ dpif->channels[port_idx].sock = NULL;
}
static void
struct dpif_netlink_vport vport_request;
uint32_t upcall_pids = 0;
- /* Since the sock can only be assigned in either all or none
- * of "dpif->handlers" channels, the following check would
- * suffice. */
- if (!dpif->handlers[0].channels[i].sock) {
+ if (!dpif->channels[i].sock) {
continue;
}
dpif_netlink_handler_uninit(handler);
free(handler->epoll_events);
- free(handler->channels);
}
-
+ free(dpif->channels);
free(dpif->handlers);
dpif->handlers = NULL;
+ dpif->channels = NULL;
dpif->n_handlers = 0;
dpif->uc_array_size = 0;
}
return error;
}
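+/* Asks the kernel to enable 'new_features' on top of the features already
+ * negotiated for the datapath.  Returns 0 on success, EOPNOTSUPP if the
+ * transaction succeeded but the kernel did not actually enable all of
+ * 'new_features', or another positive errno value if the transaction
+ * failed. */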
+static int
+dpif_netlink_set_features(struct dpif *dpif_, uint32_t new_features)
+{
+ struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
+ struct dpif_netlink_dp request, reply;
+ struct ofpbuf *bufp;
+ int error;
+
+ dpif_netlink_dp_init(&request);
+ request.cmd = OVS_DP_CMD_SET;
+ request.dp_ifindex = dpif->dp_ifindex;
+ request.user_features = dpif->user_features | new_features;
+
+ error = dpif_netlink_dp_transact(&request, &reply, &bufp);
+ if (!error) {
+ dpif->user_features = reply.user_features;
+ ofpbuf_delete(bufp);
+ if (!(dpif->user_features & new_features)) {
+            return EOPNOTSUPP;
+ }
+ }
+
+ return error;
+}
+
static const char *
get_vport_type(const struct dpif_netlink_vport *vport)
{
{
struct dpif_netlink_vport request, reply;
struct ofpbuf *buf;
- struct nl_sock **socksp = NULL;
- uint32_t *upcall_pids;
+ struct nl_sock *sock = NULL;
+ uint32_t upcall_pids = 0;
int error = 0;
if (dpif->handlers) {
- socksp = vport_create_socksp(dpif, &error);
- if (!socksp) {
+ error = create_nl_sock(dpif, &sock);
+ if (error) {
return error;
}
}
request.name = name;
request.port_no = *port_nop;
- upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
- request.n_upcall_pids = socksp ? dpif->n_handlers : 1;
- request.upcall_pids = upcall_pids;
+ if (sock) {
+ upcall_pids = nl_sock_pid(sock);
+ }
+ request.n_upcall_pids = 1;
+ request.upcall_pids = &upcall_pids;
if (options) {
request.options = options->data;
dpif_name(&dpif->dpif), *port_nop);
}
- vport_del_socksp(dpif, socksp);
+ close_nl_sock(sock);
goto exit;
}
- if (socksp) {
- error = vport_add_channels(dpif, *port_nop, socksp);
- if (error) {
- VLOG_INFO("%s: could not add channel for port %s",
- dpif_name(&dpif->dpif), name);
+ error = vport_add_channel(dpif, *port_nop, sock);
+ if (error) {
+ VLOG_INFO("%s: could not add channel for port %s",
+ dpif_name(&dpif->dpif), name);
- /* Delete the port. */
- dpif_netlink_vport_init(&request);
- request.cmd = OVS_VPORT_CMD_DEL;
- request.dp_ifindex = dpif->dp_ifindex;
- request.port_no = *port_nop;
- dpif_netlink_vport_transact(&request, NULL, NULL);
- vport_del_socksp(dpif, socksp);
- goto exit;
- }
+ /* Delete the port. */
+ dpif_netlink_vport_init(&request);
+ request.cmd = OVS_VPORT_CMD_DEL;
+ request.dp_ifindex = dpif->dp_ifindex;
+ request.port_no = *port_nop;
+ dpif_netlink_vport_transact(&request, NULL, NULL);
+ close_nl_sock(sock);
+ goto exit;
}
- free(socksp);
exit:
ofpbuf_delete(buf);
- free(upcall_pids);
return error;
}
static uint32_t
dpif_netlink_port_get_pid__(const struct dpif_netlink *dpif,
- odp_port_t port_no, uint32_t hash)
+ odp_port_t port_no)
OVS_REQ_RDLOCK(dpif->upcall_lock)
{
uint32_t port_idx = odp_to_u32(port_no);
/* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
* channel, since it is not heavily loaded. */
uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
- struct dpif_handler *h = &dpif->handlers[hash % dpif->n_handlers];
/* Needs to check in case the socket pointer is changed in between
* the holding of upcall_lock. A known case happens when the main
* thread deletes the vport while the handler thread is handling
* the upcall from that port. */
- if (h->channels[idx].sock) {
- pid = nl_sock_pid(h->channels[idx].sock);
+ if (dpif->channels[idx].sock) {
+ pid = nl_sock_pid(dpif->channels[idx].sock);
}
}
}
static uint32_t
-dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no,
- uint32_t hash)
+dpif_netlink_port_get_pid(const struct dpif *dpif_, odp_port_t port_no)
{
const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
uint32_t ret;
fat_rwlock_rdlock(&dpif->upcall_lock);
- ret = dpif_netlink_port_get_pid__(dpif, port_no, hash);
+ ret = dpif_netlink_port_get_pid__(dpif, port_no);
fat_rwlock_unlock(&dpif->upcall_lock);
return ret;
static void
dpif_netlink_populate_flow_dump_types(struct dpif_netlink_flow_dump *dump,
- struct dpif_flow_dump_types *types) {
+ struct dpif_flow_dump_types *types)
+{
if (!types) {
dump->types.ovs_flows = true;
dump->types.netdev_flows = true;
}
static void
-dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
+dpif_netlink_flow_to_dpif_flow(struct dpif_flow *dpif_flow,
const struct dpif_netlink_flow *datapath_flow)
{
dpif_flow->key = datapath_flow->key;
dpif_flow->ufid = datapath_flow->ufid;
} else {
ovs_assert(datapath_flow->key && datapath_flow->key_len);
- dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
- &dpif_flow->ufid);
+ odp_flow_key_hash(datapath_flow->key, datapath_flow->key_len,
+ &dpif_flow->ufid);
}
dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
dpif_flow->attrs.offloaded = false;
.mask = &match->wc.masks,
.support = {
.max_vlan_headers = 2,
+ .recirc = true,
+ .ct_state = true,
+ .ct_zone = true,
+ .ct_mark = true,
+ .ct_label = true,
},
};
size_t offset;
if (dump->up.terse || datapath_flow.actions) {
/* Common case: we don't want actions, or the flow includes
* actions. */
- dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
- &datapath_flow);
+ dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
} else {
/* Rare case: the flow does not include actions. Retrieve this
* individual flow again to get the actions. */
/* Save this flow. Then exit, because we only have one buffer to
* handle this case. */
- dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
- &datapath_flow);
+ dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
break;
}
}
if (d_exec->mtu) {
nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
}
+
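+    /* Pass the packet hash back to the kernel so that it does not have to
+     * be recomputed after the userspace round trip. */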
+ if (d_exec->hash) {
+ nl_msg_put_u64(buf, OVS_PACKET_ATTR_HASH, d_exec->hash);
+ }
}
/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
if (!op->error) {
- dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
- &reply);
+ dpif_netlink_flow_to_dpif_flow(get->flow, &reply);
}
}
break;
struct netdev *dev;
struct offload_info info;
ovs_be16 dst_port = 0;
+ uint8_t csum_on = false;
int err;
if (put->flags & DPIF_FP_PROBE) {
return err;
}
- /* When we try to install a dummy flow from a probed feature. */
- if (match.flow.dl_type == htons(0x1234)) {
- return EOPNOTSUPP;
- }
-
in_port = match.flow.in_port.odp_port;
dev = netdev_ports_get(in_port, dpif_class);
if (!dev) {
if (tnl_cfg && tnl_cfg->dst_port != 0) {
dst_port = tnl_cfg->dst_port;
}
+ if (tnl_cfg) {
+ csum_on = tnl_cfg->csum;
+ }
netdev_close(outdev);
}
}
info.dpif_class = dpif_class;
info.tp_dst_port = dst_port;
+ info.tunnel_csum_on = csum_on;
+ info.recirc_id_shared_with_tc = (dpif->user_features
+ & OVS_DP_F_TC_RECIRC_SHARING);
err = netdev_flow_put(dev, &match,
CONST_CAST(struct nlattr *, put->actions),
put->actions_len,
VLOG_DBG("added flow");
} else if (err != EEXIST) {
- VLOG_ERR_RL(&rl, "failed to offload flow: %s", ovs_strerror(err));
+ struct netdev *oor_netdev = NULL;
+ enum vlog_level level;
+ if (err == ENOSPC && netdev_is_offload_rebalance_policy_enabled()) {
+ /*
+ * We need to set OOR on the input netdev (i.e, 'dev') for the
+ * flow. But if the flow has a tunnel attribute (i.e, decap action,
+ * with a virtual device like a VxLAN interface as its in-port),
+ * then lookup and set OOR on the underlying tunnel (real) netdev.
+ */
+ oor_netdev = flow_get_tunnel_netdev(&match.flow.tunnel);
+ if (!oor_netdev) {
+ /* Not a 'tunnel' flow */
+ oor_netdev = dev;
+ }
+ netdev_set_hw_info(oor_netdev, HW_INFO_TYPE_OOR, true);
+ }
+ level = (err == ENOSPC || err == EOPNOTSUPP) ? VLL_DBG : VLL_ERR;
+ VLOG_RL(&rl, level, "failed to offload flow: %s: %s",
+ ovs_strerror(err),
+ (oor_netdev ? oor_netdev->name : dev->name));
}
out:
break;
}
- log_flow_put_message(&dpif->dpif, &this_module, put, 0);
err = parse_flow_put(dpif, put);
+ log_flow_put_message(&dpif->dpif, &this_module, put, 0);
break;
}
case DPIF_OP_FLOW_DEL: {
break;
}
- log_flow_del_message(&dpif->dpif, &this_module, del, 0);
err = netdev_ports_flow_del(dpif->dpif.dpif_class, del->ufid,
del->stats);
+ log_flow_del_message(&dpif->dpif, &this_module, del, 0);
break;
}
case DPIF_OP_FLOW_GET: {
break;
}
- log_flow_get_message(&dpif->dpif, &this_module, get, 0);
err = parse_flow_get(dpif, get);
+ log_flow_get_message(&dpif->dpif, &this_module, get, 0);
break;
}
case DPIF_OP_EXECUTE:
}
static void
-dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops)
+dpif_netlink_operate(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops,
+ enum dpif_offload_type offload_type)
{
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
struct dpif_op *new_ops[OPERATE_MAX_OPS];
int i = 0;
int err = 0;
- if (netdev_is_flow_api_enabled()) {
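+    /* DPIF_OFFLOAD_ALWAYS only makes sense when hardware offload is
+     * available, so bail out early if the flow API is disabled. */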
+ if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
+ VLOG_DBG("Invalid offload_type: %d", offload_type);
+ return;
+ }
+
+ if (offload_type != DPIF_OFFLOAD_NEVER && netdev_is_flow_api_enabled()) {
while (n_ops > 0) {
count = 0;
err = try_send_to_netdev(dpif, op);
if (err && err != EEXIST) {
+ if (offload_type == DPIF_OFFLOAD_ALWAYS) {
+ /* We got an error while offloading an op. Since
+ * OFFLOAD_ALWAYS is specified, we stop further
+ * processing and return to the caller without
+ * invoking kernel datapath as fallback. But the
+ * interface requires us to process all n_ops; so
+ * return the same error in the remaining ops too.
+ */
+ op->error = err;
+ n_ops--;
+ while (n_ops > 0) {
+ op = ops[i++];
+ op->error = err;
+ n_ops--;
+ }
+ return;
+ }
new_ops[count++] = op;
} else {
op->error = err;
dpif_netlink_operate_chunks(dpif, new_ops, count);
}
- } else {
+ } else if (offload_type != DPIF_OFFLOAD_ALWAYS) {
dpif_netlink_operate_chunks(dpif, ops, n_ops);
}
}
dpif_netlink_port_dump_start__(dpif, &dump);
while (!dpif_netlink_port_dump_next__(dpif, &dump, &vport, &buf)) {
uint32_t port_no = odp_to_u32(vport.port_no);
- uint32_t *upcall_pids = NULL;
+ uint32_t upcall_pid;
int error;
if (port_no >= dpif->uc_array_size
- || !vport_get_pids(dpif, port_no, &upcall_pids)) {
- struct nl_sock **socksp = vport_create_socksp(dpif, &error);
+ || !vport_get_pid(dpif, port_no, &upcall_pid)) {
+ struct nl_sock *sock;
+ error = create_nl_sock(dpif, &sock);
- if (!socksp) {
+ if (error) {
goto error;
}
- error = vport_add_channels(dpif, vport.port_no, socksp);
+ error = vport_add_channel(dpif, vport.port_no, sock);
if (error) {
VLOG_INFO("%s: could not add channels for port %s",
dpif_name(&dpif->dpif), vport.name);
- vport_del_socksp(dpif, socksp);
+ nl_sock_destroy(sock);
retval = error;
goto error;
}
- upcall_pids = vport_socksp_to_pids(socksp, dpif->n_handlers);
- free(socksp);
+ upcall_pid = nl_sock_pid(sock);
}
/* Configure the vport to deliver misses to 'sock'. */
if (vport.upcall_pids[0] == 0
- || vport.n_upcall_pids != dpif->n_handlers
- || memcmp(upcall_pids, vport.upcall_pids, n_handlers * sizeof
- *upcall_pids)) {
+ || vport.n_upcall_pids != 1
+ || upcall_pid != vport.upcall_pids[0]) {
struct dpif_netlink_vport vport_request;
dpif_netlink_vport_init(&vport_request);
vport_request.cmd = OVS_VPORT_CMD_SET;
vport_request.dp_ifindex = dpif->dp_ifindex;
vport_request.port_no = vport.port_no;
- vport_request.n_upcall_pids = dpif->n_handlers;
- vport_request.upcall_pids = upcall_pids;
+ vport_request.n_upcall_pids = 1;
+ vport_request.upcall_pids = &upcall_pid;
error = dpif_netlink_vport_transact(&vport_request, NULL, NULL);
if (error) {
VLOG_WARN_RL(&error_rl,
if (port_no < keep_channels_nbits) {
bitmap_set1(keep_channels, port_no);
}
- free(upcall_pids);
continue;
error:
- free(upcall_pids);
vport_del_channels(dpif, vport.port_no);
}
nl_dump_done(&dump);
}
static int
-parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
- struct dpif_upcall *upcall, int *dp_ifindex)
+parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall,
+ int *dp_ifindex)
{
static const struct nl_policy ovs_packet_policy[] = {
/* Always present. */
[OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
[OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
- [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true }
+ [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true },
+ [OVS_PACKET_ATTR_HASH] = { .type = NL_A_U64, .optional = true }
};
struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
upcall->key = CONST_CAST(struct nlattr *,
nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
- dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
+ odp_flow_key_hash(upcall->key, upcall->key_len, &upcall->ufid);
upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
upcall->mru = a[OVS_PACKET_ATTR_MRU];
+ upcall->hash = a[OVS_PACKET_ATTR_HASH];
/* Allow overwriting the netlink attribute header without reallocating. */
dp_packet_use_stub(&upcall->packet,
return error;
}
- error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
+ error = parse_odp_packet(buf, upcall, &dp_ifindex);
if (!error && dp_ifindex == dpif->dp_ifindex) {
return 0;
} else if (error) {
while (handler->event_offset < handler->n_events) {
int idx = handler->epoll_events[handler->event_offset].data.u32;
- struct dpif_channel *ch = &dpif->handlers[handler_id].channels[idx];
+ struct dpif_channel *ch = &dpif->channels[idx];
handler->event_offset++;
return error;
}
- error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
+ error = parse_odp_packet(buf, upcall, &dp_ifindex);
if (!error && dp_ifindex == dpif->dp_ifindex) {
return 0;
} else if (error) {
OVS_REQ_WRLOCK(dpif->upcall_lock)
{
if (dpif->handlers) {
- size_t i, j;
+ size_t i;
+ if (!dpif->channels[0].sock) {
+ return;
+ }
for (i = 0; i < dpif->uc_array_size; i++ ) {
- if (!dpif->handlers[0].channels[i].sock) {
- continue;
- }
- for (j = 0; j < dpif->n_handlers; j++) {
- nl_sock_drain(dpif->handlers[j].channels[i].sock);
- }
+ nl_sock_drain(dpif->channels[i].sock);
}
}
}
struct ct_dpif_dump_state *dump_)
{
struct dpif_netlink_ct_dump_state *dump;
- int err;
INIT_CONTAINER(dump, dump_, up);
- err = nl_ct_dump_done(dump->nl_ct_dump);
+ int err = nl_ct_dump_done(dump->nl_ct_dump);
free(dump);
return err;
}
}
}
-\f
-/* Meters */
-static void
-dpif_netlink_meter_get_features(const struct dpif * dpif OVS_UNUSED,
- struct ofputil_meter_features *features)
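+/* Forwards the default conntrack limit and the per-zone limits in
+ * 'zone_limits' to the kernel via the OVS_CT_LIMIT Generic Netlink family.
+ * Returns a positive errno value on failure, e.g. EOPNOTSUPP when the
+ * kernel module does not provide the family. */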
+static int
+dpif_netlink_ct_set_limits(struct dpif *dpif OVS_UNUSED,
+ const uint32_t *default_limits,
+ const struct ovs_list *zone_limits)
{
- features->max_meters = 0;
- features->band_types = 0;
- features->capabilities = 0;
- features->max_bands = 0;
- features->max_color = 0;
+    struct ovs_zone_limit req_zone_limit = {0};
+
+ if (ovs_ct_limit_family < 0) {
+ return EOPNOTSUPP;
+ }
+
+ struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
+ nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
+ NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_SET,
+ OVS_CT_LIMIT_VERSION);
+
+ struct ovs_header *ovs_header;
+ ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
+ ovs_header->dp_ifindex = 0;
+
+ size_t opt_offset;
+ opt_offset = nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
+ if (default_limits) {
+ req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
+ req_zone_limit.limit = *default_limits;
+ nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
+ }
+
+ if (!ovs_list_is_empty(zone_limits)) {
+ struct ct_dpif_zone_limit *zone_limit;
+
+ LIST_FOR_EACH (zone_limit, node, zone_limits) {
+ req_zone_limit.zone_id = zone_limit->zone;
+ req_zone_limit.limit = zone_limit->limit;
+ nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
+ }
+ }
+ nl_msg_end_nested(request, opt_offset);
+
+ int err = nl_transact(NETLINK_GENERIC, request, NULL);
+ ofpbuf_delete(request);
+ return err;
}
static int
-dpif_netlink_meter_set(struct dpif *dpif OVS_UNUSED,
- ofproto_meter_id *meter_id OVS_UNUSED,
- struct ofputil_meter_config *config OVS_UNUSED)
+dpif_netlink_zone_limits_from_ofpbuf(const struct ofpbuf *buf,
+ uint32_t *default_limit,
+ struct ovs_list *zone_limits)
{
- return EFBIG; /* meter_id out of range */
+ static const struct nl_policy ovs_ct_limit_policy[] = {
+ [OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NL_A_NESTED,
+ .optional = true },
+ };
+
+ struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
+ struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
+ struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
+ struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+
+ struct nlattr *attr[ARRAY_SIZE(ovs_ct_limit_policy)];
+
+ if (!nlmsg || !genl || !ovs_header
+ || nlmsg->nlmsg_type != ovs_ct_limit_family
+ || !nl_policy_parse(&b, 0, ovs_ct_limit_policy, attr,
+ ARRAY_SIZE(ovs_ct_limit_policy))) {
+ return EINVAL;
+ }
+
+ if (!attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
+ return EINVAL;
+ }
+
+ int rem = NLA_ALIGN(
+ nl_attr_get_size(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]));
+ const struct ovs_zone_limit *zone_limit =
+ nl_attr_get(attr[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]);
+
+ while (rem >= sizeof *zone_limit) {
+ if (zone_limit->zone_id == OVS_ZONE_LIMIT_DEFAULT_ZONE) {
+ *default_limit = zone_limit->limit;
+ } else if (zone_limit->zone_id < OVS_ZONE_LIMIT_DEFAULT_ZONE ||
+ zone_limit->zone_id > UINT16_MAX) {
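+            /* Ignore zone ids outside the valid 16-bit range. */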
+ } else {
+ ct_dpif_push_zone_limit(zone_limits, zone_limit->zone_id,
+ zone_limit->limit, zone_limit->count);
+ }
+ rem -= NLA_ALIGN(sizeof *zone_limit);
+ zone_limit = ALIGNED_CAST(struct ovs_zone_limit *,
+ (unsigned char *) zone_limit + NLA_ALIGN(sizeof *zone_limit));
+ }
+ return 0;
}
static int
-dpif_netlink_meter_get(const struct dpif *dpif OVS_UNUSED,
- ofproto_meter_id meter_id OVS_UNUSED,
- struct ofputil_meter_stats *stats OVS_UNUSED,
- uint16_t n_bands OVS_UNUSED)
+dpif_netlink_ct_get_limits(struct dpif *dpif OVS_UNUSED,
+ uint32_t *default_limit,
+ const struct ovs_list *zone_limits_request,
+ struct ovs_list *zone_limits_reply)
{
- return EFBIG; /* meter_id out of range */
+ if (ovs_ct_limit_family < 0) {
+ return EOPNOTSUPP;
+ }
+
+ struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
+ nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
+ NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_GET,
+ OVS_CT_LIMIT_VERSION);
+
+ struct ovs_header *ovs_header;
+ ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
+ ovs_header->dp_ifindex = 0;
+
+ if (!ovs_list_is_empty(zone_limits_request)) {
+ size_t opt_offset = nl_msg_start_nested(request,
+ OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
+
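+        /* Always include the default zone so that its limit is reported
+         * along with the explicitly requested zones. */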
+        struct ovs_zone_limit req_zone_limit = {0};
+ req_zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
+ nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
+
+ struct ct_dpif_zone_limit *zone_limit;
+ LIST_FOR_EACH (zone_limit, node, zone_limits_request) {
+ req_zone_limit.zone_id = zone_limit->zone;
+ nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
+ }
+
+ nl_msg_end_nested(request, opt_offset);
+ }
+
+ struct ofpbuf *reply;
+ int err = nl_transact(NETLINK_GENERIC, request, &reply);
+ if (err) {
+ goto out;
+ }
+
+ err = dpif_netlink_zone_limits_from_ofpbuf(reply, default_limit,
+ zone_limits_reply);
+
+out:
+ ofpbuf_delete(request);
+ ofpbuf_delete(reply);
+ return err;
}
static int
-dpif_netlink_meter_del(struct dpif *dpif OVS_UNUSED,
- ofproto_meter_id meter_id OVS_UNUSED,
- struct ofputil_meter_stats *stats OVS_UNUSED,
- uint16_t n_bands OVS_UNUSED)
+dpif_netlink_ct_del_limits(struct dpif *dpif OVS_UNUSED,
+ const struct ovs_list *zone_limits)
{
- return EFBIG; /* meter_id out of range */
+ if (ovs_ct_limit_family < 0) {
+ return EOPNOTSUPP;
+ }
+
+ struct ofpbuf *request = ofpbuf_new(NL_DUMP_BUFSIZE);
+ nl_msg_put_genlmsghdr(request, 0, ovs_ct_limit_family,
+ NLM_F_REQUEST | NLM_F_ECHO, OVS_CT_LIMIT_CMD_DEL,
+ OVS_CT_LIMIT_VERSION);
+
+ struct ovs_header *ovs_header;
+ ovs_header = ofpbuf_put_uninit(request, sizeof *ovs_header);
+ ovs_header->dp_ifindex = 0;
+
+ if (!ovs_list_is_empty(zone_limits)) {
+ size_t opt_offset =
+ nl_msg_start_nested(request, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
+
+ struct ct_dpif_zone_limit *zone_limit;
+ LIST_FOR_EACH (zone_limit, node, zone_limits) {
+            struct ovs_zone_limit req_zone_limit = {0};
+ req_zone_limit.zone_id = zone_limit->zone;
+ nl_msg_put(request, &req_zone_limit, sizeof req_zone_limit);
+ }
+ nl_msg_end_nested(request, opt_offset);
+ }
+
+ int err = nl_transact(NETLINK_GENERIC, request, NULL);
+
+ ofpbuf_delete(request);
+ return err;
}
-\f
-const struct dpif_class dpif_netlink_class = {
- "system",
- NULL, /* init */
- dpif_netlink_enumerate,
- NULL,
- dpif_netlink_open,
- dpif_netlink_close,
- dpif_netlink_destroy,
- dpif_netlink_run,
- NULL, /* wait */
- dpif_netlink_get_stats,
- dpif_netlink_port_add,
- dpif_netlink_port_del,
- NULL, /* port_set_config */
- dpif_netlink_port_query_by_number,
- dpif_netlink_port_query_by_name,
- dpif_netlink_port_get_pid,
- dpif_netlink_port_dump_start,
+#define NL_TP_NAME_PREFIX "ovs_tp_"
+
+struct dpif_netlink_timeout_policy_protocol {
+ uint16_t l3num;
+ uint8_t l4num;
+};
+
+enum OVS_PACKED_ENUM dpif_netlink_support_timeout_policy_protocol {
+ DPIF_NL_TP_AF_INET_TCP,
+ DPIF_NL_TP_AF_INET_UDP,
+ DPIF_NL_TP_AF_INET_ICMP,
+ DPIF_NL_TP_AF_INET6_TCP,
+ DPIF_NL_TP_AF_INET6_UDP,
+ DPIF_NL_TP_AF_INET6_ICMPV6,
+ DPIF_NL_TP_MAX
+};
+
+#define DPIF_NL_ALL_TP ((1UL << DPIF_NL_TP_MAX) - 1)
+
+static struct dpif_netlink_timeout_policy_protocol tp_protos[] = {
+ [DPIF_NL_TP_AF_INET_TCP] = { .l3num = AF_INET, .l4num = IPPROTO_TCP },
+ [DPIF_NL_TP_AF_INET_UDP] = { .l3num = AF_INET, .l4num = IPPROTO_UDP },
+ [DPIF_NL_TP_AF_INET_ICMP] = { .l3num = AF_INET, .l4num = IPPROTO_ICMP },
+ [DPIF_NL_TP_AF_INET6_TCP] = { .l3num = AF_INET6, .l4num = IPPROTO_TCP },
+ [DPIF_NL_TP_AF_INET6_UDP] = { .l3num = AF_INET6, .l4num = IPPROTO_UDP },
+ [DPIF_NL_TP_AF_INET6_ICMPV6] = { .l3num = AF_INET6,
+ .l4num = IPPROTO_ICMPV6 },
+};
+
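+/* Composes the name of the kernel timeout policy that backs OVS timeout
+ * policy 'id' for the given 'l3num'/'l4num' pair; with the usual lowercase
+ * protocol names this yields, e.g., "ovs_tp_1_tcp4" for id 1, AF_INET,
+ * IPPROTO_TCP.  The caller owns '*tp_name' and must free it. */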
+static void
+dpif_netlink_format_tp_name(uint32_t id, uint16_t l3num, uint8_t l4num,
+ char **tp_name)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ ds_put_format(&ds, "%s%"PRIu32"_", NL_TP_NAME_PREFIX, id);
+ ct_dpif_format_ipproto(&ds, l4num);
+
+ if (l3num == AF_INET) {
+ ds_put_cstr(&ds, "4");
+ } else if (l3num == AF_INET6 && l4num != IPPROTO_ICMPV6) {
+ ds_put_cstr(&ds, "6");
+ }
+
+ ovs_assert(ds.length < CTNL_TIMEOUT_NAME_MAX);
+
+ *tp_name = ds_steal_cstr(&ds);
+}
+
+static int
+dpif_netlink_ct_get_timeout_policy_name(struct dpif *dpif OVS_UNUSED,
+ uint32_t tp_id, uint16_t dl_type,
+ uint8_t nw_proto, char **tp_name,
+ bool *is_generic)
+{
+ dpif_netlink_format_tp_name(tp_id,
+ dl_type == ETH_TYPE_IP ? AF_INET : AF_INET6,
+ nw_proto, tp_name);
+ *is_generic = false;
+ return 0;
+}
+
+#define CT_DPIF_NL_TP_TCP_MAPPINGS \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT, SYN_SENT) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_RECV, SYN_RECV) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, ESTABLISHED, ESTABLISHED) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, FIN_WAIT, FIN_WAIT) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE_WAIT, CLOSE_WAIT) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, LAST_ACK, LAST_ACK) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, TIME_WAIT, TIME_WAIT) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, CLOSE, CLOSE) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, SYN_SENT2, SYN_SENT2) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, RETRANSMIT, RETRANS) \
+ CT_DPIF_NL_TP_MAPPING(TCP, TCP, UNACK, UNACK)
+
+#define CT_DPIF_NL_TP_UDP_MAPPINGS \
+ CT_DPIF_NL_TP_MAPPING(UDP, UDP, SINGLE, UNREPLIED) \
+ CT_DPIF_NL_TP_MAPPING(UDP, UDP, MULTIPLE, REPLIED)
+
+#define CT_DPIF_NL_TP_ICMP_MAPPINGS \
+ CT_DPIF_NL_TP_MAPPING(ICMP, ICMP, FIRST, TIMEOUT)
+
+#define CT_DPIF_NL_TP_ICMPV6_MAPPINGS \
+ CT_DPIF_NL_TP_MAPPING(ICMP, ICMPV6, FIRST, TIMEOUT)
+
+
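+/* The mapping lists above are expanded twice with different definitions of
+ * CT_DPIF_NL_TP_MAPPING: first (below) to copy attributes from a ct_dpif
+ * timeout policy into the netlink representation, then further down for
+ * the reverse direction. */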
+if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) { \
+ nl_tp->present |= 1 << CTA_TIMEOUT_##PROTO2##_##ATTR2; \
+ nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2] = \
+ tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]; \
+}
+
+static void
+dpif_netlink_get_nl_tp_tcp_attrs(const struct ct_dpif_timeout_policy *tp,
+ struct nl_ct_timeout_policy *nl_tp)
+{
+ CT_DPIF_NL_TP_TCP_MAPPINGS
+}
+
+static void
+dpif_netlink_get_nl_tp_udp_attrs(const struct ct_dpif_timeout_policy *tp,
+ struct nl_ct_timeout_policy *nl_tp)
+{
+ CT_DPIF_NL_TP_UDP_MAPPINGS
+}
+
+static void
+dpif_netlink_get_nl_tp_icmp_attrs(const struct ct_dpif_timeout_policy *tp,
+ struct nl_ct_timeout_policy *nl_tp)
+{
+ CT_DPIF_NL_TP_ICMP_MAPPINGS
+}
+
+static void
+dpif_netlink_get_nl_tp_icmpv6_attrs(const struct ct_dpif_timeout_policy *tp,
+ struct nl_ct_timeout_policy *nl_tp)
+{
+ CT_DPIF_NL_TP_ICMPV6_MAPPINGS
+}
+
+#undef CT_DPIF_NL_TP_MAPPING
+
+static void
+dpif_netlink_get_nl_tp_attrs(const struct ct_dpif_timeout_policy *tp,
+ uint8_t l4num, struct nl_ct_timeout_policy *nl_tp)
+{
+ nl_tp->present = 0;
+
+ if (l4num == IPPROTO_TCP) {
+ dpif_netlink_get_nl_tp_tcp_attrs(tp, nl_tp);
+ } else if (l4num == IPPROTO_UDP) {
+ dpif_netlink_get_nl_tp_udp_attrs(tp, nl_tp);
+ } else if (l4num == IPPROTO_ICMP) {
+ dpif_netlink_get_nl_tp_icmp_attrs(tp, nl_tp);
+ } else if (l4num == IPPROTO_ICMPV6) {
+ dpif_netlink_get_nl_tp_icmpv6_attrs(tp, nl_tp);
+ }
+}
+
+#define CT_DPIF_NL_TP_MAPPING(PROTO1, PROTO2, ATTR1, ATTR2) \
+if (nl_tp->present & (1 << CTA_TIMEOUT_##PROTO2##_##ATTR2)) { \
+ if (tp->present & (1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1)) { \
+ if (tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] != \
+ nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]) { \
+ VLOG_WARN_RL(&error_rl, "Inconsistent timeout policy %s " \
+ "attribute %s=%"PRIu32" while %s=%"PRIu32, \
+ nl_tp->name, "CTA_TIMEOUT_"#PROTO2"_"#ATTR2, \
+ nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2], \
+ "CT_DPIF_TP_ATTR_"#PROTO1"_"#ATTR1, \
+ tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1]); \
+ } \
+ } else { \
+ tp->present |= 1 << CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1; \
+ tp->attrs[CT_DPIF_TP_ATTR_##PROTO1##_##ATTR1] = \
+ nl_tp->attrs[CTA_TIMEOUT_##PROTO2##_##ATTR2]; \
+ } \
+}
+
+static void
+dpif_netlink_set_ct_dpif_tp_tcp_attrs(const struct nl_ct_timeout_policy *nl_tp,
+ struct ct_dpif_timeout_policy *tp)
+{
+ CT_DPIF_NL_TP_TCP_MAPPINGS
+}
+
+static void
+dpif_netlink_set_ct_dpif_tp_udp_attrs(const struct nl_ct_timeout_policy *nl_tp,
+ struct ct_dpif_timeout_policy *tp)
+{
+ CT_DPIF_NL_TP_UDP_MAPPINGS
+}
+
+static void
+dpif_netlink_set_ct_dpif_tp_icmp_attrs(
+ const struct nl_ct_timeout_policy *nl_tp,
+ struct ct_dpif_timeout_policy *tp)
+{
+ CT_DPIF_NL_TP_ICMP_MAPPINGS
+}
+
+static void
+dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(
+ const struct nl_ct_timeout_policy *nl_tp,
+ struct ct_dpif_timeout_policy *tp)
+{
+ CT_DPIF_NL_TP_ICMPV6_MAPPINGS
+}
+
+#undef CT_DPIF_NL_TP_MAPPING
+
+static void
+dpif_netlink_set_ct_dpif_tp_attrs(const struct nl_ct_timeout_policy *nl_tp,
+ struct ct_dpif_timeout_policy *tp)
+{
+ if (nl_tp->l4num == IPPROTO_TCP) {
+ dpif_netlink_set_ct_dpif_tp_tcp_attrs(nl_tp, tp);
+ } else if (nl_tp->l4num == IPPROTO_UDP) {
+ dpif_netlink_set_ct_dpif_tp_udp_attrs(nl_tp, tp);
+ } else if (nl_tp->l4num == IPPROTO_ICMP) {
+ dpif_netlink_set_ct_dpif_tp_icmp_attrs(nl_tp, tp);
+ } else if (nl_tp->l4num == IPPROTO_ICMPV6) {
+ dpif_netlink_set_ct_dpif_tp_icmpv6_attrs(nl_tp, tp);
+ }
+}
+
+#ifdef _WIN32
+static int
+dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
+ const struct ct_dpif_timeout_policy *tp)
+{
+ return EOPNOTSUPP;
+}
+
+static int
+dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
+ uint32_t tp_id,
+ struct ct_dpif_timeout_policy *tp)
+{
+ return EOPNOTSUPP;
+}
+
+static int
+dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
+ uint32_t tp_id)
+{
+ return EOPNOTSUPP;
+}
+
+static int
+dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
+ void **statep)
+{
+ return EOPNOTSUPP;
+}
+
+static int
+dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
+ void *state,
+ struct ct_dpif_timeout_policy **tp)
+{
+ return EOPNOTSUPP;
+}
+
+static int
+dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
+ void *state)
+{
+ return EOPNOTSUPP;
+}
+#else
+static int
+dpif_netlink_ct_set_timeout_policy(struct dpif *dpif OVS_UNUSED,
+ const struct ct_dpif_timeout_policy *tp)
+{
+ int err = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
+ struct nl_ct_timeout_policy nl_tp;
+ char *nl_tp_name;
+
+ dpif_netlink_format_tp_name(tp->id, tp_protos[i].l3num,
+ tp_protos[i].l4num, &nl_tp_name);
+ ovs_strlcpy(nl_tp.name, nl_tp_name, sizeof nl_tp.name);
+ free(nl_tp_name);
+
+ nl_tp.l3num = tp_protos[i].l3num;
+ nl_tp.l4num = tp_protos[i].l4num;
+ dpif_netlink_get_nl_tp_attrs(tp, tp_protos[i].l4num, &nl_tp);
+ err = nl_ct_set_timeout_policy(&nl_tp);
+ if (err) {
+ VLOG_WARN_RL(&error_rl, "failed to add timeout policy %s (%s)",
+ nl_tp.name, ovs_strerror(err));
+ goto out;
+ }
+ }
+
+out:
+ return err;
+}
+
+static int
+dpif_netlink_ct_get_timeout_policy(struct dpif *dpif OVS_UNUSED,
+ uint32_t tp_id,
+ struct ct_dpif_timeout_policy *tp)
+{
+ int err = 0;
+
+ tp->id = tp_id;
+ tp->present = 0;
+ for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
+ struct nl_ct_timeout_policy nl_tp;
+ char *nl_tp_name;
+
+ dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
+ tp_protos[i].l4num, &nl_tp_name);
+ err = nl_ct_get_timeout_policy(nl_tp_name, &nl_tp);
+
+ if (err) {
+ VLOG_WARN_RL(&error_rl, "failed to get timeout policy %s (%s)",
+ nl_tp_name, ovs_strerror(err));
+ free(nl_tp_name);
+ goto out;
+ }
+ free(nl_tp_name);
+ dpif_netlink_set_ct_dpif_tp_attrs(&nl_tp, tp);
+ }
+
+out:
+ return err;
+}
+
+/* Returns 0 if all the sub timeout policies are deleted or do not exist in
+ * the kernel.  Returns 1 if the deletion of any sub timeout policy fails. */
+static int
+dpif_netlink_ct_del_timeout_policy(struct dpif *dpif OVS_UNUSED,
+ uint32_t tp_id)
+{
+ int ret = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(tp_protos); ++i) {
+ char *nl_tp_name;
+ dpif_netlink_format_tp_name(tp_id, tp_protos[i].l3num,
+ tp_protos[i].l4num, &nl_tp_name);
+ int err = nl_ct_del_timeout_policy(nl_tp_name);
+ if (err == ENOENT) {
+ err = 0;
+ }
+ if (err) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(6, 6);
+ VLOG_INFO_RL(&rl, "failed to delete timeout policy %s (%s)",
+ nl_tp_name, ovs_strerror(err));
+ ret = 1;
+ }
+ free(nl_tp_name);
+ }
+
+ return ret;
+}
+
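+/* The kernel keeps one timeout policy per (l3num, l4num) pair, while a
+ * ct_dpif timeout policy covers all the supported protocol combinations.
+ * The dump state therefore aggregates kernel entries by OVS policy id in
+ * 'tp_dump_map' until every sub-piece of a policy has been seen. */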
+struct dpif_netlink_ct_timeout_policy_dump_state {
+ struct nl_ct_timeout_policy_dump_state *nl_dump_state;
+ struct hmap tp_dump_map;
+};
+
+struct dpif_netlink_tp_dump_node {
+ struct hmap_node hmap_node; /* node in tp_dump_map. */
+ struct ct_dpif_timeout_policy *tp;
+ uint32_t l3_l4_present;
+};
+
+static struct dpif_netlink_tp_dump_node *
+get_dpif_netlink_tp_dump_node_by_tp_id(uint32_t tp_id,
+ struct hmap *tp_dump_map)
+{
+ struct dpif_netlink_tp_dump_node *tp_dump_node;
+
+ HMAP_FOR_EACH_WITH_HASH (tp_dump_node, hmap_node, hash_int(tp_id, 0),
+ tp_dump_map) {
+ if (tp_dump_node->tp->id == tp_id) {
+ return tp_dump_node;
+ }
+ }
+ return NULL;
+}
+
+static void
+update_dpif_netlink_tp_dump_node(
+ const struct nl_ct_timeout_policy *nl_tp,
+ struct dpif_netlink_tp_dump_node *tp_dump_node)
+{
+ dpif_netlink_set_ct_dpif_tp_attrs(nl_tp, tp_dump_node->tp);
+ for (int i = 0; i < DPIF_NL_TP_MAX; ++i) {
+ if (nl_tp->l3num == tp_protos[i].l3num &&
+ nl_tp->l4num == tp_protos[i].l4num) {
+ tp_dump_node->l3_l4_present |= 1 << i;
+ break;
+ }
+ }
+}
+
+static int
+dpif_netlink_ct_timeout_policy_dump_start(struct dpif *dpif OVS_UNUSED,
+ void **statep)
+{
+ struct dpif_netlink_ct_timeout_policy_dump_state *dump_state;
+
+ *statep = dump_state = xzalloc(sizeof *dump_state);
+ int err = nl_ct_timeout_policy_dump_start(&dump_state->nl_dump_state);
+ if (err) {
+ free(dump_state);
+ return err;
+ }
+ hmap_init(&dump_state->tp_dump_map);
+ return 0;
+}
+
+static void
+get_and_cleanup_tp_dump_node(struct hmap *hmap,
+ struct dpif_netlink_tp_dump_node *tp_dump_node,
+ struct ct_dpif_timeout_policy *tp)
+{
+ hmap_remove(hmap, &tp_dump_node->hmap_node);
+ *tp = *tp_dump_node->tp;
+ free(tp_dump_node->tp);
+ free(tp_dump_node);
+}
+
+static int
+dpif_netlink_ct_timeout_policy_dump_next(struct dpif *dpif OVS_UNUSED,
+ void *state,
+ struct ct_dpif_timeout_policy *tp)
+{
+ struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
+ struct dpif_netlink_tp_dump_node *tp_dump_node;
+ int err;
+
+ /* Dumps all the timeout policies in the kernel. */
+ do {
+ struct nl_ct_timeout_policy nl_tp;
+ uint32_t tp_id;
+
+ err = nl_ct_timeout_policy_dump_next(dump_state->nl_dump_state,
+ &nl_tp);
+ if (err) {
+ break;
+ }
+
+        /* We are only interested in OVS-installed timeout policies. */
+ if (!ovs_scan(nl_tp.name, NL_TP_NAME_PREFIX"%"PRIu32, &tp_id)) {
+ continue;
+ }
+
+ tp_dump_node = get_dpif_netlink_tp_dump_node_by_tp_id(
+ tp_id, &dump_state->tp_dump_map);
+ if (!tp_dump_node) {
+ tp_dump_node = xzalloc(sizeof *tp_dump_node);
+ tp_dump_node->tp = xzalloc(sizeof *tp_dump_node->tp);
+ tp_dump_node->tp->id = tp_id;
+ hmap_insert(&dump_state->tp_dump_map, &tp_dump_node->hmap_node,
+ hash_int(tp_id, 0));
+ }
+
+ update_dpif_netlink_tp_dump_node(&nl_tp, tp_dump_node);
+
+        /* Return one ct_dpif_timeout_policy once all of its L3/L4
+         * sub-pieces have been gathered. */
+ if (tp_dump_node->l3_l4_present == DPIF_NL_ALL_TP) {
+ get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
+ tp_dump_node, tp);
+ break;
+ }
+ } while (true);
+
+    /* At EOF, return any incomplete timeout policies, one per call. */
+ if (err == EOF) {
+ if (!hmap_is_empty(&dump_state->tp_dump_map)) {
+ struct hmap_node *hmap_node = hmap_first(&dump_state->tp_dump_map);
+ tp_dump_node = CONTAINER_OF(hmap_node,
+ struct dpif_netlink_tp_dump_node,
+ hmap_node);
+ get_and_cleanup_tp_dump_node(&dump_state->tp_dump_map,
+ tp_dump_node, tp);
+ return 0;
+ }
+ }
+
+ return err;
+}
+
+static int
+dpif_netlink_ct_timeout_policy_dump_done(struct dpif *dpif OVS_UNUSED,
+ void *state)
+{
+ struct dpif_netlink_ct_timeout_policy_dump_state *dump_state = state;
+ struct dpif_netlink_tp_dump_node *tp_dump_node;
+
+ int err = nl_ct_timeout_policy_dump_done(dump_state->nl_dump_state);
+ HMAP_FOR_EACH_POP (tp_dump_node, hmap_node, &dump_state->tp_dump_map) {
+ free(tp_dump_node->tp);
+ free(tp_dump_node);
+ }
+ hmap_destroy(&dump_state->tp_dump_map);
+ free(dump_state);
+ return err;
+}
+#endif
+
+\f
+/* Meters */
+
+/* Set of supported meter flags */
+#define DP_SUPPORTED_METER_FLAGS_MASK \
+ (OFPMF13_STATS | OFPMF13_PKTPS | OFPMF13_KBPS | OFPMF13_BURST)
+
+/* Meter support was introduced in Linux 4.15. In some versions of
+ * Linux 4.15, 4.16, and 4.17, there was a bug that never set the id
+ * when the meter was created, so all meters essentially had an id of
+ * zero. Check for that condition and disable meters on those kernels. */
+static bool probe_broken_meters(struct dpif *);
+
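+/* Initializes 'buf', backed by 'stub', as a meter request for 'command'
+ * with the Generic Netlink and OVS headers already filled in. */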
+static void
+dpif_netlink_meter_init(struct dpif_netlink *dpif, struct ofpbuf *buf,
+ void *stub, size_t size, uint32_t command)
+{
+ ofpbuf_use_stub(buf, stub, size);
+
+ nl_msg_put_genlmsghdr(buf, 0, ovs_meter_family, NLM_F_REQUEST | NLM_F_ECHO,
+ command, OVS_METER_VERSION);
+
+ struct ovs_header *ovs_header;
+ ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
+ ovs_header->dp_ifindex = dpif->dp_ifindex;
+}
+
+/* Execute meter 'request' in the kernel datapath. If the command
+ * fails, returns a positive errno value. Otherwise, stores the reply
+ * in '*replyp', parses the policy according to 'reply_policy' into the
+ * array of Netlink attribute in 'a', and returns 0. On success, the
+ * caller is responsible for calling ofpbuf_delete() on '*replyp'
+ * ('replyp' will contain pointers into 'a'). */
+static int
+dpif_netlink_meter_transact(struct ofpbuf *request, struct ofpbuf **replyp,
+ const struct nl_policy *reply_policy,
+ struct nlattr **a, size_t size_a)
+{
+ int error = nl_transact(NETLINK_GENERIC, request, replyp);
+ ofpbuf_uninit(request);
+
+ if (error) {
+ return error;
+ }
+
+ struct nlmsghdr *nlmsg = ofpbuf_try_pull(*replyp, sizeof *nlmsg);
+ struct genlmsghdr *genl = ofpbuf_try_pull(*replyp, sizeof *genl);
+ struct ovs_header *ovs_header = ofpbuf_try_pull(*replyp,
+ sizeof *ovs_header);
+ if (!nlmsg || !genl || !ovs_header
+ || nlmsg->nlmsg_type != ovs_meter_family
+ || !nl_policy_parse(*replyp, 0, reply_policy, a, size_a)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_DBG_RL(&rl,
+ "Kernel module response to meter tranaction is invalid");
+ return EINVAL;
+ }
+ return 0;
+}
+
+static void
+dpif_netlink_meter_get_features(const struct dpif *dpif_,
+ struct ofputil_meter_features *features)
+{
+ if (probe_broken_meters(CONST_CAST(struct dpif *, dpif_))) {
+ return;
+ }
+
+ struct ofpbuf buf, *msg;
+ uint64_t stub[1024 / 8];
+
+ static const struct nl_policy ovs_meter_features_policy[] = {
+ [OVS_METER_ATTR_MAX_METERS] = { .type = NL_A_U32 },
+ [OVS_METER_ATTR_MAX_BANDS] = { .type = NL_A_U32 },
+ [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
+ };
+ struct nlattr *a[ARRAY_SIZE(ovs_meter_features_policy)];
+
+ struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
+ dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub,
+ OVS_METER_CMD_FEATURES);
+ if (dpif_netlink_meter_transact(&buf, &msg, ovs_meter_features_policy, a,
+ ARRAY_SIZE(ovs_meter_features_policy))) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_INFO_RL(&rl,
+ "dpif_netlink_meter_transact OVS_METER_CMD_FEATURES failed");
+ return;
+ }
+
+ features->max_meters = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_METERS]);
+ features->max_bands = nl_attr_get_u32(a[OVS_METER_ATTR_MAX_BANDS]);
+
+ /* Bands is a nested attribute of zero or more nested
+ * band attributes. */
+ if (a[OVS_METER_ATTR_BANDS]) {
+ const struct nlattr *nla;
+ size_t left;
+
+ NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
+ const struct nlattr *band_nla;
+ size_t band_left;
+
+ NL_NESTED_FOR_EACH (band_nla, band_left, nla) {
+ if (nl_attr_type(band_nla) == OVS_BAND_ATTR_TYPE) {
+ if (nl_attr_get_size(band_nla) == sizeof(uint32_t)) {
+ switch (nl_attr_get_u32(band_nla)) {
+ case OVS_METER_BAND_TYPE_DROP:
+ features->band_types |= 1 << OFPMBT13_DROP;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ features->capabilities = DP_SUPPORTED_METER_FLAGS_MASK;
+
+ ofpbuf_delete(msg);
+}
+
+static int
+dpif_netlink_meter_set__(struct dpif *dpif_, ofproto_meter_id meter_id,
+ struct ofputil_meter_config *config)
+{
+ struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
+ struct ofpbuf buf, *msg;
+ uint64_t stub[1024 / 8];
+
+ static const struct nl_policy ovs_meter_set_response_policy[] = {
+ [OVS_METER_ATTR_ID] = { .type = NL_A_U32 },
+ };
+ struct nlattr *a[ARRAY_SIZE(ovs_meter_set_response_policy)];
+
+ if (config->flags & ~DP_SUPPORTED_METER_FLAGS_MASK) {
+ return EBADF; /* Unsupported flags set */
+ }
+
+ for (size_t i = 0; i < config->n_bands; i++) {
+ switch (config->bands[i].type) {
+ case OFPMBT13_DROP:
+ break;
+ default:
+ return ENODEV; /* Unsupported band type */
+ }
+ }
+
+ dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, OVS_METER_CMD_SET);
+
+ nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);
+
+ if (config->flags & OFPMF13_KBPS) {
+ nl_msg_put_flag(&buf, OVS_METER_ATTR_KBPS);
+ }
+
+ size_t bands_offset = nl_msg_start_nested(&buf, OVS_METER_ATTR_BANDS);
+ /* Bands */
+ for (size_t i = 0; i < config->n_bands; ++i) {
+ struct ofputil_meter_band * band = &config->bands[i];
+ uint32_t band_type;
+
+ size_t band_offset = nl_msg_start_nested(&buf, OVS_BAND_ATTR_UNSPEC);
+
+ switch (band->type) {
+ case OFPMBT13_DROP:
+ band_type = OVS_METER_BAND_TYPE_DROP;
+ break;
+ default:
+ band_type = OVS_METER_BAND_TYPE_UNSPEC;
+ }
+ nl_msg_put_u32(&buf, OVS_BAND_ATTR_TYPE, band_type);
+ nl_msg_put_u32(&buf, OVS_BAND_ATTR_RATE, band->rate);
+ nl_msg_put_u32(&buf, OVS_BAND_ATTR_BURST,
+ config->flags & OFPMF13_BURST ?
+ band->burst_size : band->rate);
+ nl_msg_end_nested(&buf, band_offset);
+ }
+ nl_msg_end_nested(&buf, bands_offset);
+
+ int error = dpif_netlink_meter_transact(&buf, &msg,
+ ovs_meter_set_response_policy, a,
+ ARRAY_SIZE(ovs_meter_set_response_policy));
+ if (error) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_INFO_RL(&rl,
+ "dpif_netlink_meter_transact OVS_METER_CMD_SET failed");
+ return error;
+ }
+
+ if (nl_attr_get_u32(a[OVS_METER_ATTR_ID]) != meter_id.uint32) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_INFO_RL(&rl,
+ "Kernel returned a different meter id than requested");
+ }
+ ofpbuf_delete(msg);
+ return 0;
+}
+
+static int
+dpif_netlink_meter_set(struct dpif *dpif_, ofproto_meter_id meter_id,
+ struct ofputil_meter_config *config)
+{
+ if (probe_broken_meters(dpif_)) {
+ return ENOMEM;
+ }
+
+ return dpif_netlink_meter_set__(dpif_, meter_id, config);
+}
+
+/* Retrieve statistics and/or delete meter 'meter_id'. Statistics are
+ * stored in 'stats', if it is not null. If 'command' is
+ * OVS_METER_CMD_DEL, the meter is deleted and statistics are optionally
+ * retrieved. If 'command' is OVS_METER_CMD_GET, then statistics are
+ * simply retrieved. */
+static int
+dpif_netlink_meter_get_stats(const struct dpif *dpif_,
+ ofproto_meter_id meter_id,
+ struct ofputil_meter_stats *stats,
+ uint16_t max_bands,
+ enum ovs_meter_cmd command)
+{
+ struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
+ struct ofpbuf buf, *msg;
+ uint64_t stub[1024 / 8];
+
+ static const struct nl_policy ovs_meter_stats_policy[] = {
+ [OVS_METER_ATTR_ID] = { .type = NL_A_U32, .optional = true},
+ [OVS_METER_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
+ .optional = true},
+ [OVS_METER_ATTR_BANDS] = { .type = NL_A_NESTED, .optional = true },
+ };
+ struct nlattr *a[ARRAY_SIZE(ovs_meter_stats_policy)];
+
+ dpif_netlink_meter_init(dpif, &buf, stub, sizeof stub, command);
+
+ nl_msg_put_u32(&buf, OVS_METER_ATTR_ID, meter_id.uint32);
+
+ int error = dpif_netlink_meter_transact(&buf, &msg,
+ ovs_meter_stats_policy, a,
+ ARRAY_SIZE(ovs_meter_stats_policy));
+ if (error) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_INFO_RL(&rl, "dpif_netlink_meter_transact %s failed",
+ command == OVS_METER_CMD_GET ? "get" : "del");
+ return error;
+ }
+
+ if (stats
+ && a[OVS_METER_ATTR_ID]
+ && a[OVS_METER_ATTR_STATS]
+ && nl_attr_get_u32(a[OVS_METER_ATTR_ID]) == meter_id.uint32) {
+ /* return stats */
+ const struct ovs_flow_stats *stat;
+ const struct nlattr *nla;
+ size_t left;
+
+ stat = nl_attr_get(a[OVS_METER_ATTR_STATS]);
+ stats->packet_in_count = get_32aligned_u64(&stat->n_packets);
+ stats->byte_in_count = get_32aligned_u64(&stat->n_bytes);
+
+ if (a[OVS_METER_ATTR_BANDS]) {
+ size_t n_bands = 0;
+ NL_NESTED_FOR_EACH (nla, left, a[OVS_METER_ATTR_BANDS]) {
+ const struct nlattr *band_nla;
+ band_nla = nl_attr_find_nested(nla, OVS_BAND_ATTR_STATS);
+                if (band_nla && nl_attr_get_size(band_nla)
+                    == sizeof(struct ovs_flow_stats)) {
+ stat = nl_attr_get(band_nla);
+
+ if (n_bands < max_bands) {
+ stats->bands[n_bands].packet_count
+ = get_32aligned_u64(&stat->n_packets);
+ stats->bands[n_bands].byte_count
+ = get_32aligned_u64(&stat->n_bytes);
+ ++n_bands;
+ }
+                } else if (n_bands < max_bands) {
+                    stats->bands[n_bands].packet_count = 0;
+                    stats->bands[n_bands].byte_count = 0;
+                    ++n_bands;
+                }
+ }
+ stats->n_bands = n_bands;
+ } else {
+ /* For a non-existent meter, return 0 stats. */
+ stats->n_bands = 0;
+ }
+ }
+
+ ofpbuf_delete(msg);
+ return error;
+}
+
+static int
+dpif_netlink_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
+ struct ofputil_meter_stats *stats, uint16_t max_bands)
+{
+ return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
+ OVS_METER_CMD_GET);
+}
+
+static int
+dpif_netlink_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
+ struct ofputil_meter_stats *stats, uint16_t max_bands)
+{
+ return dpif_netlink_meter_get_stats(dpif, meter_id, stats, max_bands,
+ OVS_METER_CMD_DEL);
+}
+
+static bool
+probe_broken_meters__(struct dpif *dpif)
+{
+    /* This test is destructive if it collides with meters that are already
+     * in use (e.g., ones installed via an ovs-dpctl meter command while
+     * ovs-vswitchd is running), so use high meter ids to make a collision
+     * unlikely. */
+ ofproto_meter_id id1 = { 54545401 };
+ ofproto_meter_id id2 = { 54545402 };
+ struct ofputil_meter_band band = {OFPMBT13_DROP, 0, 1, 0};
+ struct ofputil_meter_config config1 = { 1, OFPMF13_KBPS, 1, &band};
+ struct ofputil_meter_config config2 = { 2, OFPMF13_KBPS, 1, &band};
+
+    /* Try adding two meters and make sure that they both come back with
+     * the proper meter id.  Use the "__" version so that we don't cause
+     * a recursive deadlock. */
+ dpif_netlink_meter_set__(dpif, id1, &config1);
+ dpif_netlink_meter_set__(dpif, id2, &config2);
+
+ if (dpif_netlink_meter_get(dpif, id1, NULL, 0)
+ || dpif_netlink_meter_get(dpif, id2, NULL, 0)) {
+ VLOG_INFO("The kernel module has a broken meter implementation.");
+ return true;
+ }
+
+ dpif_netlink_meter_del(dpif, id1, NULL, 0);
+ dpif_netlink_meter_del(dpif, id2, NULL, 0);
+
+ return false;
+}
+
+static bool
+probe_broken_meters(struct dpif *dpif)
+{
+    /* This is a once-only test because OVS currently has at most one
+     * Netlink-capable datapath on any given platform. */
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+ static bool broken_meters = false;
+ if (ovsthread_once_start(&once)) {
+ broken_meters = probe_broken_meters__(dpif);
+ ovsthread_once_done(&once);
+ }
+ return broken_meters;
+}
+\f
+const struct dpif_class dpif_netlink_class = {
+ "system",
+ false, /* cleanup_required */
+ NULL, /* init */
+ dpif_netlink_enumerate,
+ NULL,
+ dpif_netlink_open,
+ dpif_netlink_close,
+ dpif_netlink_destroy,
+ dpif_netlink_run,
+ NULL, /* wait */
+ dpif_netlink_get_stats,
+ dpif_netlink_set_features,
+ dpif_netlink_port_add,
+ dpif_netlink_port_del,
+ NULL, /* port_set_config */
+ dpif_netlink_port_query_by_number,
+ dpif_netlink_port_query_by_name,
+ dpif_netlink_port_get_pid,
+ dpif_netlink_port_dump_start,
dpif_netlink_port_dump_next,
dpif_netlink_port_dump_done,
dpif_netlink_port_poll,
NULL, /* ct_set_maxconns */
NULL, /* ct_get_maxconns */
NULL, /* ct_get_nconns */
+ NULL, /* ct_set_tcp_seq_chk */
+ NULL, /* ct_get_tcp_seq_chk */
+ dpif_netlink_ct_set_limits,
+ dpif_netlink_ct_get_limits,
+ dpif_netlink_ct_del_limits,
+ dpif_netlink_ct_set_timeout_policy,
+ dpif_netlink_ct_get_timeout_policy,
+ dpif_netlink_ct_del_timeout_policy,
+ dpif_netlink_ct_timeout_policy_dump_start,
+ dpif_netlink_ct_timeout_policy_dump_next,
+ dpif_netlink_ct_timeout_policy_dump_done,
+ dpif_netlink_ct_get_timeout_policy_name,
+ NULL, /* ipf_set_enabled */
+ NULL, /* ipf_set_min_frag */
+ NULL, /* ipf_set_max_nfrags */
+ NULL, /* ipf_get_status */
+ NULL, /* ipf_dump_start */
+ NULL, /* ipf_dump_next */
+ NULL, /* ipf_dump_done */
dpif_netlink_meter_get_features,
dpif_netlink_meter_set,
dpif_netlink_meter_get,
error = nl_lookup_genl_mcgroup(OVS_VPORT_FAMILY, OVS_VPORT_MCGROUP,
&ovs_vport_mcgroup);
}
+ if (!error) {
+ if (nl_lookup_genl_family(OVS_METER_FAMILY, &ovs_meter_family)) {
+ VLOG_INFO("The kernel module does not support meters.");
+ }
+ }
+    if (nl_lookup_genl_family(OVS_CT_LIMIT_FAMILY,
+                              &ovs_ct_limit_family)) {
+ VLOG_INFO("Generic Netlink family '%s' does not exist. "
+ "Please update the Open vSwitch kernel module to enable "
+ "the conntrack limit feature.", OVS_CT_LIMIT_FAMILY);
+ }
ovs_tunnels_out_of_tree = dpif_netlink_rtnl_probe_oot_tunnels();
[OVS_DP_ATTR_MEGAFLOW_STATS] = {
NL_POLICY_FOR(struct ovs_dp_megaflow_stats),
.optional = true },
+ [OVS_DP_ATTR_USER_FEATURES] = {
+ .type = NL_A_U32,
+ .optional = true },
};
dpif_netlink_dp_init(dp);
dp->megaflow_stats = nl_attr_get(a[OVS_DP_ATTR_MEGAFLOW_STATS]);
}
+ if (a[OVS_DP_ATTR_USER_FEATURES]) {
+ dp->user_features = nl_attr_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
+ }
+
return 0;
}
ovs_be16 pt = pt_ns_type_be(nl_attr_get_be32(packet_type));
const struct nlattr *nla;
- nla = nl_attr_find(buf, NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
+ nla = nl_attr_find(buf, ofs + NLA_HDRLEN, OVS_KEY_ATTR_ETHERTYPE);
if (nla) {
ovs_be16 *ethertype;