#include "connectivity.h"
#include "netdev.h"
-#include "list.h"
+#include "netdev-offload.h"
+#include "openvswitch/list.h"
#include "ovs-numa.h"
+#include "ovs-rcu.h"
#include "packets.h"
#include "seq.h"
-#include "shash.h"
+#include "openvswitch/shash.h"
#include "smap.h"
#ifdef __cplusplus
extern "C" {
#endif
+struct netdev_tnl_build_header_params;
#define NETDEV_NUMA_UNSPEC OVS_NUMA_UNSPEC
+enum netdev_ol_flags {
+ NETDEV_TX_OFFLOAD_IPV4_CKSUM = 1 << 0,
+ NETDEV_TX_OFFLOAD_TCP_CKSUM = 1 << 1,
+ NETDEV_TX_OFFLOAD_UDP_CKSUM = 1 << 2,
+ NETDEV_TX_OFFLOAD_SCTP_CKSUM = 1 << 3,
+ NETDEV_TX_OFFLOAD_TCP_TSO = 1 << 4,
+};
+
/* A network device (e.g. an Ethernet device).
*
* Network device implementations may read these members but should not modify
const struct netdev_class *netdev_class; /* Functions to control
this device. */
+ /* If this is 'true', the user did not specify a netdev_class when
+ * opening this device, and it therefore got assigned to the "system" class. */
+ bool auto_classified;
+
+ /* Bitmask of the offloading features enabled by the netdev. */
+ uint64_t ol_flags;
+
+ /* If this is 'true', the user explicitly specified an MTU for this
+ * netdev. Otherwise, Open vSwitch is allowed to override it. */
+ bool mtu_user_config;
+
+ int ref_cnt; /* Times this device was opened. */
+
/* A sequence number which indicates changes in one of 'netdev''s
* properties. It must be nonzero so that users have a value which
* they may use as a reset when tracking 'netdev'.
*
* Minimally, the sequence number is required to change whenever
* 'netdev''s flags, features, ethernet address, or carrier changes. */
- uint64_t change_seq;
+ atomic_uint64_t change_seq;
+
+ /* A netdev provider might be unable to change some of the device's
+ * parameters (n_rxq, mtu) when the device is in use. In this case
+ * the provider can notify the upper layer by calling
+ * netdev_request_reconfigure(). The upper layer will react by stopping
+ * the operations on the device and calling netdev_reconfigure() to allow
+ * the configuration changes. 'last_reconfigure_seq' remembers the value
+ * of 'reconfigure_seq' when the last reconfiguration happened. */
+ struct seq *reconfigure_seq;
+ uint64_t last_reconfigure_seq;
/* The core netdev code initializes these at netdev construction and only
* provide read-only access to its client. Netdev implementations may
* modify them. */
int n_txq;
int n_rxq;
- /* Number of rx queues requested by user. */
- int requested_n_rxq;
- int ref_cnt; /* Times this devices was opened. */
struct shash_node *node; /* Pointer to element in global map. */
struct ovs_list saved_flags_list; /* Contains "struct netdev_saved_flags". */
+
+ /* Functions to control flow offloading. */
+ OVSRCU_TYPE(const struct netdev_flow_api *) flow_api;
+ const char *dpif_type; /* Type of dpif this netdev belongs to. */
+ struct netdev_hw_info hw_info; /* Offload-capable netdev info. */
};
-static void
+static inline void
netdev_change_seq_changed(const struct netdev *netdev_)
{
+ uint64_t change_seq;
struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
seq_change(connectivity_seq_get());
- netdev->change_seq++;
- if (!netdev->change_seq) {
- netdev->change_seq++;
+
+ atomic_read_relaxed(&netdev->change_seq, &change_seq);
+ change_seq++;
+ if (OVS_UNLIKELY(!change_seq)) {
+ change_seq++;
}
+ atomic_store_explicit(&netdev->change_seq, change_seq,
+ memory_order_release);
+}
+
+static inline void
+netdev_request_reconfigure(struct netdev *netdev)
+{
+ seq_change(netdev->reconfigure_seq);
}
const char *netdev_get_type(const struct netdev *);
struct netdev *netdev_rxq_get_netdev(const struct netdev_rxq *);
+
/* Network device class structure, to be defined by each implementation of a
* network device.
*
int (*init)(void);
/* Performs periodic work needed by netdevs of this class. May be null if
- * no periodic work is necessary. */
- void (*run)(void);
+ * no periodic work is necessary.
+ *
+ * 'netdev_class' points to the class. It is useful in case the same
+ * function is used to implement different classes. */
+ void (*run)(const struct netdev_class *netdev_class);
/* Arranges for poll_block() to wake up if the "run" member function needs
* to be called. Implementations are additionally required to wake
* whenever something changes in any of its netdevs which would cause their
* ->change_seq() function to change its result. May be null if nothing is
- * needed here. */
- void (*wait)(void);
+ * needed here.
+ *
+ * 'netdev_class' points to the class. It is useful in case the same
+ * function is used to implement different classes. */
+ void (*wait)(const struct netdev_class *netdev_class);
/* ## ---------------- ## */
/* ## netdev Functions ## */
/* Changes the device 'netdev''s configuration to 'args'.
*
* If this netdev class does not support configuration, this may be a null
- * pointer. */
- int (*set_config)(struct netdev *netdev, const struct smap *args);
+ * pointer.
+ *
+ * If the return value is not zero (meaning that an error occurred),
+ * the provider can allocate a string with an error message in '*errp'.
+ * The caller has to call free on it. */
+ int (*set_config)(struct netdev *netdev, const struct smap *args,
+ char **errp);
/* Returns the tunnel configuration of 'netdev'. If 'netdev' is
* not a tunnel, returns null.
const struct netdev_tunnel_config *
(*get_tunnel_config)(const struct netdev *netdev);
- /* Build Partial Tunnel header. Ethernet and ip header is already built,
- * build_header() is suppose build protocol specific part of header. */
+ /* Build Tunnel header. Ethernet and ip header parameters are passed to
+ * tunnel implementation to build entire outer header for given flow. */
int (*build_header)(const struct netdev *, struct ovs_action_push_tnl *data,
- const struct flow *tnl_flow);
+ const struct netdev_tnl_build_header_params *params);
/* build_header() can not build entire header for all packets for given
* flow. Push header is called for packet to build header specific to
* a packet on actual transmit. It uses partial header build by
* build_header() which is passed as data. */
- void (*push_header)(struct dp_packet *packet,
+ void (*push_header)(const struct netdev *,
+ struct dp_packet *packet,
const struct ovs_action_push_tnl *data);
/* Pop tunnel header from packet, build tunnel metadata and resize packet
- * for further processing. */
- int (*pop_header)(struct dp_packet *packet);
+ * for further processing.
+ * Returns NULL in case of error, or if the tunnel implementation has
+ * queued the packet for further processing. */
+ struct dp_packet * (*pop_header)(struct dp_packet *packet);
/* Returns the id of the numa node the 'netdev' is on. If there is no
* such info, returns NETDEV_NUMA_UNSPEC. */
int (*get_numa_id)(const struct netdev *netdev);
- /* Configures the number of tx queues and rx queues of 'netdev'.
- * Return 0 if successful, otherwise a positive errno value.
- *
- * 'n_rxq' specifies the maximum number of receive queues to create.
- * The netdev provider might choose to create less (e.g. if the hardware
- * supports only a smaller number). The actual number of queues created
- * is stored in the 'netdev->n_rxq' field.
+ /* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
+ * otherwise a positive errno value.
*
* 'n_txq' specifies the exact number of transmission queues to create.
- * The caller will call netdev_send() concurrently from 'n_txq' different
- * threads (with different qid). The netdev provider is responsible for
- * making sure that these concurrent calls do not create a race condition
- * by using multiple hw queues or locking.
- *
- * On error, the tx queue and rx queue configuration is indeterminant.
- * Caller should make decision on whether to restore the previous or
- * the default configuration. Also, caller must make sure there is no
- * other thread accessing the queues at the same time. */
- int (*set_multiq)(struct netdev *netdev, unsigned int n_txq,
- unsigned int n_rxq);
+ *
+ * The caller will call netdev_reconfigure() (if necessary) before using
+ * netdev_send() on any of the newly configured queues, giving the provider
+ * a chance to adjust its settings.
+ *
+ * On error, the tx queue configuration is unchanged. */
+ int (*set_tx_multiq)(struct netdev *netdev, unsigned int n_txq);
/* Sends buffers on 'netdev'.
* Returns 0 if successful (for every buffer), otherwise a positive errno
* If the function returns a non-zero value, some of the packets might have
* been sent anyway.
*
- * If 'may_steal' is false, the caller retains ownership of all the
- * packets. If 'may_steal' is true, the caller transfers ownership of all
- * the packets to the network device, regardless of success.
+ * The caller transfers ownership of all the packets to the network
+ * device, regardless of success.
+ *
+ * If 'concurrent_txq' is true, the caller may perform concurrent calls
+ * to netdev_send() with the same 'qid'. The netdev provider is responsible
+ * for making sure that these concurrent calls do not create a race
+ * condition by using locking or other synchronization if required.
*
* The network device is expected to maintain one or more packet
* transmission queues, so that the caller does not ordinarily have to
* network device from being usefully used by the netdev-based "userspace
* datapath". It will also prevent the OVS implementation of bonding from
* working properly over 'netdev'.) */
- int (*send)(struct netdev *netdev, int qid, struct dp_packet **buffers,
- int cnt, bool may_steal);
+ int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
+ bool concurrent_txq);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when the packet transmission queue for 'netdev' has
* If 'netdev' does not have an MTU (e.g. as some tunnels do not), then
* this function should return EOPNOTSUPP. This function may be set to
* null if it would always return EOPNOTSUPP. */
- int (*set_mtu)(const struct netdev *netdev, int mtu);
+ int (*set_mtu)(struct netdev *netdev, int mtu);
/* Returns the ifindex of 'netdev', if successful, as a positive number.
* On failure, returns a negative errno value.
* (UINT64_MAX). */
int (*get_stats)(const struct netdev *netdev, struct netdev_stats *);
+ /* Retrieves current device custom stats for 'netdev' into 'custom_stats'.
+ *
+ * A network device should return only available statistics (if any).
+ * If there are no statistics available, an empty array should be
+ * returned.
+ *
+ * The caller initializes 'custom_stats' before calling this function.
+ * The caller takes ownership over allocated array of counters inside
+ * structure netdev_custom_stats.
+ * */
+ int (*get_custom_stats)(const struct netdev *netdev,
+ struct netdev_custom_stats *custom_stats);
+
/* Stores the features supported by 'netdev' into each of '*current',
* '*advertised', '*supported', and '*peer'. Each value is a bitmap of
* NETDEV_F_* bits.
int (*set_advertisements)(struct netdev *netdev,
enum netdev_features advertise);
+ /* Returns 'netdev''s configured packet_type mode.
+ *
+ * This function may be set to null if it would always return
+ * NETDEV_PT_LEGACY_L2. */
+ enum netdev_pt_mode (*get_pt_mode)(const struct netdev *netdev);
+
/* Attempts to set input rate limiting (policing) policy, such that up to
* 'kbits_rate' kbps of traffic is accepted, with a maximum accumulative
* burst size of 'kbits' kb.
void *aux),
void *aux);
- /* If 'netdev' has an assigned IPv4 address, sets '*address' to that
- * address and '*netmask' to the associated netmask.
- *
- * The following error values have well-defined meanings:
- *
- * - EADDRNOTAVAIL: 'netdev' has no assigned IPv4 address.
- *
- * - EOPNOTSUPP: No IPv4 network stack attached to 'netdev'.
- *
- * This function may be set to null if it would always return EOPNOTSUPP
- * anyhow. */
- int (*get_in4)(const struct netdev *netdev, struct in_addr *address,
- struct in_addr *netmask);
-
/* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask. If
* 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared.
*
int (*update_flags)(struct netdev *netdev, enum netdev_flags off,
enum netdev_flags on, enum netdev_flags *old_flags);
+ /* If the provider called netdev_request_reconfigure(), the upper layer
+ * will eventually call this. The provider can update the device
+ * configuration knowing that the upper layer will not call rxq_recv() or
+ * send() until this function returns.
+ *
+ * On error, the configuration is indeterminate and the device cannot be
+ * used to send and receive packets until a successful configuration is
+ * applied. */
+ int (*reconfigure)(struct netdev *netdev);
/* ## -------------------- ## */
/* ## netdev_rxq Functions ## */
/* ## -------------------- ## */
void (*rxq_destruct)(struct netdev_rxq *);
void (*rxq_dealloc)(struct netdev_rxq *);
- /* Attempts to receive a batch of packets from 'rx'. The caller supplies
- * 'pkts' as the pointer to the beginning of an array of MAX_RX_BATCH
- * pointers to dp_packet. If successful, the implementation stores
- * pointers to up to MAX_RX_BATCH dp_packets into the array, transferring
- * ownership of the packets to the caller, stores the number of received
- * packets into '*cnt', and returns 0.
+ /* Retrieves the current state of the rx queue. 'false' means that the
+ * queue won't get traffic in the short term and need not be polled.
+ *
+ * This function may be set to null if it would always return 'true'
+ * anyhow. */
+ bool (*rxq_enabled)(struct netdev_rxq *);
+
+ /* Attempts to receive a batch of packets from 'rx'. In 'batch', the
+ * caller supplies 'packets' as the pointer to the beginning of an array
+ * of NETDEV_MAX_BURST pointers to dp_packet. If successful, the
+ * implementation stores pointers to up to NETDEV_MAX_BURST dp_packets into
+ * the array, transferring ownership of the packets to the caller, stores
+ * the number of received packets into 'count', and returns 0.
*
* The implementation does not necessarily initialize any non-data members
- * of 'pkts'. That is, the caller must initialize layer pointers and
- * metadata itself, if desired, e.g. with pkt_metadata_init() and
- * miniflow_extract().
+ * of 'packets' in 'batch'. That is, the caller must initialize layer
+ * pointers and metadata itself, if desired, e.g. with pkt_metadata_init()
+ * and miniflow_extract().
*
* Implementations should allocate buffers with DP_NETDEV_HEADROOM bytes of
* headroom.
*
+ * If the caller provides a non-NULL qfill pointer, the implementation
+ * should return the number (zero or more) of remaining packets in the
+ * queue after the reception of the current batch, if it supports that,
+ * or -ENOTSUP otherwise.
+ *
* Returns EAGAIN immediately if no packet is ready to be received or
* another positive errno value if an error was encountered. */
- int (*rxq_recv)(struct netdev_rxq *rx, struct dp_packet **pkts,
- int *cnt);
+ int (*rxq_recv)(struct netdev_rxq *rx, struct dp_packet_batch *batch,
+ int *qfill);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when a packet is ready to be received with
/* Discards all packets waiting to be received from 'rx'. */
int (*rxq_drain)(struct netdev_rxq *rx);
+
+ /* Get a block_id from the netdev.
+ * Returns the block_id or 0 if none exists for netdev. */
+ uint32_t (*get_block_id)(struct netdev *);
};
int netdev_register_provider(const struct netdev_class *);
extern const struct netdev_class netdev_internal_class;
extern const struct netdev_class netdev_tap_class;
+#ifdef HAVE_AF_XDP
+extern const struct netdev_class netdev_afxdp_class;
+extern const struct netdev_class netdev_afxdp_nonpmd_class;
+#endif
#ifdef __cplusplus
}
#endif