struct netdev_tnl_build_header_params;
#define NETDEV_NUMA_UNSPEC OVS_NUMA_UNSPEC
/* Per-netdev TX offload feature flags.
 *
 * These are combined into a bitmask (the 'ol_flags' member of struct
 * netdev) describing which transmit offloads the device has enabled. */
enum netdev_ol_flags {
    NETDEV_TX_OFFLOAD_IPV4_CKSUM = 1 << 0,  /* IPv4 header checksum. */
    NETDEV_TX_OFFLOAD_TCP_CKSUM = 1 << 1,   /* TCP checksum. */
    NETDEV_TX_OFFLOAD_UDP_CKSUM = 1 << 2,   /* UDP checksum. */
    NETDEV_TX_OFFLOAD_SCTP_CKSUM = 1 << 3,  /* SCTP checksum. */
    NETDEV_TX_OFFLOAD_TCP_TSO = 1 << 4,     /* TCP segmentation offload. */
};
+
/* A network device (e.g. an Ethernet device).
*
* Network device implementations may read these members but should not modify
* opening this device, and therefore got assigned to the "system" class */
bool auto_classified;
    /* Bitmask of the offloading features enabled on this netdev
     * (see enum netdev_ol_flags). */
+ uint64_t ol_flags;
+
/* If this is 'true', the user explicitly specified an MTU for this
* netdev. Otherwise, Open vSwitch is allowed to override it. */
bool mtu_user_config;
*
* Minimally, the sequence number is required to change whenever
* 'netdev''s flags, features, ethernet address, or carrier changes. */
- uint64_t change_seq;
+ atomic_uint64_t change_seq;
/* A netdev provider might be unable to change some of the device's
* parameter (n_rxq, mtu) when the device is in use. In this case
/* Functions to control flow offloading. */
OVSRCU_TYPE(const struct netdev_flow_api *) flow_api;
- struct netdev_hw_info hw_info; /* offload-capable netdev info */
+ const char *dpif_type; /* Type of dpif this netdev belongs to. */
+ struct netdev_hw_info hw_info; /* Offload-capable netdev info. */
};
static inline void
netdev_change_seq_changed(const struct netdev *netdev_)
{
+ uint64_t change_seq;
struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
seq_change(connectivity_seq_get());
- netdev->change_seq++;
- if (!netdev->change_seq) {
- netdev->change_seq++;
+
+ atomic_read_relaxed(&netdev->change_seq, &change_seq);
+ change_seq++;
+ if (OVS_UNLIKELY(!change_seq)) {
+ change_seq++;
}
+ atomic_store_explicit(&netdev->change_seq, change_seq,
+ memory_order_release);
}
static inline void
void (*rxq_destruct)(struct netdev_rxq *);
void (*rxq_dealloc)(struct netdev_rxq *);
    /* Retrieves the current state of the rx queue.  'false' means that the
     * queue will not receive traffic in the short term and therefore need
     * not be polled.
     *
     * This function may be set to null if it would always return 'true'
     * anyhow. */
+ bool (*rxq_enabled)(struct netdev_rxq *);
+
/* Attempts to receive a batch of packets from 'rx'. In 'batch', the
* caller supplies 'packets' as the pointer to the beginning of an array
* of NETDEV_MAX_BURST pointers to dp_packet. If successful, the
extern const struct netdev_class netdev_internal_class;
extern const struct netdev_class netdev_tap_class;
+#ifdef HAVE_AF_XDP
+extern const struct netdev_class netdev_afxdp_class;
+extern const struct netdev_class netdev_afxdp_nonpmd_class;
+#endif
#ifdef __cplusplus
}
#endif