2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
22 #include <netinet/in.h>
30 #include <sys/ioctl.h>
31 #include <sys/types.h>
37 #include "dp-packet.h"
38 #include "openvswitch/dynamic-string.h"
39 #include "fatal-signal.h"
41 #include "openvswitch/list.h"
42 #include "netdev-dpdk.h"
43 #include "netdev-provider.h"
44 #include "netdev-vport.h"
45 #include "odp-netlink.h"
46 #include "openflow/openflow.h"
48 #include "poll-loop.h"
50 #include "openvswitch/shash.h"
54 #include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(netdev);

/* Coverage counters; the netdev_received/sent/add_router ones are bumped in
 * the corresponding entry points below.  netdev_get_stats is presumably
 * incremented by code outside this chunk — confirm against full file. */
COVERAGE_DEFINE(netdev_received);
COVERAGE_DEFINE(netdev_sent);
COVERAGE_DEFINE(netdev_add_router);
COVERAGE_DEFINE(netdev_get_stats);
/* A set of flags saved by do_update_flags() so that they can later be
 * restored (see restore_all_flags() hooked into fatal-signal below). */
struct netdev_saved_flags {
    struct netdev *netdev;
    struct ovs_list node;           /* In struct netdev's saved_flags_list. */
    enum netdev_flags saved_flags;  /* Bits that were changed. */
    enum netdev_flags saved_values; /* Values those bits were changed to. */
};

/* Protects 'netdev_shash' and the mutable members of struct netdev. */
static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;

/* All created network devices. */
static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
    = SHASH_INITIALIZER(&netdev_shash);

/* Mutual exclusion of netdev provider registration and unregistration
 * (guards insertions into and removals from 'netdev_classes'). */
static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex)
    = OVS_MUTEX_INITIALIZER;

/* Contains 'struct netdev_registered_class'es. */
static struct cmap netdev_classes = CMAP_INITIALIZER;

struct netdev_registered_class {
    struct cmap_node cmap_node; /* In 'netdev_classes', by class->type. */
    const struct netdev_class *class;

    /* Number of references: one for the class itself and one for every
     * instance of the class. */
    struct ovs_refcount refcnt;
};

/* Whether the netdev hardware flow (offload) API is enabled.
 * NOTE(review): where this is toggled is not visible in this chunk. */
static bool netdev_flow_api_enabled = false;

/* This is set pretty low because we probably won't learn anything from the
 * additional log messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

static void restore_all_flags(void *aux OVS_UNUSED);
void update_device_args(struct netdev *, const struct shash *args);
/* Returns the number of transmit queues configured on 'netdev'. */
int
netdev_n_txq(const struct netdev *netdev)
{
    return netdev->n_txq;
}
/* Returns the number of receive queues configured on 'netdev'. */
int
netdev_n_rxq(const struct netdev *netdev)
{
    return netdev->n_rxq;
}
/* Returns true if 'netdev''s class is polled by PMD threads (e.g. DPDK). */
bool
netdev_is_pmd(const struct netdev *netdev)
{
    return netdev->netdev_class->is_pmd;
}
/* Returns true if 'netdev''s class implements both tunnel header push and
 * pop, i.e. netdev_push_header()/netdev_pop_header() may be used on it. */
bool
netdev_has_tunnel_push_pop(const struct netdev *netdev)
{
    return netdev->netdev_class->push_header
           && netdev->netdev_class->pop_header;
}
/* One-time initialization: hooks flag restoration into fatal-signal handling
 * and registers the built-in netdev providers for the current platform.
 * NOTE(review): the platform #ifdef guards were reconstructed from the
 * duplicate tap/internal registrations — confirm against upstream. */
static void
netdev_initialize(void)
    OVS_EXCLUDED(netdev_mutex)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        /* Restore saved netdev flags if the process dies on a signal. */
        fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);

        netdev_vport_patch_register();

#ifdef __linux__
        netdev_register_provider(&netdev_linux_class);
        netdev_register_provider(&netdev_internal_class);
        netdev_register_provider(&netdev_tap_class);
        netdev_vport_tunnel_register();
#endif
#if defined(__FreeBSD__) || defined(__NetBSD__)
        netdev_register_provider(&netdev_tap_class);
        netdev_register_provider(&netdev_bsd_class);
#endif
#ifdef _WIN32
        netdev_register_provider(&netdev_windows_class);
        netdev_register_provider(&netdev_internal_class);
        netdev_vport_tunnel_register();
#endif
        ovsthread_once_done(&once);
    }
}
/* Performs periodic work needed by all the various kinds of netdevs.
 *
 * If your program opens any netdevs, it must call this function within its
 * main poll loop. */
void
netdev_run(void)
    OVS_EXCLUDED(netdev_mutex)
{
    netdev_initialize();

    struct netdev_registered_class *rc;
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
        if (rc->class->run) {
            rc->class->run(rc->class);
        }
    }
}
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
 *
 * If your program opens any netdevs, it must call this function within its
 * main poll loop. */
void
netdev_wait(void)
    OVS_EXCLUDED(netdev_mutex)
{
    netdev_initialize();

    struct netdev_registered_class *rc;
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
        if (rc->class->wait) {
            rc->class->wait(rc->class);
        }
    }
}
/* Returns the registered class for 'type', or NULL if none is registered.
 * RCU-safe: may be called without holding netdev_class_mutex. */
static struct netdev_registered_class *
netdev_lookup_class(const char *type)
{
    struct netdev_registered_class *rc;
    CMAP_FOR_EACH_WITH_HASH (rc, cmap_node, hash_string(type, 0),
                             &netdev_classes) {
        if (!strcmp(type, rc->class->type)) {
            return rc;
        }
    }
    return NULL;
}
/* Initializes and registers a new netdev provider.  After successful
 * registration, new netdevs of that type can be opened using netdev_open().
 *
 * Returns 0 on success; EEXIST if a provider of the same type is already
 * registered; otherwise the error from the provider's init() hook. */
int
netdev_register_provider(const struct netdev_class *new_class)
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
    int error;

    ovs_mutex_lock(&netdev_class_mutex);
    if (netdev_lookup_class(new_class->type)) {
        VLOG_WARN("attempted to register duplicate netdev provider: %s",
                  new_class->type);
        error = EEXIST;
    } else {
        error = new_class->init ? new_class->init() : 0;
        if (!error) {
            struct netdev_registered_class *rc;

            rc = xmalloc(sizeof *rc);
            cmap_insert(&netdev_classes, &rc->cmap_node,
                        hash_string(new_class->type, 0));
            rc->class = new_class;
            ovs_refcount_init(&rc->refcnt);
        } else {
            VLOG_ERR("failed to initialize %s network device class: %s",
                     new_class->type, ovs_strerror(error));
        }
    }
    ovs_mutex_unlock(&netdev_class_mutex);

    return error;
}
/* Unregisters a netdev provider.  'type' must have been previously registered
 * and not currently be in use by any netdevs.  After unregistration new
 * netdevs of that type cannot be opened using netdev_open().  (However, the
 * provider may still be accessible from other threads until the next RCU grace
 * period, so the caller must not free or re-register the same netdev_class
 * until that has passed.)
 *
 * Returns 0 on success, EAFNOSUPPORT if no such provider is registered, or
 * EBUSY if netdevs of that type still exist. */
int
netdev_unregister_provider(const char *type)
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
    struct netdev_registered_class *rc;
    int error;

    netdev_initialize();

    ovs_mutex_lock(&netdev_class_mutex);
    rc = netdev_lookup_class(type);
    if (!rc) {
        VLOG_WARN("attempted to unregister a netdev provider that is not "
                  "registered: %s", type);
        error = EAFNOSUPPORT;
    } else if (ovs_refcount_unref(&rc->refcnt) != 1) {
        /* Still referenced by instances: restore the refcount we dropped. */
        ovs_refcount_ref(&rc->refcnt);
        VLOG_WARN("attempted to unregister in use netdev provider: %s",
                  type);
        error = EBUSY;
    } else {
        cmap_remove(&netdev_classes, &rc->cmap_node,
                    hash_string(rc->class->type, 0));
        /* Defer the free past the RCU grace period. */
        ovsrcu_postpone(free, rc);
        error = 0;
    }
    ovs_mutex_unlock(&netdev_class_mutex);

    return error;
}
/* Clears 'types' and enumerates the types of all currently registered netdev
 * providers into it.  The caller must first initialize the sset. */
void
netdev_enumerate_types(struct sset *types)
    OVS_EXCLUDED(netdev_mutex)
{
    netdev_initialize();
    sset_clear(types);

    struct netdev_registered_class *rc;
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
        sset_add(types, rc->class->type);
    }
}
/* Returns the type of the vport provider whose dpif_port name is a prefix of
 * 'name', or NULL if no registered vport provider matches. */
static const char *
netdev_vport_type_from_name(const char *name)
{
    struct netdev_registered_class *rc;
    const char *type = NULL;

    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
        const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
        if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
            type = rc->class->type;
            break;
        }
    }
    return type;
}
/* Check that the network device name is not the same as any of the registered
 * vport providers' dpif_port name (dpif_port is NULL if the vport provider
 * does not define it) or the datapath internal port name (e.g. ovs-system).
 *
 * Returns true if there is a name conflict, false otherwise. */
bool
netdev_is_reserved_name(const char *name)
    OVS_EXCLUDED(netdev_mutex)
{
    netdev_initialize();

    struct netdev_registered_class *rc;
    CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
        const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
        if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
            return true;
        }
    }

    if (!strncmp(name, "ovs-", 4)) {
        /* "ovs-<datapath-type>" names are reserved for internal ports. */
        struct sset types;
        const char *type;

        sset_init(&types);
        dp_enumerate_types(&types);
        SSET_FOR_EACH (type, &types) {
            if (!strcmp(name + 4, type)) {
                sset_destroy(&types);
                return true;
            }
        }
        sset_destroy(&types);
    }

    return false;
}
/* Opens the network device named 'name' (e.g. "eth0") of the specified 'type'
 * (e.g. "system") and returns zero if successful, otherwise a positive errno
 * value.  On success, sets '*netdevp' to the new network device, otherwise to
 * null.
 *
 * Some network devices may need to be configured (with netdev_set_config())
 * before they can be used.
 *
 * Before opening rxqs or sending packets, '*netdevp' may need to be
 * reconfigured (with netdev_is_reconf_required() and netdev_reconfigure()).
 *
 * NOTE(review): several interior lines of this function were reconstructed
 * from upstream OVS — verify against the original file. */
int
netdev_open(const char *name, const char *type, struct netdev **netdevp)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev *netdev;
    int error = 0;

    if (!name[0]) {
        /* Reject empty names.  This saves the providers having to do this.  At
         * least one screwed this up: the netdev-linux "tap" implementation
         * passed the name directly to the Linux TUNSETIFF call, which treats
         * an empty string as a request to generate a unique name. */
        return EINVAL;
    }

    netdev_initialize();

    ovs_mutex_lock(&netdev_mutex);
    netdev = shash_find_data(&netdev_shash, name);

    if (netdev &&
        type && type[0] && strcmp(type, netdev->netdev_class->type)) {

        if (netdev->auto_classified) {
            /* If this device was first created without a classification type,
             * for example due to routing or tunneling code, and they keep a
             * reference, a "classified" call to open will fail.  In this case
             * we remove the classless device, and re-add it below.  We remove
             * the netdev from the shash, and change the sequence, so owners of
             * the old classless device can release/cleanup. */
            if (netdev->node) {
                shash_delete(&netdev_shash, netdev->node);
                netdev->node = NULL;
                netdev_change_seq_changed(netdev);
            }

            netdev = NULL;
        } else {
            error = EEXIST;
        }
    }

    if (!netdev && !error) {
        struct netdev_registered_class *rc;

        rc = netdev_lookup_class(type && type[0] ? type : "system");
        if (rc && ovs_refcount_try_ref_rcu(&rc->refcnt)) {
            netdev = rc->class->alloc();
            if (netdev) {
                memset(netdev, 0, sizeof *netdev);
                netdev->netdev_class = rc->class;
                netdev->auto_classified = type && type[0] ? false : true;
                netdev->name = xstrdup(name);
                netdev->change_seq = 1;
                netdev->reconfigure_seq = seq_create();
                netdev->last_reconfigure_seq =
                    seq_read(netdev->reconfigure_seq);
                netdev->node = shash_add(&netdev_shash, name, netdev);

                /* By default enable one tx and rx queue per netdev. */
                netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
                netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;

                ovs_list_init(&netdev->saved_flags_list);

                error = rc->class->construct(netdev);
                if (!error) {
                    netdev_change_seq_changed(netdev);
                } else {
                    /* Construction failed: undo everything done above. */
                    ovs_refcount_unref(&rc->refcnt);
                    seq_destroy(netdev->reconfigure_seq);
                    free(netdev->name);
                    ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
                    shash_delete(&netdev_shash, netdev->node);
                    rc->class->dealloc(netdev);
                }
            } else {
                error = ENOMEM;
            }
        } else {
            VLOG_WARN("could not create netdev %s of unknown type %s",
                      name, type);
            error = EAFNOSUPPORT;
        }
    }

    if (!error) {
        netdev->ref_cnt++;
        *netdevp = netdev;
    } else {
        *netdevp = NULL;
    }
    ovs_mutex_unlock(&netdev_mutex);

    return error;
}
/* Returns a reference to 'netdev_' for the caller to own.  Returns null if
 * 'netdev_' is null. */
struct netdev *
netdev_ref(const struct netdev *netdev_)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);

    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        ovs_assert(netdev->ref_cnt > 0);
        netdev->ref_cnt++;
        ovs_mutex_unlock(&netdev_mutex);
    }
    return netdev;
}
/* Reconfigures the device 'netdev' with 'args'.  'args' may be empty
 * or NULL if none are needed.
 *
 * On error, if 'errp' is nonnull, '*errp' is set to an error message that the
 * caller must free(); the provider's verbose error is preferred over the
 * generic one. */
int
netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
    OVS_EXCLUDED(netdev_mutex)
{
    if (netdev->netdev_class->set_config) {
        const struct smap no_args = SMAP_INITIALIZER(&no_args);
        char *verbose_error = NULL;
        int error;

        error = netdev->netdev_class->set_config(netdev,
                                                 args ? args : &no_args,
                                                 &verbose_error);
        if (error) {
            VLOG_WARN_BUF(verbose_error ? NULL : errp,
                          "%s: could not set configuration (%s)",
                          netdev_get_name(netdev), ovs_strerror(error));
            if (verbose_error) {
                if (errp) {
                    *errp = verbose_error;
                } else {
                    free(verbose_error);
                }
            }
        }
        return error;
    } else if (args && !smap_is_empty(args)) {
        VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
                      netdev_get_name(netdev));
    }
    return 0;
}
/* Returns the current configuration for 'netdev' in 'args'.  The caller must
 * have already initialized 'args' with smap_init().  Returns 0 on success, in
 * which case 'args' will be filled with 'netdev''s configuration.  On failure
 * returns a positive errno value, in which case 'args' will be empty.
 *
 * The caller owns 'args' and its contents and must eventually free them with
 * smap_destroy(). */
int
netdev_get_config(const struct netdev *netdev, struct smap *args)
    OVS_EXCLUDED(netdev_mutex)
{
    int error;

    smap_clear(args);
    if (netdev->netdev_class->get_config) {
        error = netdev->netdev_class->get_config(netdev, args);
        if (error) {
            smap_clear(args);
        }
    } else {
        error = 0;
    }

    return error;
}
/* Returns 'netdev''s tunnel configuration, or NULL if 'netdev' is not a
 * tunnel (i.e. its class has no 'get_tunnel_config' hook). */
const struct netdev_tunnel_config *
netdev_get_tunnel_config(const struct netdev *netdev)
    OVS_EXCLUDED(netdev_mutex)
{
    if (netdev->netdev_class->get_tunnel_config) {
        return netdev->netdev_class->get_tunnel_config(netdev);
    } else {
        return NULL;
    }
}
/* Returns the id of the numa node the 'netdev' is on.  If the function
 * is not implemented, returns NETDEV_NUMA_UNSPEC. */
int
netdev_get_numa_id(const struct netdev *netdev)
{
    if (netdev->netdev_class->get_numa_id) {
        return netdev->netdev_class->get_numa_id(netdev);
    } else {
        return NETDEV_NUMA_UNSPEC;
    }
}
/* Drops one reference to 'dev'; destroys it when the count reaches zero.
 * Called with netdev_mutex held; releases it on every path. */
static void
netdev_unref(struct netdev *dev)
    OVS_RELEASES(netdev_mutex)
{
    ovs_assert(dev->ref_cnt);
    if (!--dev->ref_cnt) {
        const struct netdev_class *class = dev->netdev_class;
        struct netdev_registered_class *rc;

        dev->netdev_class->destruct(dev);

        if (dev->node) {
            shash_delete(&netdev_shash, dev->node);
        }
        free(dev->name);
        seq_destroy(dev->reconfigure_seq);
        dev->netdev_class->dealloc(dev);
        ovs_mutex_unlock(&netdev_mutex);

        /* Drop the reference the instance held on its class. */
        rc = netdev_lookup_class(class->type);
        ovs_refcount_unref(&rc->refcnt);
    } else {
        ovs_mutex_unlock(&netdev_mutex);
    }
}
/* Closes and destroys 'netdev'.  Null is a no-op. */
void
netdev_close(struct netdev *netdev)
    OVS_EXCLUDED(netdev_mutex)
{
    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        netdev_unref(netdev);
    }
}
/* Removes 'netdev' from the global shash and unrefs 'netdev'.
 *
 * This allows handler and revalidator threads to still retain references
 * to this netdev while the main thread changes interface configuration.
 *
 * This function should only be called by the main thread when closing
 * netdevs during user configuration changes.  Otherwise, netdev_close should
 * be used to close netdevs. */
void
netdev_remove(struct netdev *netdev)
{
    ovs_mutex_lock(&netdev_mutex);
    if (netdev->node) {
        shash_delete(&netdev_shash, netdev->node);
        netdev->node = NULL;
        netdev_change_seq_changed(netdev);
    }
    netdev_unref(netdev);
}
/* Parses 'netdev_name_', which is of the form [type@]name into its component
 * pieces.  'name' and 'type' must be freed by the caller.  When no "type@"
 * prefix is present, '*type' defaults to "system". */
void
netdev_parse_name(const char *netdev_name_, char **name, char **type)
{
    char *netdev_name = xstrdup(netdev_name_);
    char *separator;

    separator = strchr(netdev_name, '@');
    if (separator) {
        *separator = '\0';
        *type = netdev_name;
        *name = xstrdup(separator + 1);
    } else {
        *name = netdev_name;
        *type = xstrdup("system");
    }
}
/* Attempts to open a netdev_rxq handle for obtaining packets received on
 * 'netdev'.  On success, returns 0 and stores a nonnull 'netdev_rxq *' into
 * '*rxp'.  On failure, returns a positive errno value and stores NULL into
 * '*rxp'.
 *
 * Some kinds of network devices might not support receiving packets.  This
 * function returns EOPNOTSUPP in that case. */
int
netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
    OVS_EXCLUDED(netdev_mutex)
{
    int error;

    if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
        struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
        if (rx) {
            rx->netdev = netdev;
            rx->queue_id = id;
            error = netdev->netdev_class->rxq_construct(rx);
            if (!error) {
                netdev_ref(netdev);
                *rxp = rx;
                return 0;
            }
            netdev->netdev_class->rxq_dealloc(rx);
        } else {
            error = ENOMEM;
        }
    } else {
        error = EOPNOTSUPP;
    }

    *rxp = NULL;
    return error;
}
/* Closes 'rx' and releases its reference on the underlying netdev.
 * Null is a no-op. */
void
netdev_rxq_close(struct netdev_rxq *rx)
    OVS_EXCLUDED(netdev_mutex)
{
    if (rx) {
        struct netdev *netdev = rx->netdev;
        netdev->netdev_class->rxq_destruct(rx);
        netdev->netdev_class->rxq_dealloc(rx);
        netdev_close(netdev);
    }
}
/* Attempts to receive a batch of packets from 'rx'.  'batch' should point to
 * the beginning of an array of NETDEV_MAX_BURST pointers to dp_packet.  If
 * successful, this function stores pointers to up to NETDEV_MAX_BURST
 * dp_packets into the array, transferring ownership of the packets to the
 * caller, stores the number of received packets in 'batch->count', and returns
 * 0.
 *
 * The implementation does not necessarily initialize any non-data members of
 * 'batch'.  That is, the caller must initialize layer pointers and metadata
 * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
 *
 * Returns EAGAIN immediately if no packet is ready to be received or another
 * positive errno value if an error was encountered. */
int
netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch)
{
    int retval;

    retval = rx->netdev->netdev_class->rxq_recv(rx, batch);
    if (!retval) {
        COVERAGE_INC(netdev_received);
    } else {
        /* On error the batch carries no packets. */
        batch->count = 0;
    }
    return retval;
}
/* Arranges for poll_block() to wake up when a packet is ready to be received
 * on 'rx'. */
void
netdev_rxq_wait(struct netdev_rxq *rx)
{
    rx->netdev->netdev_class->rxq_wait(rx);
}
718 /* Discards any packets ready to be received on 'rx'. */
720 netdev_rxq_drain(struct netdev_rxq
*rx
)
722 return (rx
->netdev
->netdev_class
->rxq_drain
723 ? rx
->netdev
->netdev_class
->rxq_drain(rx
)
/* Configures the number of tx queues of 'netdev'.  Returns 0 if successful,
 * otherwise a positive errno value.
 *
 * 'n_txq' specifies the exact number of transmission queues to create.
 *
 * The change might not be effective immediately.  The caller must check if a
 * reconfiguration is required with netdev_is_reconf_required() and eventually
 * call netdev_reconfigure() before using the new queues.
 *
 * On error, the tx queue configuration is unchanged. */
int
netdev_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    int error;

    /* MAX(n_txq, 1): never ask the provider for zero queues. */
    error = (netdev->netdev_class->set_tx_multiq
             ? netdev->netdev_class->set_tx_multiq(netdev, MAX(n_txq, 1))
             : EOPNOTSUPP);

    if (error && error != EOPNOTSUPP) {
        VLOG_DBG_RL(&rl, "failed to set tx queue for network device %s:"
                    "%s", netdev_get_name(netdev), ovs_strerror(error));
    }

    return error;
}
755 netdev_get_pt_mode(const struct netdev
*netdev
)
757 return (netdev
->netdev_class
->get_pt_mode
758 ? netdev
->netdev_class
->get_pt_mode(netdev
)
759 : NETDEV_PT_LEGACY_L2
);
/* Sends 'batch' on 'netdev'.  Returns 0 if successful (for every packet),
 * otherwise a positive errno value.  Returns EAGAIN without blocking if
 * at least one the packets cannot be queued immediately.  Returns EMSGSIZE
 * if a partial packet was transmitted or if a packet is too big or too small
 * to transmit on the device.
 *
 * The caller must make sure that 'netdev' supports sending by making sure that
 * 'netdev_n_txq(netdev)' returns >= 1.
 *
 * If the function returns a non-zero value, some of the packets might have
 * been sent anyway.
 *
 * If 'may_steal' is false, the caller retains ownership of all the packets.
 * If 'may_steal' is true, the caller transfers ownership of all the packets
 * to the network device, regardless of success.
 *
 * If 'concurrent_txq' is true, the caller may perform concurrent calls
 * to netdev_send() with the same 'qid'.  The netdev provider is responsible
 * for making sure that these concurrent calls do not create a race condition
 * by using locking or other synchronization if required.
 *
 * The network device is expected to maintain one or more packet
 * transmission queues, so that the caller does not ordinarily have to
 * do additional queuing of packets.  'qid' specifies the queue to use
 * and can be ignored if the implementation does not support multiple
 * queues.
 *
 * Some network devices may not implement support for this function.  In such
 * cases this function will always return EOPNOTSUPP. */
int
netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
            bool may_steal, bool concurrent_txq)
{
    int error = netdev->netdev_class->send(netdev, qid, batch, may_steal,
                                           concurrent_txq);
    if (!error) {
        COVERAGE_INC(netdev_sent);
        if (!may_steal) {
            /* Caller keeps the batch: reset cutlen for its further use. */
            dp_packet_batch_reset_cutlen(batch);
        }
    }
    return error;
}
/* Pop tunnel header, build tunnel metadata and resize 'batch->packets'
 * for further processing.
 *
 * The caller must make sure that 'netdev' support this operation by checking
 * that netdev_has_tunnel_push_pop() returns true. */
void
netdev_pop_header(struct netdev *netdev, struct dp_packet_batch *batch)
{
    struct dp_packet *packet;
    size_t i, size = dp_packet_batch_size(batch);

    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
        packet = netdev->netdev_class->pop_header(packet);
        if (packet) {
            /* Reset the checksum offload flags if present, to avoid wrong
             * interpretation in the further packet processing when
             * recirculated. */
            reset_dp_packet_checksum_ol_flags(packet);
            dp_packet_batch_refill(batch, packet, i);
        }
    }
}
/* Fills in 'params' for a subsequent netdev_build_header() call from the
 * tunnel flow, local source address and resolved Ethernet addresses.
 * NOTE(review): the middle field assignments (dmac/smac/s_ip) were
 * reconstructed — confirm field names against netdev-provider.h. */
void
netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params *params,
                                    const struct flow *tnl_flow,
                                    const struct in6_addr *src,
                                    struct eth_addr dmac,
                                    struct eth_addr smac)
{
    params->flow = tnl_flow;
    params->dmac = dmac;
    params->smac = smac;
    params->s_ip = src;
    /* IPv4-mapped source means the tunnel is really IPv4. */
    params->is_ipv6 = !IN6_IS_ADDR_V4MAPPED(src);
}
/* Builds the tunnel header for 'netdev' into 'data' using 'params'.
 * Returns 0 on success, EOPNOTSUPP if the provider has no 'build_header'. */
int netdev_build_header(const struct netdev *netdev,
                        struct ovs_action_push_tnl *data,
                        const struct netdev_tnl_build_header_params *params)
{
    if (netdev->netdev_class->build_header) {
        return netdev->netdev_class->build_header(netdev, data, params);
    }
    return EOPNOTSUPP;
}
/* Push tunnel header (reading from tunnel metadata) and resize
 * 'batch->packets' for further processing.
 *
 * The caller must make sure that 'netdev' support this operation by checking
 * that netdev_has_tunnel_push_pop() returns true. */
void
netdev_push_header(const struct netdev *netdev,
                   struct dp_packet_batch *batch,
                   const struct ovs_action_push_tnl *data)
{
    struct dp_packet *packet;
    DP_PACKET_BATCH_FOR_EACH (packet, batch) {
        netdev->netdev_class->push_header(packet, data);
        /* Re-point metadata at the tunnel's output port. */
        pkt_metadata_init(&packet->md, data->out_port);
    }
}
872 /* Registers with the poll loop to wake up from the next call to poll_block()
873 * when the packet transmission queue has sufficient room to transmit a packet
874 * with netdev_send().
876 * The network device is expected to maintain one or more packet
877 * transmission queues, so that the caller does not ordinarily have to
878 * do additional queuing of packets. 'qid' specifies the queue to use
879 * and can be ignored if the implementation does not support multiple
882 netdev_send_wait(struct netdev
*netdev
, int qid
)
884 if (netdev
->netdev_class
->send_wait
) {
885 netdev
->netdev_class
->send_wait(netdev
, qid
);
/* Attempts to set 'netdev''s MAC address to 'mac'.  Returns 0 if successful,
 * otherwise a positive errno value. */
int
netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    return netdev->netdev_class->set_etheraddr(netdev, mac);
}
/* Retrieves 'netdev''s MAC address.  If successful, returns 0 and copies the
 * the MAC address into 'mac'.  On failure, returns a positive errno value and
 * clears 'mac' to all-zeros. */
int
netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    return netdev->netdev_class->get_etheraddr(netdev, mac);
}
/* Returns the name of the network device that 'netdev' represents,
 * e.g. "eth0".  The caller must not modify or free the returned string. */
const char *
netdev_get_name(const struct netdev *netdev)
{
    return netdev->name;
}
/* Retrieves the MTU of 'netdev'.  The MTU is the maximum size of transmitted
 * (and received) packets, in bytes, not including the hardware header; thus,
 * this is typically 1500 bytes for Ethernet devices.
 *
 * If successful, returns 0 and stores the MTU size in '*mtup'.  Returns
 * EOPNOTSUPP if 'netdev' does not have an MTU (as e.g. some tunnels do not).
 * On other failure, returns a positive errno value.  On failure, sets '*mtup'
 * to 0. */
int
netdev_get_mtu(const struct netdev *netdev, int *mtup)
{
    const struct netdev_class *class = netdev->netdev_class;
    int error;

    error = class->get_mtu ? class->get_mtu(netdev, mtup) : EOPNOTSUPP;
    if (error) {
        *mtup = 0;
        if (error != EOPNOTSUPP) {
            VLOG_DBG_RL(&rl, "failed to retrieve MTU for network device %s: "
                        "%s", netdev_get_name(netdev), ovs_strerror(error));
        }
    }
    return error;
}
/* Sets the MTU of 'netdev'.  The MTU is the maximum size of transmitted
 * (and received) packets, in bytes.
 *
 * If successful, returns 0.  Returns EOPNOTSUPP if 'netdev' does not have an
 * MTU (as e.g. some tunnels do not).  On other failure, returns a positive
 * errno value. */
int
netdev_set_mtu(struct netdev *netdev, int mtu)
{
    const struct netdev_class *class = netdev->netdev_class;
    int error;

    error = class->set_mtu ? class->set_mtu(netdev, mtu) : EOPNOTSUPP;
    if (error && error != EOPNOTSUPP) {
        VLOG_DBG_RL(&rl, "failed to set MTU for network device %s: %s",
                    netdev_get_name(netdev), ovs_strerror(error));
    }

    return error;
}
/* If 'user_config' is true, the user wants to control 'netdev''s MTU and we
 * should not override it.  If 'user_config' is false, we may adjust
 * 'netdev''s MTU (e.g., if 'netdev' is internal). */
void
netdev_mtu_user_config(struct netdev *netdev, bool user_config)
{
    if (netdev->mtu_user_config != user_config) {
        /* Bump the change sequence so observers notice the policy change. */
        netdev_change_seq_changed(netdev);
        netdev->mtu_user_config = user_config;
    }
}
/* Returns 'true' if the user explicitly specified an MTU value for 'netdev'.
 * Otherwise, returns 'false', in which case we are allowed to adjust the
 * MTU. */
bool
netdev_mtu_is_user_config(struct netdev *netdev)
{
    return netdev->mtu_user_config;
}
/* Returns the ifindex of 'netdev', if successful, as a positive number.  On
 * failure, returns a negative errno value.
 *
 * The desired semantics of the ifindex value are a combination of those
 * specified by POSIX for if_nametoindex() and by SNMP for ifIndex.  An ifindex
 * value should be unique within a host and remain stable at least until
 * reboot.  SNMP says an ifindex "ranges between 1 and the value of ifNumber"
 * but many systems do not follow this rule anyhow.
 *
 * Some network devices may not implement support for this function.  In such
 * cases this function will always return -EOPNOTSUPP. */
int
netdev_get_ifindex(const struct netdev *netdev)
{
    int (*get_ifindex)(const struct netdev *);

    get_ifindex = netdev->netdev_class->get_ifindex;

    return get_ifindex ? get_ifindex(netdev) : -EOPNOTSUPP;
}
/* Stores the features supported by 'netdev' into each of '*current',
 * '*advertised', '*supported', and '*peer' that are non-null.  Each value is a
 * bitmap of "enum ofp_port_features" bits, in host byte order.  Returns 0 if
 * successful, otherwise a positive errno value.  On failure, all of the
 * passed-in values are set to 0.
 *
 * Some network devices may not implement support for this function.  In such
 * cases this function will always return EOPNOTSUPP. */
int
netdev_get_features(const struct netdev *netdev,
                    enum netdev_features *current,
                    enum netdev_features *advertised,
                    enum netdev_features *supported,
                    enum netdev_features *peer)
{
    int (*get_features)(const struct netdev *netdev,
                        enum netdev_features *current,
                        enum netdev_features *advertised,
                        enum netdev_features *supported,
                        enum netdev_features *peer);
    enum netdev_features dummy[4];
    int error;

    /* Point null outputs at scratch slots so the provider never sees NULL. */
    if (!current) {
        current = &dummy[0];
    }
    if (!advertised) {
        advertised = &dummy[1];
    }
    if (!supported) {
        supported = &dummy[2];
    }
    if (!peer) {
        peer = &dummy[3];
    }

    get_features = netdev->netdev_class->get_features;
    error = get_features
                ? get_features(netdev, current, advertised, supported,
                               peer)
                : EOPNOTSUPP;
    if (error) {
        *current = *advertised = *supported = *peer = 0;
    }
    return error;
}
/* Returns the maximum speed of a network connection that has the NETDEV_F_*
 * bits in 'features', in bits per second.  If no bits that indicate a speed
 * are set in 'features', returns 'default_bps'. */
uint64_t
netdev_features_to_bps(enum netdev_features features,
                       uint64_t default_bps)
{
    /* Group half/full-duplex bits of each speed for the checks below. */
    enum {
        F_1000000MB = NETDEV_F_1TB_FD,
        F_100000MB = NETDEV_F_100GB_FD,
        F_40000MB = NETDEV_F_40GB_FD,
        F_10000MB = NETDEV_F_10GB_FD,
        F_1000MB = NETDEV_F_1GB_HD | NETDEV_F_1GB_FD,
        F_100MB = NETDEV_F_100MB_HD | NETDEV_F_100MB_FD,
        F_10MB = NETDEV_F_10MB_HD | NETDEV_F_10MB_FD
    };

    /* Highest speed bit wins. */
    return (  features & F_1000000MB ? UINT64_C(1000000000000)
            : features & F_100000MB  ? UINT64_C(100000000000)
            : features & F_40000MB   ? UINT64_C(40000000000)
            : features & F_10000MB   ? UINT64_C(10000000000)
            : features & F_1000MB    ? UINT64_C(1000000000)
            : features & F_100MB     ? UINT64_C(100000000)
            : features & F_10MB      ? UINT64_C(10000000)
                                     : default_bps);
}
1077 /* Returns true if any of the NETDEV_F_* bits that indicate a full-duplex link
1078 * are set in 'features', otherwise false. */
1080 netdev_features_is_full_duplex(enum netdev_features features
)
1082 return (features
& (NETDEV_F_10MB_FD
| NETDEV_F_100MB_FD
| NETDEV_F_1GB_FD
1083 | NETDEV_F_10GB_FD
| NETDEV_F_40GB_FD
1084 | NETDEV_F_100GB_FD
| NETDEV_F_1TB_FD
)) != 0;
/* Set the features advertised by 'netdev' to 'advertise'.  Returns 0 if
 * successful, otherwise a positive errno value. */
int
netdev_set_advertisements(struct netdev *netdev,
                          enum netdev_features advertise)
{
    return (netdev->netdev_class->set_advertisements
            ? netdev->netdev_class->set_advertisements(
                    netdev, advertise)
            : EOPNOTSUPP);
}
/* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask.  If
 * 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared.  Returns a
 * positive errno value on failure, EOPNOTSUPP if unimplemented. */
int
netdev_set_in4(struct netdev *netdev, struct in_addr addr, struct in_addr mask)
{
    return (netdev->netdev_class->set_in4
            ? netdev->netdev_class->set_in4(netdev, addr, mask)
            : EOPNOTSUPP);
}
/* Obtains an IPv4 address from device name and saves the address in
 * 'in4'.  Returns 0 if successful, otherwise a positive errno value.
 * NOTE(review): the error/cleanup paths here were reconstructed — verify
 * against the original file. */
int
netdev_get_in4_by_name(const char *device_name, struct in_addr *in4)
{
    struct in6_addr *mask, *addr6;
    int err, n_in6, i;
    struct netdev *dev;

    err = netdev_open(device_name, NULL, &dev);
    if (err) {
        return err;
    }

    err = netdev_get_addr_list(dev, &addr6, &mask, &n_in6);
    if (err) {
        goto out;
    }

    for (i = 0; i < n_in6; i++) {
        if (IN6_IS_ADDR_V4MAPPED(&addr6[i])) {
            /* First IPv4-mapped address wins. */
            in4->s_addr = in6_addr_get_mapped_ipv4(&addr6[i]);
            goto out;
        }
    }
    err = -ENOENT;
out:
    free(addr6);
    free(mask);
    netdev_close(dev);
    return err;
}
/* Adds 'router' as a default IP gateway for the TCP/IP stack that corresponds
 * to 'netdev'.  Returns 0 on success, EOPNOTSUPP if unimplemented. */
int
netdev_add_router(struct netdev *netdev, struct in_addr router)
{
    COVERAGE_INC(netdev_add_router);
    return (netdev->netdev_class->add_router
            ? netdev->netdev_class->add_router(netdev, router)
            : EOPNOTSUPP);
}
/* Looks up the next hop for 'host' for the TCP/IP stack that corresponds to
 * 'netdev'.  If a route cannot not be determined, sets '*next_hop' to 0,
 * '*netdev_name' to null, and returns a positive errno value.  Otherwise, if a
 * next hop is found, stores the next hop gateway's address (0 if 'host' is on
 * a directly connected network) in '*next_hop' and a copy of the name of the
 * device to reach 'host' in '*netdev_name', and returns 0.  The caller is
 * responsible for freeing '*netdev_name' (by calling free()). */
int
netdev_get_next_hop(const struct netdev *netdev,
                    const struct in_addr *host, struct in_addr *next_hop,
                    char **netdev_name)
{
    int error = (netdev->netdev_class->get_next_hop
                 ? netdev->netdev_class->get_next_hop(
                        host, next_hop, netdev_name)
                 : EOPNOTSUPP);
    if (error) {
        next_hop->s_addr = 0;
        *netdev_name = NULL;
    }
    return error;
}
/* Populates 'smap' with status information.
 *
 * Populates 'smap' with 'netdev' specific status information.  This
 * information may be used to populate the status column of the Interface table
 * as defined in ovs-vswitchd.conf.db(5). */
int
netdev_get_status(const struct netdev *netdev, struct smap *smap)
{
    return (netdev->netdev_class->get_status
            ? netdev->netdev_class->get_status(netdev, smap)
            : EOPNOTSUPP);
}
1192 /* Returns all assigned IP address to 'netdev' and returns 0.
1193 * API allocates array of address and masks and set it to
1194 * '*addr' and '*mask'.
1195 * Otherwise, returns a positive errno value and sets '*addr', '*mask
1196 * and '*n_addr' to NULL.
1198 * The following error values have well-defined meanings:
1200 * - EADDRNOTAVAIL: 'netdev' has no assigned IPv6 address.
1202 * - EOPNOTSUPP: No IPv6 network stack attached to 'netdev'.
1204 * 'addr' may be null, in which case the address itself is not reported. */
1206 netdev_get_addr_list(const struct netdev
*netdev
, struct in6_addr
**addr
,
1207 struct in6_addr
**mask
, int *n_addr
)
1211 error
= (netdev
->netdev_class
->get_addr_list
1212 ? netdev
->netdev_class
->get_addr_list(netdev
, addr
, mask
, n_addr
): EOPNOTSUPP
);
1213 if (error
&& addr
) {
1222 /* On 'netdev', turns off the flags in 'off' and then turns on the flags in
1223 * 'on'. Returns 0 if successful, otherwise a positive errno value. */
1225 do_update_flags(struct netdev
*netdev
, enum netdev_flags off
,
1226 enum netdev_flags on
, enum netdev_flags
*old_flagsp
,
1227 struct netdev_saved_flags
**sfp
)
1228 OVS_EXCLUDED(netdev_mutex
)
1230 struct netdev_saved_flags
*sf
= NULL
;
1231 enum netdev_flags old_flags
;
1234 error
= netdev
->netdev_class
->update_flags(netdev
, off
& ~on
, on
,
1237 VLOG_WARN_RL(&rl
, "failed to %s flags for network device %s: %s",
1238 off
|| on
? "set" : "get", netdev_get_name(netdev
),
1239 ovs_strerror(error
));
1241 } else if ((off
|| on
) && sfp
) {
1242 enum netdev_flags new_flags
= (old_flags
& ~off
) | on
;
1243 enum netdev_flags changed_flags
= old_flags
^ new_flags
;
1244 if (changed_flags
) {
1245 ovs_mutex_lock(&netdev_mutex
);
1246 *sfp
= sf
= xmalloc(sizeof *sf
);
1247 sf
->netdev
= netdev
;
1248 ovs_list_push_front(&netdev
->saved_flags_list
, &sf
->node
);
1249 sf
->saved_flags
= changed_flags
;
1250 sf
->saved_values
= changed_flags
& new_flags
;
1253 ovs_mutex_unlock(&netdev_mutex
);
1258 *old_flagsp
= old_flags
;
1267 /* Obtains the current flags for 'netdev' and stores them into '*flagsp'.
1268 * Returns 0 if successful, otherwise a positive errno value. On failure,
1269 * stores 0 into '*flagsp'. */
1271 netdev_get_flags(const struct netdev
*netdev_
, enum netdev_flags
*flagsp
)
1273 struct netdev
*netdev
= CONST_CAST(struct netdev
*, netdev_
);
1274 return do_update_flags(netdev
, 0, 0, flagsp
, NULL
);
1277 /* Sets the flags for 'netdev' to 'flags'.
1278 * Returns 0 if successful, otherwise a positive errno value. */
1280 netdev_set_flags(struct netdev
*netdev
, enum netdev_flags flags
,
1281 struct netdev_saved_flags
**sfp
)
1283 return do_update_flags(netdev
, -1, flags
, NULL
, sfp
);
1286 /* Turns on the specified 'flags' on 'netdev':
1288 * - On success, returns 0. If 'sfp' is nonnull, sets '*sfp' to a newly
1289 * allocated 'struct netdev_saved_flags *' that may be passed to
1290 * netdev_restore_flags() to restore the original values of 'flags' on
1291 * 'netdev' (this will happen automatically at program termination if
1292 * netdev_restore_flags() is never called) , or to NULL if no flags were
1295 * - On failure, returns a positive errno value. If 'sfp' is nonnull, sets
1296 * '*sfp' to NULL. */
1298 netdev_turn_flags_on(struct netdev
*netdev
, enum netdev_flags flags
,
1299 struct netdev_saved_flags
**sfp
)
1301 return do_update_flags(netdev
, 0, flags
, NULL
, sfp
);
1304 /* Turns off the specified 'flags' on 'netdev'. See netdev_turn_flags_on() for
1305 * details of the interface. */
1307 netdev_turn_flags_off(struct netdev
*netdev
, enum netdev_flags flags
,
1308 struct netdev_saved_flags
**sfp
)
1310 return do_update_flags(netdev
, flags
, 0, NULL
, sfp
);
1313 /* Restores the flags that were saved in 'sf', and destroys 'sf'.
1314 * Does nothing if 'sf' is NULL. */
1316 netdev_restore_flags(struct netdev_saved_flags
*sf
)
1317 OVS_EXCLUDED(netdev_mutex
)
1320 struct netdev
*netdev
= sf
->netdev
;
1321 enum netdev_flags old_flags
;
1323 netdev
->netdev_class
->update_flags(netdev
,
1324 sf
->saved_flags
& sf
->saved_values
,
1325 sf
->saved_flags
& ~sf
->saved_values
,
1328 ovs_mutex_lock(&netdev_mutex
);
1329 ovs_list_remove(&sf
->node
);
1331 netdev_unref(netdev
);
1335 /* Looks up the ARP table entry for 'ip' on 'netdev'. If one exists and can be
1336 * successfully retrieved, it stores the corresponding MAC address in 'mac' and
1337 * returns 0. Otherwise, it returns a positive errno value; in particular,
1338 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
1340 netdev_arp_lookup(const struct netdev
*netdev
,
1341 ovs_be32 ip
, struct eth_addr
*mac
)
1343 int error
= (netdev
->netdev_class
->arp_lookup
1344 ? netdev
->netdev_class
->arp_lookup(netdev
, ip
, mac
)
1347 *mac
= eth_addr_zero
;
1352 /* Returns true if carrier is active (link light is on) on 'netdev'. */
1354 netdev_get_carrier(const struct netdev
*netdev
)
1357 enum netdev_flags flags
;
1360 netdev_get_flags(netdev
, &flags
);
1361 if (!(flags
& NETDEV_UP
)) {
1365 if (!netdev
->netdev_class
->get_carrier
) {
1369 error
= netdev
->netdev_class
->get_carrier(netdev
, &carrier
);
1371 VLOG_DBG("%s: failed to get network device carrier status, assuming "
1372 "down: %s", netdev_get_name(netdev
), ovs_strerror(error
));
1379 /* Returns the number of times 'netdev''s carrier has changed. */
1381 netdev_get_carrier_resets(const struct netdev
*netdev
)
1383 return (netdev
->netdev_class
->get_carrier_resets
1384 ? netdev
->netdev_class
->get_carrier_resets(netdev
)
1388 /* Attempts to force netdev_get_carrier() to poll 'netdev''s MII registers for
1389 * link status instead of checking 'netdev''s carrier. 'netdev''s MII
1390 * registers will be polled once ever 'interval' milliseconds. If 'netdev'
1391 * does not support MII, another method may be used as a fallback. If
1392 * 'interval' is less than or equal to zero, reverts netdev_get_carrier() to
1393 * its normal behavior.
1395 * Returns 0 if successful, otherwise a positive errno value. */
1397 netdev_set_miimon_interval(struct netdev
*netdev
, long long int interval
)
1399 return (netdev
->netdev_class
->set_miimon_interval
1400 ? netdev
->netdev_class
->set_miimon_interval(netdev
, interval
)
1404 /* Retrieves current device stats for 'netdev'. */
1406 netdev_get_stats(const struct netdev
*netdev
, struct netdev_stats
*stats
)
1410 /* Statistics are initialized before passing it to particular device
1411 * implementation so all values are filtered out by default. */
1412 memset(stats
, 0xFF, sizeof *stats
);
1414 COVERAGE_INC(netdev_get_stats
);
1415 error
= (netdev
->netdev_class
->get_stats
1416 ? netdev
->netdev_class
->get_stats(netdev
, stats
)
1419 /* In case of error all statistics are filtered out */
1420 memset(stats
, 0xff, sizeof *stats
);
1425 /* Attempts to set input rate limiting (policing) policy, such that up to
1426 * 'kbits_rate' kbps of traffic is accepted, with a maximum accumulative burst
1427 * size of 'kbits' kb. */
1429 netdev_set_policing(struct netdev
*netdev
, uint32_t kbits_rate
,
1430 uint32_t kbits_burst
)
1432 return (netdev
->netdev_class
->set_policing
1433 ? netdev
->netdev_class
->set_policing(netdev
,
1434 kbits_rate
, kbits_burst
)
1438 /* Adds to 'types' all of the forms of QoS supported by 'netdev', or leaves it
1439 * empty if 'netdev' does not support QoS. Any names added to 'types' should
1440 * be documented as valid for the "type" column in the "QoS" table in
1441 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1443 * Every network device supports disabling QoS with a type of "", but this type
1444 * will not be added to 'types'.
1446 * The caller must initialize 'types' (e.g. with sset_init()) before calling
1447 * this function. The caller is responsible for destroying 'types' (e.g. with
1448 * sset_destroy()) when it is no longer needed.
1450 * Returns 0 if successful, otherwise a positive errno value. */
1452 netdev_get_qos_types(const struct netdev
*netdev
, struct sset
*types
)
1454 const struct netdev_class
*class = netdev
->netdev_class
;
1455 return (class->get_qos_types
1456 ? class->get_qos_types(netdev
, types
)
1460 /* Queries 'netdev' for its capabilities regarding the specified 'type' of QoS,
1461 * which should be "" or one of the types returned by netdev_get_qos_types()
1462 * for 'netdev'. Returns 0 if successful, otherwise a positive errno value.
1463 * On success, initializes 'caps' with the QoS capabilities; on failure, clears
1464 * 'caps' to all zeros. */
1466 netdev_get_qos_capabilities(const struct netdev
*netdev
, const char *type
,
1467 struct netdev_qos_capabilities
*caps
)
1469 const struct netdev_class
*class = netdev
->netdev_class
;
1472 int retval
= (class->get_qos_capabilities
1473 ? class->get_qos_capabilities(netdev
, type
, caps
)
1476 memset(caps
, 0, sizeof *caps
);
1480 /* Every netdev supports turning off QoS. */
1481 memset(caps
, 0, sizeof *caps
);
1486 /* Obtains the number of queues supported by 'netdev' for the specified 'type'
1487 * of QoS. Returns 0 if successful, otherwise a positive errno value. Stores
1488 * the number of queues (zero on failure) in '*n_queuesp'.
1490 * This is just a simple wrapper around netdev_get_qos_capabilities(). */
1492 netdev_get_n_queues(const struct netdev
*netdev
,
1493 const char *type
, unsigned int *n_queuesp
)
1495 struct netdev_qos_capabilities caps
;
1498 retval
= netdev_get_qos_capabilities(netdev
, type
, &caps
);
1499 *n_queuesp
= caps
.n_queues
;
1503 /* Queries 'netdev' about its currently configured form of QoS. If successful,
1504 * stores the name of the current form of QoS into '*typep', stores any details
1505 * of configuration as string key-value pairs in 'details', and returns 0. On
1506 * failure, sets '*typep' to NULL and returns a positive errno value.
1508 * A '*typep' of "" indicates that QoS is currently disabled on 'netdev'.
1510 * The caller must initialize 'details' as an empty smap (e.g. with
1511 * smap_init()) before calling this function. The caller must free 'details'
1512 * when it is no longer needed (e.g. with smap_destroy()).
1514 * The caller must not modify or free '*typep'.
1516 * '*typep' will be one of the types returned by netdev_get_qos_types() for
1517 * 'netdev'. The contents of 'details' should be documented as valid for
1518 * '*typep' in the "other_config" column in the "QoS" table in
1519 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)). */
1521 netdev_get_qos(const struct netdev
*netdev
,
1522 const char **typep
, struct smap
*details
)
1524 const struct netdev_class
*class = netdev
->netdev_class
;
1527 if (class->get_qos
) {
1528 retval
= class->get_qos(netdev
, typep
, details
);
1531 smap_clear(details
);
1535 /* 'netdev' doesn't support QoS, so report that QoS is disabled. */
1541 /* Attempts to reconfigure QoS on 'netdev', changing the form of QoS to 'type'
1542 * with details of configuration from 'details'. Returns 0 if successful,
1543 * otherwise a positive errno value. On error, the previous QoS configuration
1546 * When this function changes the type of QoS (not just 'details'), this also
1547 * resets all queue configuration for 'netdev' to their defaults (which depend
1548 * on the specific type of QoS). Otherwise, the queue configuration for
1549 * 'netdev' is unchanged.
1551 * 'type' should be "" (to disable QoS) or one of the types returned by
1552 * netdev_get_qos_types() for 'netdev'. The contents of 'details' should be
1553 * documented as valid for the given 'type' in the "other_config" column in the
1554 * "QoS" table in vswitchd/vswitch.xml (which is built as
1555 * ovs-vswitchd.conf.db(8)).
1557 * NULL may be specified for 'details' if there are no configuration
1560 netdev_set_qos(struct netdev
*netdev
,
1561 const char *type
, const struct smap
*details
)
1563 const struct netdev_class
*class = netdev
->netdev_class
;
1569 if (class->set_qos
) {
1571 static const struct smap empty
= SMAP_INITIALIZER(&empty
);
1574 return class->set_qos(netdev
, type
, details
);
1576 return *type
? EOPNOTSUPP
: 0;
1580 /* Queries 'netdev' for information about the queue numbered 'queue_id'. If
1581 * successful, adds that information as string key-value pairs to 'details'.
1582 * Returns 0 if successful, otherwise a positive errno value.
1584 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1585 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1587 * The returned contents of 'details' should be documented as valid for the
1588 * given 'type' in the "other_config" column in the "Queue" table in
1589 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1591 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1592 * this function. The caller must free 'details' when it is no longer needed
1593 * (e.g. with smap_destroy()). */
1595 netdev_get_queue(const struct netdev
*netdev
,
1596 unsigned int queue_id
, struct smap
*details
)
1598 const struct netdev_class
*class = netdev
->netdev_class
;
1601 retval
= (class->get_queue
1602 ? class->get_queue(netdev
, queue_id
, details
)
1605 smap_clear(details
);
1610 /* Configures the queue numbered 'queue_id' on 'netdev' with the key-value
1611 * string pairs in 'details'. The contents of 'details' should be documented
1612 * as valid for the given 'type' in the "other_config" column in the "Queue"
1613 * table in vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1614 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1615 * given queue's configuration should be unmodified.
1617 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1618 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1620 * This function does not modify 'details', and the caller retains ownership of
1623 netdev_set_queue(struct netdev
*netdev
,
1624 unsigned int queue_id
, const struct smap
*details
)
1626 const struct netdev_class
*class = netdev
->netdev_class
;
1627 return (class->set_queue
1628 ? class->set_queue(netdev
, queue_id
, details
)
1632 /* Attempts to delete the queue numbered 'queue_id' from 'netdev'. Some kinds
1633 * of QoS may have a fixed set of queues, in which case attempts to delete them
1634 * will fail with EOPNOTSUPP.
1636 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1637 * given queue will be unmodified.
1639 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1640 * the current form of QoS (e.g. as returned by
1641 * netdev_get_n_queues(netdev)). */
1643 netdev_delete_queue(struct netdev
*netdev
, unsigned int queue_id
)
1645 const struct netdev_class
*class = netdev
->netdev_class
;
1646 return (class->delete_queue
1647 ? class->delete_queue(netdev
, queue_id
)
1651 /* Obtains statistics about 'queue_id' on 'netdev'. On success, returns 0 and
1652 * fills 'stats' with the queue's statistics; individual members of 'stats' may
1653 * be set to all-1-bits if the statistic is unavailable. On failure, returns a
1654 * positive errno value and fills 'stats' with values indicating unsupported
1657 netdev_get_queue_stats(const struct netdev
*netdev
, unsigned int queue_id
,
1658 struct netdev_queue_stats
*stats
)
1660 const struct netdev_class
*class = netdev
->netdev_class
;
1663 retval
= (class->get_queue_stats
1664 ? class->get_queue_stats(netdev
, queue_id
, stats
)
1667 stats
->tx_bytes
= UINT64_MAX
;
1668 stats
->tx_packets
= UINT64_MAX
;
1669 stats
->tx_errors
= UINT64_MAX
;
1670 stats
->created
= LLONG_MIN
;
1675 /* Initializes 'dump' to begin dumping the queues in a netdev.
1677 * This function provides no status indication. An error status for the entire
1678 * dump operation is provided when it is completed by calling
1679 * netdev_queue_dump_done().
1682 netdev_queue_dump_start(struct netdev_queue_dump
*dump
,
1683 const struct netdev
*netdev
)
1685 dump
->netdev
= netdev_ref(netdev
);
1686 if (netdev
->netdev_class
->queue_dump_start
) {
1687 dump
->error
= netdev
->netdev_class
->queue_dump_start(netdev
,
1690 dump
->error
= EOPNOTSUPP
;
1694 /* Attempts to retrieve another queue from 'dump', which must have been
1695 * initialized with netdev_queue_dump_start(). On success, stores a new queue
1696 * ID into '*queue_id', fills 'details' with configuration details for the
1697 * queue, and returns true. On failure, returns false.
1699 * Queues are not necessarily dumped in increasing order of queue ID (or any
1700 * other predictable order).
1702 * Failure might indicate an actual error or merely that the last queue has
1703 * been dumped. An error status for the entire dump operation is provided when
1704 * it is completed by calling netdev_queue_dump_done().
1706 * The returned contents of 'details' should be documented as valid for the
1707 * given 'type' in the "other_config" column in the "Queue" table in
1708 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1710 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1711 * this function. This function will clear and replace its contents. The
1712 * caller must free 'details' when it is no longer needed (e.g. with
1713 * smap_destroy()). */
1715 netdev_queue_dump_next(struct netdev_queue_dump
*dump
,
1716 unsigned int *queue_id
, struct smap
*details
)
1718 const struct netdev
*netdev
= dump
->netdev
;
1724 dump
->error
= netdev
->netdev_class
->queue_dump_next(netdev
, dump
->state
,
1728 netdev
->netdev_class
->queue_dump_done(netdev
, dump
->state
);
1734 /* Completes queue table dump operation 'dump', which must have been
1735 * initialized with netdev_queue_dump_start(). Returns 0 if the dump operation
1736 * was error-free, otherwise a positive errno value describing the problem. */
1738 netdev_queue_dump_done(struct netdev_queue_dump
*dump
)
1740 const struct netdev
*netdev
= dump
->netdev
;
1741 if (!dump
->error
&& netdev
->netdev_class
->queue_dump_done
) {
1742 dump
->error
= netdev
->netdev_class
->queue_dump_done(netdev
,
1745 netdev_close(dump
->netdev
);
1746 return dump
->error
== EOF
? 0 : dump
->error
;
1749 /* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
1750 * its statistics, and the 'aux' specified by the caller. The order of
1751 * iteration is unspecified, but (when successful) each queue is visited
1754 * Calling this function may be more efficient than calling
1755 * netdev_get_queue_stats() for every queue.
1757 * 'cb' must not modify or free the statistics passed in.
1759 * Returns 0 if successful, otherwise a positive errno value. On error, some
1760 * configured queues may not have been included in the iteration. */
1762 netdev_dump_queue_stats(const struct netdev
*netdev
,
1763 netdev_dump_queue_stats_cb
*cb
, void *aux
)
1765 const struct netdev_class
*class = netdev
->netdev_class
;
1766 return (class->dump_queue_stats
1767 ? class->dump_queue_stats(netdev
, cb
, aux
)
1772 /* Returns the class type of 'netdev'.
1774 * The caller must not free the returned value. */
1776 netdev_get_type(const struct netdev
*netdev
)
1778 return netdev
->netdev_class
->type
;
1781 /* Returns the class associated with 'netdev'. */
1782 const struct netdev_class
*
1783 netdev_get_class(const struct netdev
*netdev
)
1785 return netdev
->netdev_class
;
1788 /* Returns the netdev with 'name' or NULL if there is none.
1790 * The caller must free the returned netdev with netdev_close(). */
1792 netdev_from_name(const char *name
)
1793 OVS_EXCLUDED(netdev_mutex
)
1795 struct netdev
*netdev
;
1797 ovs_mutex_lock(&netdev_mutex
);
1798 netdev
= shash_find_data(&netdev_shash
, name
);
1802 ovs_mutex_unlock(&netdev_mutex
);
1807 /* Fills 'device_list' with devices that match 'netdev_class'.
1809 * The caller is responsible for initializing and destroying 'device_list' and
1810 * must close each device on the list. */
1812 netdev_get_devices(const struct netdev_class
*netdev_class
,
1813 struct shash
*device_list
)
1814 OVS_EXCLUDED(netdev_mutex
)
1816 struct shash_node
*node
;
1818 ovs_mutex_lock(&netdev_mutex
);
1819 SHASH_FOR_EACH (node
, &netdev_shash
) {
1820 struct netdev
*dev
= node
->data
;
1822 if (dev
->netdev_class
== netdev_class
) {
1824 shash_add(device_list
, node
->name
, node
->data
);
1827 ovs_mutex_unlock(&netdev_mutex
);
1830 /* Extracts pointers to all 'netdev-vports' into an array 'vports'
1831 * and returns it. Stores the size of the array into '*size'.
1833 * The caller is responsible for freeing 'vports' and must close
1834 * each 'netdev-vport' in the list. */
1836 netdev_get_vports(size_t *size
)
1837 OVS_EXCLUDED(netdev_mutex
)
1839 struct netdev
**vports
;
1840 struct shash_node
*node
;
1847 /* Explicitly allocates big enough chunk of memory. */
1848 ovs_mutex_lock(&netdev_mutex
);
1849 vports
= xmalloc(shash_count(&netdev_shash
) * sizeof *vports
);
1850 SHASH_FOR_EACH (node
, &netdev_shash
) {
1851 struct netdev
*dev
= node
->data
;
1853 if (netdev_vport_is_vport_class(dev
->netdev_class
)) {
1859 ovs_mutex_unlock(&netdev_mutex
);
/* Returns the type of the netdev named 'name', or NULL if no such device is
 * registered.  Tries the cheap vport name-to-type mapping first and only
 * falls back to looking up an open netdev. */
const char *
netdev_get_type_from_name(const char *name)
{
    struct netdev *dev;
    const char *type;

    type = netdev_vport_type_from_name(name);
    if (type == NULL) {
        dev = netdev_from_name(name);
        type = dev ? netdev_get_type(dev) : NULL;
        netdev_close(dev);
    }
    return type;
}
1880 netdev_rxq_get_netdev(const struct netdev_rxq
*rx
)
1882 ovs_assert(rx
->netdev
->ref_cnt
> 0);
/* Returns the name of the netdev that 'rx' receives packets from. */
const char *
netdev_rxq_get_name(const struct netdev_rxq *rx)
{
    return netdev_get_name(netdev_rxq_get_netdev(rx));
}
1893 netdev_rxq_get_queue_id(const struct netdev_rxq
*rx
)
1895 return rx
->queue_id
;
1899 restore_all_flags(void *aux OVS_UNUSED
)
1901 struct shash_node
*node
;
1903 SHASH_FOR_EACH (node
, &netdev_shash
) {
1904 struct netdev
*netdev
= node
->data
;
1905 const struct netdev_saved_flags
*sf
;
1906 enum netdev_flags saved_values
;
1907 enum netdev_flags saved_flags
;
1909 saved_values
= saved_flags
= 0;
1910 LIST_FOR_EACH (sf
, node
, &netdev
->saved_flags_list
) {
1911 saved_flags
|= sf
->saved_flags
;
1912 saved_values
&= ~sf
->saved_flags
;
1913 saved_values
|= sf
->saved_flags
& sf
->saved_values
;
1916 enum netdev_flags old_flags
;
1918 netdev
->netdev_class
->update_flags(netdev
,
1919 saved_flags
& saved_values
,
1920 saved_flags
& ~saved_values
,
1927 netdev_get_change_seq(const struct netdev
*netdev
)
1929 return netdev
->change_seq
;
1933 /* This implementation is shared by Linux and BSD. */
1935 static struct ifaddrs
*if_addr_list
;
1936 static struct ovs_mutex if_addr_list_lock
= OVS_MUTEX_INITIALIZER
;
1939 netdev_get_addrs_list_flush(void)
1941 ovs_mutex_lock(&if_addr_list_lock
);
1943 freeifaddrs(if_addr_list
);
1944 if_addr_list
= NULL
;
1946 ovs_mutex_unlock(&if_addr_list_lock
);
1950 netdev_get_addrs(const char dev
[], struct in6_addr
**paddr
,
1951 struct in6_addr
**pmask
, int *n_in
)
1953 struct in6_addr
*addr_array
, *mask_array
;
1954 const struct ifaddrs
*ifa
;
1957 ovs_mutex_lock(&if_addr_list_lock
);
1958 if (!if_addr_list
) {
1961 err
= getifaddrs(&if_addr_list
);
1963 ovs_mutex_unlock(&if_addr_list_lock
);
1968 for (ifa
= if_addr_list
; ifa
; ifa
= ifa
->ifa_next
) {
1969 if (ifa
->ifa_addr
&& ifa
->ifa_name
&& ifa
->ifa_netmask
) {
1972 family
= ifa
->ifa_addr
->sa_family
;
1973 if (family
== AF_INET
|| family
== AF_INET6
) {
1974 if (!strncmp(ifa
->ifa_name
, dev
, IFNAMSIZ
)) {
1982 ovs_mutex_unlock(&if_addr_list_lock
);
1983 return EADDRNOTAVAIL
;
1985 addr_array
= xzalloc(sizeof *addr_array
* cnt
);
1986 mask_array
= xzalloc(sizeof *mask_array
* cnt
);
1987 for (ifa
= if_addr_list
; ifa
; ifa
= ifa
->ifa_next
) {
1990 if (!ifa
->ifa_name
|| !ifa
->ifa_addr
|| !ifa
->ifa_netmask
1991 || strncmp(ifa
->ifa_name
, dev
, IFNAMSIZ
)) {
1995 family
= ifa
->ifa_addr
->sa_family
;
1996 if (family
== AF_INET
) {
1997 const struct sockaddr_in
*sin
;
1999 sin
= ALIGNED_CAST(const struct sockaddr_in
*, ifa
->ifa_addr
);
2000 in6_addr_set_mapped_ipv4(&addr_array
[i
], sin
->sin_addr
.s_addr
);
2001 sin
= ALIGNED_CAST(const struct sockaddr_in
*, ifa
->ifa_netmask
);
2002 in6_addr_set_mapped_ipv4(&mask_array
[i
], sin
->sin_addr
.s_addr
);
2004 } else if (family
== AF_INET6
) {
2005 const struct sockaddr_in6
*sin6
;
2007 sin6
= ALIGNED_CAST(const struct sockaddr_in6
*, ifa
->ifa_addr
);
2008 memcpy(&addr_array
[i
], &sin6
->sin6_addr
, sizeof *addr_array
);
2009 sin6
= ALIGNED_CAST(const struct sockaddr_in6
*, ifa
->ifa_netmask
);
2010 memcpy(&mask_array
[i
], &sin6
->sin6_addr
, sizeof *mask_array
);
2014 ovs_mutex_unlock(&if_addr_list_lock
);
2017 *paddr
= addr_array
;
2018 *pmask
= mask_array
;
2028 netdev_wait_reconf_required(struct netdev
*netdev
)
2030 seq_wait(netdev
->reconfigure_seq
, netdev
->last_reconfigure_seq
);
2034 netdev_is_reconf_required(struct netdev
*netdev
)
2036 return seq_read(netdev
->reconfigure_seq
) != netdev
->last_reconfigure_seq
;
2039 /* Give a chance to 'netdev' to reconfigure some of its parameters.
2041 * If a module uses netdev_send() and netdev_rxq_recv(), it must call this
2042 * function when netdev_is_reconf_required() returns true.
2044 * Return 0 if successful, otherwise a positive errno value. If the
2045 * reconfiguration fails the netdev will not be able to send or receive
2048 * When this function is called, no call to netdev_rxq_recv() or netdev_send()
2049 * must be issued. */
2051 netdev_reconfigure(struct netdev
*netdev
)
2053 const struct netdev_class
*class = netdev
->netdev_class
;
2055 netdev
->last_reconfigure_seq
= seq_read(netdev
->reconfigure_seq
);
2057 return (class->reconfigure
2058 ? class->reconfigure(netdev
)
2063 netdev_flow_flush(struct netdev
*netdev
)
2065 const struct netdev_class
*class = netdev
->netdev_class
;
2067 return (class->flow_flush
2068 ? class->flow_flush(netdev
)
2073 netdev_flow_dump_create(struct netdev
*netdev
, struct netdev_flow_dump
**dump
)
2075 const struct netdev_class
*class = netdev
->netdev_class
;
2077 return (class->flow_dump_create
2078 ? class->flow_dump_create(netdev
, dump
)
2083 netdev_flow_dump_destroy(struct netdev_flow_dump
*dump
)
2085 const struct netdev_class
*class = dump
->netdev
->netdev_class
;
2087 return (class->flow_dump_destroy
2088 ? class->flow_dump_destroy(dump
)
2093 netdev_flow_dump_next(struct netdev_flow_dump
*dump
, struct match
*match
,
2094 struct nlattr
**actions
, struct dpif_flow_stats
*stats
,
2095 ovs_u128
*ufid
, struct ofpbuf
*rbuffer
,
2096 struct ofpbuf
*wbuffer
)
2098 const struct netdev_class
*class = dump
->netdev
->netdev_class
;
2100 return (class->flow_dump_next
2101 ? class->flow_dump_next(dump
, match
, actions
, stats
, ufid
,
2107 netdev_flow_put(struct netdev
*netdev
, struct match
*match
,
2108 struct nlattr
*actions
, size_t act_len
,
2109 const ovs_u128
*ufid
, struct offload_info
*info
,
2110 struct dpif_flow_stats
*stats
)
2112 const struct netdev_class
*class = netdev
->netdev_class
;
2114 return (class->flow_put
2115 ? class->flow_put(netdev
, match
, actions
, act_len
, ufid
,
2121 netdev_flow_get(struct netdev
*netdev
, struct match
*match
,
2122 struct nlattr
**actions
, const ovs_u128
*ufid
,
2123 struct dpif_flow_stats
*stats
, struct ofpbuf
*buf
)
2125 const struct netdev_class
*class = netdev
->netdev_class
;
2127 return (class->flow_get
2128 ? class->flow_get(netdev
, match
, actions
, ufid
, stats
, buf
)
2133 netdev_flow_del(struct netdev
*netdev
, const ovs_u128
*ufid
,
2134 struct dpif_flow_stats
*stats
)
2136 const struct netdev_class
*class = netdev
->netdev_class
;
2138 return (class->flow_del
2139 ? class->flow_del(netdev
, ufid
, stats
)
2144 netdev_init_flow_api(struct netdev
*netdev
)
2146 const struct netdev_class
*class = netdev
->netdev_class
;
2148 if (!netdev_is_flow_api_enabled()) {
2152 return (class->init_flow_api
2153 ? class->init_flow_api(netdev
)
2158 netdev_is_flow_api_enabled(void)
2160 return netdev_flow_api_enabled
;
2163 /* Protects below port hashmaps. */
2164 static struct ovs_mutex netdev_hmap_mutex
= OVS_MUTEX_INITIALIZER
;
2166 static struct hmap port_to_netdev
OVS_GUARDED_BY(netdev_hmap_mutex
)
2167 = HMAP_INITIALIZER(&port_to_netdev
);
2168 static struct hmap ifindex_to_port
OVS_GUARDED_BY(netdev_hmap_mutex
)
2169 = HMAP_INITIALIZER(&ifindex_to_port
);
2171 struct port_to_netdev_data
{
2172 struct hmap_node node
;
2173 struct netdev
*netdev
;
2174 struct dpif_port dpif_port
;
2175 const struct dpif_class
*dpif_class
;
2178 struct ifindex_to_port_data
{
2179 struct hmap_node node
;
2184 #define NETDEV_PORTS_HASH_INT(port, dpif) \
2185 hash_int(odp_to_u32(port),\
2186 hash_pointer(dpif, 0));
2188 static struct port_to_netdev_data
*
2189 netdev_ports_lookup(odp_port_t port_no
, const struct dpif_class
*dpif_class
)
2190 OVS_REQUIRES(netdev_hmap_mutex
)
2192 size_t hash
= NETDEV_PORTS_HASH_INT(port_no
, dpif_class
);
2193 struct port_to_netdev_data
*data
;
2195 HMAP_FOR_EACH_WITH_HASH(data
, node
, hash
, &port_to_netdev
) {
2196 if (data
->dpif_class
== dpif_class
2197 && data
->dpif_port
.port_no
== port_no
) {
2205 netdev_ports_insert(struct netdev
*netdev
, const struct dpif_class
*dpif_class
,
2206 struct dpif_port
*dpif_port
)
2208 size_t hash
= NETDEV_PORTS_HASH_INT(dpif_port
->port_no
, dpif_class
);
2209 struct port_to_netdev_data
*data
;
2210 struct ifindex_to_port_data
*ifidx
;
2211 int ifindex
= netdev_get_ifindex(netdev
);
2217 data
= xzalloc(sizeof *data
);
2218 ifidx
= xzalloc(sizeof *ifidx
);
2220 ovs_mutex_lock(&netdev_hmap_mutex
);
2221 if (netdev_ports_lookup(dpif_port
->port_no
, dpif_class
)) {
2222 ovs_mutex_unlock(&netdev_hmap_mutex
);
2226 data
->netdev
= netdev_ref(netdev
);
2227 data
->dpif_class
= dpif_class
;
2228 dpif_port_clone(&data
->dpif_port
, dpif_port
);
2230 ifidx
->ifindex
= ifindex
;
2231 ifidx
->port
= dpif_port
->port_no
;
2233 hmap_insert(&port_to_netdev
, &data
->node
, hash
);
2234 hmap_insert(&ifindex_to_port
, &ifidx
->node
, ifidx
->ifindex
);
2235 ovs_mutex_unlock(&netdev_hmap_mutex
);
2237 netdev_init_flow_api(netdev
);
2243 netdev_ports_get(odp_port_t port_no
, const struct dpif_class
*dpif_class
)
2245 struct port_to_netdev_data
*data
;
2246 struct netdev
*ret
= NULL
;
2248 ovs_mutex_lock(&netdev_hmap_mutex
);
2249 data
= netdev_ports_lookup(port_no
, dpif_class
);
2251 ret
= netdev_ref(data
->netdev
);
2253 ovs_mutex_unlock(&netdev_hmap_mutex
);
2259 netdev_ports_remove(odp_port_t port_no
, const struct dpif_class
*dpif_class
)
2261 struct port_to_netdev_data
*data
;
2264 ovs_mutex_lock(&netdev_hmap_mutex
);
2266 data
= netdev_ports_lookup(port_no
, dpif_class
);
2269 int ifindex
= netdev_get_ifindex(data
->netdev
);
2272 struct ifindex_to_port_data
*ifidx
= NULL
;
2274 HMAP_FOR_EACH_WITH_HASH (ifidx
, node
, ifindex
, &ifindex_to_port
) {
2275 if (ifidx
->port
== port_no
) {
2276 hmap_remove(&ifindex_to_port
, &ifidx
->node
);
2283 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
2285 VLOG_WARN_RL(&rl
, "netdev ports map has dpif port %"PRIu32
2286 " but netdev has no ifindex: %s", port_no
,
2287 ovs_strerror(ifindex
));
2290 dpif_port_destroy(&data
->dpif_port
);
2291 netdev_close(data
->netdev
); /* unref and possibly close */
2292 hmap_remove(&port_to_netdev
, &data
->node
);
2297 ovs_mutex_unlock(&netdev_hmap_mutex
);
2303 netdev_ifindex_to_odp_port(int ifindex
)
2305 struct ifindex_to_port_data
*data
;
2308 ovs_mutex_lock(&netdev_hmap_mutex
);
2309 HMAP_FOR_EACH_WITH_HASH(data
, node
, ifindex
, &ifindex_to_port
) {
2310 if (data
->ifindex
== ifindex
) {
2315 ovs_mutex_unlock(&netdev_hmap_mutex
);
2321 netdev_ports_flow_flush(const struct dpif_class
*dpif_class
)
2323 struct port_to_netdev_data
*data
;
2325 ovs_mutex_lock(&netdev_hmap_mutex
);
2326 HMAP_FOR_EACH(data
, node
, &port_to_netdev
) {
2327 if (data
->dpif_class
== dpif_class
) {
2328 netdev_flow_flush(data
->netdev
);
2331 ovs_mutex_unlock(&netdev_hmap_mutex
);
2334 struct netdev_flow_dump
**
2335 netdev_ports_flow_dump_create(const struct dpif_class
*dpif_class
, int *ports
)
2337 struct port_to_netdev_data
*data
;
2338 struct netdev_flow_dump
**dumps
;
2342 ovs_mutex_lock(&netdev_hmap_mutex
);
2343 HMAP_FOR_EACH(data
, node
, &port_to_netdev
) {
2344 if (data
->dpif_class
== dpif_class
) {
2349 dumps
= count
? xzalloc(sizeof *dumps
* count
) : NULL
;
2351 HMAP_FOR_EACH(data
, node
, &port_to_netdev
) {
2352 if (data
->dpif_class
== dpif_class
) {
2353 if (netdev_flow_dump_create(data
->netdev
, &dumps
[i
])) {
2357 dumps
[i
]->port
= data
->dpif_port
.port_no
;
2361 ovs_mutex_unlock(&netdev_hmap_mutex
);
2368 netdev_ports_flow_del(const struct dpif_class
*dpif_class
,
2369 const ovs_u128
*ufid
,
2370 struct dpif_flow_stats
*stats
)
2372 struct port_to_netdev_data
*data
;
2374 ovs_mutex_lock(&netdev_hmap_mutex
);
2375 HMAP_FOR_EACH(data
, node
, &port_to_netdev
) {
2376 if (data
->dpif_class
== dpif_class
2377 && !netdev_flow_del(data
->netdev
, ufid
, stats
)) {
2378 ovs_mutex_unlock(&netdev_hmap_mutex
);
2382 ovs_mutex_unlock(&netdev_hmap_mutex
);
2388 netdev_ports_flow_get(const struct dpif_class
*dpif_class
, struct match
*match
,
2389 struct nlattr
**actions
, const ovs_u128
*ufid
,
2390 struct dpif_flow_stats
*stats
, struct ofpbuf
*buf
)
2392 struct port_to_netdev_data
*data
;
2394 ovs_mutex_lock(&netdev_hmap_mutex
);
2395 HMAP_FOR_EACH(data
, node
, &port_to_netdev
) {
2396 if (data
->dpif_class
== dpif_class
2397 && !netdev_flow_get(data
->netdev
, match
, actions
,
2398 ufid
, stats
, buf
)) {
2399 ovs_mutex_unlock(&netdev_hmap_mutex
);
2403 ovs_mutex_unlock(&netdev_hmap_mutex
);
2409 netdev_ports_flow_init(void)
2411 struct port_to_netdev_data
*data
;
2413 ovs_mutex_lock(&netdev_hmap_mutex
);
2414 HMAP_FOR_EACH(data
, node
, &port_to_netdev
) {
2415 netdev_init_flow_api(data
->netdev
);
2417 ovs_mutex_unlock(&netdev_hmap_mutex
);
2421 netdev_set_flow_api_enabled(const struct smap
*ovs_other_config
)
2423 if (smap_get_bool(ovs_other_config
, "hw-offload", false)) {
2424 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
2426 if (ovsthread_once_start(&once
)) {
2427 netdev_flow_api_enabled
= true;
2429 VLOG_INFO("netdev: Flow API Enabled");
2431 tc_set_policy(smap_get_def(ovs_other_config
, "tc-policy",
2432 TC_POLICY_DEFAULT
));
2434 netdev_ports_flow_init();
2436 ovsthread_once_done(&once
);
2442 netdev_set_flow_api_enabled(const struct smap
*ovs_other_config OVS_UNUSED
)