2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
22 #include <sys/types.h>
23 #include <netinet/in.h>
31 #include <sys/ioctl.h>
37 #include "dp-packet.h"
38 #include "openvswitch/dynamic-string.h"
39 #include "fatal-signal.h"
41 #include "openvswitch/list.h"
42 #include "netdev-dpdk.h"
43 #include "netdev-provider.h"
44 #include "netdev-vport.h"
45 #include "odp-netlink.h"
46 #include "openflow/openflow.h"
48 #include "openvswitch/ofp-print.h"
49 #include "openvswitch/poll-loop.h"
51 #include "openvswitch/shash.h"
53 #include "socket-util.h"
56 #include "openvswitch/vlog.h"
63 VLOG_DEFINE_THIS_MODULE(netdev
);
65 COVERAGE_DEFINE(netdev_received
);
66 COVERAGE_DEFINE(netdev_sent
);
67 COVERAGE_DEFINE(netdev_add_router
);
68 COVERAGE_DEFINE(netdev_get_stats
);
70 struct netdev_saved_flags
{
71 struct netdev
*netdev
;
72 struct ovs_list node
; /* In struct netdev's saved_flags_list. */
73 enum netdev_flags saved_flags
;
74 enum netdev_flags saved_values
;
77 /* Protects 'netdev_shash' and the mutable members of struct netdev. */
78 static struct ovs_mutex netdev_mutex
= OVS_MUTEX_INITIALIZER
;
80 /* All created network devices. */
81 static struct shash netdev_shash
OVS_GUARDED_BY(netdev_mutex
)
82 = SHASH_INITIALIZER(&netdev_shash
);
84 /* Mutual exclusion of netdev provider class registration and
 * unregistration. */
85 static struct ovs_mutex netdev_class_mutex
OVS_ACQ_BEFORE(netdev_mutex
)
86 = OVS_MUTEX_INITIALIZER
;
88 /* Contains 'struct netdev_registered_class'es. */
89 static struct cmap netdev_classes
= CMAP_INITIALIZER
;
91 struct netdev_registered_class
{
92 struct cmap_node cmap_node
; /* In 'netdev_classes', by class->type. */
93 const struct netdev_class
*class;
95 /* Number of references: one for the class itself and one for every
96 * instance of the class. */
97 struct ovs_refcount refcnt
;
100 static bool netdev_flow_api_enabled
= false;
102 /* This is set pretty low because we probably won't learn anything from the
103 * additional log messages. */
104 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
106 static void restore_all_flags(void *aux OVS_UNUSED
);
107 void update_device_args(struct netdev
*, const struct shash
*args
);
110 netdev_n_txq(const struct netdev
*netdev
)
112 return netdev
->n_txq
;
116 netdev_n_rxq(const struct netdev
*netdev
)
118 return netdev
->n_rxq
;
122 netdev_is_pmd(const struct netdev
*netdev
)
124 return netdev
->netdev_class
->is_pmd
;
128 netdev_has_tunnel_push_pop(const struct netdev
*netdev
)
130 return netdev
->netdev_class
->push_header
131 && netdev
->netdev_class
->pop_header
;
135 netdev_initialize(void)
136 OVS_EXCLUDED(netdev_mutex
)
138 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
140 if (ovsthread_once_start(&once
)) {
141 fatal_signal_add_hook(restore_all_flags
, NULL
, NULL
, true);
143 netdev_vport_patch_register();
146 netdev_register_provider(&netdev_linux_class
);
147 netdev_register_provider(&netdev_internal_class
);
148 netdev_register_provider(&netdev_tap_class
);
149 netdev_vport_tunnel_register();
151 #if defined(__FreeBSD__) || defined(__NetBSD__)
152 netdev_register_provider(&netdev_tap_class
);
153 netdev_register_provider(&netdev_bsd_class
);
156 netdev_register_provider(&netdev_windows_class
);
157 netdev_register_provider(&netdev_internal_class
);
158 netdev_vport_tunnel_register();
160 ovsthread_once_done(&once
);
164 /* Performs periodic work needed by all the various kinds of netdevs.
166 * If your program opens any netdevs, it must call this function within its
170 OVS_EXCLUDED(netdev_mutex
)
174 struct netdev_registered_class
*rc
;
175 CMAP_FOR_EACH (rc
, cmap_node
, &netdev_classes
) {
176 if (rc
->class->run
) {
177 rc
->class->run(rc
->class);
182 /* Arranges for poll_block() to wake up when netdev_run() needs to be called.
184 * If your program opens any netdevs, it must call this function within its
188 OVS_EXCLUDED(netdev_mutex
)
192 struct netdev_registered_class
*rc
;
193 CMAP_FOR_EACH (rc
, cmap_node
, &netdev_classes
) {
194 if (rc
->class->wait
) {
195 rc
->class->wait(rc
->class);
200 static struct netdev_registered_class
*
201 netdev_lookup_class(const char *type
)
203 struct netdev_registered_class
*rc
;
204 CMAP_FOR_EACH_WITH_HASH (rc
, cmap_node
, hash_string(type
, 0),
206 if (!strcmp(type
, rc
->class->type
)) {
213 /* Initializes and registers a new netdev provider. After successful
214 * registration, new netdevs of that type can be opened using netdev_open(). */
216 netdev_register_provider(const struct netdev_class
*new_class
)
217 OVS_EXCLUDED(netdev_class_mutex
, netdev_mutex
)
221 ovs_mutex_lock(&netdev_class_mutex
);
222 if (netdev_lookup_class(new_class
->type
)) {
223 VLOG_WARN("attempted to register duplicate netdev provider: %s",
227 error
= new_class
->init
? new_class
->init() : 0;
229 struct netdev_registered_class
*rc
;
231 rc
= xmalloc(sizeof *rc
);
232 cmap_insert(&netdev_classes
, &rc
->cmap_node
,
233 hash_string(new_class
->type
, 0));
234 rc
->class = new_class
;
235 ovs_refcount_init(&rc
->refcnt
);
237 VLOG_ERR("failed to initialize %s network device class: %s",
238 new_class
->type
, ovs_strerror(error
));
241 ovs_mutex_unlock(&netdev_class_mutex
);
246 /* Unregisters a netdev provider. 'type' must have been previously registered
247 * and not currently be in use by any netdevs. After unregistration new
248 * netdevs of that type cannot be opened using netdev_open(). (However, the
249 * provider may still be accessible from other threads until the next RCU grace
250 * period, so the caller must not free or re-register the same netdev_class
251 * until that has passed.) */
253 netdev_unregister_provider(const char *type
)
254 OVS_EXCLUDED(netdev_class_mutex
, netdev_mutex
)
256 struct netdev_registered_class
*rc
;
261 ovs_mutex_lock(&netdev_class_mutex
);
262 rc
= netdev_lookup_class(type
);
264 VLOG_WARN("attempted to unregister a netdev provider that is not "
265 "registered: %s", type
);
266 error
= EAFNOSUPPORT
;
267 } else if (ovs_refcount_unref(&rc
->refcnt
) != 1) {
268 ovs_refcount_ref(&rc
->refcnt
);
269 VLOG_WARN("attempted to unregister in use netdev provider: %s",
273 cmap_remove(&netdev_classes
, &rc
->cmap_node
,
274 hash_string(rc
->class->type
, 0));
275 ovsrcu_postpone(free
, rc
);
278 ovs_mutex_unlock(&netdev_class_mutex
);
283 /* Clears 'types' and enumerates the types of all currently registered netdev
284 * providers into it. The caller must first initialize the sset. */
286 netdev_enumerate_types(struct sset
*types
)
287 OVS_EXCLUDED(netdev_mutex
)
292 struct netdev_registered_class
*rc
;
293 CMAP_FOR_EACH (rc
, cmap_node
, &netdev_classes
) {
294 sset_add(types
, rc
->class->type
);
299 netdev_vport_type_from_name(const char *name
)
301 struct netdev_registered_class
*rc
;
303 CMAP_FOR_EACH (rc
, cmap_node
, &netdev_classes
) {
304 const char *dpif_port
= netdev_vport_class_get_dpif_port(rc
->class);
305 if (dpif_port
&& !strncmp(name
, dpif_port
, strlen(dpif_port
))) {
306 type
= rc
->class->type
;
313 /* Check that the network device name is not the same as any of the registered
314 * vport providers' dpif_port name (dpif_port is NULL if the vport provider
315 * does not define it) or the datapath internal port name (e.g. ovs-system).
317 * Returns true if there is a name conflict, false otherwise. */
319 netdev_is_reserved_name(const char *name
)
320 OVS_EXCLUDED(netdev_mutex
)
324 struct netdev_registered_class
*rc
;
325 CMAP_FOR_EACH (rc
, cmap_node
, &netdev_classes
) {
326 const char *dpif_port
= netdev_vport_class_get_dpif_port(rc
->class);
327 if (dpif_port
&& !strncmp(name
, dpif_port
, strlen(dpif_port
))) {
332 if (!strncmp(name
, "ovs-", 4)) {
337 dp_enumerate_types(&types
);
338 SSET_FOR_EACH (type
, &types
) {
339 if (!strcmp(name
+4, type
)) {
340 sset_destroy(&types
);
344 sset_destroy(&types
);
350 /* Opens the network device named 'name' (e.g. "eth0") of the specified 'type'
351 * (e.g. "system") and returns zero if successful, otherwise a positive errno
352 * value. On success, sets '*netdevp' to the new network device, otherwise to
355 * Some network devices may need to be configured (with netdev_set_config())
356 * before they can be used.
358 * Before opening rxqs or sending packets, '*netdevp' may need to be
359 * reconfigured (with netdev_is_reconf_required() and netdev_reconfigure()).
362 netdev_open(const char *name
, const char *type
, struct netdev
**netdevp
)
363 OVS_EXCLUDED(netdev_mutex
)
365 struct netdev
*netdev
;
369 /* Reject empty names. This saves the providers having to do this. At
370 * least one screwed this up: the netdev-linux "tap" implementation
371 * passed the name directly to the Linux TUNSETIFF call, which treats
372 * an empty string as a request to generate a unique name. */
378 ovs_mutex_lock(&netdev_mutex
);
379 netdev
= shash_find_data(&netdev_shash
, name
);
382 type
&& type
[0] && strcmp(type
, netdev
->netdev_class
->type
)) {
384 if (netdev
->auto_classified
) {
385 /* If this device was first created without a classification type,
386 * for example due to routing or tunneling code, and they keep a
387 * reference, a "classified" call to open will fail. In this case
388 * we remove the classless device, and re-add it below. We remove
389 * the netdev from the shash, and change the sequence, so owners of
390 * the old classless device can release/cleanup. */
392 shash_delete(&netdev_shash
, netdev
->node
);
394 netdev_change_seq_changed(netdev
);
404 struct netdev_registered_class
*rc
;
406 rc
= netdev_lookup_class(type
&& type
[0] ? type
: "system");
407 if (rc
&& ovs_refcount_try_ref_rcu(&rc
->refcnt
)) {
408 netdev
= rc
->class->alloc();
410 memset(netdev
, 0, sizeof *netdev
);
411 netdev
->netdev_class
= rc
->class;
412 netdev
->auto_classified
= type
&& type
[0] ? false : true;
413 netdev
->name
= xstrdup(name
);
414 netdev
->change_seq
= 1;
415 netdev
->reconfigure_seq
= seq_create();
416 netdev
->last_reconfigure_seq
=
417 seq_read(netdev
->reconfigure_seq
);
418 netdev
->node
= shash_add(&netdev_shash
, name
, netdev
);
420 /* By default enable one tx and rx queue per netdev. */
421 netdev
->n_txq
= netdev
->netdev_class
->send
? 1 : 0;
422 netdev
->n_rxq
= netdev
->netdev_class
->rxq_alloc
? 1 : 0;
424 ovs_list_init(&netdev
->saved_flags_list
);
426 error
= rc
->class->construct(netdev
);
428 netdev_change_seq_changed(netdev
);
430 ovs_refcount_unref(&rc
->refcnt
);
431 seq_destroy(netdev
->reconfigure_seq
);
433 ovs_assert(ovs_list_is_empty(&netdev
->saved_flags_list
));
434 shash_delete(&netdev_shash
, netdev
->node
);
435 rc
->class->dealloc(netdev
);
441 VLOG_WARN("could not create netdev %s of unknown type %s",
443 error
= EAFNOSUPPORT
;
453 ovs_mutex_unlock(&netdev_mutex
);
458 /* Returns a reference to 'netdev_' for the caller to own. Returns null if
459 * 'netdev_' is null. */
461 netdev_ref(const struct netdev
*netdev_
)
462 OVS_EXCLUDED(netdev_mutex
)
464 struct netdev
*netdev
= CONST_CAST(struct netdev
*, netdev_
);
467 ovs_mutex_lock(&netdev_mutex
);
468 ovs_assert(netdev
->ref_cnt
> 0);
470 ovs_mutex_unlock(&netdev_mutex
);
475 /* Reconfigures the device 'netdev' with 'args'. 'args' may be empty
476 * or NULL if none are needed. */
478 netdev_set_config(struct netdev
*netdev
, const struct smap
*args
, char **errp
)
479 OVS_EXCLUDED(netdev_mutex
)
481 if (netdev
->netdev_class
->set_config
) {
482 const struct smap no_args
= SMAP_INITIALIZER(&no_args
);
483 char *verbose_error
= NULL
;
486 error
= netdev
->netdev_class
->set_config(netdev
,
487 args
? args
: &no_args
,
490 VLOG_WARN_BUF(verbose_error
? NULL
: errp
,
491 "%s: could not set configuration (%s)",
492 netdev_get_name(netdev
), ovs_strerror(error
));
495 *errp
= verbose_error
;
502 } else if (args
&& !smap_is_empty(args
)) {
503 VLOG_WARN_BUF(errp
, "%s: arguments provided to device that is not configurable",
504 netdev_get_name(netdev
));
509 /* Returns the current configuration for 'netdev' in 'args'. The caller must
510 * have already initialized 'args' with smap_init(). Returns 0 on success, in
511 * which case 'args' will be filled with 'netdev''s configuration. On failure
512 * returns a positive errno value, in which case 'args' will be empty.
514 * The caller owns 'args' and its contents and must eventually free them with
517 netdev_get_config(const struct netdev
*netdev
, struct smap
*args
)
518 OVS_EXCLUDED(netdev_mutex
)
523 if (netdev
->netdev_class
->get_config
) {
524 error
= netdev
->netdev_class
->get_config(netdev
, args
);
535 const struct netdev_tunnel_config
*
536 netdev_get_tunnel_config(const struct netdev
*netdev
)
537 OVS_EXCLUDED(netdev_mutex
)
539 if (netdev
->netdev_class
->get_tunnel_config
) {
540 return netdev
->netdev_class
->get_tunnel_config(netdev
);
546 /* Returns the id of the numa node the 'netdev' is on. If the function
547 * is not implemented, returns NETDEV_NUMA_UNSPEC. */
549 netdev_get_numa_id(const struct netdev
*netdev
)
551 if (netdev
->netdev_class
->get_numa_id
) {
552 return netdev
->netdev_class
->get_numa_id(netdev
);
554 return NETDEV_NUMA_UNSPEC
;
/* Drops one reference to 'dev'; destroys it when the count reaches zero.
 * Called with netdev_mutex held; releases it on every path (see
 * OVS_RELEASES below). */
559 netdev_unref(struct netdev
*dev
)
560 OVS_RELEASES(netdev_mutex
)
/* A zero ref count here would mean an unref without a matching ref. */
562 ovs_assert(dev
->ref_cnt
);
563 if (!--dev
->ref_cnt
) {
/* Last reference: tear the device down.  'class' is saved before
 * destruct/dealloc because 'dev' becomes invalid below. */
564 const struct netdev_class
*class = dev
->netdev_class
;
565 struct netdev_registered_class
*rc
;
/* Provider-specific destruction first, ... */
567 dev
->netdev_class
->destruct(dev
);
/* ... then drop it from the global name->netdev map, ... */
570 shash_delete(&netdev_shash
, dev
->node
);
/* ... free the reconfiguration seq, and release the memory. */
573 seq_destroy(dev
->reconfigure_seq
);
574 dev
->netdev_class
->dealloc(dev
);
575 ovs_mutex_unlock(&netdev_mutex
);
/* Drop the per-instance reference taken on the provider class when the
 * netdev was opened. */
577 rc
= netdev_lookup_class(class->type
);
578 ovs_refcount_unref(&rc
->refcnt
);
/* Not the last reference: just release the mutex. */
580 ovs_mutex_unlock(&netdev_mutex
);
584 /* Closes and destroys 'netdev'. */
586 netdev_close(struct netdev
*netdev
)
587 OVS_EXCLUDED(netdev_mutex
)
/* Take netdev_mutex; netdev_unref() is declared OVS_RELEASES(netdev_mutex)
 * and drops it on every path, so no unlock is needed here. */
590 ovs_mutex_lock(&netdev_mutex
);
591 netdev_unref(netdev
);
595 /* Removes 'netdev' from the global shash and unrefs 'netdev'.
597 * This allows handler and revalidator threads to still retain references
598 * to this netdev while the main thread changes interface configuration.
600 * This function should only be called by the main thread when closing
601 * netdevs during user configuration changes. Otherwise, netdev_close should be
602 * used to close netdevs. */
604 netdev_remove(struct netdev
*netdev
)
607 ovs_mutex_lock(&netdev_mutex
);
609 shash_delete(&netdev_shash
, netdev
->node
);
611 netdev_change_seq_changed(netdev
);
613 netdev_unref(netdev
);
617 /* Parses 'netdev_name_', which is of the form [type@]name into its component
618 * pieces. 'name' and 'type' must be freed by the caller. */
620 netdev_parse_name(const char *netdev_name_
, char **name
, char **type
)
622 char *netdev_name
= xstrdup(netdev_name_
);
625 separator
= strchr(netdev_name
, '@');
629 *name
= xstrdup(separator
+ 1);
632 *type
= xstrdup("system");
636 /* Attempts to open a netdev_rxq handle for obtaining packets received on
637 * 'netdev'. On success, returns 0 and stores a nonnull 'netdev_rxq *' into
638 * '*rxp'. On failure, returns a positive errno value and stores NULL into
641 * Some kinds of network devices might not support receiving packets. This
642 * function returns EOPNOTSUPP in that case.*/
644 netdev_rxq_open(struct netdev
*netdev
, struct netdev_rxq
**rxp
, int id
)
645 OVS_EXCLUDED(netdev_mutex
)
649 if (netdev
->netdev_class
->rxq_alloc
&& id
< netdev
->n_rxq
) {
650 struct netdev_rxq
*rx
= netdev
->netdev_class
->rxq_alloc();
654 error
= netdev
->netdev_class
->rxq_construct(rx
);
660 netdev
->netdev_class
->rxq_dealloc(rx
);
674 netdev_rxq_close(struct netdev_rxq
*rx
)
675 OVS_EXCLUDED(netdev_mutex
)
678 struct netdev
*netdev
= rx
->netdev
;
679 netdev
->netdev_class
->rxq_destruct(rx
);
680 netdev
->netdev_class
->rxq_dealloc(rx
);
681 netdev_close(netdev
);
685 /* Attempts to receive a batch of packets from 'rx'. 'batch' should point to
686 * the beginning of an array of NETDEV_MAX_BURST pointers to dp_packet. If
687 * successful, this function stores pointers to up to NETDEV_MAX_BURST
688 * dp_packets into the array, transferring ownership of the packets to the
689 * caller, stores the number of received packets in 'batch->count', and returns
692 * The implementation does not necessarily initialize any non-data members of
693 * 'batch'. That is, the caller must initialize layer pointers and metadata
694 * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
696 * Returns EAGAIN immediately if no packet is ready to be received or another
697 * positive errno value if an error was encountered. */
699 netdev_rxq_recv(struct netdev_rxq
*rx
, struct dp_packet_batch
*batch
,
704 retval
= rx
->netdev
->netdev_class
->rxq_recv(rx
, batch
, qfill
);
706 COVERAGE_INC(netdev_received
);
713 /* Arranges for poll_block() to wake up when a packet is ready to be received
716 netdev_rxq_wait(struct netdev_rxq
*rx
)
718 rx
->netdev
->netdev_class
->rxq_wait(rx
);
721 /* Discards any packets ready to be received on 'rx'. */
723 netdev_rxq_drain(struct netdev_rxq
*rx
)
725 return (rx
->netdev
->netdev_class
->rxq_drain
726 ? rx
->netdev
->netdev_class
->rxq_drain(rx
)
730 /* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
731 * otherwise a positive errno value.
733 * 'n_txq' specifies the exact number of transmission queues to create.
735 * The change might not be effective immediately. The caller must check if a
736 * reconfiguration is required with netdev_is_reconf_required() and eventually
737 * call netdev_reconfigure() before using the new queues.
739 * On error, the tx queue configuration is unchanged */
741 netdev_set_tx_multiq(struct netdev
*netdev
, unsigned int n_txq
)
745 error
= (netdev
->netdev_class
->set_tx_multiq
746 ? netdev
->netdev_class
->set_tx_multiq(netdev
, MAX(n_txq
, 1))
749 if (error
&& error
!= EOPNOTSUPP
) {
750 VLOG_DBG_RL(&rl
, "failed to set tx queue for network device %s:"
751 "%s", netdev_get_name(netdev
), ovs_strerror(error
));
758 netdev_get_pt_mode(const struct netdev
*netdev
)
760 return (netdev
->netdev_class
->get_pt_mode
761 ? netdev
->netdev_class
->get_pt_mode(netdev
)
762 : NETDEV_PT_LEGACY_L2
);
765 /* Sends 'batch' on 'netdev'. Returns 0 if successful (for every packet),
766 * otherwise a positive errno value. Returns EAGAIN without blocking if
767 * at least one the packets cannot be queued immediately. Returns EMSGSIZE
768 * if a partial packet was transmitted or if a packet is too big or too small
769 * to transmit on the device.
771 * The caller must make sure that 'netdev' supports sending by making sure that
772 * 'netdev_n_txq(netdev)' returns >= 1.
774 * If the function returns a non-zero value, some of the packets might have
777 * The caller transfers ownership of all the packets to the network device,
778 * regardless of success.
780 * If 'concurrent_txq' is true, the caller may perform concurrent calls
781 * to netdev_send() with the same 'qid'. The netdev provider is responsible
782 * for making sure that these concurrent calls do not create a race condition
783 * by using locking or other synchronization if required.
785 * The network device is expected to maintain one or more packet
786 * transmission queues, so that the caller does not ordinarily have to
787 * do additional queuing of packets. 'qid' specifies the queue to use
788 * and can be ignored if the implementation does not support multiple
791 netdev_send(struct netdev
*netdev
, int qid
, struct dp_packet_batch
*batch
,
794 int error
= netdev
->netdev_class
->send(netdev
, qid
, batch
,
797 COVERAGE_INC(netdev_sent
);
802 /* Pop tunnel header, build tunnel metadata and resize 'batch->packets'
803 * for further processing.
805 * The caller must make sure that 'netdev' support this operation by checking
806 * that netdev_has_tunnel_push_pop() returns true. */
808 netdev_pop_header(struct netdev
*netdev
, struct dp_packet_batch
*batch
)
810 struct dp_packet
*packet
;
811 size_t i
, size
= dp_packet_batch_size(batch
);
813 DP_PACKET_BATCH_REFILL_FOR_EACH (i
, size
, packet
, batch
) {
814 packet
= netdev
->netdev_class
->pop_header(packet
);
816 /* Reset the checksum offload flags if present, to avoid wrong
817 * interpretation in the further packet processing when
819 reset_dp_packet_checksum_ol_flags(packet
);
820 dp_packet_batch_refill(batch
, packet
, i
);
826 netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params
*params
,
827 const struct flow
*tnl_flow
,
828 const struct in6_addr
*src
,
829 struct eth_addr dmac
,
830 struct eth_addr smac
)
832 params
->flow
= tnl_flow
;
836 params
->is_ipv6
= !IN6_IS_ADDR_V4MAPPED(src
);
839 int netdev_build_header(const struct netdev
*netdev
,
840 struct ovs_action_push_tnl
*data
,
841 const struct netdev_tnl_build_header_params
*params
)
843 if (netdev
->netdev_class
->build_header
) {
844 return netdev
->netdev_class
->build_header(netdev
, data
, params
);
849 /* Push tunnel header (reading from tunnel metadata) and resize
850 * 'batch->packets' for further processing.
852 * The caller must make sure that 'netdev' support this operation by checking
853 * that netdev_has_tunnel_push_pop() returns true. */
855 netdev_push_header(const struct netdev
*netdev
,
856 struct dp_packet_batch
*batch
,
857 const struct ovs_action_push_tnl
*data
)
859 struct dp_packet
*packet
;
860 DP_PACKET_BATCH_FOR_EACH (i
, packet
, batch
) {
861 netdev
->netdev_class
->push_header(netdev
, packet
, data
);
862 pkt_metadata_init(&packet
->md
, data
->out_port
);
868 /* Registers with the poll loop to wake up from the next call to poll_block()
869 * when the packet transmission queue has sufficient room to transmit a packet
870 * with netdev_send().
872 * The network device is expected to maintain one or more packet
873 * transmission queues, so that the caller does not ordinarily have to
874 * do additional queuing of packets. 'qid' specifies the queue to use
875 * and can be ignored if the implementation does not support multiple
878 netdev_send_wait(struct netdev
*netdev
, int qid
)
880 if (netdev
->netdev_class
->send_wait
) {
881 netdev
->netdev_class
->send_wait(netdev
, qid
);
885 /* Attempts to set 'netdev''s MAC address to 'mac'. Returns 0 if successful,
886 * otherwise a positive errno value. */
888 netdev_set_etheraddr(struct netdev
*netdev
, const struct eth_addr mac
)
890 return netdev
->netdev_class
->set_etheraddr(netdev
, mac
);
893 /* Retrieves 'netdev''s MAC address. If successful, returns 0 and copies the
894 * the MAC address into 'mac'. On failure, returns a positive errno value and
895 * clears 'mac' to all-zeros. */
897 netdev_get_etheraddr(const struct netdev
*netdev
, struct eth_addr
*mac
)
901 error
= netdev
->netdev_class
->get_etheraddr(netdev
, mac
);
903 memset(mac
, 0, sizeof *mac
);
908 /* Returns the name of the network device that 'netdev' represents,
909 * e.g. "eth0". The caller must not modify or free the returned string. */
911 netdev_get_name(const struct netdev
*netdev
)
916 /* Retrieves the MTU of 'netdev'. The MTU is the maximum size of transmitted
917 * (and received) packets, in bytes, not including the hardware header; thus,
918 * this is typically 1500 bytes for Ethernet devices.
920 * If successful, returns 0 and stores the MTU size in '*mtup'. Returns
921 * EOPNOTSUPP if 'netdev' does not have an MTU (as e.g. some tunnels do not).
922 * On other failure, returns a positive errno value. On failure, sets '*mtup'
925 netdev_get_mtu(const struct netdev
*netdev
, int *mtup
)
927 const struct netdev_class
*class = netdev
->netdev_class
;
930 error
= class->get_mtu
? class->get_mtu(netdev
, mtup
) : EOPNOTSUPP
;
933 if (error
!= EOPNOTSUPP
) {
934 VLOG_DBG_RL(&rl
, "failed to retrieve MTU for network device %s: "
935 "%s", netdev_get_name(netdev
), ovs_strerror(error
));
941 /* Sets the MTU of 'netdev'. The MTU is the maximum size of transmitted
942 * (and received) packets, in bytes.
944 * If successful, returns 0. Returns EOPNOTSUPP if 'netdev' does not have an
945 * MTU (as e.g. some tunnels do not). On other failure, returns a positive
948 netdev_set_mtu(struct netdev
*netdev
, int mtu
)
950 const struct netdev_class
*class = netdev
->netdev_class
;
953 error
= class->set_mtu
? class->set_mtu(netdev
, mtu
) : EOPNOTSUPP
;
954 if (error
&& error
!= EOPNOTSUPP
) {
955 VLOG_WARN_RL(&rl
, "failed to set MTU for network device %s: %s",
956 netdev_get_name(netdev
), ovs_strerror(error
));
962 /* If 'user_config' is true, the user wants to control 'netdev''s MTU and we
963 * should not override it. If 'user_config' is false, we may adjust
964 * 'netdev''s MTU (e.g., if 'netdev' is internal). */
966 netdev_mtu_user_config(struct netdev
*netdev
, bool user_config
)
968 if (netdev
->mtu_user_config
!= user_config
) {
969 netdev_change_seq_changed(netdev
);
970 netdev
->mtu_user_config
= user_config
;
974 /* Returns 'true' if the user explicitly specified an MTU value for 'netdev'.
975 * Otherwise, returns 'false', in which case we are allowed to adjust the
978 netdev_mtu_is_user_config(struct netdev
*netdev
)
980 return netdev
->mtu_user_config
;
983 /* Returns the ifindex of 'netdev', if successful, as a positive number. On
984 * failure, returns a negative errno value.
986 * The desired semantics of the ifindex value are a combination of those
987 * specified by POSIX for if_nametoindex() and by SNMP for ifIndex. An ifindex
988 * value should be unique within a host and remain stable at least until
989 * reboot. SNMP says an ifindex "ranges between 1 and the value of ifNumber"
990 * but many systems do not follow this rule anyhow.
992 * Some network devices may not implement support for this function. In such
993 * cases this function will always return -EOPNOTSUPP.
996 netdev_get_ifindex(const struct netdev
*netdev
)
998 int (*get_ifindex
)(const struct netdev
*);
1000 get_ifindex
= netdev
->netdev_class
->get_ifindex
;
1002 return get_ifindex
? get_ifindex(netdev
) : -EOPNOTSUPP
;
1005 /* Stores the features supported by 'netdev' into each of '*current',
1006 * '*advertised', '*supported', and '*peer' that are non-null. Each value is a
1007 * bitmap of "enum ofp_port_features" bits, in host byte order. Returns 0 if
1008 * successful, otherwise a positive errno value. On failure, all of the
1009 * passed-in values are set to 0.
1011 * Some network devices may not implement support for this function. In such
1012 * cases this function will always return EOPNOTSUPP. */
1014 netdev_get_features(const struct netdev
*netdev
,
1015 enum netdev_features
*current
,
1016 enum netdev_features
*advertised
,
1017 enum netdev_features
*supported
,
1018 enum netdev_features
*peer
)
1020 int (*get_features
)(const struct netdev
*netdev
,
1021 enum netdev_features
*current
,
1022 enum netdev_features
*advertised
,
1023 enum netdev_features
*supported
,
1024 enum netdev_features
*peer
);
1025 enum netdev_features dummy
[4];
1029 current
= &dummy
[0];
1032 advertised
= &dummy
[1];
1035 supported
= &dummy
[2];
1041 get_features
= netdev
->netdev_class
->get_features
;
1042 error
= get_features
1043 ? get_features(netdev
, current
, advertised
, supported
,
1047 *current
= *advertised
= *supported
= *peer
= 0;
1052 /* Returns the maximum speed of a network connection that has the NETDEV_F_*
1053 * bits in 'features', in bits per second. If no bits that indicate a speed
1054 * are set in 'features', returns 'default_bps'. */
1056 netdev_features_to_bps(enum netdev_features features
,
1057 uint64_t default_bps
)
1060 F_1000000MB
= NETDEV_F_1TB_FD
,
1061 F_100000MB
= NETDEV_F_100GB_FD
,
1062 F_40000MB
= NETDEV_F_40GB_FD
,
1063 F_10000MB
= NETDEV_F_10GB_FD
,
1064 F_1000MB
= NETDEV_F_1GB_HD
| NETDEV_F_1GB_FD
,
1065 F_100MB
= NETDEV_F_100MB_HD
| NETDEV_F_100MB_FD
,
1066 F_10MB
= NETDEV_F_10MB_HD
| NETDEV_F_10MB_FD
1069 return ( features
& F_1000000MB
? UINT64_C(1000000000000)
1070 : features
& F_100000MB
? UINT64_C(100000000000)
1071 : features
& F_40000MB
? UINT64_C(40000000000)
1072 : features
& F_10000MB
? UINT64_C(10000000000)
1073 : features
& F_1000MB
? UINT64_C(1000000000)
1074 : features
& F_100MB
? UINT64_C(100000000)
1075 : features
& F_10MB
? UINT64_C(10000000)
1079 /* Returns true if any of the NETDEV_F_* bits that indicate a full-duplex link
1080 * are set in 'features', otherwise false. */
1082 netdev_features_is_full_duplex(enum netdev_features features
)
1084 return (features
& (NETDEV_F_10MB_FD
| NETDEV_F_100MB_FD
| NETDEV_F_1GB_FD
1085 | NETDEV_F_10GB_FD
| NETDEV_F_40GB_FD
1086 | NETDEV_F_100GB_FD
| NETDEV_F_1TB_FD
)) != 0;
1089 /* Set the features advertised by 'netdev' to 'advertise'. Returns 0 if
1090 * successful, otherwise a positive errno value. */
1092 netdev_set_advertisements(struct netdev
*netdev
,
1093 enum netdev_features advertise
)
1095 return (netdev
->netdev_class
->set_advertisements
1096 ? netdev
->netdev_class
->set_advertisements(
1102 netdev_feature_to_name(uint32_t bit
)
1104 enum netdev_features f
= bit
;
1107 case NETDEV_F_10MB_HD
: return "10MB-HD";
1108 case NETDEV_F_10MB_FD
: return "10MB-FD";
1109 case NETDEV_F_100MB_HD
: return "100MB-HD";
1110 case NETDEV_F_100MB_FD
: return "100MB-FD";
1111 case NETDEV_F_1GB_HD
: return "1GB-HD";
1112 case NETDEV_F_1GB_FD
: return "1GB-FD";
1113 case NETDEV_F_10GB_FD
: return "10GB-FD";
1114 case NETDEV_F_40GB_FD
: return "40GB-FD";
1115 case NETDEV_F_100GB_FD
: return "100GB-FD";
1116 case NETDEV_F_1TB_FD
: return "1TB-FD";
1117 case NETDEV_F_OTHER
: return "OTHER";
1118 case NETDEV_F_COPPER
: return "COPPER";
1119 case NETDEV_F_FIBER
: return "FIBER";
1120 case NETDEV_F_AUTONEG
: return "AUTO_NEG";
1121 case NETDEV_F_PAUSE
: return "AUTO_PAUSE";
1122 case NETDEV_F_PAUSE_ASYM
: return "AUTO_PAUSE_ASYM";
1129 netdev_features_format(struct ds
*s
, enum netdev_features features
)
1131 ofp_print_bit_names(s
, features
, netdev_feature_to_name
, ' ');
1132 ds_put_char(s
, '\n');
1135 /* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask. If
1136 * 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared. Returns a
1137 * positive errno value. */
1139 netdev_set_in4(struct netdev
*netdev
, struct in_addr addr
, struct in_addr mask
)
1141 return (netdev
->netdev_class
->set_in4
1142 ? netdev
->netdev_class
->set_in4(netdev
, addr
, mask
)
1147 netdev_get_addresses_by_name(const char *device_name
,
1148 struct in6_addr
**addrsp
, int *n_addrsp
)
1150 struct netdev
*netdev
;
1151 int error
= netdev_open(device_name
, NULL
, &netdev
);
1158 struct in6_addr
*masks
;
1159 error
= netdev_get_addr_list(netdev
, addrsp
, &masks
, n_addrsp
);
1160 netdev_close(netdev
);
1165 /* Obtains an IPv4 address from 'device_name' and save the address in '*in4'.
1166 * Returns 0 if successful, otherwise a positive errno value. */
1168 netdev_get_in4_by_name(const char *device_name
, struct in_addr
*in4
)
1170 struct in6_addr
*addrs
;
1172 int error
= netdev_get_addresses_by_name(device_name
, &addrs
, &n
);
1177 for (int i
= 0; i
< n
; i
++) {
1178 if (IN6_IS_ADDR_V4MAPPED(&addrs
[i
])) {
1179 in4
->s_addr
= in6_addr_get_mapped_ipv4(&addrs
[i
]);
1190 /* Obtains an IPv4 or IPv6 address from 'device_name' and save the address in
1191 * '*in6', representing IPv4 addresses as v6-mapped. Returns 0 if successful,
1192 * otherwise a positive errno value. */
1194 netdev_get_ip_by_name(const char *device_name
, struct in6_addr
*in6
)
1196 struct in6_addr
*addrs
;
1198 int error
= netdev_get_addresses_by_name(device_name
, &addrs
, &n
);
1203 for (int i
= 0; i
< n
; i
++) {
1204 if (!in6_is_lla(&addrs
[i
])) {
1216 /* Adds 'router' as a default IP gateway for the TCP/IP stack that corresponds
1219 netdev_add_router(struct netdev
*netdev
, struct in_addr router
)
1221 COVERAGE_INC(netdev_add_router
);
1222 return (netdev
->netdev_class
->add_router
1223 ? netdev
->netdev_class
->add_router(netdev
, router
)
1227 /* Looks up the next hop for 'host' for the TCP/IP stack that corresponds to
1228 * 'netdev'. If a route cannot not be determined, sets '*next_hop' to 0,
1229 * '*netdev_name' to null, and returns a positive errno value. Otherwise, if a
1230 * next hop is found, stores the next hop gateway's address (0 if 'host' is on
1231 * a directly connected network) in '*next_hop' and a copy of the name of the
1232 * device to reach 'host' in '*netdev_name', and returns 0. The caller is
1233 * responsible for freeing '*netdev_name' (by calling free()). */
1235 netdev_get_next_hop(const struct netdev
*netdev
,
1236 const struct in_addr
*host
, struct in_addr
*next_hop
,
1239 int error
= (netdev
->netdev_class
->get_next_hop
1240 ? netdev
->netdev_class
->get_next_hop(
1241 host
, next_hop
, netdev_name
)
1244 next_hop
->s_addr
= 0;
1245 *netdev_name
= NULL
;
1250 /* Populates 'smap' with status information.
1252 * Populates 'smap' with 'netdev' specific status information. This
1253 * information may be used to populate the status column of the Interface table
1254 * as defined in ovs-vswitchd.conf.db(5). */
1256 netdev_get_status(const struct netdev
*netdev
, struct smap
*smap
)
1258 return (netdev
->netdev_class
->get_status
1259 ? netdev
->netdev_class
->get_status(netdev
, smap
)
1263 /* Returns all assigned IP address to 'netdev' and returns 0.
1264 * API allocates array of address and masks and set it to
1265 * '*addr' and '*mask'.
1266 * Otherwise, returns a positive errno value and sets '*addr', '*mask
1267 * and '*n_addr' to NULL.
1269 * The following error values have well-defined meanings:
1271 * - EADDRNOTAVAIL: 'netdev' has no assigned IPv6 address.
1273 * - EOPNOTSUPP: No IPv6 network stack attached to 'netdev'.
1275 * 'addr' may be null, in which case the address itself is not reported. */
1277 netdev_get_addr_list(const struct netdev
*netdev
, struct in6_addr
**addr
,
1278 struct in6_addr
**mask
, int *n_addr
)
1282 error
= (netdev
->netdev_class
->get_addr_list
1283 ? netdev
->netdev_class
->get_addr_list(netdev
, addr
, mask
, n_addr
): EOPNOTSUPP
);
1284 if (error
&& addr
) {
1293 /* On 'netdev', turns off the flags in 'off' and then turns on the flags in
1294 * 'on'. Returns 0 if successful, otherwise a positive errno value. */
1296 do_update_flags(struct netdev
*netdev
, enum netdev_flags off
,
1297 enum netdev_flags on
, enum netdev_flags
*old_flagsp
,
1298 struct netdev_saved_flags
**sfp
)
1299 OVS_EXCLUDED(netdev_mutex
)
1301 struct netdev_saved_flags
*sf
= NULL
;
1302 enum netdev_flags old_flags
;
1305 error
= netdev
->netdev_class
->update_flags(netdev
, off
& ~on
, on
,
1308 VLOG_WARN_RL(&rl
, "failed to %s flags for network device %s: %s",
1309 off
|| on
? "set" : "get", netdev_get_name(netdev
),
1310 ovs_strerror(error
));
1312 } else if ((off
|| on
) && sfp
) {
1313 enum netdev_flags new_flags
= (old_flags
& ~off
) | on
;
1314 enum netdev_flags changed_flags
= old_flags
^ new_flags
;
1315 if (changed_flags
) {
1316 ovs_mutex_lock(&netdev_mutex
);
1317 *sfp
= sf
= xmalloc(sizeof *sf
);
1318 sf
->netdev
= netdev
;
1319 ovs_list_push_front(&netdev
->saved_flags_list
, &sf
->node
);
1320 sf
->saved_flags
= changed_flags
;
1321 sf
->saved_values
= changed_flags
& new_flags
;
1324 ovs_mutex_unlock(&netdev_mutex
);
1329 *old_flagsp
= old_flags
;
1338 /* Obtains the current flags for 'netdev' and stores them into '*flagsp'.
1339 * Returns 0 if successful, otherwise a positive errno value. On failure,
1340 * stores 0 into '*flagsp'. */
1342 netdev_get_flags(const struct netdev
*netdev_
, enum netdev_flags
*flagsp
)
1344 struct netdev
*netdev
= CONST_CAST(struct netdev
*, netdev_
);
1345 return do_update_flags(netdev
, 0, 0, flagsp
, NULL
);
1348 /* Sets the flags for 'netdev' to 'flags'.
1349 * Returns 0 if successful, otherwise a positive errno value. */
1351 netdev_set_flags(struct netdev
*netdev
, enum netdev_flags flags
,
1352 struct netdev_saved_flags
**sfp
)
1354 return do_update_flags(netdev
, -1, flags
, NULL
, sfp
);
1357 /* Turns on the specified 'flags' on 'netdev':
1359 * - On success, returns 0. If 'sfp' is nonnull, sets '*sfp' to a newly
1360 * allocated 'struct netdev_saved_flags *' that may be passed to
1361 * netdev_restore_flags() to restore the original values of 'flags' on
1362 * 'netdev' (this will happen automatically at program termination if
1363 * netdev_restore_flags() is never called) , or to NULL if no flags were
1366 * - On failure, returns a positive errno value. If 'sfp' is nonnull, sets
1367 * '*sfp' to NULL. */
1369 netdev_turn_flags_on(struct netdev
*netdev
, enum netdev_flags flags
,
1370 struct netdev_saved_flags
**sfp
)
1372 return do_update_flags(netdev
, 0, flags
, NULL
, sfp
);
1375 /* Turns off the specified 'flags' on 'netdev'. See netdev_turn_flags_on() for
1376 * details of the interface. */
1378 netdev_turn_flags_off(struct netdev
*netdev
, enum netdev_flags flags
,
1379 struct netdev_saved_flags
**sfp
)
1381 return do_update_flags(netdev
, flags
, 0, NULL
, sfp
);
1384 /* Restores the flags that were saved in 'sf', and destroys 'sf'.
1385 * Does nothing if 'sf' is NULL. */
1387 netdev_restore_flags(struct netdev_saved_flags
*sf
)
1388 OVS_EXCLUDED(netdev_mutex
)
1391 struct netdev
*netdev
= sf
->netdev
;
1392 enum netdev_flags old_flags
;
1394 netdev
->netdev_class
->update_flags(netdev
,
1395 sf
->saved_flags
& sf
->saved_values
,
1396 sf
->saved_flags
& ~sf
->saved_values
,
1399 ovs_mutex_lock(&netdev_mutex
);
1400 ovs_list_remove(&sf
->node
);
1402 netdev_unref(netdev
);
1406 /* Looks up the ARP table entry for 'ip' on 'netdev'. If one exists and can be
1407 * successfully retrieved, it stores the corresponding MAC address in 'mac' and
1408 * returns 0. Otherwise, it returns a positive errno value; in particular,
1409 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
1411 netdev_arp_lookup(const struct netdev
*netdev
,
1412 ovs_be32 ip
, struct eth_addr
*mac
)
1414 int error
= (netdev
->netdev_class
->arp_lookup
1415 ? netdev
->netdev_class
->arp_lookup(netdev
, ip
, mac
)
1418 *mac
= eth_addr_zero
;
1423 /* Returns true if carrier is active (link light is on) on 'netdev'. */
1425 netdev_get_carrier(const struct netdev
*netdev
)
1428 enum netdev_flags flags
;
1431 netdev_get_flags(netdev
, &flags
);
1432 if (!(flags
& NETDEV_UP
)) {
1436 if (!netdev
->netdev_class
->get_carrier
) {
1440 error
= netdev
->netdev_class
->get_carrier(netdev
, &carrier
);
1442 VLOG_DBG("%s: failed to get network device carrier status, assuming "
1443 "down: %s", netdev_get_name(netdev
), ovs_strerror(error
));
1450 /* Returns the number of times 'netdev''s carrier has changed. */
1452 netdev_get_carrier_resets(const struct netdev
*netdev
)
1454 return (netdev
->netdev_class
->get_carrier_resets
1455 ? netdev
->netdev_class
->get_carrier_resets(netdev
)
1459 /* Attempts to force netdev_get_carrier() to poll 'netdev''s MII registers for
1460 * link status instead of checking 'netdev''s carrier. 'netdev''s MII
1461 * registers will be polled once ever 'interval' milliseconds. If 'netdev'
1462 * does not support MII, another method may be used as a fallback. If
1463 * 'interval' is less than or equal to zero, reverts netdev_get_carrier() to
1464 * its normal behavior.
1466 * Returns 0 if successful, otherwise a positive errno value. */
1468 netdev_set_miimon_interval(struct netdev
*netdev
, long long int interval
)
1470 return (netdev
->netdev_class
->set_miimon_interval
1471 ? netdev
->netdev_class
->set_miimon_interval(netdev
, interval
)
1475 /* Retrieves current device stats for 'netdev'. */
1477 netdev_get_stats(const struct netdev
*netdev
, struct netdev_stats
*stats
)
1481 /* Statistics are initialized before passing it to particular device
1482 * implementation so all values are filtered out by default. */
1483 memset(stats
, 0xFF, sizeof *stats
);
1485 COVERAGE_INC(netdev_get_stats
);
1486 error
= (netdev
->netdev_class
->get_stats
1487 ? netdev
->netdev_class
->get_stats(netdev
, stats
)
1490 /* In case of error all statistics are filtered out */
1491 memset(stats
, 0xff, sizeof *stats
);
1496 /* Retrieves current device custom stats for 'netdev'. */
1498 netdev_get_custom_stats(const struct netdev
*netdev
,
1499 struct netdev_custom_stats
*custom_stats
)
1502 memset(custom_stats
, 0, sizeof *custom_stats
);
1503 error
= (netdev
->netdev_class
->get_custom_stats
1504 ? netdev
->netdev_class
->get_custom_stats(netdev
, custom_stats
)
1511 /* Attempts to set input rate limiting (policing) policy, such that up to
1512 * 'kbits_rate' kbps of traffic is accepted, with a maximum accumulative burst
1513 * size of 'kbits' kb. */
1515 netdev_set_policing(struct netdev
*netdev
, uint32_t kbits_rate
,
1516 uint32_t kbits_burst
)
1518 return (netdev
->netdev_class
->set_policing
1519 ? netdev
->netdev_class
->set_policing(netdev
,
1520 kbits_rate
, kbits_burst
)
1524 /* Adds to 'types' all of the forms of QoS supported by 'netdev', or leaves it
1525 * empty if 'netdev' does not support QoS. Any names added to 'types' should
1526 * be documented as valid for the "type" column in the "QoS" table in
1527 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1529 * Every network device supports disabling QoS with a type of "", but this type
1530 * will not be added to 'types'.
1532 * The caller must initialize 'types' (e.g. with sset_init()) before calling
1533 * this function. The caller is responsible for destroying 'types' (e.g. with
1534 * sset_destroy()) when it is no longer needed.
1536 * Returns 0 if successful, otherwise a positive errno value. */
1538 netdev_get_qos_types(const struct netdev
*netdev
, struct sset
*types
)
1540 const struct netdev_class
*class = netdev
->netdev_class
;
1541 return (class->get_qos_types
1542 ? class->get_qos_types(netdev
, types
)
1546 /* Queries 'netdev' for its capabilities regarding the specified 'type' of QoS,
1547 * which should be "" or one of the types returned by netdev_get_qos_types()
1548 * for 'netdev'. Returns 0 if successful, otherwise a positive errno value.
1549 * On success, initializes 'caps' with the QoS capabilities; on failure, clears
1550 * 'caps' to all zeros. */
1552 netdev_get_qos_capabilities(const struct netdev
*netdev
, const char *type
,
1553 struct netdev_qos_capabilities
*caps
)
1555 const struct netdev_class
*class = netdev
->netdev_class
;
1558 int retval
= (class->get_qos_capabilities
1559 ? class->get_qos_capabilities(netdev
, type
, caps
)
1562 memset(caps
, 0, sizeof *caps
);
1566 /* Every netdev supports turning off QoS. */
1567 memset(caps
, 0, sizeof *caps
);
1572 /* Obtains the number of queues supported by 'netdev' for the specified 'type'
1573 * of QoS. Returns 0 if successful, otherwise a positive errno value. Stores
1574 * the number of queues (zero on failure) in '*n_queuesp'.
1576 * This is just a simple wrapper around netdev_get_qos_capabilities(). */
1578 netdev_get_n_queues(const struct netdev
*netdev
,
1579 const char *type
, unsigned int *n_queuesp
)
1581 struct netdev_qos_capabilities caps
;
1584 retval
= netdev_get_qos_capabilities(netdev
, type
, &caps
);
1585 *n_queuesp
= caps
.n_queues
;
1589 /* Queries 'netdev' about its currently configured form of QoS. If successful,
1590 * stores the name of the current form of QoS into '*typep', stores any details
1591 * of configuration as string key-value pairs in 'details', and returns 0. On
1592 * failure, sets '*typep' to NULL and returns a positive errno value.
1594 * A '*typep' of "" indicates that QoS is currently disabled on 'netdev'.
1596 * The caller must initialize 'details' as an empty smap (e.g. with
1597 * smap_init()) before calling this function. The caller must free 'details'
1598 * when it is no longer needed (e.g. with smap_destroy()).
1600 * The caller must not modify or free '*typep'.
1602 * '*typep' will be one of the types returned by netdev_get_qos_types() for
1603 * 'netdev'. The contents of 'details' should be documented as valid for
1604 * '*typep' in the "other_config" column in the "QoS" table in
1605 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)). */
1607 netdev_get_qos(const struct netdev
*netdev
,
1608 const char **typep
, struct smap
*details
)
1610 const struct netdev_class
*class = netdev
->netdev_class
;
1613 if (class->get_qos
) {
1614 retval
= class->get_qos(netdev
, typep
, details
);
1617 smap_clear(details
);
1621 /* 'netdev' doesn't support QoS, so report that QoS is disabled. */
1627 /* Attempts to reconfigure QoS on 'netdev', changing the form of QoS to 'type'
1628 * with details of configuration from 'details'. Returns 0 if successful,
1629 * otherwise a positive errno value. On error, the previous QoS configuration
1632 * When this function changes the type of QoS (not just 'details'), this also
1633 * resets all queue configuration for 'netdev' to their defaults (which depend
1634 * on the specific type of QoS). Otherwise, the queue configuration for
1635 * 'netdev' is unchanged.
1637 * 'type' should be "" (to disable QoS) or one of the types returned by
1638 * netdev_get_qos_types() for 'netdev'. The contents of 'details' should be
1639 * documented as valid for the given 'type' in the "other_config" column in the
1640 * "QoS" table in vswitchd/vswitch.xml (which is built as
1641 * ovs-vswitchd.conf.db(8)).
1643 * NULL may be specified for 'details' if there are no configuration
1646 netdev_set_qos(struct netdev
*netdev
,
1647 const char *type
, const struct smap
*details
)
1649 const struct netdev_class
*class = netdev
->netdev_class
;
1655 if (class->set_qos
) {
1657 static const struct smap empty
= SMAP_INITIALIZER(&empty
);
1660 return class->set_qos(netdev
, type
, details
);
1662 return *type
? EOPNOTSUPP
: 0;
1666 /* Queries 'netdev' for information about the queue numbered 'queue_id'. If
1667 * successful, adds that information as string key-value pairs to 'details'.
1668 * Returns 0 if successful, otherwise a positive errno value.
1670 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1671 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1673 * The returned contents of 'details' should be documented as valid for the
1674 * given 'type' in the "other_config" column in the "Queue" table in
1675 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1677 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1678 * this function. The caller must free 'details' when it is no longer needed
1679 * (e.g. with smap_destroy()). */
1681 netdev_get_queue(const struct netdev
*netdev
,
1682 unsigned int queue_id
, struct smap
*details
)
1684 const struct netdev_class
*class = netdev
->netdev_class
;
1687 retval
= (class->get_queue
1688 ? class->get_queue(netdev
, queue_id
, details
)
1691 smap_clear(details
);
1696 /* Configures the queue numbered 'queue_id' on 'netdev' with the key-value
1697 * string pairs in 'details'. The contents of 'details' should be documented
1698 * as valid for the given 'type' in the "other_config" column in the "Queue"
1699 * table in vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1700 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1701 * given queue's configuration should be unmodified.
1703 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1704 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1706 * This function does not modify 'details', and the caller retains ownership of
1709 netdev_set_queue(struct netdev
*netdev
,
1710 unsigned int queue_id
, const struct smap
*details
)
1712 const struct netdev_class
*class = netdev
->netdev_class
;
1713 return (class->set_queue
1714 ? class->set_queue(netdev
, queue_id
, details
)
1718 /* Attempts to delete the queue numbered 'queue_id' from 'netdev'. Some kinds
1719 * of QoS may have a fixed set of queues, in which case attempts to delete them
1720 * will fail with EOPNOTSUPP.
1722 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1723 * given queue will be unmodified.
1725 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1726 * the current form of QoS (e.g. as returned by
1727 * netdev_get_n_queues(netdev)). */
1729 netdev_delete_queue(struct netdev
*netdev
, unsigned int queue_id
)
1731 const struct netdev_class
*class = netdev
->netdev_class
;
1732 return (class->delete_queue
1733 ? class->delete_queue(netdev
, queue_id
)
1737 /* Obtains statistics about 'queue_id' on 'netdev'. On success, returns 0 and
1738 * fills 'stats' with the queue's statistics; individual members of 'stats' may
1739 * be set to all-1-bits if the statistic is unavailable. On failure, returns a
1740 * positive errno value and fills 'stats' with values indicating unsupported
1743 netdev_get_queue_stats(const struct netdev
*netdev
, unsigned int queue_id
,
1744 struct netdev_queue_stats
*stats
)
1746 const struct netdev_class
*class = netdev
->netdev_class
;
1749 retval
= (class->get_queue_stats
1750 ? class->get_queue_stats(netdev
, queue_id
, stats
)
1753 stats
->tx_bytes
= UINT64_MAX
;
1754 stats
->tx_packets
= UINT64_MAX
;
1755 stats
->tx_errors
= UINT64_MAX
;
1756 stats
->created
= LLONG_MIN
;
1761 /* Initializes 'dump' to begin dumping the queues in a netdev.
1763 * This function provides no status indication. An error status for the entire
1764 * dump operation is provided when it is completed by calling
1765 * netdev_queue_dump_done().
1768 netdev_queue_dump_start(struct netdev_queue_dump
*dump
,
1769 const struct netdev
*netdev
)
1771 dump
->netdev
= netdev_ref(netdev
);
1772 if (netdev
->netdev_class
->queue_dump_start
) {
1773 dump
->error
= netdev
->netdev_class
->queue_dump_start(netdev
,
1776 dump
->error
= EOPNOTSUPP
;
1780 /* Attempts to retrieve another queue from 'dump', which must have been
1781 * initialized with netdev_queue_dump_start(). On success, stores a new queue
1782 * ID into '*queue_id', fills 'details' with configuration details for the
1783 * queue, and returns true. On failure, returns false.
1785 * Queues are not necessarily dumped in increasing order of queue ID (or any
1786 * other predictable order).
1788 * Failure might indicate an actual error or merely that the last queue has
1789 * been dumped. An error status for the entire dump operation is provided when
1790 * it is completed by calling netdev_queue_dump_done().
1792 * The returned contents of 'details' should be documented as valid for the
1793 * given 'type' in the "other_config" column in the "Queue" table in
1794 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1796 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1797 * this function. This function will clear and replace its contents. The
1798 * caller must free 'details' when it is no longer needed (e.g. with
1799 * smap_destroy()). */
1801 netdev_queue_dump_next(struct netdev_queue_dump
*dump
,
1802 unsigned int *queue_id
, struct smap
*details
)
1804 const struct netdev
*netdev
= dump
->netdev
;
1810 dump
->error
= netdev
->netdev_class
->queue_dump_next(netdev
, dump
->state
,
1814 netdev
->netdev_class
->queue_dump_done(netdev
, dump
->state
);
1820 /* Completes queue table dump operation 'dump', which must have been
1821 * initialized with netdev_queue_dump_start(). Returns 0 if the dump operation
1822 * was error-free, otherwise a positive errno value describing the problem. */
1824 netdev_queue_dump_done(struct netdev_queue_dump
*dump
)
1826 const struct netdev
*netdev
= dump
->netdev
;
1827 if (!dump
->error
&& netdev
->netdev_class
->queue_dump_done
) {
1828 dump
->error
= netdev
->netdev_class
->queue_dump_done(netdev
,
1831 netdev_close(dump
->netdev
);
1832 return dump
->error
== EOF
? 0 : dump
->error
;
1835 /* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
1836 * its statistics, and the 'aux' specified by the caller. The order of
1837 * iteration is unspecified, but (when successful) each queue is visited
1840 * Calling this function may be more efficient than calling
1841 * netdev_get_queue_stats() for every queue.
1843 * 'cb' must not modify or free the statistics passed in.
1845 * Returns 0 if successful, otherwise a positive errno value. On error, some
1846 * configured queues may not have been included in the iteration. */
1848 netdev_dump_queue_stats(const struct netdev
*netdev
,
1849 netdev_dump_queue_stats_cb
*cb
, void *aux
)
1851 const struct netdev_class
*class = netdev
->netdev_class
;
1852 return (class->dump_queue_stats
1853 ? class->dump_queue_stats(netdev
, cb
, aux
)
1858 /* Returns the class type of 'netdev'.
1860 * The caller must not free the returned value. */
1862 netdev_get_type(const struct netdev
*netdev
)
1864 return netdev
->netdev_class
->type
;
1867 /* Returns the class associated with 'netdev'. */
1868 const struct netdev_class
*
1869 netdev_get_class(const struct netdev
*netdev
)
1871 return netdev
->netdev_class
;
1874 /* Returns the netdev with 'name' or NULL if there is none.
1876 * The caller must free the returned netdev with netdev_close(). */
1878 netdev_from_name(const char *name
)
1879 OVS_EXCLUDED(netdev_mutex
)
1881 struct netdev
*netdev
;
1883 ovs_mutex_lock(&netdev_mutex
);
1884 netdev
= shash_find_data(&netdev_shash
, name
);
1888 ovs_mutex_unlock(&netdev_mutex
);
1893 /* Fills 'device_list' with devices that match 'netdev_class'.
1895 * The caller is responsible for initializing and destroying 'device_list' and
1896 * must close each device on the list. */
1898 netdev_get_devices(const struct netdev_class
*netdev_class
,
1899 struct shash
*device_list
)
1900 OVS_EXCLUDED(netdev_mutex
)
1902 struct shash_node
*node
;
1904 ovs_mutex_lock(&netdev_mutex
);
1905 SHASH_FOR_EACH (node
, &netdev_shash
) {
1906 struct netdev
*dev
= node
->data
;
1908 if (dev
->netdev_class
== netdev_class
) {
1910 shash_add(device_list
, node
->name
, node
->data
);
1913 ovs_mutex_unlock(&netdev_mutex
);
1916 /* Extracts pointers to all 'netdev-vports' into an array 'vports'
1917 * and returns it. Stores the size of the array into '*size'.
1919 * The caller is responsible for freeing 'vports' and must close
1920 * each 'netdev-vport' in the list. */
1922 netdev_get_vports(size_t *size
)
1923 OVS_EXCLUDED(netdev_mutex
)
1925 struct netdev
**vports
;
1926 struct shash_node
*node
;
1933 /* Explicitly allocates big enough chunk of memory. */
1934 ovs_mutex_lock(&netdev_mutex
);
1935 vports
= xmalloc(shash_count(&netdev_shash
) * sizeof *vports
);
1936 SHASH_FOR_EACH (node
, &netdev_shash
) {
1937 struct netdev
*dev
= node
->data
;
1939 if (netdev_vport_is_vport_class(dev
->netdev_class
)) {
1945 ovs_mutex_unlock(&netdev_mutex
);
/* Returns the type of 'name', preferring the vport-name mapping; falls back
 * to looking up an open netdev.  Returns NULL if the type is unknown. */
const char *
netdev_get_type_from_name(const char *name)
{
    struct netdev *dev;
    const char *type;

    type = netdev_vport_type_from_name(name);
    if (type == NULL) {
        dev = netdev_from_name(name);
        type = dev ? netdev_get_type(dev) : NULL;
        netdev_close(dev);
    }
    return type;
}
1966 netdev_rxq_get_netdev(const struct netdev_rxq
*rx
)
1968 ovs_assert(rx
->netdev
->ref_cnt
> 0);
/* Returns the name of the netdev that 'rx' receives from. */
const char *
netdev_rxq_get_name(const struct netdev_rxq *rx)
{
    return netdev_get_name(netdev_rxq_get_netdev(rx));
}
1979 netdev_rxq_get_queue_id(const struct netdev_rxq
*rx
)
1981 return rx
->queue_id
;
1985 restore_all_flags(void *aux OVS_UNUSED
)
1987 struct shash_node
*node
;
1989 SHASH_FOR_EACH (node
, &netdev_shash
) {
1990 struct netdev
*netdev
= node
->data
;
1991 const struct netdev_saved_flags
*sf
;
1992 enum netdev_flags saved_values
;
1993 enum netdev_flags saved_flags
;
1995 saved_values
= saved_flags
= 0;
1996 LIST_FOR_EACH (sf
, node
, &netdev
->saved_flags_list
) {
1997 saved_flags
|= sf
->saved_flags
;
1998 saved_values
&= ~sf
->saved_flags
;
1999 saved_values
|= sf
->saved_flags
& sf
->saved_values
;
2002 enum netdev_flags old_flags
;
2004 netdev
->netdev_class
->update_flags(netdev
,
2005 saved_flags
& saved_values
,
2006 saved_flags
& ~saved_values
,
2013 netdev_get_change_seq(const struct netdev
*netdev
)
2015 return netdev
->change_seq
;
2019 /* This implementation is shared by Linux and BSD. */
2021 static struct ifaddrs
*if_addr_list
;
2022 static struct ovs_mutex if_addr_list_lock
= OVS_MUTEX_INITIALIZER
;
2025 netdev_get_addrs_list_flush(void)
2027 ovs_mutex_lock(&if_addr_list_lock
);
2029 freeifaddrs(if_addr_list
);
2030 if_addr_list
= NULL
;
2032 ovs_mutex_unlock(&if_addr_list_lock
);
2036 netdev_get_addrs(const char dev
[], struct in6_addr
**paddr
,
2037 struct in6_addr
**pmask
, int *n_in
)
2039 struct in6_addr
*addr_array
, *mask_array
;
2040 const struct ifaddrs
*ifa
;
2043 ovs_mutex_lock(&if_addr_list_lock
);
2044 if (!if_addr_list
) {
2047 err
= getifaddrs(&if_addr_list
);
2049 ovs_mutex_unlock(&if_addr_list_lock
);
2054 for (ifa
= if_addr_list
; ifa
; ifa
= ifa
->ifa_next
) {
2055 if (ifa
->ifa_addr
&& ifa
->ifa_name
&& ifa
->ifa_netmask
) {
2058 family
= ifa
->ifa_addr
->sa_family
;
2059 if (family
== AF_INET
|| family
== AF_INET6
) {
2060 if (!strncmp(ifa
->ifa_name
, dev
, IFNAMSIZ
)) {
2068 ovs_mutex_unlock(&if_addr_list_lock
);
2069 return EADDRNOTAVAIL
;
2071 addr_array
= xzalloc(sizeof *addr_array
* cnt
);
2072 mask_array
= xzalloc(sizeof *mask_array
* cnt
);
2073 for (ifa
= if_addr_list
; ifa
; ifa
= ifa
->ifa_next
) {
2077 && !strncmp(ifa
->ifa_name
, dev
, IFNAMSIZ
)
2078 && sa_is_ip(ifa
->ifa_addr
)) {
2079 addr_array
[i
] = sa_get_address(ifa
->ifa_addr
);
2080 mask_array
[i
] = sa_get_address(ifa
->ifa_netmask
);
2084 ovs_mutex_unlock(&if_addr_list_lock
);
2087 *paddr
= addr_array
;
2088 *pmask
= mask_array
;
2098 netdev_wait_reconf_required(struct netdev
*netdev
)
2100 seq_wait(netdev
->reconfigure_seq
, netdev
->last_reconfigure_seq
);
2104 netdev_is_reconf_required(struct netdev
*netdev
)
2106 return seq_read(netdev
->reconfigure_seq
) != netdev
->last_reconfigure_seq
;
2109 /* Give a chance to 'netdev' to reconfigure some of its parameters.
2111 * If a module uses netdev_send() and netdev_rxq_recv(), it must call this
2112 * function when netdev_is_reconf_required() returns true.
2114 * Return 0 if successful, otherwise a positive errno value. If the
2115 * reconfiguration fails the netdev will not be able to send or receive
2118 * When this function is called, no call to netdev_rxq_recv() or netdev_send()
2119 * must be issued. */
2121 netdev_reconfigure(struct netdev
*netdev
)
2123 const struct netdev_class
*class = netdev
->netdev_class
;
2125 netdev
->last_reconfigure_seq
= seq_read(netdev
->reconfigure_seq
);
2127 return (class->reconfigure
2128 ? class->reconfigure(netdev
)
2133 netdev_flow_flush(struct netdev
*netdev
)
2135 const struct netdev_class
*class = netdev
->netdev_class
;
2137 return (class->flow_flush
2138 ? class->flow_flush(netdev
)
2143 netdev_flow_dump_create(struct netdev
*netdev
, struct netdev_flow_dump
**dump
)
2145 const struct netdev_class
*class = netdev
->netdev_class
;
2147 return (class->flow_dump_create
2148 ? class->flow_dump_create(netdev
, dump
)
2153 netdev_flow_dump_destroy(struct netdev_flow_dump
*dump
)
2155 const struct netdev_class
*class = dump
->netdev
->netdev_class
;
2157 return (class->flow_dump_destroy
2158 ? class->flow_dump_destroy(dump
)
2163 netdev_flow_dump_next(struct netdev_flow_dump
*dump
, struct match
*match
,
2164 struct nlattr
**actions
, struct dpif_flow_stats
*stats
,
2165 struct dpif_flow_attrs
*attrs
, ovs_u128
*ufid
,
2166 struct ofpbuf
*rbuffer
, struct ofpbuf
*wbuffer
)
2168 const struct netdev_class
*class = dump
->netdev
->netdev_class
;
2170 return (class->flow_dump_next
2171 ? class->flow_dump_next(dump
, match
, actions
, stats
, attrs
,
2172 ufid
, rbuffer
, wbuffer
)
2177 netdev_flow_put(struct netdev
*netdev
, struct match
*match
,
2178 struct nlattr
*actions
, size_t act_len
,
2179 const ovs_u128
*ufid
, struct offload_info
*info
,
2180 struct dpif_flow_stats
*stats
)
2182 const struct netdev_class
*class = netdev
->netdev_class
;
2184 return (class->flow_put
2185 ? class->flow_put(netdev
, match
, actions
, act_len
, ufid
,
2191 netdev_flow_get(struct netdev
*netdev
, struct match
*match
,
2192 struct nlattr
**actions
, const ovs_u128
*ufid
,
2193 struct dpif_flow_stats
*stats
,
2194 struct dpif_flow_attrs
*attrs
, struct ofpbuf
*buf
)
2196 const struct netdev_class
*class = netdev
->netdev_class
;
2198 return (class->flow_get
2199 ? class->flow_get(netdev
, match
, actions
, ufid
, stats
, attrs
, buf
)
2204 netdev_flow_del(struct netdev
*netdev
, const ovs_u128
*ufid
,
2205 struct dpif_flow_stats
*stats
)
2207 const struct netdev_class
*class = netdev
->netdev_class
;
2209 return (class->flow_del
2210 ? class->flow_del(netdev
, ufid
, stats
)
2215 netdev_init_flow_api(struct netdev
*netdev
)
2217 const struct netdev_class
*class = netdev
->netdev_class
;
2219 if (!netdev_is_flow_api_enabled()) {
2223 return (class->init_flow_api
2224 ? class->init_flow_api(netdev
)
2229 netdev_get_block_id(struct netdev
*netdev
)
2231 const struct netdev_class
*class = netdev
->netdev_class
;
2233 return (class->get_block_id
2234 ? class->get_block_id(netdev
)
2239 netdev_is_flow_api_enabled(void)
2241 return netdev_flow_api_enabled
;
2244 /* Protects below port hashmaps. */
2245 static struct ovs_mutex netdev_hmap_mutex
= OVS_MUTEX_INITIALIZER
;
2247 static struct hmap port_to_netdev
OVS_GUARDED_BY(netdev_hmap_mutex
)
2248 = HMAP_INITIALIZER(&port_to_netdev
);
2249 static struct hmap ifindex_to_port
OVS_GUARDED_BY(netdev_hmap_mutex
)
2250 = HMAP_INITIALIZER(&ifindex_to_port
);
2252 struct port_to_netdev_data
{
2253 struct hmap_node portno_node
; /* By (dpif_class, dpif_port.port_no). */
2254 struct hmap_node ifindex_node
; /* By (dpif_class, ifindex). */
2255 struct netdev
*netdev
;
2256 struct dpif_port dpif_port
;
2257 const struct dpif_class
*dpif_class
;
2262 netdev_ports_hash(odp_port_t port
, const struct dpif_class
*dpif_class
)
2264 return hash_int(odp_to_u32(port
), hash_pointer(dpif_class
, 0));
2267 static struct port_to_netdev_data
*
2268 netdev_ports_lookup(odp_port_t port_no
, const struct dpif_class
*dpif_class
)
2269 OVS_REQUIRES(netdev_hmap_mutex
)
2271 struct port_to_netdev_data
*data
;
2273 HMAP_FOR_EACH_WITH_HASH (data
, portno_node
,
2274 netdev_ports_hash(port_no
, dpif_class
),
2276 if (data
->dpif_class
== dpif_class
2277 && data
->dpif_port
.port_no
== port_no
) {
2285 netdev_ports_insert(struct netdev
*netdev
, const struct dpif_class
*dpif_class
,
2286 struct dpif_port
*dpif_port
)
2288 struct port_to_netdev_data
*data
;
2289 int ifindex
= netdev_get_ifindex(netdev
);
2295 ovs_mutex_lock(&netdev_hmap_mutex
);
2296 if (netdev_ports_lookup(dpif_port
->port_no
, dpif_class
)) {
2297 ovs_mutex_unlock(&netdev_hmap_mutex
);
2301 data
= xzalloc(sizeof *data
);
2302 data
->netdev
= netdev_ref(netdev
);
2303 data
->dpif_class
= dpif_class
;
2304 dpif_port_clone(&data
->dpif_port
, dpif_port
);
2305 data
->ifindex
= ifindex
;
2307 hmap_insert(&port_to_netdev
, &data
->portno_node
,
2308 netdev_ports_hash(dpif_port
->port_no
, dpif_class
));
2309 hmap_insert(&ifindex_to_port
, &data
->ifindex_node
, ifindex
);
2310 ovs_mutex_unlock(&netdev_hmap_mutex
);
2312 netdev_init_flow_api(netdev
);
2318 netdev_ports_get(odp_port_t port_no
, const struct dpif_class
*dpif_class
)
2320 struct port_to_netdev_data
*data
;
2321 struct netdev
*ret
= NULL
;
2323 ovs_mutex_lock(&netdev_hmap_mutex
);
2324 data
= netdev_ports_lookup(port_no
, dpif_class
);
2326 ret
= netdev_ref(data
->netdev
);
2328 ovs_mutex_unlock(&netdev_hmap_mutex
);
2334 netdev_ports_remove(odp_port_t port_no
, const struct dpif_class
*dpif_class
)
2336 struct port_to_netdev_data
*data
;
2339 ovs_mutex_lock(&netdev_hmap_mutex
);
2341 data
= netdev_ports_lookup(port_no
, dpif_class
);
2343 dpif_port_destroy(&data
->dpif_port
);
2344 netdev_close(data
->netdev
); /* unref and possibly close */
2345 hmap_remove(&port_to_netdev
, &data
->portno_node
);
2346 hmap_remove(&ifindex_to_port
, &data
->ifindex_node
);
2351 ovs_mutex_unlock(&netdev_hmap_mutex
);
2357 netdev_ifindex_to_odp_port(int ifindex
)
2359 struct port_to_netdev_data
*data
;
2362 ovs_mutex_lock(&netdev_hmap_mutex
);
2363 HMAP_FOR_EACH_WITH_HASH (data
, ifindex_node
, ifindex
, &ifindex_to_port
) {
2364 if (data
->ifindex
== ifindex
) {
2365 ret
= data
->dpif_port
.port_no
;
2369 ovs_mutex_unlock(&netdev_hmap_mutex
);
2375 netdev_ports_flow_flush(const struct dpif_class
*dpif_class
)
2377 struct port_to_netdev_data
*data
;
2379 ovs_mutex_lock(&netdev_hmap_mutex
);
2380 HMAP_FOR_EACH (data
, portno_node
, &port_to_netdev
) {
2381 if (data
->dpif_class
== dpif_class
) {
2382 netdev_flow_flush(data
->netdev
);
2385 ovs_mutex_unlock(&netdev_hmap_mutex
);
2388 struct netdev_flow_dump
**
2389 netdev_ports_flow_dump_create(const struct dpif_class
*dpif_class
, int *ports
)
2391 struct port_to_netdev_data
*data
;
2392 struct netdev_flow_dump
**dumps
;
2396 ovs_mutex_lock(&netdev_hmap_mutex
);
2397 HMAP_FOR_EACH (data
, portno_node
, &port_to_netdev
) {
2398 if (data
->dpif_class
== dpif_class
) {
2403 dumps
= count
? xzalloc(sizeof *dumps
* count
) : NULL
;
2405 HMAP_FOR_EACH (data
, portno_node
, &port_to_netdev
) {
2406 if (data
->dpif_class
== dpif_class
) {
2407 if (netdev_flow_dump_create(data
->netdev
, &dumps
[i
])) {
2411 dumps
[i
]->port
= data
->dpif_port
.port_no
;
2415 ovs_mutex_unlock(&netdev_hmap_mutex
);
2422 netdev_ports_flow_del(const struct dpif_class
*dpif_class
,
2423 const ovs_u128
*ufid
,
2424 struct dpif_flow_stats
*stats
)
2426 struct port_to_netdev_data
*data
;
2428 ovs_mutex_lock(&netdev_hmap_mutex
);
2429 HMAP_FOR_EACH (data
, portno_node
, &port_to_netdev
) {
2430 if (data
->dpif_class
== dpif_class
2431 && !netdev_flow_del(data
->netdev
, ufid
, stats
)) {
2432 ovs_mutex_unlock(&netdev_hmap_mutex
);
2436 ovs_mutex_unlock(&netdev_hmap_mutex
);
2442 netdev_ports_flow_get(const struct dpif_class
*dpif_class
, struct match
*match
,
2443 struct nlattr
**actions
, const ovs_u128
*ufid
,
2444 struct dpif_flow_stats
*stats
,
2445 struct dpif_flow_attrs
*attrs
, struct ofpbuf
*buf
)
2447 struct port_to_netdev_data
*data
;
2449 ovs_mutex_lock(&netdev_hmap_mutex
);
2450 HMAP_FOR_EACH (data
, portno_node
, &port_to_netdev
) {
2451 if (data
->dpif_class
== dpif_class
2452 && !netdev_flow_get(data
->netdev
, match
, actions
,
2453 ufid
, stats
, attrs
, buf
)) {
2454 ovs_mutex_unlock(&netdev_hmap_mutex
);
2458 ovs_mutex_unlock(&netdev_hmap_mutex
);
2463 netdev_free_custom_stats_counters(struct netdev_custom_stats
*custom_stats
)
2466 if (custom_stats
->counters
) {
2467 free(custom_stats
->counters
);
2468 custom_stats
->counters
= NULL
;
2469 custom_stats
->size
= 0;
2476 netdev_ports_flow_init(void)
2478 struct port_to_netdev_data
*data
;
2480 ovs_mutex_lock(&netdev_hmap_mutex
);
2481 HMAP_FOR_EACH (data
, portno_node
, &port_to_netdev
) {
2482 netdev_init_flow_api(data
->netdev
);
2484 ovs_mutex_unlock(&netdev_hmap_mutex
);
2488 netdev_set_flow_api_enabled(const struct smap
*ovs_other_config
)
2490 if (smap_get_bool(ovs_other_config
, "hw-offload", false)) {
2491 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
2493 if (ovsthread_once_start(&once
)) {
2494 netdev_flow_api_enabled
= true;
2496 VLOG_INFO("netdev: Flow API Enabled");
2498 tc_set_policy(smap_get_def(ovs_other_config
, "tc-policy",
2499 TC_POLICY_DEFAULT
));
2501 netdev_ports_flow_init();
2503 ovsthread_once_done(&once
);
2509 netdev_set_flow_api_enabled(const struct smap
*ovs_other_config OVS_UNUSED
)