]> git.proxmox.com Git - mirror_ovs.git/blob - lib/netdev.c
vswitch.xml: Better document patch ports.
[mirror_ovs.git] / lib / netdev.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "netdev.h"
19
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <netinet/in.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <unistd.h>
26
27 #ifndef _WIN32
28 #include <ifaddrs.h>
29 #include <net/if.h>
30 #include <sys/ioctl.h>
31 #include <sys/types.h>
32 #endif
33
34 #include "cmap.h"
35 #include "coverage.h"
36 #include "dpif.h"
37 #include "dp-packet.h"
38 #include "openvswitch/dynamic-string.h"
39 #include "fatal-signal.h"
40 #include "hash.h"
41 #include "openvswitch/list.h"
42 #include "netdev-dpdk.h"
43 #include "netdev-provider.h"
44 #include "netdev-vport.h"
45 #include "odp-netlink.h"
46 #include "openflow/openflow.h"
47 #include "packets.h"
48 #include "poll-loop.h"
49 #include "seq.h"
50 #include "openvswitch/shash.h"
51 #include "smap.h"
52 #include "sset.h"
53 #include "svec.h"
54 #include "openvswitch/vlog.h"
55 #include "flow.h"
56 #include "util.h"
57 #ifdef __linux__
58 #include "tc.h"
59 #endif
60
61 VLOG_DEFINE_THIS_MODULE(netdev);
62
63 COVERAGE_DEFINE(netdev_received);
64 COVERAGE_DEFINE(netdev_sent);
65 COVERAGE_DEFINE(netdev_add_router);
66 COVERAGE_DEFINE(netdev_get_stats);
67
/* A record of flag bits saved on a netdev so that they can be restored
 * later.  (Presumably consumed by restore_all_flags(), which is registered
 * as a fatal-signal hook below -- the restore logic is outside this chunk;
 * confirm there.) */
struct netdev_saved_flags {
    struct netdev *netdev;
    struct ovs_list node;           /* In struct netdev's saved_flags_list. */
    enum netdev_flags saved_flags;  /* Which flag bits were saved. */
    enum netdev_flags saved_values; /* Saved value for each bit in 'saved_flags'. */
};
74
75 /* Protects 'netdev_shash' and the mutable members of struct netdev. */
76 static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
77
78 /* All created network devices. */
79 static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
80 = SHASH_INITIALIZER(&netdev_shash);
81
/* Mutual exclusion of netdev provider registration and unregistration. */
83 static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex)
84 = OVS_MUTEX_INITIALIZER;
85
86 /* Contains 'struct netdev_registered_class'es. */
87 static struct cmap netdev_classes = CMAP_INITIALIZER;
88
/* A netdev provider class plus its registration bookkeeping. */
struct netdev_registered_class {
    struct cmap_node cmap_node;      /* In 'netdev_classes', by class->type. */
    const struct netdev_class *class; /* The provider itself. */

    /* Number of references: one for the class itself and one for every
     * instance of the class. */
    struct ovs_refcount refcnt;
};
97
98 static bool netdev_flow_api_enabled = false;
99
100 /* This is set pretty low because we probably won't learn anything from the
101 * additional log messages. */
102 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
103
104 static void restore_all_flags(void *aux OVS_UNUSED);
105 void update_device_args(struct netdev *, const struct shash *args);
106
/* Returns the number of transmit queues 'netdev' is configured with. */
int
netdev_n_txq(const struct netdev *netdev)
{
    return netdev->n_txq;
}
112
/* Returns the number of receive queues 'netdev' is configured with. */
int
netdev_n_rxq(const struct netdev *netdev)
{
    return netdev->n_rxq;
}
118
/* Returns true if 'netdev''s provider class is marked as a poll-mode-driver
 * (PMD) device. */
bool
netdev_is_pmd(const struct netdev *netdev)
{
    return netdev->netdev_class->is_pmd;
}
124
125 bool
126 netdev_has_tunnel_push_pop(const struct netdev *netdev)
127 {
128 return netdev->netdev_class->push_header
129 && netdev->netdev_class->pop_header;
130 }
131
/* Performs one-time module initialization: installs a fatal-signal hook so
 * saved device flags get restored on abnormal exit, then registers the
 * built-in netdev providers for the current platform.  Safe to call from
 * multiple threads; only the first caller does the work. */
static void
netdev_initialize(void)
    OVS_EXCLUDED(netdev_mutex)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        /* restore_all_flags() is defined later in this file. */
        fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);

        /* Patch ports are available on every platform. */
        netdev_vport_patch_register();

#ifdef __linux__
        netdev_register_provider(&netdev_linux_class);
        netdev_register_provider(&netdev_internal_class);
        netdev_register_provider(&netdev_tap_class);
        netdev_vport_tunnel_register();
#endif
#if defined(__FreeBSD__) || defined(__NetBSD__)
        netdev_register_provider(&netdev_tap_class);
        netdev_register_provider(&netdev_bsd_class);
#endif
#ifdef _WIN32
        netdev_register_provider(&netdev_windows_class);
        netdev_register_provider(&netdev_internal_class);
        netdev_vport_tunnel_register();
#endif
        ovsthread_once_done(&once);
    }
}
161
162 /* Performs periodic work needed by all the various kinds of netdevs.
163 *
164 * If your program opens any netdevs, it must call this function within its
165 * main poll loop. */
166 void
167 netdev_run(void)
168 OVS_EXCLUDED(netdev_mutex)
169 {
170 netdev_initialize();
171
172 struct netdev_registered_class *rc;
173 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
174 if (rc->class->run) {
175 rc->class->run(rc->class);
176 }
177 }
178 }
179
180 /* Arranges for poll_block() to wake up when netdev_run() needs to be called.
181 *
182 * If your program opens any netdevs, it must call this function within its
183 * main poll loop. */
184 void
185 netdev_wait(void)
186 OVS_EXCLUDED(netdev_mutex)
187 {
188 netdev_initialize();
189
190 struct netdev_registered_class *rc;
191 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
192 if (rc->class->wait) {
193 rc->class->wait(rc->class);
194 }
195 }
196 }
197
198 static struct netdev_registered_class *
199 netdev_lookup_class(const char *type)
200 {
201 struct netdev_registered_class *rc;
202 CMAP_FOR_EACH_WITH_HASH (rc, cmap_node, hash_string(type, 0),
203 &netdev_classes) {
204 if (!strcmp(type, rc->class->type)) {
205 return rc;
206 }
207 }
208 return NULL;
209 }
210
/* Initializes and registers a new netdev provider.  After successful
 * registration, new netdevs of that type can be opened using netdev_open().
 *
 * Returns 0 if successful; EEXIST if a provider of the same type is already
 * registered; otherwise the positive errno value from the class's init
 * function. */
int
netdev_register_provider(const struct netdev_class *new_class)
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
    int error;

    ovs_mutex_lock(&netdev_class_mutex);
    if (netdev_lookup_class(new_class->type)) {
        VLOG_WARN("attempted to register duplicate netdev provider: %s",
                   new_class->type);
        error = EEXIST;
    } else {
        /* Let the class initialize itself before it becomes visible. */
        error = new_class->init ? new_class->init() : 0;
        if (!error) {
            struct netdev_registered_class *rc;

            rc = xmalloc(sizeof *rc);
            cmap_insert(&netdev_classes, &rc->cmap_node,
                        hash_string(new_class->type, 0));
            rc->class = new_class;
            /* One reference for the registration itself; every open netdev
             * instance takes its own (see netdev_open()). */
            ovs_refcount_init(&rc->refcnt);
        } else {
            VLOG_ERR("failed to initialize %s network device class: %s",
                     new_class->type, ovs_strerror(error));
        }
    }
    ovs_mutex_unlock(&netdev_class_mutex);

    return error;
}
243
/* Unregisters a netdev provider.  'type' must have been previously registered
 * and not currently be in use by any netdevs.  After unregistration new
 * netdevs of that type cannot be opened using netdev_open().  (However, the
 * provider may still be accessible from other threads until the next RCU grace
 * period, so the caller must not free or re-register the same netdev_class
 * until that has passed.)
 *
 * Returns 0 if successful, EAFNOSUPPORT if 'type' was never registered, or
 * EBUSY if netdevs of that type are still open. */
int
netdev_unregister_provider(const char *type)
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
    struct netdev_registered_class *rc;
    int error;

    netdev_initialize();

    ovs_mutex_lock(&netdev_class_mutex);
    rc = netdev_lookup_class(type);
    if (!rc) {
        VLOG_WARN("attempted to unregister a netdev provider that is not "
                  "registered: %s", type);
        error = EAFNOSUPPORT;
    } else if (ovs_refcount_unref(&rc->refcnt) != 1) {
        /* The refcount had more than the registration's own reference, so
         * some instance is still open.  Put back the reference we just
         * dropped and refuse. */
        ovs_refcount_ref(&rc->refcnt);
        VLOG_WARN("attempted to unregister in use netdev provider: %s",
                  type);
        error = EBUSY;
    } else {
        cmap_remove(&netdev_classes, &rc->cmap_node,
                    hash_string(rc->class->type, 0));
        /* Concurrent RCU readers may still hold 'rc'; free it only after a
         * grace period. */
        ovsrcu_postpone(free, rc);
        error = 0;
    }
    ovs_mutex_unlock(&netdev_class_mutex);

    return error;
}
280
281 /* Clears 'types' and enumerates the types of all currently registered netdev
282 * providers into it. The caller must first initialize the sset. */
283 void
284 netdev_enumerate_types(struct sset *types)
285 OVS_EXCLUDED(netdev_mutex)
286 {
287 netdev_initialize();
288 sset_clear(types);
289
290 struct netdev_registered_class *rc;
291 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
292 sset_add(types, rc->class->type);
293 }
294 }
295
296 static const char *
297 netdev_vport_type_from_name(const char *name)
298 {
299 struct netdev_registered_class *rc;
300 const char *type;
301 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
302 const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
303 if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
304 type = rc->class->type;
305 return type;
306 }
307 }
308 return NULL;
309 }
310
311 /* Check that the network device name is not the same as any of the registered
312 * vport providers' dpif_port name (dpif_port is NULL if the vport provider
313 * does not define it) or the datapath internal port name (e.g. ovs-system).
314 *
315 * Returns true if there is a name conflict, false otherwise. */
316 bool
317 netdev_is_reserved_name(const char *name)
318 OVS_EXCLUDED(netdev_mutex)
319 {
320 netdev_initialize();
321
322 struct netdev_registered_class *rc;
323 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
324 const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
325 if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
326 return true;
327 }
328 }
329
330 if (!strncmp(name, "ovs-", 4)) {
331 struct sset types;
332 const char *type;
333
334 sset_init(&types);
335 dp_enumerate_types(&types);
336 SSET_FOR_EACH (type, &types) {
337 if (!strcmp(name+4, type)) {
338 sset_destroy(&types);
339 return true;
340 }
341 }
342 sset_destroy(&types);
343 }
344
345 return false;
346 }
347
/* Opens the network device named 'name' (e.g. "eth0") of the specified 'type'
 * (e.g. "system") and returns zero if successful, otherwise a positive errno
 * value.  On success, sets '*netdevp' to the new network device, otherwise to
 * null.
 *
 * A null or empty 'type' selects the default "system" class and marks the
 * device "auto-classified", so a later open with an explicit type may
 * displace it (see below).
 *
 * Some network devices may need to be configured (with netdev_set_config())
 * before they can be used.
 *
 * Before opening rxqs or sending packets, '*netdevp' may need to be
 * reconfigured (with netdev_is_reconf_required() and netdev_reconfigure()).
 * */
int
netdev_open(const char *name, const char *type, struct netdev **netdevp)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev *netdev;
    int error = 0;

    if (!name[0]) {
        /* Reject empty names.  This saves the providers having to do this.  At
         * least one screwed this up: the netdev-linux "tap" implementation
         * passed the name directly to the Linux TUNSETIFF call, which treats
         * an empty string as a request to generate a unique name. */
        return EINVAL;
    }

    netdev_initialize();

    ovs_mutex_lock(&netdev_mutex);
    netdev = shash_find_data(&netdev_shash, name);

    /* An existing device whose type differs from an explicitly requested
     * 'type' is a conflict, unless it was auto-classified. */
    if (netdev &&
        type && type[0] && strcmp(type, netdev->netdev_class->type)) {

        if (netdev->auto_classified) {
            /* If this device was first created without a classification type,
             * for example due to routing or tunneling code, and they keep a
             * reference, a "classified" call to open will fail.  In this case
             * we remove the classless device, and re-add it below.  We remove
             * the netdev from the shash, and change the sequence, so owners of
             * the old classless device can release/cleanup. */
            if (netdev->node) {
                shash_delete(&netdev_shash, netdev->node);
                netdev->node = NULL;
                netdev_change_seq_changed(netdev);
            }

            netdev = NULL;
        } else {
            error = EEXIST;
        }
    }

    if (!netdev) {
        struct netdev_registered_class *rc;

        rc = netdev_lookup_class(type && type[0] ? type : "system");
        if (rc && ovs_refcount_try_ref_rcu(&rc->refcnt)) {
            netdev = rc->class->alloc();
            if (netdev) {
                memset(netdev, 0, sizeof *netdev);
                netdev->netdev_class = rc->class;
                netdev->auto_classified = type && type[0] ? false : true;
                netdev->name = xstrdup(name);
                netdev->change_seq = 1;
                netdev->reconfigure_seq = seq_create();
                netdev->last_reconfigure_seq =
                    seq_read(netdev->reconfigure_seq);
                netdev->node = shash_add(&netdev_shash, name, netdev);

                /* By default enable one tx and rx queue per netdev. */
                netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
                netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;

                ovs_list_init(&netdev->saved_flags_list);

                error = rc->class->construct(netdev);
                if (!error) {
                    netdev_change_seq_changed(netdev);
                } else {
                    /* Construction failed: undo everything set up above, in
                     * reverse, then free the instance. */
                    ovs_refcount_unref(&rc->refcnt);
                    seq_destroy(netdev->reconfigure_seq);
                    free(netdev->name);
                    ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
                    shash_delete(&netdev_shash, netdev->node);
                    rc->class->dealloc(netdev);
                }
            } else {
                error = ENOMEM;
            }
        } else {
            VLOG_WARN("could not create netdev %s of unknown type %s",
                      name, type);
            error = EAFNOSUPPORT;
        }
    }

    if (!error) {
        /* The caller owns one reference (found or newly created). */
        netdev->ref_cnt++;
        *netdevp = netdev;
    } else {
        *netdevp = NULL;
    }
    ovs_mutex_unlock(&netdev_mutex);

    return error;
}
455
/* Returns a reference to 'netdev_' for the caller to own.  Returns null if
 * 'netdev_' is null. */
struct netdev *
netdev_ref(const struct netdev *netdev_)
    OVS_EXCLUDED(netdev_mutex)
{
    /* Cast away const: taking a reference mutates 'ref_cnt'. */
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);

    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        /* The caller must already hold at least one reference. */
        ovs_assert(netdev->ref_cnt > 0);
        netdev->ref_cnt++;
        ovs_mutex_unlock(&netdev_mutex);
    }
    return netdev;
}
472
473 /* Reconfigures the device 'netdev' with 'args'. 'args' may be empty
474 * or NULL if none are needed. */
475 int
476 netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
477 OVS_EXCLUDED(netdev_mutex)
478 {
479 if (netdev->netdev_class->set_config) {
480 const struct smap no_args = SMAP_INITIALIZER(&no_args);
481 char *verbose_error = NULL;
482 int error;
483
484 error = netdev->netdev_class->set_config(netdev,
485 args ? args : &no_args,
486 &verbose_error);
487 if (error) {
488 VLOG_WARN_BUF(verbose_error ? NULL : errp,
489 "%s: could not set configuration (%s)",
490 netdev_get_name(netdev), ovs_strerror(error));
491 if (verbose_error) {
492 if (errp) {
493 *errp = verbose_error;
494 } else {
495 free(verbose_error);
496 }
497 }
498 }
499 return error;
500 } else if (args && !smap_is_empty(args)) {
501 VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
502 netdev_get_name(netdev));
503 }
504 return 0;
505 }
506
507 /* Returns the current configuration for 'netdev' in 'args'. The caller must
508 * have already initialized 'args' with smap_init(). Returns 0 on success, in
509 * which case 'args' will be filled with 'netdev''s configuration. On failure
510 * returns a positive errno value, in which case 'args' will be empty.
511 *
512 * The caller owns 'args' and its contents and must eventually free them with
513 * smap_destroy(). */
514 int
515 netdev_get_config(const struct netdev *netdev, struct smap *args)
516 OVS_EXCLUDED(netdev_mutex)
517 {
518 int error;
519
520 smap_clear(args);
521 if (netdev->netdev_class->get_config) {
522 error = netdev->netdev_class->get_config(netdev, args);
523 if (error) {
524 smap_clear(args);
525 }
526 } else {
527 error = 0;
528 }
529
530 return error;
531 }
532
533 const struct netdev_tunnel_config *
534 netdev_get_tunnel_config(const struct netdev *netdev)
535 OVS_EXCLUDED(netdev_mutex)
536 {
537 if (netdev->netdev_class->get_tunnel_config) {
538 return netdev->netdev_class->get_tunnel_config(netdev);
539 } else {
540 return NULL;
541 }
542 }
543
544 /* Returns the id of the numa node the 'netdev' is on. If the function
545 * is not implemented, returns NETDEV_NUMA_UNSPEC. */
546 int
547 netdev_get_numa_id(const struct netdev *netdev)
548 {
549 if (netdev->netdev_class->get_numa_id) {
550 return netdev->netdev_class->get_numa_id(netdev);
551 } else {
552 return NETDEV_NUMA_UNSPEC;
553 }
554 }
555
/* Drops one reference to 'dev', destroying it if this was the last one.
 *
 * The caller must hold 'netdev_mutex'; this function releases it on every
 * path (note the OVS_RELEASES annotation). */
static void
netdev_unref(struct netdev *dev)
    OVS_RELEASES(netdev_mutex)
{
    ovs_assert(dev->ref_cnt);
    if (!--dev->ref_cnt) {
        /* Remember the class before 'dev' is torn down. */
        const struct netdev_class *class = dev->netdev_class;
        struct netdev_registered_class *rc;

        dev->netdev_class->destruct(dev);

        if (dev->node) {
            shash_delete(&netdev_shash, dev->node);
        }
        free(dev->name);
        seq_destroy(dev->reconfigure_seq);
        dev->netdev_class->dealloc(dev);
        ovs_mutex_unlock(&netdev_mutex);

        /* Drop the reference this instance held on its registered class.
         * The lookup happens after unlocking; the cmap supports concurrent
         * readers. */
        rc = netdev_lookup_class(class->type);
        ovs_refcount_unref(&rc->refcnt);
    } else {
        ovs_mutex_unlock(&netdev_mutex);
    }
}
581
/* Closes and destroys 'netdev'.  A null 'netdev' is a no-op. */
void
netdev_close(struct netdev *netdev)
    OVS_EXCLUDED(netdev_mutex)
{
    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        /* netdev_unref() releases 'netdev_mutex'. */
        netdev_unref(netdev);
    }
}
592
/* Removes 'netdev' from the global shash and unrefs 'netdev'.
 *
 * This allows handler and revalidator threads to still retain references
 * to this netdev while the main thread changes interface configuration.
 *
 * This function should only be called by the main thread when closing
 * netdevs during user configuration changes. Otherwise, netdev_close should be
 * used to close netdevs. */
void
netdev_remove(struct netdev *netdev)
{
    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        if (netdev->node) {
            /* Make the device unfindable by name and notify watchers. */
            shash_delete(&netdev_shash, netdev->node);
            netdev->node = NULL;
            netdev_change_seq_changed(netdev);
        }
        /* netdev_unref() releases 'netdev_mutex'. */
        netdev_unref(netdev);
    }
}
614
/* Parses 'netdev_name_', which is of the form [type@]name into its component
 * pieces.  'name' and 'type' must be freed by the caller. */
void
netdev_parse_name(const char *netdev_name_, char **name, char **type)
{
    char *buf = xstrdup(netdev_name_);
    char *at = strchr(buf, '@');

    if (at) {
        /* "type@name": split in place at the '@'. */
        *at = '\0';
        *type = buf;
        *name = xstrdup(at + 1);
    } else {
        /* Bare name: the type defaults to "system". */
        *name = buf;
        *type = xstrdup("system");
    }
}
633
634 /* Attempts to open a netdev_rxq handle for obtaining packets received on
635 * 'netdev'. On success, returns 0 and stores a nonnull 'netdev_rxq *' into
636 * '*rxp'. On failure, returns a positive errno value and stores NULL into
637 * '*rxp'.
638 *
639 * Some kinds of network devices might not support receiving packets. This
640 * function returns EOPNOTSUPP in that case.*/
641 int
642 netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
643 OVS_EXCLUDED(netdev_mutex)
644 {
645 int error;
646
647 if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
648 struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
649 if (rx) {
650 rx->netdev = netdev;
651 rx->queue_id = id;
652 error = netdev->netdev_class->rxq_construct(rx);
653 if (!error) {
654 netdev_ref(netdev);
655 *rxp = rx;
656 return 0;
657 }
658 netdev->netdev_class->rxq_dealloc(rx);
659 } else {
660 error = ENOMEM;
661 }
662 } else {
663 error = EOPNOTSUPP;
664 }
665
666 *rxp = NULL;
667 return error;
668 }
669
670 /* Closes 'rx'. */
671 void
672 netdev_rxq_close(struct netdev_rxq *rx)
673 OVS_EXCLUDED(netdev_mutex)
674 {
675 if (rx) {
676 struct netdev *netdev = rx->netdev;
677 netdev->netdev_class->rxq_destruct(rx);
678 netdev->netdev_class->rxq_dealloc(rx);
679 netdev_close(netdev);
680 }
681 }
682
683 /* Attempts to receive a batch of packets from 'rx'. 'batch' should point to
684 * the beginning of an array of NETDEV_MAX_BURST pointers to dp_packet. If
685 * successful, this function stores pointers to up to NETDEV_MAX_BURST
686 * dp_packets into the array, transferring ownership of the packets to the
687 * caller, stores the number of received packets in 'batch->count', and returns
688 * 0.
689 *
690 * The implementation does not necessarily initialize any non-data members of
691 * 'batch'. That is, the caller must initialize layer pointers and metadata
692 * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
693 *
694 * Returns EAGAIN immediately if no packet is ready to be received or another
695 * positive errno value if an error was encountered. */
696 int
697 netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch)
698 {
699 int retval;
700
701 retval = rx->netdev->netdev_class->rxq_recv(rx, batch);
702 if (!retval) {
703 COVERAGE_INC(netdev_received);
704 } else {
705 batch->count = 0;
706 }
707 return retval;
708 }
709
/* Arranges for poll_block() to wake up when a packet is ready to be received
 * on 'rx'. */
void
netdev_rxq_wait(struct netdev_rxq *rx)
{
    rx->netdev->netdev_class->rxq_wait(rx);
}
717
718 /* Discards any packets ready to be received on 'rx'. */
719 int
720 netdev_rxq_drain(struct netdev_rxq *rx)
721 {
722 return (rx->netdev->netdev_class->rxq_drain
723 ? rx->netdev->netdev_class->rxq_drain(rx)
724 : 0);
725 }
726
727 /* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
728 * otherwise a positive errno value.
729 *
730 * 'n_txq' specifies the exact number of transmission queues to create.
731 *
732 * The change might not effective immediately. The caller must check if a
733 * reconfiguration is required with netdev_is_reconf_required() and eventually
734 * call netdev_reconfigure() before using the new queues.
735 *
736 * On error, the tx queue configuration is unchanged */
737 int
738 netdev_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
739 {
740 int error;
741
742 error = (netdev->netdev_class->set_tx_multiq
743 ? netdev->netdev_class->set_tx_multiq(netdev, MAX(n_txq, 1))
744 : EOPNOTSUPP);
745
746 if (error && error != EOPNOTSUPP) {
747 VLOG_DBG_RL(&rl, "failed to set tx queue for network device %s:"
748 "%s", netdev_get_name(netdev), ovs_strerror(error));
749 }
750
751 return error;
752 }
753
754 enum netdev_pt_mode
755 netdev_get_pt_mode(const struct netdev *netdev)
756 {
757 return (netdev->netdev_class->get_pt_mode
758 ? netdev->netdev_class->get_pt_mode(netdev)
759 : NETDEV_PT_LEGACY_L2);
760 }
761
762 /* Sends 'batch' on 'netdev'. Returns 0 if successful (for every packet),
763 * otherwise a positive errno value. Returns EAGAIN without blocking if
764 * at least one the packets cannot be queued immediately. Returns EMSGSIZE
765 * if a partial packet was transmitted or if a packet is too big or too small
766 * to transmit on the device.
767 *
768 * The caller must make sure that 'netdev' supports sending by making sure that
769 * 'netdev_n_txq(netdev)' returns >= 1.
770 *
771 * If the function returns a non-zero value, some of the packets might have
772 * been sent anyway.
773 *
774 * If 'may_steal' is false, the caller retains ownership of all the packets.
775 * If 'may_steal' is true, the caller transfers ownership of all the packets
776 * to the network device, regardless of success.
777 *
778 * If 'concurrent_txq' is true, the caller may perform concurrent calls
779 * to netdev_send() with the same 'qid'. The netdev provider is responsible
780 * for making sure that these concurrent calls do not create a race condition
781 * by using locking or other synchronization if required.
782 *
783 * The network device is expected to maintain one or more packet
784 * transmission queues, so that the caller does not ordinarily have to
785 * do additional queuing of packets. 'qid' specifies the queue to use
786 * and can be ignored if the implementation does not support multiple
787 * queues.
788 *
789 * Some network devices may not implement support for this function. In such
790 * cases this function will always return EOPNOTSUPP. */
791 int
792 netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
793 bool may_steal, bool concurrent_txq)
794 {
795 int error = netdev->netdev_class->send(netdev, qid, batch, may_steal,
796 concurrent_txq);
797 if (!error) {
798 COVERAGE_INC(netdev_sent);
799 if (!may_steal) {
800 dp_packet_batch_reset_cutlen(batch);
801 }
802 }
803 return error;
804 }
805
/* Pop tunnel header, build tunnel metadata and resize 'batch->packets'
 * for further processing.
 *
 * The caller must make sure that 'netdev' supports this operation by checking
 * that netdev_has_tunnel_push_pop() returns true. */
void
netdev_pop_header(struct netdev *netdev, struct dp_packet_batch *batch)
{
    struct dp_packet *packet;
    size_t i, size = dp_packet_batch_size(batch);

    /* Rebuild the batch in place: packets for which pop_header() returns
     * NULL are dropped from the batch. */
    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
        packet = netdev->netdev_class->pop_header(packet);
        if (packet) {
            /* Reset the checksum offload flags if present, to avoid wrong
             * interpretation in the further packet processing when
             * recirculated.*/
            reset_dp_packet_checksum_ol_flags(packet);
            dp_packet_batch_refill(batch, packet, i);
        }
    }
}
828
829 void
830 netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params *params,
831 const struct flow *tnl_flow,
832 const struct in6_addr *src,
833 struct eth_addr dmac,
834 struct eth_addr smac)
835 {
836 params->flow = tnl_flow;
837 params->dmac = dmac;
838 params->smac = smac;
839 params->s_ip = src;
840 params->is_ipv6 = !IN6_IS_ADDR_V4MAPPED(src);
841 }
842
843 int netdev_build_header(const struct netdev *netdev,
844 struct ovs_action_push_tnl *data,
845 const struct netdev_tnl_build_header_params *params)
846 {
847 if (netdev->netdev_class->build_header) {
848 return netdev->netdev_class->build_header(netdev, data, params);
849 }
850 return EOPNOTSUPP;
851 }
852
853 /* Push tunnel header (reading from tunnel metadata) and resize
854 * 'batch->packets' for further processing.
855 *
856 * The caller must make sure that 'netdev' support this operation by checking
857 * that netdev_has_tunnel_push_pop() returns true. */
858 int
859 netdev_push_header(const struct netdev *netdev,
860 struct dp_packet_batch *batch,
861 const struct ovs_action_push_tnl *data)
862 {
863 struct dp_packet *packet;
864 DP_PACKET_BATCH_FOR_EACH (packet, batch) {
865 netdev->netdev_class->push_header(packet, data);
866 pkt_metadata_init(&packet->md, data->out_port);
867 }
868
869 return 0;
870 }
871
/* Registers with the poll loop to wake up from the next call to poll_block()
 * when the packet transmission queue has sufficient room to transmit a packet
 * with netdev_send().
 *
 * The network device is expected to maintain one or more packet
 * transmission queues, so that the caller does not ordinarily have to
 * do additional queuing of packets.  'qid' specifies the queue to use
 * and can be ignored if the implementation does not support multiple
 * queues.
 *
 * No-op if the provider does not implement send_wait. */
void
netdev_send_wait(struct netdev *netdev, int qid)
{
    if (netdev->netdev_class->send_wait) {
        netdev->netdev_class->send_wait(netdev, qid);
    }
}
888
/* Attempts to set 'netdev''s MAC address to 'mac'.  Returns 0 if successful,
 * otherwise a positive errno value. */
int
netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    return netdev->netdev_class->set_etheraddr(netdev, mac);
}
896
/* Retrieves 'netdev''s MAC address.  If successful, returns 0 and copies the
 * the MAC address into 'mac'.  On failure, returns a positive errno value and
 * clears 'mac' to all-zeros. */
int
netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    return netdev->netdev_class->get_etheraddr(netdev, mac);
}
905
/* Returns the name of the network device that 'netdev' represents,
 * e.g. "eth0".  The caller must not modify or free the returned string. */
const char *
netdev_get_name(const struct netdev *netdev)
{
    return netdev->name;
}
913
914 /* Retrieves the MTU of 'netdev'. The MTU is the maximum size of transmitted
915 * (and received) packets, in bytes, not including the hardware header; thus,
916 * this is typically 1500 bytes for Ethernet devices.
917 *
918 * If successful, returns 0 and stores the MTU size in '*mtup'. Returns
919 * EOPNOTSUPP if 'netdev' does not have an MTU (as e.g. some tunnels do not).
920 * On other failure, returns a positive errno value. On failure, sets '*mtup'
921 * to 0. */
922 int
923 netdev_get_mtu(const struct netdev *netdev, int *mtup)
924 {
925 const struct netdev_class *class = netdev->netdev_class;
926 int error;
927
928 error = class->get_mtu ? class->get_mtu(netdev, mtup) : EOPNOTSUPP;
929 if (error) {
930 *mtup = 0;
931 if (error != EOPNOTSUPP) {
932 VLOG_DBG_RL(&rl, "failed to retrieve MTU for network device %s: "
933 "%s", netdev_get_name(netdev), ovs_strerror(error));
934 }
935 }
936 return error;
937 }
938
939 /* Sets the MTU of 'netdev'. The MTU is the maximum size of transmitted
940 * (and received) packets, in bytes.
941 *
942 * If successful, returns 0. Returns EOPNOTSUPP if 'netdev' does not have an
943 * MTU (as e.g. some tunnels do not). On other failure, returns a positive
944 * errno value. */
945 int
946 netdev_set_mtu(struct netdev *netdev, int mtu)
947 {
948 const struct netdev_class *class = netdev->netdev_class;
949 int error;
950
951 error = class->set_mtu ? class->set_mtu(netdev, mtu) : EOPNOTSUPP;
952 if (error && error != EOPNOTSUPP) {
953 VLOG_DBG_RL(&rl, "failed to set MTU for network device %s: %s",
954 netdev_get_name(netdev), ovs_strerror(error));
955 }
956
957 return error;
958 }
959
960 /* If 'user_config' is true, the user wants to control 'netdev''s MTU and we
961 * should not override it. If 'user_config' is false, we may adjust
962 * 'netdev''s MTU (e.g., if 'netdev' is internal). */
963 void
964 netdev_mtu_user_config(struct netdev *netdev, bool user_config)
965 {
966 if (netdev->mtu_user_config != user_config) {
967 netdev_change_seq_changed(netdev);
968 netdev->mtu_user_config = user_config;
969 }
970 }
971
/* Returns 'true' if the user explicitly specified an MTU value for 'netdev'.
 * Otherwise, returns 'false', in which case we are allowed to adjust the
 * device MTU. */
bool
netdev_mtu_is_user_config(struct netdev *netdev)
{
    return netdev->mtu_user_config;
}
980
981 /* Returns the ifindex of 'netdev', if successful, as a positive number. On
982 * failure, returns a negative errno value.
983 *
984 * The desired semantics of the ifindex value are a combination of those
985 * specified by POSIX for if_nametoindex() and by SNMP for ifIndex. An ifindex
986 * value should be unique within a host and remain stable at least until
987 * reboot. SNMP says an ifindex "ranges between 1 and the value of ifNumber"
988 * but many systems do not follow this rule anyhow.
989 *
990 * Some network devices may not implement support for this function. In such
991 * cases this function will always return -EOPNOTSUPP.
992 */
993 int
994 netdev_get_ifindex(const struct netdev *netdev)
995 {
996 int (*get_ifindex)(const struct netdev *);
997
998 get_ifindex = netdev->netdev_class->get_ifindex;
999
1000 return get_ifindex ? get_ifindex(netdev) : -EOPNOTSUPP;
1001 }
1002
1003 /* Stores the features supported by 'netdev' into each of '*current',
1004 * '*advertised', '*supported', and '*peer' that are non-null. Each value is a
1005 * bitmap of "enum ofp_port_features" bits, in host byte order. Returns 0 if
1006 * successful, otherwise a positive errno value. On failure, all of the
1007 * passed-in values are set to 0.
1008 *
1009 * Some network devices may not implement support for this function. In such
1010 * cases this function will always return EOPNOTSUPP. */
1011 int
1012 netdev_get_features(const struct netdev *netdev,
1013 enum netdev_features *current,
1014 enum netdev_features *advertised,
1015 enum netdev_features *supported,
1016 enum netdev_features *peer)
1017 {
1018 int (*get_features)(const struct netdev *netdev,
1019 enum netdev_features *current,
1020 enum netdev_features *advertised,
1021 enum netdev_features *supported,
1022 enum netdev_features *peer);
1023 enum netdev_features dummy[4];
1024 int error;
1025
1026 if (!current) {
1027 current = &dummy[0];
1028 }
1029 if (!advertised) {
1030 advertised = &dummy[1];
1031 }
1032 if (!supported) {
1033 supported = &dummy[2];
1034 }
1035 if (!peer) {
1036 peer = &dummy[3];
1037 }
1038
1039 get_features = netdev->netdev_class->get_features;
1040 error = get_features
1041 ? get_features(netdev, current, advertised, supported,
1042 peer)
1043 : EOPNOTSUPP;
1044 if (error) {
1045 *current = *advertised = *supported = *peer = 0;
1046 }
1047 return error;
1048 }
1049
1050 /* Returns the maximum speed of a network connection that has the NETDEV_F_*
1051 * bits in 'features', in bits per second. If no bits that indicate a speed
1052 * are set in 'features', returns 'default_bps'. */
1053 uint64_t
1054 netdev_features_to_bps(enum netdev_features features,
1055 uint64_t default_bps)
1056 {
1057 enum {
1058 F_1000000MB = NETDEV_F_1TB_FD,
1059 F_100000MB = NETDEV_F_100GB_FD,
1060 F_40000MB = NETDEV_F_40GB_FD,
1061 F_10000MB = NETDEV_F_10GB_FD,
1062 F_1000MB = NETDEV_F_1GB_HD | NETDEV_F_1GB_FD,
1063 F_100MB = NETDEV_F_100MB_HD | NETDEV_F_100MB_FD,
1064 F_10MB = NETDEV_F_10MB_HD | NETDEV_F_10MB_FD
1065 };
1066
1067 return ( features & F_1000000MB ? UINT64_C(1000000000000)
1068 : features & F_100000MB ? UINT64_C(100000000000)
1069 : features & F_40000MB ? UINT64_C(40000000000)
1070 : features & F_10000MB ? UINT64_C(10000000000)
1071 : features & F_1000MB ? UINT64_C(1000000000)
1072 : features & F_100MB ? UINT64_C(100000000)
1073 : features & F_10MB ? UINT64_C(10000000)
1074 : default_bps);
1075 }
1076
1077 /* Returns true if any of the NETDEV_F_* bits that indicate a full-duplex link
1078 * are set in 'features', otherwise false. */
1079 bool
1080 netdev_features_is_full_duplex(enum netdev_features features)
1081 {
1082 return (features & (NETDEV_F_10MB_FD | NETDEV_F_100MB_FD | NETDEV_F_1GB_FD
1083 | NETDEV_F_10GB_FD | NETDEV_F_40GB_FD
1084 | NETDEV_F_100GB_FD | NETDEV_F_1TB_FD)) != 0;
1085 }
1086
1087 /* Set the features advertised by 'netdev' to 'advertise'. Returns 0 if
1088 * successful, otherwise a positive errno value. */
1089 int
1090 netdev_set_advertisements(struct netdev *netdev,
1091 enum netdev_features advertise)
1092 {
1093 return (netdev->netdev_class->set_advertisements
1094 ? netdev->netdev_class->set_advertisements(
1095 netdev, advertise)
1096 : EOPNOTSUPP);
1097 }
1098
1099 /* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask. If
1100 * 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared. Returns a
1101 * positive errno value. */
1102 int
1103 netdev_set_in4(struct netdev *netdev, struct in_addr addr, struct in_addr mask)
1104 {
1105 return (netdev->netdev_class->set_in4
1106 ? netdev->netdev_class->set_in4(netdev, addr, mask)
1107 : EOPNOTSUPP);
1108 }
1109
/* Obtains an IPv4 address assigned to the device named 'device_name' and
 * stores it in '*in4'.  Returns 0 if successful, otherwise a positive errno
 * value; in particular, ENOENT if the device has no IPv4 address.  On
 * failure, '*in4' is not modified. */
int
netdev_get_in4_by_name(const char *device_name, struct in_addr *in4)
{
    struct in6_addr *mask, *addr6;
    int err, n_in6, i;
    struct netdev *dev;

    err = netdev_open(device_name, NULL, &dev);
    if (err) {
        return err;
    }

    /* On failure, netdev_get_addr_list() nulls 'addr6' and 'mask', so the
     * free() calls below are safe on every path. */
    err = netdev_get_addr_list(dev, &addr6, &mask, &n_in6);
    if (err) {
        goto out;
    }

    /* IPv4 addresses are reported as IPv4-mapped IPv6 addresses; the first
     * one found wins. */
    for (i = 0; i < n_in6; i++) {
        if (IN6_IS_ADDR_V4MAPPED(&addr6[i])) {
            in4->s_addr = in6_addr_get_mapped_ipv4(&addr6[i]);
            goto out;
        }
    }
    /* Was "err = -ENOENT;": a negative value violates the positive-errno
     * convention documented throughout this API and garbles
     * ovs_strerror() output. */
    err = ENOENT;
out:
    free(addr6);
    free(mask);
    netdev_close(dev);
    return err;
}
1144
1145 /* Adds 'router' as a default IP gateway for the TCP/IP stack that corresponds
1146 * to 'netdev'. */
1147 int
1148 netdev_add_router(struct netdev *netdev, struct in_addr router)
1149 {
1150 COVERAGE_INC(netdev_add_router);
1151 return (netdev->netdev_class->add_router
1152 ? netdev->netdev_class->add_router(netdev, router)
1153 : EOPNOTSUPP);
1154 }
1155
1156 /* Looks up the next hop for 'host' for the TCP/IP stack that corresponds to
1157 * 'netdev'. If a route cannot not be determined, sets '*next_hop' to 0,
1158 * '*netdev_name' to null, and returns a positive errno value. Otherwise, if a
1159 * next hop is found, stores the next hop gateway's address (0 if 'host' is on
1160 * a directly connected network) in '*next_hop' and a copy of the name of the
1161 * device to reach 'host' in '*netdev_name', and returns 0. The caller is
1162 * responsible for freeing '*netdev_name' (by calling free()). */
1163 int
1164 netdev_get_next_hop(const struct netdev *netdev,
1165 const struct in_addr *host, struct in_addr *next_hop,
1166 char **netdev_name)
1167 {
1168 int error = (netdev->netdev_class->get_next_hop
1169 ? netdev->netdev_class->get_next_hop(
1170 host, next_hop, netdev_name)
1171 : EOPNOTSUPP);
1172 if (error) {
1173 next_hop->s_addr = 0;
1174 *netdev_name = NULL;
1175 }
1176 return error;
1177 }
1178
1179 /* Populates 'smap' with status information.
1180 *
1181 * Populates 'smap' with 'netdev' specific status information. This
1182 * information may be used to populate the status column of the Interface table
1183 * as defined in ovs-vswitchd.conf.db(5). */
1184 int
1185 netdev_get_status(const struct netdev *netdev, struct smap *smap)
1186 {
1187 return (netdev->netdev_class->get_status
1188 ? netdev->netdev_class->get_status(netdev, smap)
1189 : EOPNOTSUPP);
1190 }
1191
1192 /* Returns all assigned IP address to 'netdev' and returns 0.
1193 * API allocates array of address and masks and set it to
1194 * '*addr' and '*mask'.
1195 * Otherwise, returns a positive errno value and sets '*addr', '*mask
1196 * and '*n_addr' to NULL.
1197 *
1198 * The following error values have well-defined meanings:
1199 *
1200 * - EADDRNOTAVAIL: 'netdev' has no assigned IPv6 address.
1201 *
1202 * - EOPNOTSUPP: No IPv6 network stack attached to 'netdev'.
1203 *
1204 * 'addr' may be null, in which case the address itself is not reported. */
1205 int
1206 netdev_get_addr_list(const struct netdev *netdev, struct in6_addr **addr,
1207 struct in6_addr **mask, int *n_addr)
1208 {
1209 int error;
1210
1211 error = (netdev->netdev_class->get_addr_list
1212 ? netdev->netdev_class->get_addr_list(netdev, addr, mask, n_addr): EOPNOTSUPP);
1213 if (error && addr) {
1214 *addr = NULL;
1215 *mask = NULL;
1216 *n_addr = 0;
1217 }
1218
1219 return error;
1220 }
1221
/* On 'netdev', turns off the flags in 'off' and then turns on the flags in
 * 'on'.  Returns 0 if successful, otherwise a positive errno value.
 *
 * With 'off' == 'on' == 0 this is a pure query: the current flags are
 * reported through '*old_flagsp' and nothing is modified.  Otherwise, when
 * 'sfp' is nonnull and some flag actually changed, '*sfp' receives a
 * heap-allocated record of the change, suitable for later passing to
 * netdev_restore_flags(); the record holds a reference on 'netdev'.  If no
 * flags changed, '*sfp' is set to NULL. */
static int
do_update_flags(struct netdev *netdev, enum netdev_flags off,
                enum netdev_flags on, enum netdev_flags *old_flagsp,
                struct netdev_saved_flags **sfp)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev_saved_flags *sf = NULL;
    enum netdev_flags old_flags;
    int error;

    /* 'off & ~on' so that a flag present in both arguments ends up on. */
    error = netdev->netdev_class->update_flags(netdev, off & ~on, on,
                                               &old_flags);
    if (error) {
        VLOG_WARN_RL(&rl, "failed to %s flags for network device %s: %s",
                     off || on ? "set" : "get", netdev_get_name(netdev),
                     ovs_strerror(error));
        old_flags = 0;
    } else if ((off || on) && sfp) {
        enum netdev_flags new_flags = (old_flags & ~off) | on;
        enum netdev_flags changed_flags = old_flags ^ new_flags;
        if (changed_flags) {
            /* Record which flags changed ('saved_flags') and their new
             * values ('saved_values') so netdev_restore_flags() can undo
             * exactly this change.  The list link and the ref count must be
             * updated under netdev_mutex. */
            ovs_mutex_lock(&netdev_mutex);
            *sfp = sf = xmalloc(sizeof *sf);
            sf->netdev = netdev;
            ovs_list_push_front(&netdev->saved_flags_list, &sf->node);
            sf->saved_flags = changed_flags;
            sf->saved_values = changed_flags & new_flags;

            netdev->ref_cnt++;  /* Reference owned by the saved-flags record. */
            ovs_mutex_unlock(&netdev_mutex);
        }
    }

    if (old_flagsp) {
        *old_flagsp = old_flags;
    }
    if (sfp) {
        *sfp = sf;
    }

    return error;
}
1266
/* Obtains the current flags for 'netdev' and stores them into '*flagsp'.
 * Returns 0 if successful, otherwise a positive errno value.  On failure,
 * stores 0 into '*flagsp'. */
int
netdev_get_flags(const struct netdev *netdev_, enum netdev_flags *flagsp)
{
    /* Casting away 'const' is acceptable here: updating zero flags is a
     * pure query, so do_update_flags() does not modify the device. */
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
    return do_update_flags(netdev, 0, 0, flagsp, NULL);
}
1276
/* Sets the flags for 'netdev' to 'flags'.
 * Returns 0 if successful, otherwise a positive errno value.  If 'sfp' is
 * nonnull, '*sfp' receives a record usable with netdev_restore_flags(). */
int
netdev_set_flags(struct netdev *netdev, enum netdev_flags flags,
                 struct netdev_saved_flags **sfp)
{
    /* 'off' of -1 (all bits) combined with 'on' of 'flags' replaces the
     * flag set wholesale: do_update_flags() turns off "off & ~on" first. */
    return do_update_flags(netdev, -1, flags, NULL, sfp);
}
1285
/* Turns on the specified 'flags' on 'netdev':
 *
 *   - On success, returns 0.  If 'sfp' is nonnull, sets '*sfp' to a newly
 *     allocated 'struct netdev_saved_flags *' that may be passed to
 *     netdev_restore_flags() to restore the original values of 'flags' on
 *     'netdev' (this will happen automatically at program termination if
 *     netdev_restore_flags() is never called), or to NULL if no flags were
 *     actually changed.
 *
 *   - On failure, returns a positive errno value.  If 'sfp' is nonnull,
 *     sets '*sfp' to NULL. */
int
netdev_turn_flags_on(struct netdev *netdev, enum netdev_flags flags,
                     struct netdev_saved_flags **sfp)
{
    return do_update_flags(netdev, 0, flags, NULL, sfp);
}
1303
/* Turns off the specified 'flags' on 'netdev'.  See netdev_turn_flags_on()
 * for details of the interface, including the meaning of 'sfp'. */
int
netdev_turn_flags_off(struct netdev *netdev, enum netdev_flags flags,
                      struct netdev_saved_flags **sfp)
{
    return do_update_flags(netdev, flags, 0, NULL, sfp);
}
1312
/* Restores the flags that were saved in 'sf', and destroys 'sf'.
 * Does nothing if 'sf' is NULL.
 *
 * Undoes the change recorded by do_update_flags(): every saved flag is put
 * back to its pre-change value, the record is unlinked from its netdev's
 * saved-flags list, and the reference the record held on the netdev is
 * dropped. */
void
netdev_restore_flags(struct netdev_saved_flags *sf)
    OVS_EXCLUDED(netdev_mutex)
{
    if (sf) {
        struct netdev *netdev = sf->netdev;
        enum netdev_flags old_flags;

        /* saved_flags & saved_values  = flags that were turned on: turn off.
         * saved_flags & ~saved_values = flags that were turned off: turn on.
         * The return value is deliberately ignored: restoration is best
         * effort. */
        netdev->netdev_class->update_flags(netdev,
                                           sf->saved_flags & sf->saved_values,
                                           sf->saved_flags & ~sf->saved_values,
                                           &old_flags);

        /* No explicit unlock below: netdev_unref() is evidently responsible
         * for releasing netdev_mutex. */
        ovs_mutex_lock(&netdev_mutex);
        ovs_list_remove(&sf->node);
        free(sf);
        netdev_unref(netdev);
    }
}
1334
1335 /* Looks up the ARP table entry for 'ip' on 'netdev'. If one exists and can be
1336 * successfully retrieved, it stores the corresponding MAC address in 'mac' and
1337 * returns 0. Otherwise, it returns a positive errno value; in particular,
1338 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
1339 int
1340 netdev_arp_lookup(const struct netdev *netdev,
1341 ovs_be32 ip, struct eth_addr *mac)
1342 {
1343 int error = (netdev->netdev_class->arp_lookup
1344 ? netdev->netdev_class->arp_lookup(netdev, ip, mac)
1345 : EOPNOTSUPP);
1346 if (error) {
1347 *mac = eth_addr_zero;
1348 }
1349 return error;
1350 }
1351
1352 /* Returns true if carrier is active (link light is on) on 'netdev'. */
1353 bool
1354 netdev_get_carrier(const struct netdev *netdev)
1355 {
1356 int error;
1357 enum netdev_flags flags;
1358 bool carrier;
1359
1360 netdev_get_flags(netdev, &flags);
1361 if (!(flags & NETDEV_UP)) {
1362 return false;
1363 }
1364
1365 if (!netdev->netdev_class->get_carrier) {
1366 return true;
1367 }
1368
1369 error = netdev->netdev_class->get_carrier(netdev, &carrier);
1370 if (error) {
1371 VLOG_DBG("%s: failed to get network device carrier status, assuming "
1372 "down: %s", netdev_get_name(netdev), ovs_strerror(error));
1373 carrier = false;
1374 }
1375
1376 return carrier;
1377 }
1378
1379 /* Returns the number of times 'netdev''s carrier has changed. */
1380 long long int
1381 netdev_get_carrier_resets(const struct netdev *netdev)
1382 {
1383 return (netdev->netdev_class->get_carrier_resets
1384 ? netdev->netdev_class->get_carrier_resets(netdev)
1385 : 0);
1386 }
1387
1388 /* Attempts to force netdev_get_carrier() to poll 'netdev''s MII registers for
1389 * link status instead of checking 'netdev''s carrier. 'netdev''s MII
1390 * registers will be polled once ever 'interval' milliseconds. If 'netdev'
1391 * does not support MII, another method may be used as a fallback. If
1392 * 'interval' is less than or equal to zero, reverts netdev_get_carrier() to
1393 * its normal behavior.
1394 *
1395 * Returns 0 if successful, otherwise a positive errno value. */
1396 int
1397 netdev_set_miimon_interval(struct netdev *netdev, long long int interval)
1398 {
1399 return (netdev->netdev_class->set_miimon_interval
1400 ? netdev->netdev_class->set_miimon_interval(netdev, interval)
1401 : EOPNOTSUPP);
1402 }
1403
1404 /* Retrieves current device stats for 'netdev'. */
1405 int
1406 netdev_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1407 {
1408 int error;
1409
1410 /* Statistics are initialized before passing it to particular device
1411 * implementation so all values are filtered out by default. */
1412 memset(stats, 0xFF, sizeof *stats);
1413
1414 COVERAGE_INC(netdev_get_stats);
1415 error = (netdev->netdev_class->get_stats
1416 ? netdev->netdev_class->get_stats(netdev, stats)
1417 : EOPNOTSUPP);
1418 if (error) {
1419 /* In case of error all statistics are filtered out */
1420 memset(stats, 0xff, sizeof *stats);
1421 }
1422 return error;
1423 }
1424
1425 /* Attempts to set input rate limiting (policing) policy, such that up to
1426 * 'kbits_rate' kbps of traffic is accepted, with a maximum accumulative burst
1427 * size of 'kbits' kb. */
1428 int
1429 netdev_set_policing(struct netdev *netdev, uint32_t kbits_rate,
1430 uint32_t kbits_burst)
1431 {
1432 return (netdev->netdev_class->set_policing
1433 ? netdev->netdev_class->set_policing(netdev,
1434 kbits_rate, kbits_burst)
1435 : EOPNOTSUPP);
1436 }
1437
1438 /* Adds to 'types' all of the forms of QoS supported by 'netdev', or leaves it
1439 * empty if 'netdev' does not support QoS. Any names added to 'types' should
1440 * be documented as valid for the "type" column in the "QoS" table in
1441 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1442 *
1443 * Every network device supports disabling QoS with a type of "", but this type
1444 * will not be added to 'types'.
1445 *
1446 * The caller must initialize 'types' (e.g. with sset_init()) before calling
1447 * this function. The caller is responsible for destroying 'types' (e.g. with
1448 * sset_destroy()) when it is no longer needed.
1449 *
1450 * Returns 0 if successful, otherwise a positive errno value. */
1451 int
1452 netdev_get_qos_types(const struct netdev *netdev, struct sset *types)
1453 {
1454 const struct netdev_class *class = netdev->netdev_class;
1455 return (class->get_qos_types
1456 ? class->get_qos_types(netdev, types)
1457 : 0);
1458 }
1459
1460 /* Queries 'netdev' for its capabilities regarding the specified 'type' of QoS,
1461 * which should be "" or one of the types returned by netdev_get_qos_types()
1462 * for 'netdev'. Returns 0 if successful, otherwise a positive errno value.
1463 * On success, initializes 'caps' with the QoS capabilities; on failure, clears
1464 * 'caps' to all zeros. */
1465 int
1466 netdev_get_qos_capabilities(const struct netdev *netdev, const char *type,
1467 struct netdev_qos_capabilities *caps)
1468 {
1469 const struct netdev_class *class = netdev->netdev_class;
1470
1471 if (*type) {
1472 int retval = (class->get_qos_capabilities
1473 ? class->get_qos_capabilities(netdev, type, caps)
1474 : EOPNOTSUPP);
1475 if (retval) {
1476 memset(caps, 0, sizeof *caps);
1477 }
1478 return retval;
1479 } else {
1480 /* Every netdev supports turning off QoS. */
1481 memset(caps, 0, sizeof *caps);
1482 return 0;
1483 }
1484 }
1485
1486 /* Obtains the number of queues supported by 'netdev' for the specified 'type'
1487 * of QoS. Returns 0 if successful, otherwise a positive errno value. Stores
1488 * the number of queues (zero on failure) in '*n_queuesp'.
1489 *
1490 * This is just a simple wrapper around netdev_get_qos_capabilities(). */
1491 int
1492 netdev_get_n_queues(const struct netdev *netdev,
1493 const char *type, unsigned int *n_queuesp)
1494 {
1495 struct netdev_qos_capabilities caps;
1496 int retval;
1497
1498 retval = netdev_get_qos_capabilities(netdev, type, &caps);
1499 *n_queuesp = caps.n_queues;
1500 return retval;
1501 }
1502
1503 /* Queries 'netdev' about its currently configured form of QoS. If successful,
1504 * stores the name of the current form of QoS into '*typep', stores any details
1505 * of configuration as string key-value pairs in 'details', and returns 0. On
1506 * failure, sets '*typep' to NULL and returns a positive errno value.
1507 *
1508 * A '*typep' of "" indicates that QoS is currently disabled on 'netdev'.
1509 *
1510 * The caller must initialize 'details' as an empty smap (e.g. with
1511 * smap_init()) before calling this function. The caller must free 'details'
1512 * when it is no longer needed (e.g. with smap_destroy()).
1513 *
1514 * The caller must not modify or free '*typep'.
1515 *
1516 * '*typep' will be one of the types returned by netdev_get_qos_types() for
1517 * 'netdev'. The contents of 'details' should be documented as valid for
1518 * '*typep' in the "other_config" column in the "QoS" table in
1519 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)). */
1520 int
1521 netdev_get_qos(const struct netdev *netdev,
1522 const char **typep, struct smap *details)
1523 {
1524 const struct netdev_class *class = netdev->netdev_class;
1525 int retval;
1526
1527 if (class->get_qos) {
1528 retval = class->get_qos(netdev, typep, details);
1529 if (retval) {
1530 *typep = NULL;
1531 smap_clear(details);
1532 }
1533 return retval;
1534 } else {
1535 /* 'netdev' doesn't support QoS, so report that QoS is disabled. */
1536 *typep = "";
1537 return 0;
1538 }
1539 }
1540
1541 /* Attempts to reconfigure QoS on 'netdev', changing the form of QoS to 'type'
1542 * with details of configuration from 'details'. Returns 0 if successful,
1543 * otherwise a positive errno value. On error, the previous QoS configuration
1544 * is retained.
1545 *
1546 * When this function changes the type of QoS (not just 'details'), this also
1547 * resets all queue configuration for 'netdev' to their defaults (which depend
1548 * on the specific type of QoS). Otherwise, the queue configuration for
1549 * 'netdev' is unchanged.
1550 *
1551 * 'type' should be "" (to disable QoS) or one of the types returned by
1552 * netdev_get_qos_types() for 'netdev'. The contents of 'details' should be
1553 * documented as valid for the given 'type' in the "other_config" column in the
1554 * "QoS" table in vswitchd/vswitch.xml (which is built as
1555 * ovs-vswitchd.conf.db(8)).
1556 *
1557 * NULL may be specified for 'details' if there are no configuration
1558 * details. */
1559 int
1560 netdev_set_qos(struct netdev *netdev,
1561 const char *type, const struct smap *details)
1562 {
1563 const struct netdev_class *class = netdev->netdev_class;
1564
1565 if (!type) {
1566 type = "";
1567 }
1568
1569 if (class->set_qos) {
1570 if (!details) {
1571 static const struct smap empty = SMAP_INITIALIZER(&empty);
1572 details = &empty;
1573 }
1574 return class->set_qos(netdev, type, details);
1575 } else {
1576 return *type ? EOPNOTSUPP : 0;
1577 }
1578 }
1579
1580 /* Queries 'netdev' for information about the queue numbered 'queue_id'. If
1581 * successful, adds that information as string key-value pairs to 'details'.
1582 * Returns 0 if successful, otherwise a positive errno value.
1583 *
1584 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1585 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1586 *
1587 * The returned contents of 'details' should be documented as valid for the
1588 * given 'type' in the "other_config" column in the "Queue" table in
1589 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1590 *
1591 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1592 * this function. The caller must free 'details' when it is no longer needed
1593 * (e.g. with smap_destroy()). */
1594 int
1595 netdev_get_queue(const struct netdev *netdev,
1596 unsigned int queue_id, struct smap *details)
1597 {
1598 const struct netdev_class *class = netdev->netdev_class;
1599 int retval;
1600
1601 retval = (class->get_queue
1602 ? class->get_queue(netdev, queue_id, details)
1603 : EOPNOTSUPP);
1604 if (retval) {
1605 smap_clear(details);
1606 }
1607 return retval;
1608 }
1609
1610 /* Configures the queue numbered 'queue_id' on 'netdev' with the key-value
1611 * string pairs in 'details'. The contents of 'details' should be documented
1612 * as valid for the given 'type' in the "other_config" column in the "Queue"
1613 * table in vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1614 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1615 * given queue's configuration should be unmodified.
1616 *
1617 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1618 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1619 *
1620 * This function does not modify 'details', and the caller retains ownership of
1621 * it. */
1622 int
1623 netdev_set_queue(struct netdev *netdev,
1624 unsigned int queue_id, const struct smap *details)
1625 {
1626 const struct netdev_class *class = netdev->netdev_class;
1627 return (class->set_queue
1628 ? class->set_queue(netdev, queue_id, details)
1629 : EOPNOTSUPP);
1630 }
1631
1632 /* Attempts to delete the queue numbered 'queue_id' from 'netdev'. Some kinds
1633 * of QoS may have a fixed set of queues, in which case attempts to delete them
1634 * will fail with EOPNOTSUPP.
1635 *
1636 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1637 * given queue will be unmodified.
1638 *
1639 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1640 * the current form of QoS (e.g. as returned by
1641 * netdev_get_n_queues(netdev)). */
1642 int
1643 netdev_delete_queue(struct netdev *netdev, unsigned int queue_id)
1644 {
1645 const struct netdev_class *class = netdev->netdev_class;
1646 return (class->delete_queue
1647 ? class->delete_queue(netdev, queue_id)
1648 : EOPNOTSUPP);
1649 }
1650
1651 /* Obtains statistics about 'queue_id' on 'netdev'. On success, returns 0 and
1652 * fills 'stats' with the queue's statistics; individual members of 'stats' may
1653 * be set to all-1-bits if the statistic is unavailable. On failure, returns a
1654 * positive errno value and fills 'stats' with values indicating unsupported
1655 * statistics. */
1656 int
1657 netdev_get_queue_stats(const struct netdev *netdev, unsigned int queue_id,
1658 struct netdev_queue_stats *stats)
1659 {
1660 const struct netdev_class *class = netdev->netdev_class;
1661 int retval;
1662
1663 retval = (class->get_queue_stats
1664 ? class->get_queue_stats(netdev, queue_id, stats)
1665 : EOPNOTSUPP);
1666 if (retval) {
1667 stats->tx_bytes = UINT64_MAX;
1668 stats->tx_packets = UINT64_MAX;
1669 stats->tx_errors = UINT64_MAX;
1670 stats->created = LLONG_MIN;
1671 }
1672 return retval;
1673 }
1674
1675 /* Initializes 'dump' to begin dumping the queues in a netdev.
1676 *
1677 * This function provides no status indication. An error status for the entire
1678 * dump operation is provided when it is completed by calling
1679 * netdev_queue_dump_done().
1680 */
1681 void
1682 netdev_queue_dump_start(struct netdev_queue_dump *dump,
1683 const struct netdev *netdev)
1684 {
1685 dump->netdev = netdev_ref(netdev);
1686 if (netdev->netdev_class->queue_dump_start) {
1687 dump->error = netdev->netdev_class->queue_dump_start(netdev,
1688 &dump->state);
1689 } else {
1690 dump->error = EOPNOTSUPP;
1691 }
1692 }
1693
1694 /* Attempts to retrieve another queue from 'dump', which must have been
1695 * initialized with netdev_queue_dump_start(). On success, stores a new queue
1696 * ID into '*queue_id', fills 'details' with configuration details for the
1697 * queue, and returns true. On failure, returns false.
1698 *
1699 * Queues are not necessarily dumped in increasing order of queue ID (or any
1700 * other predictable order).
1701 *
1702 * Failure might indicate an actual error or merely that the last queue has
1703 * been dumped. An error status for the entire dump operation is provided when
1704 * it is completed by calling netdev_queue_dump_done().
1705 *
1706 * The returned contents of 'details' should be documented as valid for the
1707 * given 'type' in the "other_config" column in the "Queue" table in
1708 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1709 *
1710 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1711 * this function. This function will clear and replace its contents. The
1712 * caller must free 'details' when it is no longer needed (e.g. with
1713 * smap_destroy()). */
1714 bool
1715 netdev_queue_dump_next(struct netdev_queue_dump *dump,
1716 unsigned int *queue_id, struct smap *details)
1717 {
1718 const struct netdev *netdev = dump->netdev;
1719
1720 if (dump->error) {
1721 return false;
1722 }
1723
1724 dump->error = netdev->netdev_class->queue_dump_next(netdev, dump->state,
1725 queue_id, details);
1726
1727 if (dump->error) {
1728 netdev->netdev_class->queue_dump_done(netdev, dump->state);
1729 return false;
1730 }
1731 return true;
1732 }
1733
1734 /* Completes queue table dump operation 'dump', which must have been
1735 * initialized with netdev_queue_dump_start(). Returns 0 if the dump operation
1736 * was error-free, otherwise a positive errno value describing the problem. */
1737 int
1738 netdev_queue_dump_done(struct netdev_queue_dump *dump)
1739 {
1740 const struct netdev *netdev = dump->netdev;
1741 if (!dump->error && netdev->netdev_class->queue_dump_done) {
1742 dump->error = netdev->netdev_class->queue_dump_done(netdev,
1743 dump->state);
1744 }
1745 netdev_close(dump->netdev);
1746 return dump->error == EOF ? 0 : dump->error;
1747 }
1748
1749 /* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
1750 * its statistics, and the 'aux' specified by the caller. The order of
1751 * iteration is unspecified, but (when successful) each queue is visited
1752 * exactly once.
1753 *
1754 * Calling this function may be more efficient than calling
1755 * netdev_get_queue_stats() for every queue.
1756 *
1757 * 'cb' must not modify or free the statistics passed in.
1758 *
1759 * Returns 0 if successful, otherwise a positive errno value. On error, some
1760 * configured queues may not have been included in the iteration. */
1761 int
1762 netdev_dump_queue_stats(const struct netdev *netdev,
1763 netdev_dump_queue_stats_cb *cb, void *aux)
1764 {
1765 const struct netdev_class *class = netdev->netdev_class;
1766 return (class->dump_queue_stats
1767 ? class->dump_queue_stats(netdev, cb, aux)
1768 : EOPNOTSUPP);
1769 }
1770
1771 \f
/* Returns the class type of 'netdev', e.g. the "type" string its provider
 * class registered.
 *
 * The caller must not free the returned value. */
const char *
netdev_get_type(const struct netdev *netdev)
{
    return netdev->netdev_class->type;
}
1780
/* Returns the provider class associated with 'netdev'.  The returned
 * pointer is borrowed; the caller must not free it. */
const struct netdev_class *
netdev_get_class(const struct netdev *netdev)
{
    return netdev->netdev_class;
}
1787
1788 /* Returns the netdev with 'name' or NULL if there is none.
1789 *
1790 * The caller must free the returned netdev with netdev_close(). */
1791 struct netdev *
1792 netdev_from_name(const char *name)
1793 OVS_EXCLUDED(netdev_mutex)
1794 {
1795 struct netdev *netdev;
1796
1797 ovs_mutex_lock(&netdev_mutex);
1798 netdev = shash_find_data(&netdev_shash, name);
1799 if (netdev) {
1800 netdev->ref_cnt++;
1801 }
1802 ovs_mutex_unlock(&netdev_mutex);
1803
1804 return netdev;
1805 }
1806
1807 /* Fills 'device_list' with devices that match 'netdev_class'.
1808 *
1809 * The caller is responsible for initializing and destroying 'device_list' and
1810 * must close each device on the list. */
1811 void
1812 netdev_get_devices(const struct netdev_class *netdev_class,
1813 struct shash *device_list)
1814 OVS_EXCLUDED(netdev_mutex)
1815 {
1816 struct shash_node *node;
1817
1818 ovs_mutex_lock(&netdev_mutex);
1819 SHASH_FOR_EACH (node, &netdev_shash) {
1820 struct netdev *dev = node->data;
1821
1822 if (dev->netdev_class == netdev_class) {
1823 dev->ref_cnt++;
1824 shash_add(device_list, node->name, node->data);
1825 }
1826 }
1827 ovs_mutex_unlock(&netdev_mutex);
1828 }
1829
1830 /* Extracts pointers to all 'netdev-vports' into an array 'vports'
1831 * and returns it. Stores the size of the array into '*size'.
1832 *
1833 * The caller is responsible for freeing 'vports' and must close
1834 * each 'netdev-vport' in the list. */
1835 struct netdev **
1836 netdev_get_vports(size_t *size)
1837 OVS_EXCLUDED(netdev_mutex)
1838 {
1839 struct netdev **vports;
1840 struct shash_node *node;
1841 size_t n = 0;
1842
1843 if (!size) {
1844 return NULL;
1845 }
1846
1847 /* Explicitly allocates big enough chunk of memory. */
1848 ovs_mutex_lock(&netdev_mutex);
1849 vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
1850 SHASH_FOR_EACH (node, &netdev_shash) {
1851 struct netdev *dev = node->data;
1852
1853 if (netdev_vport_is_vport_class(dev->netdev_class)) {
1854 dev->ref_cnt++;
1855 vports[n] = dev;
1856 n++;
1857 }
1858 }
1859 ovs_mutex_unlock(&netdev_mutex);
1860 *size = n;
1861
1862 return vports;
1863 }
1864
/* Returns the type of the device named 'name': the vport type if 'name'
 * names a vport, otherwise the type of an already-open netdev with that
 * name, or NULL if neither applies.  The caller must not free the result. */
const char *
netdev_get_type_from_name(const char *name)
{
    const char *type = netdev_vport_type_from_name(name);

    if (!type) {
        /* Not a vport: fall back to an open netdev with that name. */
        struct netdev *dev = netdev_from_name(name);

        type = dev ? netdev_get_type(dev) : NULL;
        netdev_close(dev);
    }
    return type;
}
1878 \f
/* Returns the netdev that 'rx' receives packets from.  The returned pointer
 * is borrowed (no reference is taken here).  Asserts that the underlying
 * netdev is still referenced: an rxq must never outlive its device. */
struct netdev *
netdev_rxq_get_netdev(const struct netdev_rxq *rx)
{
    ovs_assert(rx->netdev->ref_cnt > 0);
    return rx->netdev;
}
1885
/* Returns the name of the netdev that 'rx' receives packets from.  The
 * caller must not free the returned string. */
const char *
netdev_rxq_get_name(const struct netdev_rxq *rx)
{
    return netdev_get_name(netdev_rxq_get_netdev(rx));
}
1891
/* Returns the queue id that 'rx' was opened on. */
int
netdev_rxq_get_queue_id(const struct netdev_rxq *rx)
{
    return rx->queue_id;
}
1897
/* Restores the administrative flags of every netdev that still has
 * outstanding saved-flags records, folding all of a device's records into a
 * single update_flags() call.
 *
 * NOTE(review): this walks netdev_shash without holding netdev_mutex;
 * presumably that is safe only because this runs as a termination hook —
 * confirm against the registration site ('aux' is unused, matching a
 * fatal-signal style callback). */
static void
restore_all_flags(void *aux OVS_UNUSED)
{
    struct shash_node *node;

    SHASH_FOR_EACH (node, &netdev_shash) {
        struct netdev *netdev = node->data;
        const struct netdev_saved_flags *sf;
        enum netdev_flags saved_values;
        enum netdev_flags saved_flags;

        saved_values = saved_flags = 0;
        /* 'node' here names the list member inside each saved-flags record,
         * not the shash node above.  Records are pushed at the front of the
         * list, so later iterations visit older records; for a flag saved
         * more than once, the oldest record's value ends up winning. */
        LIST_FOR_EACH (sf, node, &netdev->saved_flags_list) {
            saved_flags |= sf->saved_flags;
            saved_values &= ~sf->saved_flags;
            saved_values |= sf->saved_flags & sf->saved_values;
        }
        if (saved_flags) {
            enum netdev_flags old_flags;

            /* Turn the saved-on flags back on and the saved-off flags back
             * off; errors are ignored (best effort at shutdown). */
            netdev->netdev_class->update_flags(netdev,
                                               saved_flags & saved_values,
                                               saved_flags & ~saved_values,
                                               &old_flags);
        }
    }
}
1925
/* Returns 'netdev''s current change sequence number, which increments
 * whenever the device's configuration or state changes. */
uint64_t
netdev_get_change_seq(const struct netdev *netdev)
{
    return netdev->change_seq;
}
1931
1932 #ifndef _WIN32
1933 /* This implementation is shared by Linux and BSD. */
1934
1935 static struct ifaddrs *if_addr_list;
1936 static struct ovs_mutex if_addr_list_lock = OVS_MUTEX_INITIALIZER;
1937
1938 void
1939 netdev_get_addrs_list_flush(void)
1940 {
1941 ovs_mutex_lock(&if_addr_list_lock);
1942 if (if_addr_list) {
1943 freeifaddrs(if_addr_list);
1944 if_addr_list = NULL;
1945 }
1946 ovs_mutex_unlock(&if_addr_list_lock);
1947 }
1948
1949 int
1950 netdev_get_addrs(const char dev[], struct in6_addr **paddr,
1951 struct in6_addr **pmask, int *n_in)
1952 {
1953 struct in6_addr *addr_array, *mask_array;
1954 const struct ifaddrs *ifa;
1955 int cnt = 0, i = 0;
1956
1957 ovs_mutex_lock(&if_addr_list_lock);
1958 if (!if_addr_list) {
1959 int err;
1960
1961 err = getifaddrs(&if_addr_list);
1962 if (err) {
1963 ovs_mutex_unlock(&if_addr_list_lock);
1964 return -err;
1965 }
1966 }
1967
1968 for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
1969 if (ifa->ifa_addr && ifa->ifa_name && ifa->ifa_netmask) {
1970 int family;
1971
1972 family = ifa->ifa_addr->sa_family;
1973 if (family == AF_INET || family == AF_INET6) {
1974 if (!strncmp(ifa->ifa_name, dev, IFNAMSIZ)) {
1975 cnt++;
1976 }
1977 }
1978 }
1979 }
1980
1981 if (!cnt) {
1982 ovs_mutex_unlock(&if_addr_list_lock);
1983 return EADDRNOTAVAIL;
1984 }
1985 addr_array = xzalloc(sizeof *addr_array * cnt);
1986 mask_array = xzalloc(sizeof *mask_array * cnt);
1987 for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
1988 int family;
1989
1990 if (!ifa->ifa_name || !ifa->ifa_addr || !ifa->ifa_netmask
1991 || strncmp(ifa->ifa_name, dev, IFNAMSIZ)) {
1992 continue;
1993 }
1994
1995 family = ifa->ifa_addr->sa_family;
1996 if (family == AF_INET) {
1997 const struct sockaddr_in *sin;
1998
1999 sin = ALIGNED_CAST(const struct sockaddr_in *, ifa->ifa_addr);
2000 in6_addr_set_mapped_ipv4(&addr_array[i], sin->sin_addr.s_addr);
2001 sin = ALIGNED_CAST(const struct sockaddr_in *, ifa->ifa_netmask);
2002 in6_addr_set_mapped_ipv4(&mask_array[i], sin->sin_addr.s_addr);
2003 i++;
2004 } else if (family == AF_INET6) {
2005 const struct sockaddr_in6 *sin6;
2006
2007 sin6 = ALIGNED_CAST(const struct sockaddr_in6 *, ifa->ifa_addr);
2008 memcpy(&addr_array[i], &sin6->sin6_addr, sizeof *addr_array);
2009 sin6 = ALIGNED_CAST(const struct sockaddr_in6 *, ifa->ifa_netmask);
2010 memcpy(&mask_array[i], &sin6->sin6_addr, sizeof *mask_array);
2011 i++;
2012 }
2013 }
2014 ovs_mutex_unlock(&if_addr_list_lock);
2015 if (paddr) {
2016 *n_in = cnt;
2017 *paddr = addr_array;
2018 *pmask = mask_array;
2019 } else {
2020 free(addr_array);
2021 free(mask_array);
2022 }
2023 return 0;
2024 }
2025 #endif
2026
2027 void
2028 netdev_wait_reconf_required(struct netdev *netdev)
2029 {
2030 seq_wait(netdev->reconfigure_seq, netdev->last_reconfigure_seq);
2031 }
2032
2033 bool
2034 netdev_is_reconf_required(struct netdev *netdev)
2035 {
2036 return seq_read(netdev->reconfigure_seq) != netdev->last_reconfigure_seq;
2037 }
2038
2039 /* Give a chance to 'netdev' to reconfigure some of its parameters.
2040 *
2041 * If a module uses netdev_send() and netdev_rxq_recv(), it must call this
2042 * function when netdev_is_reconf_required() returns true.
2043 *
2044 * Return 0 if successful, otherwise a positive errno value. If the
2045 * reconfiguration fails the netdev will not be able to send or receive
2046 * packets.
2047 *
2048 * When this function is called, no call to netdev_rxq_recv() or netdev_send()
2049 * must be issued. */
2050 int
2051 netdev_reconfigure(struct netdev *netdev)
2052 {
2053 const struct netdev_class *class = netdev->netdev_class;
2054
2055 netdev->last_reconfigure_seq = seq_read(netdev->reconfigure_seq);
2056
2057 return (class->reconfigure
2058 ? class->reconfigure(netdev)
2059 : EOPNOTSUPP);
2060 }
2061
2062 int
2063 netdev_flow_flush(struct netdev *netdev)
2064 {
2065 const struct netdev_class *class = netdev->netdev_class;
2066
2067 return (class->flow_flush
2068 ? class->flow_flush(netdev)
2069 : EOPNOTSUPP);
2070 }
2071
2072 int
2073 netdev_flow_dump_create(struct netdev *netdev, struct netdev_flow_dump **dump)
2074 {
2075 const struct netdev_class *class = netdev->netdev_class;
2076
2077 return (class->flow_dump_create
2078 ? class->flow_dump_create(netdev, dump)
2079 : EOPNOTSUPP);
2080 }
2081
2082 int
2083 netdev_flow_dump_destroy(struct netdev_flow_dump *dump)
2084 {
2085 const struct netdev_class *class = dump->netdev->netdev_class;
2086
2087 return (class->flow_dump_destroy
2088 ? class->flow_dump_destroy(dump)
2089 : EOPNOTSUPP);
2090 }
2091
2092 bool
2093 netdev_flow_dump_next(struct netdev_flow_dump *dump, struct match *match,
2094 struct nlattr **actions, struct dpif_flow_stats *stats,
2095 ovs_u128 *ufid, struct ofpbuf *rbuffer,
2096 struct ofpbuf *wbuffer)
2097 {
2098 const struct netdev_class *class = dump->netdev->netdev_class;
2099
2100 return (class->flow_dump_next
2101 ? class->flow_dump_next(dump, match, actions, stats, ufid,
2102 rbuffer, wbuffer)
2103 : false);
2104 }
2105
2106 int
2107 netdev_flow_put(struct netdev *netdev, struct match *match,
2108 struct nlattr *actions, size_t act_len,
2109 const ovs_u128 *ufid, struct offload_info *info,
2110 struct dpif_flow_stats *stats)
2111 {
2112 const struct netdev_class *class = netdev->netdev_class;
2113
2114 return (class->flow_put
2115 ? class->flow_put(netdev, match, actions, act_len, ufid,
2116 info, stats)
2117 : EOPNOTSUPP);
2118 }
2119
2120 int
2121 netdev_flow_get(struct netdev *netdev, struct match *match,
2122 struct nlattr **actions, const ovs_u128 *ufid,
2123 struct dpif_flow_stats *stats, struct ofpbuf *buf)
2124 {
2125 const struct netdev_class *class = netdev->netdev_class;
2126
2127 return (class->flow_get
2128 ? class->flow_get(netdev, match, actions, ufid, stats, buf)
2129 : EOPNOTSUPP);
2130 }
2131
2132 int
2133 netdev_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
2134 struct dpif_flow_stats *stats)
2135 {
2136 const struct netdev_class *class = netdev->netdev_class;
2137
2138 return (class->flow_del
2139 ? class->flow_del(netdev, ufid, stats)
2140 : EOPNOTSUPP);
2141 }
2142
2143 int
2144 netdev_init_flow_api(struct netdev *netdev)
2145 {
2146 const struct netdev_class *class = netdev->netdev_class;
2147
2148 if (!netdev_is_flow_api_enabled()) {
2149 return EOPNOTSUPP;
2150 }
2151
2152 return (class->init_flow_api
2153 ? class->init_flow_api(netdev)
2154 : EOPNOTSUPP);
2155 }
2156
2157 bool
2158 netdev_is_flow_api_enabled(void)
2159 {
2160 return netdev_flow_api_enabled;
2161 }
2162
/* Protects below port hashmaps. */
static struct ovs_mutex netdev_hmap_mutex = OVS_MUTEX_INITIALIZER;

/* Maps (dpif class, datapath port number) to 'struct port_to_netdev_data'.
 * Keyed by NETDEV_PORTS_HASH_INT(). */
static struct hmap port_to_netdev OVS_GUARDED_BY(netdev_hmap_mutex)
    = HMAP_INITIALIZER(&port_to_netdev);
/* Maps ifindex to 'struct ifindex_to_port_data'.  Hashed directly on the
 * ifindex value (see netdev_ports_insert()). */
static struct hmap ifindex_to_port OVS_GUARDED_BY(netdev_hmap_mutex)
    = HMAP_INITIALIZER(&ifindex_to_port);

struct port_to_netdev_data {
    struct hmap_node node;      /* In 'port_to_netdev'. */
    struct netdev *netdev;      /* Owns a reference (netdev_ref()). */
    struct dpif_port dpif_port; /* Deep copy (dpif_port_clone()). */
    const struct dpif_class *dpif_class;
};

struct ifindex_to_port_data {
    struct hmap_node node;      /* In 'ifindex_to_port'. */
    int ifindex;                /* The netdev's ifindex. */
    odp_port_t port;            /* Datapath port number. */
};
2183
/* Hashes the pair ('port', 'dpif') for lookup in 'port_to_netdev'.
 *
 * The previous definition ended with a semicolon, so each use expanded to a
 * stray empty statement ("...;;") and the macro could not be used inside an
 * expression (CERT PRE11-C). */
#define NETDEV_PORTS_HASH_INT(port, dpif) \
    hash_int(odp_to_u32(port), hash_pointer(dpif, 0))
2187
2188 static struct port_to_netdev_data *
2189 netdev_ports_lookup(odp_port_t port_no, const struct dpif_class *dpif_class)
2190 OVS_REQUIRES(netdev_hmap_mutex)
2191 {
2192 size_t hash = NETDEV_PORTS_HASH_INT(port_no, dpif_class);
2193 struct port_to_netdev_data *data;
2194
2195 HMAP_FOR_EACH_WITH_HASH(data, node, hash, &port_to_netdev) {
2196 if (data->dpif_class == dpif_class
2197 && data->dpif_port.port_no == port_no) {
2198 return data;
2199 }
2200 }
2201 return NULL;
2202 }
2203
2204 int
2205 netdev_ports_insert(struct netdev *netdev, const struct dpif_class *dpif_class,
2206 struct dpif_port *dpif_port)
2207 {
2208 size_t hash = NETDEV_PORTS_HASH_INT(dpif_port->port_no, dpif_class);
2209 struct port_to_netdev_data *data;
2210 struct ifindex_to_port_data *ifidx;
2211 int ifindex = netdev_get_ifindex(netdev);
2212
2213 if (ifindex < 0) {
2214 return ENODEV;
2215 }
2216
2217 data = xzalloc(sizeof *data);
2218 ifidx = xzalloc(sizeof *ifidx);
2219
2220 ovs_mutex_lock(&netdev_hmap_mutex);
2221 if (netdev_ports_lookup(dpif_port->port_no, dpif_class)) {
2222 ovs_mutex_unlock(&netdev_hmap_mutex);
2223 return EEXIST;
2224 }
2225
2226 data->netdev = netdev_ref(netdev);
2227 data->dpif_class = dpif_class;
2228 dpif_port_clone(&data->dpif_port, dpif_port);
2229
2230 ifidx->ifindex = ifindex;
2231 ifidx->port = dpif_port->port_no;
2232
2233 hmap_insert(&port_to_netdev, &data->node, hash);
2234 hmap_insert(&ifindex_to_port, &ifidx->node, ifidx->ifindex);
2235 ovs_mutex_unlock(&netdev_hmap_mutex);
2236
2237 netdev_init_flow_api(netdev);
2238
2239 return 0;
2240 }
2241
2242 struct netdev *
2243 netdev_ports_get(odp_port_t port_no, const struct dpif_class *dpif_class)
2244 {
2245 struct port_to_netdev_data *data;
2246 struct netdev *ret = NULL;
2247
2248 ovs_mutex_lock(&netdev_hmap_mutex);
2249 data = netdev_ports_lookup(port_no, dpif_class);
2250 if (data) {
2251 ret = netdev_ref(data->netdev);
2252 }
2253 ovs_mutex_unlock(&netdev_hmap_mutex);
2254
2255 return ret;
2256 }
2257
/* Removes the registration of dpif port 'port_no' within 'dpif_class',
 * dropping the map's reference to the netdev and deleting the matching
 * ifindex mapping.  Returns 0 on success, ENOENT if the port is not
 * registered. */
int
netdev_ports_remove(odp_port_t port_no, const struct dpif_class *dpif_class)
{
    struct port_to_netdev_data *data;
    int ret = ENOENT;

    ovs_mutex_lock(&netdev_hmap_mutex);

    data = netdev_ports_lookup(port_no, dpif_class);

    if (data) {
        int ifindex = netdev_get_ifindex(data->netdev);

        if (ifindex > 0) {
            struct ifindex_to_port_data *ifidx = NULL;

            /* 'ifindex_to_port' is hashed directly on the ifindex value
             * (see netdev_ports_insert()), so the ifindex is the hash. */
            HMAP_FOR_EACH_WITH_HASH (ifidx, node, ifindex, &ifindex_to_port) {
                if (ifidx->port == port_no) {
                    hmap_remove(&ifindex_to_port, &ifidx->node);
                    free(ifidx);
                    break;
                }
            }
            /* NOTE(review): this assert relies on HMAP_FOR_EACH_WITH_HASH
             * leaving 'ifidx' nonnull only when the loop exited via the
             * 'break' above — confirm against the hmap iterator macros. */
            ovs_assert(ifidx);
        } else {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

            VLOG_WARN_RL(&rl, "netdev ports map has dpif port %"PRIu32
                         " but netdev has no ifindex: %s", port_no,
                         ovs_strerror(ifindex));
        }

        dpif_port_destroy(&data->dpif_port);
        netdev_close(data->netdev); /* unref and possibly close */
        hmap_remove(&port_to_netdev, &data->node);
        free(data);
        ret = 0;
    }

    ovs_mutex_unlock(&netdev_hmap_mutex);

    return ret;
}
2301
2302 odp_port_t
2303 netdev_ifindex_to_odp_port(int ifindex)
2304 {
2305 struct ifindex_to_port_data *data;
2306 odp_port_t ret = 0;
2307
2308 ovs_mutex_lock(&netdev_hmap_mutex);
2309 HMAP_FOR_EACH_WITH_HASH(data, node, ifindex, &ifindex_to_port) {
2310 if (data->ifindex == ifindex) {
2311 ret = data->port;
2312 break;
2313 }
2314 }
2315 ovs_mutex_unlock(&netdev_hmap_mutex);
2316
2317 return ret;
2318 }
2319
2320 void
2321 netdev_ports_flow_flush(const struct dpif_class *dpif_class)
2322 {
2323 struct port_to_netdev_data *data;
2324
2325 ovs_mutex_lock(&netdev_hmap_mutex);
2326 HMAP_FOR_EACH(data, node, &port_to_netdev) {
2327 if (data->dpif_class == dpif_class) {
2328 netdev_flow_flush(data->netdev);
2329 }
2330 }
2331 ovs_mutex_unlock(&netdev_hmap_mutex);
2332 }
2333
2334 struct netdev_flow_dump **
2335 netdev_ports_flow_dump_create(const struct dpif_class *dpif_class, int *ports)
2336 {
2337 struct port_to_netdev_data *data;
2338 struct netdev_flow_dump **dumps;
2339 int count = 0;
2340 int i = 0;
2341
2342 ovs_mutex_lock(&netdev_hmap_mutex);
2343 HMAP_FOR_EACH(data, node, &port_to_netdev) {
2344 if (data->dpif_class == dpif_class) {
2345 count++;
2346 }
2347 }
2348
2349 dumps = count ? xzalloc(sizeof *dumps * count) : NULL;
2350
2351 HMAP_FOR_EACH(data, node, &port_to_netdev) {
2352 if (data->dpif_class == dpif_class) {
2353 if (netdev_flow_dump_create(data->netdev, &dumps[i])) {
2354 continue;
2355 }
2356
2357 dumps[i]->port = data->dpif_port.port_no;
2358 i++;
2359 }
2360 }
2361 ovs_mutex_unlock(&netdev_hmap_mutex);
2362
2363 *ports = i;
2364 return dumps;
2365 }
2366
2367 int
2368 netdev_ports_flow_del(const struct dpif_class *dpif_class,
2369 const ovs_u128 *ufid,
2370 struct dpif_flow_stats *stats)
2371 {
2372 struct port_to_netdev_data *data;
2373
2374 ovs_mutex_lock(&netdev_hmap_mutex);
2375 HMAP_FOR_EACH(data, node, &port_to_netdev) {
2376 if (data->dpif_class == dpif_class
2377 && !netdev_flow_del(data->netdev, ufid, stats)) {
2378 ovs_mutex_unlock(&netdev_hmap_mutex);
2379 return 0;
2380 }
2381 }
2382 ovs_mutex_unlock(&netdev_hmap_mutex);
2383
2384 return ENOENT;
2385 }
2386
2387 int
2388 netdev_ports_flow_get(const struct dpif_class *dpif_class, struct match *match,
2389 struct nlattr **actions, const ovs_u128 *ufid,
2390 struct dpif_flow_stats *stats, struct ofpbuf *buf)
2391 {
2392 struct port_to_netdev_data *data;
2393
2394 ovs_mutex_lock(&netdev_hmap_mutex);
2395 HMAP_FOR_EACH(data, node, &port_to_netdev) {
2396 if (data->dpif_class == dpif_class
2397 && !netdev_flow_get(data->netdev, match, actions,
2398 ufid, stats, buf)) {
2399 ovs_mutex_unlock(&netdev_hmap_mutex);
2400 return 0;
2401 }
2402 }
2403 ovs_mutex_unlock(&netdev_hmap_mutex);
2404 return ENOENT;
2405 }
2406
2407 #ifdef __linux__
/* Initializes the flow offload API on every port already registered in
 * 'port_to_netdev'.  Called once when hw-offload is first enabled, to cover
 * ports inserted before the option was set. */
static void
netdev_ports_flow_init(void)
{
    struct port_to_netdev_data *data;

    ovs_mutex_lock(&netdev_hmap_mutex);
    HMAP_FOR_EACH(data, node, &port_to_netdev) {
        netdev_init_flow_api(data->netdev);
    }
    ovs_mutex_unlock(&netdev_hmap_mutex);
}
2419
2420 void
2421 netdev_set_flow_api_enabled(const struct smap *ovs_other_config)
2422 {
2423 if (smap_get_bool(ovs_other_config, "hw-offload", false)) {
2424 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2425
2426 if (ovsthread_once_start(&once)) {
2427 netdev_flow_api_enabled = true;
2428
2429 VLOG_INFO("netdev: Flow API Enabled");
2430
2431 tc_set_policy(smap_get_def(ovs_other_config, "tc-policy",
2432 TC_POLICY_DEFAULT));
2433
2434 netdev_ports_flow_init();
2435
2436 ovsthread_once_done(&once);
2437 }
2438 }
2439 }
2440 #else
/* Stub for platforms without TC support (non-Linux): flow offload cannot be
 * enabled, so the configuration is ignored. */
void
netdev_set_flow_api_enabled(const struct smap *ovs_other_config OVS_UNUSED)
{
}
2445 #endif