]> git.proxmox.com Git - ovs.git/blob - lib/netdev.c
lib/tc: Support matching on ip tos
[ovs.git] / lib / netdev.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "netdev.h"
19
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <sys/types.h>
23 #include <netinet/in.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27
28 #ifndef _WIN32
29 #include <ifaddrs.h>
30 #include <net/if.h>
31 #include <sys/ioctl.h>
32 #endif
33
34 #include "cmap.h"
35 #include "coverage.h"
36 #include "dpif.h"
37 #include "dp-packet.h"
38 #include "openvswitch/dynamic-string.h"
39 #include "fatal-signal.h"
40 #include "hash.h"
41 #include "openvswitch/list.h"
42 #include "netdev-dpdk.h"
43 #include "netdev-provider.h"
44 #include "netdev-vport.h"
45 #include "odp-netlink.h"
46 #include "openflow/openflow.h"
47 #include "packets.h"
48 #include "openvswitch/ofp-print.h"
49 #include "openvswitch/poll-loop.h"
50 #include "seq.h"
51 #include "openvswitch/shash.h"
52 #include "smap.h"
53 #include "socket-util.h"
54 #include "sset.h"
55 #include "svec.h"
56 #include "openvswitch/vlog.h"
57 #include "flow.h"
58 #include "util.h"
59 #ifdef __linux__
60 #include "tc.h"
61 #endif
62
VLOG_DEFINE_THIS_MODULE(netdev);

COVERAGE_DEFINE(netdev_received);
COVERAGE_DEFINE(netdev_sent);
COVERAGE_DEFINE(netdev_add_router);
COVERAGE_DEFINE(netdev_get_stats);

/* A record of device flags changed at runtime, kept so the original values
 * can be restored later (see restore_all_flags(), registered as a
 * fatal-signal hook in netdev_initialize()). */
struct netdev_saved_flags {
    struct netdev *netdev;
    struct ovs_list node;           /* In struct netdev's saved_flags_list. */
    enum netdev_flags saved_flags;  /* Flag bits that were changed. */
    enum netdev_flags saved_values; /* Values of those bits to restore. */
};

/* Protects 'netdev_shash' and the mutable members of struct netdev. */
static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;

/* All created network devices. */
static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
    = SHASH_INITIALIZER(&netdev_shash);

/* Mutual exclusion of netdev provider registration and unregistration, i.e.
 * of writers to 'netdev_classes'.  Readers traverse the cmap without taking
 * this mutex (see netdev_run() and netdev_wait()). */
static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex)
    = OVS_MUTEX_INITIALIZER;

/* Contains 'struct netdev_registered_class'es. */
static struct cmap netdev_classes = CMAP_INITIALIZER;

struct netdev_registered_class {
    struct cmap_node cmap_node; /* In 'netdev_classes', by class->type. */
    const struct netdev_class *class;

    /* Number of references: one for the class itself and one for every
     * instance of the class. */
    struct ovs_refcount refcnt;
};

/* True when hardware flow offload (TC) has been enabled. */
static bool netdev_flow_api_enabled = false;

/* This is set pretty low because we probably won't learn anything from the
 * additional log messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

static void restore_all_flags(void *aux OVS_UNUSED);
void update_device_args(struct netdev *, const struct shash *args);
108
109 int
110 netdev_n_txq(const struct netdev *netdev)
111 {
112 return netdev->n_txq;
113 }
114
115 int
116 netdev_n_rxq(const struct netdev *netdev)
117 {
118 return netdev->n_rxq;
119 }
120
121 bool
122 netdev_is_pmd(const struct netdev *netdev)
123 {
124 return netdev->netdev_class->is_pmd;
125 }
126
127 bool
128 netdev_has_tunnel_push_pop(const struct netdev *netdev)
129 {
130 return netdev->netdev_class->push_header
131 && netdev->netdev_class->pop_header;
132 }
133
/* One-time module setup: registers the built-in netdev providers for the
 * current platform and installs a fatal-signal hook that restores any device
 * flags this process changed.  Idempotent and thread-safe; only the first
 * caller does any work. */
static void
netdev_initialize(void)
    OVS_EXCLUDED(netdev_mutex)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        /* Undo flag changes on exit or fatal signal (see
         * restore_all_flags()). */
        fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);

        netdev_vport_patch_register();

#ifdef __linux__
        netdev_register_provider(&netdev_linux_class);
        netdev_register_provider(&netdev_internal_class);
        netdev_register_provider(&netdev_tap_class);
        netdev_vport_tunnel_register();
#endif
#if defined(__FreeBSD__) || defined(__NetBSD__)
        netdev_register_provider(&netdev_tap_class);
        netdev_register_provider(&netdev_bsd_class);
#endif
#ifdef _WIN32
        netdev_register_provider(&netdev_windows_class);
        netdev_register_provider(&netdev_internal_class);
        netdev_vport_tunnel_register();
#endif
        ovsthread_once_done(&once);
    }
}
163
164 /* Performs periodic work needed by all the various kinds of netdevs.
165 *
166 * If your program opens any netdevs, it must call this function within its
167 * main poll loop. */
168 void
169 netdev_run(void)
170 OVS_EXCLUDED(netdev_mutex)
171 {
172 netdev_initialize();
173
174 struct netdev_registered_class *rc;
175 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
176 if (rc->class->run) {
177 rc->class->run(rc->class);
178 }
179 }
180 }
181
182 /* Arranges for poll_block() to wake up when netdev_run() needs to be called.
183 *
184 * If your program opens any netdevs, it must call this function within its
185 * main poll loop. */
186 void
187 netdev_wait(void)
188 OVS_EXCLUDED(netdev_mutex)
189 {
190 netdev_initialize();
191
192 struct netdev_registered_class *rc;
193 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
194 if (rc->class->wait) {
195 rc->class->wait(rc->class);
196 }
197 }
198 }
199
200 static struct netdev_registered_class *
201 netdev_lookup_class(const char *type)
202 {
203 struct netdev_registered_class *rc;
204 CMAP_FOR_EACH_WITH_HASH (rc, cmap_node, hash_string(type, 0),
205 &netdev_classes) {
206 if (!strcmp(type, rc->class->type)) {
207 return rc;
208 }
209 }
210 return NULL;
211 }
212
213 /* Initializes and registers a new netdev provider. After successful
214 * registration, new netdevs of that type can be opened using netdev_open(). */
215 int
216 netdev_register_provider(const struct netdev_class *new_class)
217 OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
218 {
219 int error;
220
221 ovs_mutex_lock(&netdev_class_mutex);
222 if (netdev_lookup_class(new_class->type)) {
223 VLOG_WARN("attempted to register duplicate netdev provider: %s",
224 new_class->type);
225 error = EEXIST;
226 } else {
227 error = new_class->init ? new_class->init() : 0;
228 if (!error) {
229 struct netdev_registered_class *rc;
230
231 rc = xmalloc(sizeof *rc);
232 cmap_insert(&netdev_classes, &rc->cmap_node,
233 hash_string(new_class->type, 0));
234 rc->class = new_class;
235 ovs_refcount_init(&rc->refcnt);
236 } else {
237 VLOG_ERR("failed to initialize %s network device class: %s",
238 new_class->type, ovs_strerror(error));
239 }
240 }
241 ovs_mutex_unlock(&netdev_class_mutex);
242
243 return error;
244 }
245
/* Unregisters a netdev provider.  'type' must have been previously registered
 * and not currently be in use by any netdevs.  After unregistration new
 * netdevs of that type cannot be opened using netdev_open().  (However, the
 * provider may still be accessible from other threads until the next RCU grace
 * period, so the caller must not free or re-register the same netdev_class
 * until that has passed.) */
int
netdev_unregister_provider(const char *type)
    OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
    struct netdev_registered_class *rc;
    int error;

    netdev_initialize();

    ovs_mutex_lock(&netdev_class_mutex);
    rc = netdev_lookup_class(type);
    if (!rc) {
        VLOG_WARN("attempted to unregister a netdev provider that is not "
                  "registered: %s", type);
        error = EAFNOSUPPORT;
    } else if (ovs_refcount_unref(&rc->refcnt) != 1) {
        /* The refcount was more than 1, so netdevs of this class still
         * exist.  Undo the unref above and report the class as busy. */
        ovs_refcount_ref(&rc->refcnt);
        VLOG_WARN("attempted to unregister in use netdev provider: %s",
                  type);
        error = EBUSY;
    } else {
        /* The unref above dropped the class's own reference to zero, so it
         * is safe to remove.  Freeing is deferred past the RCU grace period
         * because lockless readers may still hold a pointer to 'rc'. */
        cmap_remove(&netdev_classes, &rc->cmap_node,
                    hash_string(rc->class->type, 0));
        ovsrcu_postpone(free, rc);
        error = 0;
    }
    ovs_mutex_unlock(&netdev_class_mutex);

    return error;
}
282
283 /* Clears 'types' and enumerates the types of all currently registered netdev
284 * providers into it. The caller must first initialize the sset. */
285 void
286 netdev_enumerate_types(struct sset *types)
287 OVS_EXCLUDED(netdev_mutex)
288 {
289 netdev_initialize();
290 sset_clear(types);
291
292 struct netdev_registered_class *rc;
293 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
294 sset_add(types, rc->class->type);
295 }
296 }
297
298 static const char *
299 netdev_vport_type_from_name(const char *name)
300 {
301 struct netdev_registered_class *rc;
302 const char *type;
303 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
304 const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
305 if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
306 type = rc->class->type;
307 return type;
308 }
309 }
310 return NULL;
311 }
312
313 /* Check that the network device name is not the same as any of the registered
314 * vport providers' dpif_port name (dpif_port is NULL if the vport provider
315 * does not define it) or the datapath internal port name (e.g. ovs-system).
316 *
317 * Returns true if there is a name conflict, false otherwise. */
318 bool
319 netdev_is_reserved_name(const char *name)
320 OVS_EXCLUDED(netdev_mutex)
321 {
322 netdev_initialize();
323
324 struct netdev_registered_class *rc;
325 CMAP_FOR_EACH (rc, cmap_node, &netdev_classes) {
326 const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
327 if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
328 return true;
329 }
330 }
331
332 if (!strncmp(name, "ovs-", 4)) {
333 struct sset types;
334 const char *type;
335
336 sset_init(&types);
337 dp_enumerate_types(&types);
338 SSET_FOR_EACH (type, &types) {
339 if (!strcmp(name+4, type)) {
340 sset_destroy(&types);
341 return true;
342 }
343 }
344 sset_destroy(&types);
345 }
346
347 return false;
348 }
349
/* Opens the network device named 'name' (e.g. "eth0") of the specified 'type'
 * (e.g. "system") and returns zero if successful, otherwise a positive errno
 * value.  On success, sets '*netdevp' to the new network device, otherwise to
 * null.
 *
 * Some network devices may need to be configured (with netdev_set_config())
 * before they can be used.
 *
 * Before opening rxqs or sending packets, '*netdevp' may need to be
 * reconfigured (with netdev_is_reconf_required() and netdev_reconfigure()).
 * */
int
netdev_open(const char *name, const char *type, struct netdev **netdevp)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev *netdev;
    int error = 0;

    if (!name[0]) {
        /* Reject empty names.  This saves the providers having to do this.  At
         * least one screwed this up: the netdev-linux "tap" implementation
         * passed the name directly to the Linux TUNSETIFF call, which treats
         * an empty string as a request to generate a unique name. */
        return EINVAL;
    }

    netdev_initialize();

    ovs_mutex_lock(&netdev_mutex);
    netdev = shash_find_data(&netdev_shash, name);

    /* An existing device of a *different* type is normally a conflict, with
     * one exception handled below. */
    if (netdev &&
        type && type[0] && strcmp(type, netdev->netdev_class->type)) {

        if (netdev->auto_classified) {
            /* If this device was first created without a classification type,
             * for example due to routing or tunneling code, and they keep a
             * reference, a "classified" call to open will fail. In this case
             * we remove the classless device, and re-add it below. We remove
             * the netdev from the shash, and change the sequence, so owners of
             * the old classless device can release/cleanup. */
            if (netdev->node) {
                shash_delete(&netdev_shash, netdev->node);
                netdev->node = NULL;
                netdev_change_seq_changed(netdev);
            }

            netdev = NULL;
        } else {
            error = EEXIST;
        }
    }

    if (!netdev) {
        struct netdev_registered_class *rc;

        /* A missing or empty 'type' defaults to the "system" class. */
        rc = netdev_lookup_class(type && type[0] ? type : "system");
        if (rc && ovs_refcount_try_ref_rcu(&rc->refcnt)) {
            netdev = rc->class->alloc();
            if (netdev) {
                memset(netdev, 0, sizeof *netdev);
                netdev->netdev_class = rc->class;
                netdev->auto_classified = type && type[0] ? false : true;
                netdev->name = xstrdup(name);
                netdev->change_seq = 1;
                netdev->reconfigure_seq = seq_create();
                netdev->last_reconfigure_seq =
                    seq_read(netdev->reconfigure_seq);
                netdev->node = shash_add(&netdev_shash, name, netdev);

                /* By default enable one tx and rx queue per netdev. */
                netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
                netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;

                ovs_list_init(&netdev->saved_flags_list);

                error = rc->class->construct(netdev);
                if (!error) {
                    netdev_change_seq_changed(netdev);
                } else {
                    /* Construction failed: unwind everything set up above,
                     * in reverse order, and release the class reference. */
                    ovs_refcount_unref(&rc->refcnt);
                    seq_destroy(netdev->reconfigure_seq);
                    free(netdev->name);
                    ovs_assert(ovs_list_is_empty(&netdev->saved_flags_list));
                    shash_delete(&netdev_shash, netdev->node);
                    rc->class->dealloc(netdev);
                }
            } else {
                error = ENOMEM;
            }
        } else {
            VLOG_WARN("could not create netdev %s of unknown type %s",
                      name, type);
            error = EAFNOSUPPORT;
        }
    }

    if (!error) {
        /* Hand the caller its own reference. */
        netdev->ref_cnt++;
        *netdevp = netdev;
    } else {
        *netdevp = NULL;
    }
    ovs_mutex_unlock(&netdev_mutex);

    return error;
}
457
458 /* Returns a reference to 'netdev_' for the caller to own. Returns null if
459 * 'netdev_' is null. */
460 struct netdev *
461 netdev_ref(const struct netdev *netdev_)
462 OVS_EXCLUDED(netdev_mutex)
463 {
464 struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
465
466 if (netdev) {
467 ovs_mutex_lock(&netdev_mutex);
468 ovs_assert(netdev->ref_cnt > 0);
469 netdev->ref_cnt++;
470 ovs_mutex_unlock(&netdev_mutex);
471 }
472 return netdev;
473 }
474
475 /* Reconfigures the device 'netdev' with 'args'. 'args' may be empty
476 * or NULL if none are needed. */
477 int
478 netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
479 OVS_EXCLUDED(netdev_mutex)
480 {
481 if (netdev->netdev_class->set_config) {
482 const struct smap no_args = SMAP_INITIALIZER(&no_args);
483 char *verbose_error = NULL;
484 int error;
485
486 error = netdev->netdev_class->set_config(netdev,
487 args ? args : &no_args,
488 &verbose_error);
489 if (error) {
490 VLOG_WARN_BUF(verbose_error ? NULL : errp,
491 "%s: could not set configuration (%s)",
492 netdev_get_name(netdev), ovs_strerror(error));
493 if (verbose_error) {
494 if (errp) {
495 *errp = verbose_error;
496 } else {
497 free(verbose_error);
498 }
499 }
500 }
501 return error;
502 } else if (args && !smap_is_empty(args)) {
503 VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
504 netdev_get_name(netdev));
505 }
506 return 0;
507 }
508
509 /* Returns the current configuration for 'netdev' in 'args'. The caller must
510 * have already initialized 'args' with smap_init(). Returns 0 on success, in
511 * which case 'args' will be filled with 'netdev''s configuration. On failure
512 * returns a positive errno value, in which case 'args' will be empty.
513 *
514 * The caller owns 'args' and its contents and must eventually free them with
515 * smap_destroy(). */
516 int
517 netdev_get_config(const struct netdev *netdev, struct smap *args)
518 OVS_EXCLUDED(netdev_mutex)
519 {
520 int error;
521
522 smap_clear(args);
523 if (netdev->netdev_class->get_config) {
524 error = netdev->netdev_class->get_config(netdev, args);
525 if (error) {
526 smap_clear(args);
527 }
528 } else {
529 error = 0;
530 }
531
532 return error;
533 }
534
535 const struct netdev_tunnel_config *
536 netdev_get_tunnel_config(const struct netdev *netdev)
537 OVS_EXCLUDED(netdev_mutex)
538 {
539 if (netdev->netdev_class->get_tunnel_config) {
540 return netdev->netdev_class->get_tunnel_config(netdev);
541 } else {
542 return NULL;
543 }
544 }
545
546 /* Returns the id of the numa node the 'netdev' is on. If the function
547 * is not implemented, returns NETDEV_NUMA_UNSPEC. */
548 int
549 netdev_get_numa_id(const struct netdev *netdev)
550 {
551 if (netdev->netdev_class->get_numa_id) {
552 return netdev->netdev_class->get_numa_id(netdev);
553 } else {
554 return NETDEV_NUMA_UNSPEC;
555 }
556 }
557
/* Drops one reference to 'dev', destroying it if this was the last one.
 *
 * The caller must hold 'netdev_mutex'; this function always releases it
 * (hence OVS_RELEASES) on every path. */
static void
netdev_unref(struct netdev *dev)
    OVS_RELEASES(netdev_mutex)
{
    ovs_assert(dev->ref_cnt);
    if (!--dev->ref_cnt) {
        /* Save the class pointer before destruction: 'dev' is freed below
         * but we still need the type to release the class reference. */
        const struct netdev_class *class = dev->netdev_class;
        struct netdev_registered_class *rc;

        dev->netdev_class->destruct(dev);

        if (dev->node) {
            shash_delete(&netdev_shash, dev->node);
        }
        free(dev->name);
        seq_destroy(dev->reconfigure_seq);
        dev->netdev_class->dealloc(dev);
        ovs_mutex_unlock(&netdev_mutex);

        /* Release the class reference taken in netdev_open().  Done after
         * unlocking 'netdev_mutex', which must be taken after
         * 'netdev_class_mutex' per the OVS_ACQ_BEFORE annotation. */
        rc = netdev_lookup_class(class->type);
        ovs_refcount_unref(&rc->refcnt);
    } else {
        ovs_mutex_unlock(&netdev_mutex);
    }
}
583
/* Closes and destroys 'netdev'.  A no-op if 'netdev' is NULL. */
void
netdev_close(struct netdev *netdev)
    OVS_EXCLUDED(netdev_mutex)
{
    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        /* No matching unlock here: netdev_unref() always releases
         * 'netdev_mutex'. */
        netdev_unref(netdev);
    }
}
594
/* Removes 'netdev' from the global shash and unrefs 'netdev'.
 *
 * This allows handler and revalidator threads to still retain references
 * to this netdev while the main thread changes interface configuration.
 *
 * This function should only be called by the main thread when closing
 * netdevs during user configuration changes. Otherwise, netdev_close should be
 * used to close netdevs. */
void
netdev_remove(struct netdev *netdev)
{
    if (netdev) {
        ovs_mutex_lock(&netdev_mutex);
        if (netdev->node) {
            /* Unlink from 'netdev_shash' so future netdev_open() calls no
             * longer find this device, and notify seq watchers. */
            shash_delete(&netdev_shash, netdev->node);
            netdev->node = NULL;
            netdev_change_seq_changed(netdev);
        }
        /* netdev_unref() releases 'netdev_mutex'. */
        netdev_unref(netdev);
    }
}
616
/* Parses 'netdev_name_', which is of the form [type@]name, into its component
 * pieces.  Both '*name' and '*type' are heap-allocated and must be freed by
 * the caller. */
void
netdev_parse_name(const char *netdev_name_, char **name, char **type)
{
    char *copy = xstrdup(netdev_name_);
    char *at = strchr(copy, '@');

    if (!at) {
        /* No explicit type: default to "system". */
        *name = copy;
        *type = xstrdup("system");
    } else {
        *at = '\0';
        *type = copy;
        *name = xstrdup(at + 1);
    }
}
635
636 /* Attempts to open a netdev_rxq handle for obtaining packets received on
637 * 'netdev'. On success, returns 0 and stores a nonnull 'netdev_rxq *' into
638 * '*rxp'. On failure, returns a positive errno value and stores NULL into
639 * '*rxp'.
640 *
641 * Some kinds of network devices might not support receiving packets. This
642 * function returns EOPNOTSUPP in that case.*/
643 int
644 netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
645 OVS_EXCLUDED(netdev_mutex)
646 {
647 int error;
648
649 if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
650 struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
651 if (rx) {
652 rx->netdev = netdev;
653 rx->queue_id = id;
654 error = netdev->netdev_class->rxq_construct(rx);
655 if (!error) {
656 netdev_ref(netdev);
657 *rxp = rx;
658 return 0;
659 }
660 netdev->netdev_class->rxq_dealloc(rx);
661 } else {
662 error = ENOMEM;
663 }
664 } else {
665 error = EOPNOTSUPP;
666 }
667
668 *rxp = NULL;
669 return error;
670 }
671
672 /* Closes 'rx'. */
673 void
674 netdev_rxq_close(struct netdev_rxq *rx)
675 OVS_EXCLUDED(netdev_mutex)
676 {
677 if (rx) {
678 struct netdev *netdev = rx->netdev;
679 netdev->netdev_class->rxq_destruct(rx);
680 netdev->netdev_class->rxq_dealloc(rx);
681 netdev_close(netdev);
682 }
683 }
684
685 /* Attempts to receive a batch of packets from 'rx'. 'batch' should point to
686 * the beginning of an array of NETDEV_MAX_BURST pointers to dp_packet. If
687 * successful, this function stores pointers to up to NETDEV_MAX_BURST
688 * dp_packets into the array, transferring ownership of the packets to the
689 * caller, stores the number of received packets in 'batch->count', and returns
690 * 0.
691 *
692 * The implementation does not necessarily initialize any non-data members of
693 * 'batch'. That is, the caller must initialize layer pointers and metadata
694 * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
695 *
696 * Returns EAGAIN immediately if no packet is ready to be received or another
697 * positive errno value if an error was encountered. */
698 int
699 netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet_batch *batch,
700 int *qfill)
701 {
702 int retval;
703
704 retval = rx->netdev->netdev_class->rxq_recv(rx, batch, qfill);
705 if (!retval) {
706 COVERAGE_INC(netdev_received);
707 } else {
708 batch->count = 0;
709 }
710 return retval;
711 }
712
713 /* Arranges for poll_block() to wake up when a packet is ready to be received
714 * on 'rx'. */
715 void
716 netdev_rxq_wait(struct netdev_rxq *rx)
717 {
718 rx->netdev->netdev_class->rxq_wait(rx);
719 }
720
721 /* Discards any packets ready to be received on 'rx'. */
722 int
723 netdev_rxq_drain(struct netdev_rxq *rx)
724 {
725 return (rx->netdev->netdev_class->rxq_drain
726 ? rx->netdev->netdev_class->rxq_drain(rx)
727 : 0);
728 }
729
730 /* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
731 * otherwise a positive errno value.
732 *
733 * 'n_txq' specifies the exact number of transmission queues to create.
734 *
735 * The change might not effective immediately. The caller must check if a
736 * reconfiguration is required with netdev_is_reconf_required() and eventually
737 * call netdev_reconfigure() before using the new queues.
738 *
739 * On error, the tx queue configuration is unchanged */
740 int
741 netdev_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
742 {
743 int error;
744
745 error = (netdev->netdev_class->set_tx_multiq
746 ? netdev->netdev_class->set_tx_multiq(netdev, MAX(n_txq, 1))
747 : EOPNOTSUPP);
748
749 if (error && error != EOPNOTSUPP) {
750 VLOG_DBG_RL(&rl, "failed to set tx queue for network device %s:"
751 "%s", netdev_get_name(netdev), ovs_strerror(error));
752 }
753
754 return error;
755 }
756
757 enum netdev_pt_mode
758 netdev_get_pt_mode(const struct netdev *netdev)
759 {
760 return (netdev->netdev_class->get_pt_mode
761 ? netdev->netdev_class->get_pt_mode(netdev)
762 : NETDEV_PT_LEGACY_L2);
763 }
764
765 /* Sends 'batch' on 'netdev'. Returns 0 if successful (for every packet),
766 * otherwise a positive errno value. Returns EAGAIN without blocking if
767 * at least one the packets cannot be queued immediately. Returns EMSGSIZE
768 * if a partial packet was transmitted or if a packet is too big or too small
769 * to transmit on the device.
770 *
771 * The caller must make sure that 'netdev' supports sending by making sure that
772 * 'netdev_n_txq(netdev)' returns >= 1.
773 *
774 * If the function returns a non-zero value, some of the packets might have
775 * been sent anyway.
776 *
777 * The caller transfers ownership of all the packets to the network device,
778 * regardless of success.
779 *
780 * If 'concurrent_txq' is true, the caller may perform concurrent calls
781 * to netdev_send() with the same 'qid'. The netdev provider is responsible
782 * for making sure that these concurrent calls do not create a race condition
783 * by using locking or other synchronization if required.
784 *
785 * The network device is expected to maintain one or more packet
786 * transmission queues, so that the caller does not ordinarily have to
787 * do additional queuing of packets. 'qid' specifies the queue to use
788 * and can be ignored if the implementation does not support multiple
789 * queues. */
790 int
791 netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
792 bool concurrent_txq)
793 {
794 int error = netdev->netdev_class->send(netdev, qid, batch,
795 concurrent_txq);
796 if (!error) {
797 COVERAGE_INC(netdev_sent);
798 }
799 return error;
800 }
801
/* Pop tunnel header, build tunnel metadata and resize 'batch->packets'
 * for further processing.
 *
 * Packets for which the provider's pop_header() returns NULL are dropped:
 * only non-NULL results are refilled into the batch, which compacts the
 * surviving packets in place and shrinks 'batch->count'.
 *
 * The caller must make sure that 'netdev' support this operation by checking
 * that netdev_has_tunnel_push_pop() returns true. */
void
netdev_pop_header(struct netdev *netdev, struct dp_packet_batch *batch)
{
    struct dp_packet *packet;
    size_t i, size = dp_packet_batch_size(batch);

    DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
        packet = netdev->netdev_class->pop_header(packet);
        if (packet) {
            /* Reset the checksum offload flags if present, to avoid wrong
             * interpretation in the further packet processing when
             * recirculated.*/
            reset_dp_packet_checksum_ol_flags(packet);
            dp_packet_batch_refill(batch, packet, i);
        }
    }
}
824
825 void
826 netdev_init_tnl_build_header_params(struct netdev_tnl_build_header_params *params,
827 const struct flow *tnl_flow,
828 const struct in6_addr *src,
829 struct eth_addr dmac,
830 struct eth_addr smac)
831 {
832 params->flow = tnl_flow;
833 params->dmac = dmac;
834 params->smac = smac;
835 params->s_ip = src;
836 params->is_ipv6 = !IN6_IS_ADDR_V4MAPPED(src);
837 }
838
839 int netdev_build_header(const struct netdev *netdev,
840 struct ovs_action_push_tnl *data,
841 const struct netdev_tnl_build_header_params *params)
842 {
843 if (netdev->netdev_class->build_header) {
844 return netdev->netdev_class->build_header(netdev, data, params);
845 }
846 return EOPNOTSUPP;
847 }
848
/* Push tunnel header (reading from tunnel metadata) and resize
 * 'batch->packets' for further processing.
 *
 * Each packet's metadata is reinitialized so the packet is attributed to the
 * tunnel's output port ('data->out_port').
 *
 * The caller must make sure that 'netdev' support this operation by checking
 * that netdev_has_tunnel_push_pop() returns true.  Always returns 0. */
int
netdev_push_header(const struct netdev *netdev,
                   struct dp_packet_batch *batch,
                   const struct ovs_action_push_tnl *data)
{
    struct dp_packet *packet;
    /* 'i' is declared by the DP_PACKET_BATCH_FOR_EACH macro itself. */
    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
        netdev->netdev_class->push_header(netdev, packet, data);
        pkt_metadata_init(&packet->md, data->out_port);
    }

    return 0;
}
867
868 /* Registers with the poll loop to wake up from the next call to poll_block()
869 * when the packet transmission queue has sufficient room to transmit a packet
870 * with netdev_send().
871 *
872 * The network device is expected to maintain one or more packet
873 * transmission queues, so that the caller does not ordinarily have to
874 * do additional queuing of packets. 'qid' specifies the queue to use
875 * and can be ignored if the implementation does not support multiple
876 * queues. */
877 void
878 netdev_send_wait(struct netdev *netdev, int qid)
879 {
880 if (netdev->netdev_class->send_wait) {
881 netdev->netdev_class->send_wait(netdev, qid);
882 }
883 }
884
885 /* Attempts to set 'netdev''s MAC address to 'mac'. Returns 0 if successful,
886 * otherwise a positive errno value. */
887 int
888 netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
889 {
890 return netdev->netdev_class->set_etheraddr(netdev, mac);
891 }
892
893 /* Retrieves 'netdev''s MAC address. If successful, returns 0 and copies the
894 * the MAC address into 'mac'. On failure, returns a positive errno value and
895 * clears 'mac' to all-zeros. */
896 int
897 netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
898 {
899 int error;
900
901 error = netdev->netdev_class->get_etheraddr(netdev, mac);
902 if (error) {
903 memset(mac, 0, sizeof *mac);
904 }
905 return error;
906 }
907
908 /* Returns the name of the network device that 'netdev' represents,
909 * e.g. "eth0". The caller must not modify or free the returned string. */
910 const char *
911 netdev_get_name(const struct netdev *netdev)
912 {
913 return netdev->name;
914 }
915
916 /* Retrieves the MTU of 'netdev'. The MTU is the maximum size of transmitted
917 * (and received) packets, in bytes, not including the hardware header; thus,
918 * this is typically 1500 bytes for Ethernet devices.
919 *
920 * If successful, returns 0 and stores the MTU size in '*mtup'. Returns
921 * EOPNOTSUPP if 'netdev' does not have an MTU (as e.g. some tunnels do not).
922 * On other failure, returns a positive errno value. On failure, sets '*mtup'
923 * to 0. */
924 int
925 netdev_get_mtu(const struct netdev *netdev, int *mtup)
926 {
927 const struct netdev_class *class = netdev->netdev_class;
928 int error;
929
930 error = class->get_mtu ? class->get_mtu(netdev, mtup) : EOPNOTSUPP;
931 if (error) {
932 *mtup = 0;
933 if (error != EOPNOTSUPP) {
934 VLOG_DBG_RL(&rl, "failed to retrieve MTU for network device %s: "
935 "%s", netdev_get_name(netdev), ovs_strerror(error));
936 }
937 }
938 return error;
939 }
940
941 /* Sets the MTU of 'netdev'. The MTU is the maximum size of transmitted
942 * (and received) packets, in bytes.
943 *
944 * If successful, returns 0. Returns EOPNOTSUPP if 'netdev' does not have an
945 * MTU (as e.g. some tunnels do not). On other failure, returns a positive
946 * errno value. */
947 int
948 netdev_set_mtu(struct netdev *netdev, int mtu)
949 {
950 const struct netdev_class *class = netdev->netdev_class;
951 int error;
952
953 error = class->set_mtu ? class->set_mtu(netdev, mtu) : EOPNOTSUPP;
954 if (error && error != EOPNOTSUPP) {
955 VLOG_WARN_RL(&rl, "failed to set MTU for network device %s: %s",
956 netdev_get_name(netdev), ovs_strerror(error));
957 }
958
959 return error;
960 }
961
962 /* If 'user_config' is true, the user wants to control 'netdev''s MTU and we
963 * should not override it. If 'user_config' is false, we may adjust
964 * 'netdev''s MTU (e.g., if 'netdev' is internal). */
965 void
966 netdev_mtu_user_config(struct netdev *netdev, bool user_config)
967 {
968 if (netdev->mtu_user_config != user_config) {
969 netdev_change_seq_changed(netdev);
970 netdev->mtu_user_config = user_config;
971 }
972 }
973
974 /* Returns 'true' if the user explicitly specified an MTU value for 'netdev'.
975 * Otherwise, returns 'false', in which case we are allowed to adjust the
976 * device MTU. */
977 bool
978 netdev_mtu_is_user_config(struct netdev *netdev)
979 {
980 return netdev->mtu_user_config;
981 }
982
983 /* Returns the ifindex of 'netdev', if successful, as a positive number. On
984 * failure, returns a negative errno value.
985 *
986 * The desired semantics of the ifindex value are a combination of those
987 * specified by POSIX for if_nametoindex() and by SNMP for ifIndex. An ifindex
988 * value should be unique within a host and remain stable at least until
989 * reboot. SNMP says an ifindex "ranges between 1 and the value of ifNumber"
990 * but many systems do not follow this rule anyhow.
991 *
992 * Some network devices may not implement support for this function. In such
993 * cases this function will always return -EOPNOTSUPP.
994 */
995 int
996 netdev_get_ifindex(const struct netdev *netdev)
997 {
998 int (*get_ifindex)(const struct netdev *);
999
1000 get_ifindex = netdev->netdev_class->get_ifindex;
1001
1002 return get_ifindex ? get_ifindex(netdev) : -EOPNOTSUPP;
1003 }
1004
1005 /* Stores the features supported by 'netdev' into each of '*current',
1006 * '*advertised', '*supported', and '*peer' that are non-null. Each value is a
1007 * bitmap of "enum ofp_port_features" bits, in host byte order. Returns 0 if
1008 * successful, otherwise a positive errno value. On failure, all of the
1009 * passed-in values are set to 0.
1010 *
1011 * Some network devices may not implement support for this function. In such
1012 * cases this function will always return EOPNOTSUPP. */
1013 int
1014 netdev_get_features(const struct netdev *netdev,
1015 enum netdev_features *current,
1016 enum netdev_features *advertised,
1017 enum netdev_features *supported,
1018 enum netdev_features *peer)
1019 {
1020 int (*get_features)(const struct netdev *netdev,
1021 enum netdev_features *current,
1022 enum netdev_features *advertised,
1023 enum netdev_features *supported,
1024 enum netdev_features *peer);
1025 enum netdev_features dummy[4];
1026 int error;
1027
1028 if (!current) {
1029 current = &dummy[0];
1030 }
1031 if (!advertised) {
1032 advertised = &dummy[1];
1033 }
1034 if (!supported) {
1035 supported = &dummy[2];
1036 }
1037 if (!peer) {
1038 peer = &dummy[3];
1039 }
1040
1041 get_features = netdev->netdev_class->get_features;
1042 error = get_features
1043 ? get_features(netdev, current, advertised, supported,
1044 peer)
1045 : EOPNOTSUPP;
1046 if (error) {
1047 *current = *advertised = *supported = *peer = 0;
1048 }
1049 return error;
1050 }
1051
1052 /* Returns the maximum speed of a network connection that has the NETDEV_F_*
1053 * bits in 'features', in bits per second. If no bits that indicate a speed
1054 * are set in 'features', returns 'default_bps'. */
1055 uint64_t
1056 netdev_features_to_bps(enum netdev_features features,
1057 uint64_t default_bps)
1058 {
1059 enum {
1060 F_1000000MB = NETDEV_F_1TB_FD,
1061 F_100000MB = NETDEV_F_100GB_FD,
1062 F_40000MB = NETDEV_F_40GB_FD,
1063 F_10000MB = NETDEV_F_10GB_FD,
1064 F_1000MB = NETDEV_F_1GB_HD | NETDEV_F_1GB_FD,
1065 F_100MB = NETDEV_F_100MB_HD | NETDEV_F_100MB_FD,
1066 F_10MB = NETDEV_F_10MB_HD | NETDEV_F_10MB_FD
1067 };
1068
1069 return ( features & F_1000000MB ? UINT64_C(1000000000000)
1070 : features & F_100000MB ? UINT64_C(100000000000)
1071 : features & F_40000MB ? UINT64_C(40000000000)
1072 : features & F_10000MB ? UINT64_C(10000000000)
1073 : features & F_1000MB ? UINT64_C(1000000000)
1074 : features & F_100MB ? UINT64_C(100000000)
1075 : features & F_10MB ? UINT64_C(10000000)
1076 : default_bps);
1077 }
1078
1079 /* Returns true if any of the NETDEV_F_* bits that indicate a full-duplex link
1080 * are set in 'features', otherwise false. */
1081 bool
1082 netdev_features_is_full_duplex(enum netdev_features features)
1083 {
1084 return (features & (NETDEV_F_10MB_FD | NETDEV_F_100MB_FD | NETDEV_F_1GB_FD
1085 | NETDEV_F_10GB_FD | NETDEV_F_40GB_FD
1086 | NETDEV_F_100GB_FD | NETDEV_F_1TB_FD)) != 0;
1087 }
1088
1089 /* Set the features advertised by 'netdev' to 'advertise'. Returns 0 if
1090 * successful, otherwise a positive errno value. */
1091 int
1092 netdev_set_advertisements(struct netdev *netdev,
1093 enum netdev_features advertise)
1094 {
1095 return (netdev->netdev_class->set_advertisements
1096 ? netdev->netdev_class->set_advertisements(
1097 netdev, advertise)
1098 : EOPNOTSUPP);
1099 }
1100
/* Maps a single NETDEV_F_* bit to its human-readable name, e.g.
 * NETDEV_F_10GB_FD -> "10GB-FD".  Returns NULL for an unrecognized bit;
 * presumably the caller (ofp_print_bit_names()) then falls back to some
 * numeric representation -- confirm at its definition. */
static const char *
netdev_feature_to_name(uint32_t bit)
{
    /* Converting to the enum type lets the compiler warn about any
     * NETDEV_F_* value missing from the switch below. */
    enum netdev_features f = bit;

    switch (f) {
    case NETDEV_F_10MB_HD: return "10MB-HD";
    case NETDEV_F_10MB_FD: return "10MB-FD";
    case NETDEV_F_100MB_HD: return "100MB-HD";
    case NETDEV_F_100MB_FD: return "100MB-FD";
    case NETDEV_F_1GB_HD: return "1GB-HD";
    case NETDEV_F_1GB_FD: return "1GB-FD";
    case NETDEV_F_10GB_FD: return "10GB-FD";
    case NETDEV_F_40GB_FD: return "40GB-FD";
    case NETDEV_F_100GB_FD: return "100GB-FD";
    case NETDEV_F_1TB_FD: return "1TB-FD";
    case NETDEV_F_OTHER: return "OTHER";
    case NETDEV_F_COPPER: return "COPPER";
    case NETDEV_F_FIBER: return "FIBER";
    case NETDEV_F_AUTONEG: return "AUTO_NEG";
    case NETDEV_F_PAUSE: return "AUTO_PAUSE";
    case NETDEV_F_PAUSE_ASYM: return "AUTO_PAUSE_ASYM";
    }

    return NULL;
}
1127
/* Appends a space-separated list of the names of the feature bits in
 * 'features', followed by a newline, to 's'. */
void
netdev_features_format(struct ds *s, enum netdev_features features)
{
    ofp_print_bit_names(s, features, netdev_feature_to_name, ' ');
    ds_put_char(s, '\n');
}
1134
/* Assigns 'addr' as 'netdev''s IPv4 address and 'mask' as its netmask.  If
 * 'addr' is INADDR_ANY, 'netdev''s IPv4 address is cleared.  Returns 0 if
 * successful, otherwise a positive errno value. */
1138 int
1139 netdev_set_in4(struct netdev *netdev, struct in_addr addr, struct in_addr mask)
1140 {
1141 return (netdev->netdev_class->set_in4
1142 ? netdev->netdev_class->set_in4(netdev, addr, mask)
1143 : EOPNOTSUPP);
1144 }
1145
/* Opens the device named 'device_name' and retrieves its IP addresses into
 * a freshly allocated array stored in '*addrsp', with the count in
 * '*n_addrsp'.  Returns 0 on success, otherwise a positive errno value with
 * '*addrsp' set to NULL and '*n_addrsp' to 0.  The caller must free()
 * '*addrsp'. */
static int
netdev_get_addresses_by_name(const char *device_name,
                             struct in6_addr **addrsp, int *n_addrsp)
{
    struct netdev *netdev;
    int error;

    error = netdev_open(device_name, NULL, &netdev);
    if (!error) {
        struct in6_addr *masks;

        error = netdev_get_addr_list(netdev, addrsp, &masks, n_addrsp);
        netdev_close(netdev);
        free(masks);            /* Only the addresses are reported. */
        return error;
    }

    *addrsp = NULL;
    *n_addrsp = 0;
    return error;
}
1164
1165 /* Obtains an IPv4 address from 'device_name' and save the address in '*in4'.
1166 * Returns 0 if successful, otherwise a positive errno value. */
int
netdev_get_in4_by_name(const char *device_name, struct in_addr *in4)
{
    struct in6_addr *addrs;
    int n_addrs;
    int error;

    in4->s_addr = 0;
    error = netdev_get_addresses_by_name(device_name, &addrs, &n_addrs);
    if (!error) {
        /* Report the first v4-mapped address; ENOENT if there is none. */
        error = ENOENT;
        for (int i = 0; i < n_addrs; i++) {
            const struct in6_addr *a = &addrs[i];

            if (IN6_IS_ADDR_V4MAPPED(a)) {
                in4->s_addr = in6_addr_get_mapped_ipv4(a);
                error = 0;
                break;
            }
        }
    }
    free(addrs);

    return error;
}
1189
1190 /* Obtains an IPv4 or IPv6 address from 'device_name' and save the address in
1191 * '*in6', representing IPv4 addresses as v6-mapped. Returns 0 if successful,
1192 * otherwise a positive errno value. */
int
netdev_get_ip_by_name(const char *device_name, struct in6_addr *in6)
{
    struct in6_addr *addrs;
    int n_addrs;
    int error;

    *in6 = in6addr_any;
    error = netdev_get_addresses_by_name(device_name, &addrs, &n_addrs);
    if (!error) {
        /* Prefer the first non-link-local address; ENOENT if none. */
        error = ENOENT;
        for (int i = 0; i < n_addrs; i++) {
            if (!in6_is_lla(&addrs[i])) {
                *in6 = addrs[i];
                error = 0;
                break;
            }
        }
    }
    free(addrs);

    return error;
}
1215
1216 /* Adds 'router' as a default IP gateway for the TCP/IP stack that corresponds
1217 * to 'netdev'. */
1218 int
1219 netdev_add_router(struct netdev *netdev, struct in_addr router)
1220 {
1221 COVERAGE_INC(netdev_add_router);
1222 return (netdev->netdev_class->add_router
1223 ? netdev->netdev_class->add_router(netdev, router)
1224 : EOPNOTSUPP);
1225 }
1226
1227 /* Looks up the next hop for 'host' for the TCP/IP stack that corresponds to
1228 * 'netdev'. If a route cannot not be determined, sets '*next_hop' to 0,
1229 * '*netdev_name' to null, and returns a positive errno value. Otherwise, if a
1230 * next hop is found, stores the next hop gateway's address (0 if 'host' is on
1231 * a directly connected network) in '*next_hop' and a copy of the name of the
1232 * device to reach 'host' in '*netdev_name', and returns 0. The caller is
1233 * responsible for freeing '*netdev_name' (by calling free()). */
1234 int
1235 netdev_get_next_hop(const struct netdev *netdev,
1236 const struct in_addr *host, struct in_addr *next_hop,
1237 char **netdev_name)
1238 {
1239 int error = (netdev->netdev_class->get_next_hop
1240 ? netdev->netdev_class->get_next_hop(
1241 host, next_hop, netdev_name)
1242 : EOPNOTSUPP);
1243 if (error) {
1244 next_hop->s_addr = 0;
1245 *netdev_name = NULL;
1246 }
1247 return error;
1248 }
1249
1250 /* Populates 'smap' with status information.
1251 *
1252 * Populates 'smap' with 'netdev' specific status information. This
1253 * information may be used to populate the status column of the Interface table
1254 * as defined in ovs-vswitchd.conf.db(5). */
1255 int
1256 netdev_get_status(const struct netdev *netdev, struct smap *smap)
1257 {
1258 return (netdev->netdev_class->get_status
1259 ? netdev->netdev_class->get_status(netdev, smap)
1260 : EOPNOTSUPP);
1261 }
1262
/* Retrieves the list of IP addresses assigned to 'netdev' and returns 0 if
 * successful.  On success, allocates arrays of addresses and netmasks and
 * stores them in '*addr' and '*mask', with the element count in '*n_addr';
 * the caller must free() both arrays.  Otherwise, returns a positive errno
 * value and sets '*addr' and '*mask' to NULL and '*n_addr' to 0.
 *
 * The following error values have well-defined meanings:
 *
 *   - EADDRNOTAVAIL: 'netdev' has no assigned IPv6 address.
 *
 *   - EOPNOTSUPP: No IPv6 network stack attached to 'netdev'.
 *
 * 'addr' may be null, in which case the address itself is not reported. */
1276 int
1277 netdev_get_addr_list(const struct netdev *netdev, struct in6_addr **addr,
1278 struct in6_addr **mask, int *n_addr)
1279 {
1280 int error;
1281
1282 error = (netdev->netdev_class->get_addr_list
1283 ? netdev->netdev_class->get_addr_list(netdev, addr, mask, n_addr): EOPNOTSUPP);
1284 if (error && addr) {
1285 *addr = NULL;
1286 *mask = NULL;
1287 *n_addr = 0;
1288 }
1289
1290 return error;
1291 }
1292
/* On 'netdev', turns off the flags in 'off' and then turns on the flags in
 * 'on'.  Returns 0 if successful, otherwise a positive errno value.
 *
 * With 'off' == 'on' == 0 this is a pure read of the current flags.  If
 * flags actually changed and 'sfp' is nonnull, records the delta in a newly
 * allocated netdev_saved_flags so that the change can be undone later via
 * netdev_restore_flags() (or at exit). */
static int
do_update_flags(struct netdev *netdev, enum netdev_flags off,
                enum netdev_flags on, enum netdev_flags *old_flagsp,
                struct netdev_saved_flags **sfp)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev_saved_flags *sf = NULL;
    enum netdev_flags old_flags;
    int error;

    /* 'off & ~on' gives 'on' precedence for any bit present in both sets. */
    error = netdev->netdev_class->update_flags(netdev, off & ~on, on,
                                               &old_flags);
    if (error) {
        VLOG_WARN_RL(&rl, "failed to %s flags for network device %s: %s",
                     off || on ? "set" : "get", netdev_get_name(netdev),
                     ovs_strerror(error));
        /* Report well-defined (zero) flags to the caller on failure. */
        old_flags = 0;
    } else if ((off || on) && sfp) {
        enum netdev_flags new_flags = (old_flags & ~off) | on;
        enum netdev_flags changed_flags = old_flags ^ new_flags;
        if (changed_flags) {
            /* Remember which bits changed and their new values, and hold a
             * reference on 'netdev' until the flags are restored. */
            ovs_mutex_lock(&netdev_mutex);
            *sfp = sf = xmalloc(sizeof *sf);
            sf->netdev = netdev;
            ovs_list_push_front(&netdev->saved_flags_list, &sf->node);
            sf->saved_flags = changed_flags;
            sf->saved_values = changed_flags & new_flags;

            netdev->ref_cnt++;
            ovs_mutex_unlock(&netdev_mutex);
        }
    }

    if (old_flagsp) {
        *old_flagsp = old_flags;
    }
    if (sfp) {
        /* Harmless re-store: 'sf' is still NULL unless the block above
         * ran and already set '*sfp'. */
        *sfp = sf;
    }

    return error;
}
1337
/* Obtains the current flags for 'netdev' and stores them into '*flagsp'.
 * Returns 0 if successful, otherwise a positive errno value.  On failure,
 * stores 0 into '*flagsp'. */
int
netdev_get_flags(const struct netdev *netdev_, enum netdev_flags *flagsp)
{
    /* Passing off == on == 0 makes do_update_flags() a pure read, so the
     * const cast is safe: nothing gets modified in that case. */
    struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
    return do_update_flags(netdev, 0, 0, flagsp, NULL);
}
1347
/* Sets the flags for 'netdev' to 'flags'.
 * Returns 0 if successful, otherwise a positive errno value.
 *
 * On success, if 'sfp' is nonnull, '*sfp' may later be passed to
 * netdev_restore_flags() to undo the change. */
int
netdev_set_flags(struct netdev *netdev, enum netdev_flags flags,
                 struct netdev_saved_flags **sfp)
{
    /* All-ones 'off' mask: do_update_flags() clears every flag not also in
     * 'flags' (it computes off & ~on), leaving the device with exactly
     * 'flags'. */
    return do_update_flags(netdev, -1, flags, NULL, sfp);
}
1356
1357 /* Turns on the specified 'flags' on 'netdev':
1358 *
1359 * - On success, returns 0. If 'sfp' is nonnull, sets '*sfp' to a newly
1360 * allocated 'struct netdev_saved_flags *' that may be passed to
1361 * netdev_restore_flags() to restore the original values of 'flags' on
1362 * 'netdev' (this will happen automatically at program termination if
1363 * netdev_restore_flags() is never called) , or to NULL if no flags were
1364 * actually changed.
1365 *
1366 * - On failure, returns a positive errno value. If 'sfp' is nonnull, sets
1367 * '*sfp' to NULL. */
int
netdev_turn_flags_on(struct netdev *netdev, enum netdev_flags flags,
                     struct netdev_saved_flags **sfp)
{
    /* Empty 'off' set: only turns bits on, leaving all others untouched. */
    return do_update_flags(netdev, 0, flags, NULL, sfp);
}
1374
1375 /* Turns off the specified 'flags' on 'netdev'. See netdev_turn_flags_on() for
1376 * details of the interface. */
int
netdev_turn_flags_off(struct netdev *netdev, enum netdev_flags flags,
                      struct netdev_saved_flags **sfp)
{
    /* Empty 'on' set: only turns bits off, leaving all others untouched. */
    return do_update_flags(netdev, flags, 0, NULL, sfp);
}
1383
1384 /* Restores the flags that were saved in 'sf', and destroys 'sf'.
1385 * Does nothing if 'sf' is NULL. */
1386 void
1387 netdev_restore_flags(struct netdev_saved_flags *sf)
1388 OVS_EXCLUDED(netdev_mutex)
1389 {
1390 if (sf) {
1391 struct netdev *netdev = sf->netdev;
1392 enum netdev_flags old_flags;
1393
1394 netdev->netdev_class->update_flags(netdev,
1395 sf->saved_flags & sf->saved_values,
1396 sf->saved_flags & ~sf->saved_values,
1397 &old_flags);
1398
1399 ovs_mutex_lock(&netdev_mutex);
1400 ovs_list_remove(&sf->node);
1401 free(sf);
1402 netdev_unref(netdev);
1403 }
1404 }
1405
1406 /* Looks up the ARP table entry for 'ip' on 'netdev'. If one exists and can be
1407 * successfully retrieved, it stores the corresponding MAC address in 'mac' and
1408 * returns 0. Otherwise, it returns a positive errno value; in particular,
1409 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
1410 int
1411 netdev_arp_lookup(const struct netdev *netdev,
1412 ovs_be32 ip, struct eth_addr *mac)
1413 {
1414 int error = (netdev->netdev_class->arp_lookup
1415 ? netdev->netdev_class->arp_lookup(netdev, ip, mac)
1416 : EOPNOTSUPP);
1417 if (error) {
1418 *mac = eth_addr_zero;
1419 }
1420 return error;
1421 }
1422
1423 /* Returns true if carrier is active (link light is on) on 'netdev'. */
1424 bool
1425 netdev_get_carrier(const struct netdev *netdev)
1426 {
1427 int error;
1428 enum netdev_flags flags;
1429 bool carrier;
1430
1431 netdev_get_flags(netdev, &flags);
1432 if (!(flags & NETDEV_UP)) {
1433 return false;
1434 }
1435
1436 if (!netdev->netdev_class->get_carrier) {
1437 return true;
1438 }
1439
1440 error = netdev->netdev_class->get_carrier(netdev, &carrier);
1441 if (error) {
1442 VLOG_DBG("%s: failed to get network device carrier status, assuming "
1443 "down: %s", netdev_get_name(netdev), ovs_strerror(error));
1444 carrier = false;
1445 }
1446
1447 return carrier;
1448 }
1449
1450 /* Returns the number of times 'netdev''s carrier has changed. */
1451 long long int
1452 netdev_get_carrier_resets(const struct netdev *netdev)
1453 {
1454 return (netdev->netdev_class->get_carrier_resets
1455 ? netdev->netdev_class->get_carrier_resets(netdev)
1456 : 0);
1457 }
1458
1459 /* Attempts to force netdev_get_carrier() to poll 'netdev''s MII registers for
1460 * link status instead of checking 'netdev''s carrier. 'netdev''s MII
1461 * registers will be polled once ever 'interval' milliseconds. If 'netdev'
1462 * does not support MII, another method may be used as a fallback. If
1463 * 'interval' is less than or equal to zero, reverts netdev_get_carrier() to
1464 * its normal behavior.
1465 *
1466 * Returns 0 if successful, otherwise a positive errno value. */
1467 int
1468 netdev_set_miimon_interval(struct netdev *netdev, long long int interval)
1469 {
1470 return (netdev->netdev_class->set_miimon_interval
1471 ? netdev->netdev_class->set_miimon_interval(netdev, interval)
1472 : EOPNOTSUPP);
1473 }
1474
1475 /* Retrieves current device stats for 'netdev'. */
1476 int
1477 netdev_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1478 {
1479 int error;
1480
1481 /* Statistics are initialized before passing it to particular device
1482 * implementation so all values are filtered out by default. */
1483 memset(stats, 0xFF, sizeof *stats);
1484
1485 COVERAGE_INC(netdev_get_stats);
1486 error = (netdev->netdev_class->get_stats
1487 ? netdev->netdev_class->get_stats(netdev, stats)
1488 : EOPNOTSUPP);
1489 if (error) {
1490 /* In case of error all statistics are filtered out */
1491 memset(stats, 0xff, sizeof *stats);
1492 }
1493 return error;
1494 }
1495
1496 /* Retrieves current device custom stats for 'netdev'. */
1497 int
1498 netdev_get_custom_stats(const struct netdev *netdev,
1499 struct netdev_custom_stats *custom_stats)
1500 {
1501 int error;
1502 memset(custom_stats, 0, sizeof *custom_stats);
1503 error = (netdev->netdev_class->get_custom_stats
1504 ? netdev->netdev_class->get_custom_stats(netdev, custom_stats)
1505 : EOPNOTSUPP);
1506
1507 return error;
1508 }
1509
1510
1511 /* Attempts to set input rate limiting (policing) policy, such that up to
1512 * 'kbits_rate' kbps of traffic is accepted, with a maximum accumulative burst
1513 * size of 'kbits' kb. */
1514 int
1515 netdev_set_policing(struct netdev *netdev, uint32_t kbits_rate,
1516 uint32_t kbits_burst)
1517 {
1518 return (netdev->netdev_class->set_policing
1519 ? netdev->netdev_class->set_policing(netdev,
1520 kbits_rate, kbits_burst)
1521 : EOPNOTSUPP);
1522 }
1523
1524 /* Adds to 'types' all of the forms of QoS supported by 'netdev', or leaves it
1525 * empty if 'netdev' does not support QoS. Any names added to 'types' should
1526 * be documented as valid for the "type" column in the "QoS" table in
1527 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1528 *
1529 * Every network device supports disabling QoS with a type of "", but this type
1530 * will not be added to 'types'.
1531 *
1532 * The caller must initialize 'types' (e.g. with sset_init()) before calling
1533 * this function. The caller is responsible for destroying 'types' (e.g. with
1534 * sset_destroy()) when it is no longer needed.
1535 *
1536 * Returns 0 if successful, otherwise a positive errno value. */
1537 int
1538 netdev_get_qos_types(const struct netdev *netdev, struct sset *types)
1539 {
1540 const struct netdev_class *class = netdev->netdev_class;
1541 return (class->get_qos_types
1542 ? class->get_qos_types(netdev, types)
1543 : 0);
1544 }
1545
1546 /* Queries 'netdev' for its capabilities regarding the specified 'type' of QoS,
1547 * which should be "" or one of the types returned by netdev_get_qos_types()
1548 * for 'netdev'. Returns 0 if successful, otherwise a positive errno value.
1549 * On success, initializes 'caps' with the QoS capabilities; on failure, clears
1550 * 'caps' to all zeros. */
1551 int
1552 netdev_get_qos_capabilities(const struct netdev *netdev, const char *type,
1553 struct netdev_qos_capabilities *caps)
1554 {
1555 const struct netdev_class *class = netdev->netdev_class;
1556
1557 if (*type) {
1558 int retval = (class->get_qos_capabilities
1559 ? class->get_qos_capabilities(netdev, type, caps)
1560 : EOPNOTSUPP);
1561 if (retval) {
1562 memset(caps, 0, sizeof *caps);
1563 }
1564 return retval;
1565 } else {
1566 /* Every netdev supports turning off QoS. */
1567 memset(caps, 0, sizeof *caps);
1568 return 0;
1569 }
1570 }
1571
1572 /* Obtains the number of queues supported by 'netdev' for the specified 'type'
1573 * of QoS. Returns 0 if successful, otherwise a positive errno value. Stores
1574 * the number of queues (zero on failure) in '*n_queuesp'.
1575 *
1576 * This is just a simple wrapper around netdev_get_qos_capabilities(). */
1577 int
1578 netdev_get_n_queues(const struct netdev *netdev,
1579 const char *type, unsigned int *n_queuesp)
1580 {
1581 struct netdev_qos_capabilities caps;
1582 int retval;
1583
1584 retval = netdev_get_qos_capabilities(netdev, type, &caps);
1585 *n_queuesp = caps.n_queues;
1586 return retval;
1587 }
1588
1589 /* Queries 'netdev' about its currently configured form of QoS. If successful,
1590 * stores the name of the current form of QoS into '*typep', stores any details
1591 * of configuration as string key-value pairs in 'details', and returns 0. On
1592 * failure, sets '*typep' to NULL and returns a positive errno value.
1593 *
1594 * A '*typep' of "" indicates that QoS is currently disabled on 'netdev'.
1595 *
1596 * The caller must initialize 'details' as an empty smap (e.g. with
1597 * smap_init()) before calling this function. The caller must free 'details'
1598 * when it is no longer needed (e.g. with smap_destroy()).
1599 *
1600 * The caller must not modify or free '*typep'.
1601 *
1602 * '*typep' will be one of the types returned by netdev_get_qos_types() for
1603 * 'netdev'. The contents of 'details' should be documented as valid for
1604 * '*typep' in the "other_config" column in the "QoS" table in
1605 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)). */
1606 int
1607 netdev_get_qos(const struct netdev *netdev,
1608 const char **typep, struct smap *details)
1609 {
1610 const struct netdev_class *class = netdev->netdev_class;
1611 int retval;
1612
1613 if (class->get_qos) {
1614 retval = class->get_qos(netdev, typep, details);
1615 if (retval) {
1616 *typep = NULL;
1617 smap_clear(details);
1618 }
1619 return retval;
1620 } else {
1621 /* 'netdev' doesn't support QoS, so report that QoS is disabled. */
1622 *typep = "";
1623 return 0;
1624 }
1625 }
1626
1627 /* Attempts to reconfigure QoS on 'netdev', changing the form of QoS to 'type'
1628 * with details of configuration from 'details'. Returns 0 if successful,
1629 * otherwise a positive errno value. On error, the previous QoS configuration
1630 * is retained.
1631 *
1632 * When this function changes the type of QoS (not just 'details'), this also
1633 * resets all queue configuration for 'netdev' to their defaults (which depend
1634 * on the specific type of QoS). Otherwise, the queue configuration for
1635 * 'netdev' is unchanged.
1636 *
1637 * 'type' should be "" (to disable QoS) or one of the types returned by
1638 * netdev_get_qos_types() for 'netdev'. The contents of 'details' should be
1639 * documented as valid for the given 'type' in the "other_config" column in the
1640 * "QoS" table in vswitchd/vswitch.xml (which is built as
1641 * ovs-vswitchd.conf.db(8)).
1642 *
1643 * NULL may be specified for 'details' if there are no configuration
1644 * details. */
1645 int
1646 netdev_set_qos(struct netdev *netdev,
1647 const char *type, const struct smap *details)
1648 {
1649 const struct netdev_class *class = netdev->netdev_class;
1650
1651 if (!type) {
1652 type = "";
1653 }
1654
1655 if (class->set_qos) {
1656 if (!details) {
1657 static const struct smap empty = SMAP_INITIALIZER(&empty);
1658 details = &empty;
1659 }
1660 return class->set_qos(netdev, type, details);
1661 } else {
1662 return *type ? EOPNOTSUPP : 0;
1663 }
1664 }
1665
1666 /* Queries 'netdev' for information about the queue numbered 'queue_id'. If
1667 * successful, adds that information as string key-value pairs to 'details'.
1668 * Returns 0 if successful, otherwise a positive errno value.
1669 *
1670 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1671 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1672 *
1673 * The returned contents of 'details' should be documented as valid for the
1674 * given 'type' in the "other_config" column in the "Queue" table in
1675 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1676 *
1677 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1678 * this function. The caller must free 'details' when it is no longer needed
1679 * (e.g. with smap_destroy()). */
1680 int
1681 netdev_get_queue(const struct netdev *netdev,
1682 unsigned int queue_id, struct smap *details)
1683 {
1684 const struct netdev_class *class = netdev->netdev_class;
1685 int retval;
1686
1687 retval = (class->get_queue
1688 ? class->get_queue(netdev, queue_id, details)
1689 : EOPNOTSUPP);
1690 if (retval) {
1691 smap_clear(details);
1692 }
1693 return retval;
1694 }
1695
1696 /* Configures the queue numbered 'queue_id' on 'netdev' with the key-value
1697 * string pairs in 'details'. The contents of 'details' should be documented
1698 * as valid for the given 'type' in the "other_config" column in the "Queue"
1699 * table in vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1700 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1701 * given queue's configuration should be unmodified.
1702 *
1703 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1704 * the current form of QoS (e.g. as returned by netdev_get_n_queues(netdev)).
1705 *
1706 * This function does not modify 'details', and the caller retains ownership of
1707 * it. */
1708 int
1709 netdev_set_queue(struct netdev *netdev,
1710 unsigned int queue_id, const struct smap *details)
1711 {
1712 const struct netdev_class *class = netdev->netdev_class;
1713 return (class->set_queue
1714 ? class->set_queue(netdev, queue_id, details)
1715 : EOPNOTSUPP);
1716 }
1717
1718 /* Attempts to delete the queue numbered 'queue_id' from 'netdev'. Some kinds
1719 * of QoS may have a fixed set of queues, in which case attempts to delete them
1720 * will fail with EOPNOTSUPP.
1721 *
1722 * Returns 0 if successful, otherwise a positive errno value. On failure, the
1723 * given queue will be unmodified.
1724 *
1725 * 'queue_id' must be less than the number of queues supported by 'netdev' for
1726 * the current form of QoS (e.g. as returned by
1727 * netdev_get_n_queues(netdev)). */
1728 int
1729 netdev_delete_queue(struct netdev *netdev, unsigned int queue_id)
1730 {
1731 const struct netdev_class *class = netdev->netdev_class;
1732 return (class->delete_queue
1733 ? class->delete_queue(netdev, queue_id)
1734 : EOPNOTSUPP);
1735 }
1736
1737 /* Obtains statistics about 'queue_id' on 'netdev'. On success, returns 0 and
1738 * fills 'stats' with the queue's statistics; individual members of 'stats' may
1739 * be set to all-1-bits if the statistic is unavailable. On failure, returns a
1740 * positive errno value and fills 'stats' with values indicating unsupported
1741 * statistics. */
1742 int
1743 netdev_get_queue_stats(const struct netdev *netdev, unsigned int queue_id,
1744 struct netdev_queue_stats *stats)
1745 {
1746 const struct netdev_class *class = netdev->netdev_class;
1747 int retval;
1748
1749 retval = (class->get_queue_stats
1750 ? class->get_queue_stats(netdev, queue_id, stats)
1751 : EOPNOTSUPP);
1752 if (retval) {
1753 stats->tx_bytes = UINT64_MAX;
1754 stats->tx_packets = UINT64_MAX;
1755 stats->tx_errors = UINT64_MAX;
1756 stats->created = LLONG_MIN;
1757 }
1758 return retval;
1759 }
1760
1761 /* Initializes 'dump' to begin dumping the queues in a netdev.
1762 *
1763 * This function provides no status indication. An error status for the entire
1764 * dump operation is provided when it is completed by calling
1765 * netdev_queue_dump_done().
1766 */
1767 void
1768 netdev_queue_dump_start(struct netdev_queue_dump *dump,
1769 const struct netdev *netdev)
1770 {
1771 dump->netdev = netdev_ref(netdev);
1772 if (netdev->netdev_class->queue_dump_start) {
1773 dump->error = netdev->netdev_class->queue_dump_start(netdev,
1774 &dump->state);
1775 } else {
1776 dump->error = EOPNOTSUPP;
1777 }
1778 }
1779
1780 /* Attempts to retrieve another queue from 'dump', which must have been
1781 * initialized with netdev_queue_dump_start(). On success, stores a new queue
1782 * ID into '*queue_id', fills 'details' with configuration details for the
1783 * queue, and returns true. On failure, returns false.
1784 *
1785 * Queues are not necessarily dumped in increasing order of queue ID (or any
1786 * other predictable order).
1787 *
1788 * Failure might indicate an actual error or merely that the last queue has
1789 * been dumped. An error status for the entire dump operation is provided when
1790 * it is completed by calling netdev_queue_dump_done().
1791 *
1792 * The returned contents of 'details' should be documented as valid for the
1793 * given 'type' in the "other_config" column in the "Queue" table in
1794 * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
1795 *
1796 * The caller must initialize 'details' (e.g. with smap_init()) before calling
1797 * this function. This function will clear and replace its contents. The
1798 * caller must free 'details' when it is no longer needed (e.g. with
1799 * smap_destroy()). */
bool
netdev_queue_dump_next(struct netdev_queue_dump *dump,
                       unsigned int *queue_id, struct smap *details)
{
    const struct netdev *netdev = dump->netdev;

    /* A sticky error -- from dump_start() or a previous dump_next() call --
     * permanently ends the iteration.  This also covers providers without
     * queue dump support, for which dump_start() stored EOPNOTSUPP. */
    if (dump->error) {
        return false;
    }

    dump->error = netdev->netdev_class->queue_dump_next(netdev, dump->state,
                                                        queue_id, details);

    if (dump->error) {
        /* EOF or a real error: release provider state now.  The later
         * netdev_queue_dump_done() call skips its own queue_dump_done()
         * because dump->error is nonzero. */
        netdev->netdev_class->queue_dump_done(netdev, dump->state);
        return false;
    }
    return true;
}
1819
1820 /* Completes queue table dump operation 'dump', which must have been
1821 * initialized with netdev_queue_dump_start(). Returns 0 if the dump operation
1822 * was error-free, otherwise a positive errno value describing the problem. */
1823 int
1824 netdev_queue_dump_done(struct netdev_queue_dump *dump)
1825 {
1826 const struct netdev *netdev = dump->netdev;
1827 if (!dump->error && netdev->netdev_class->queue_dump_done) {
1828 dump->error = netdev->netdev_class->queue_dump_done(netdev,
1829 dump->state);
1830 }
1831 netdev_close(dump->netdev);
1832 return dump->error == EOF ? 0 : dump->error;
1833 }
1834
1835 /* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
1836 * its statistics, and the 'aux' specified by the caller. The order of
1837 * iteration is unspecified, but (when successful) each queue is visited
1838 * exactly once.
1839 *
1840 * Calling this function may be more efficient than calling
1841 * netdev_get_queue_stats() for every queue.
1842 *
1843 * 'cb' must not modify or free the statistics passed in.
1844 *
1845 * Returns 0 if successful, otherwise a positive errno value. On error, some
1846 * configured queues may not have been included in the iteration. */
1847 int
1848 netdev_dump_queue_stats(const struct netdev *netdev,
1849 netdev_dump_queue_stats_cb *cb, void *aux)
1850 {
1851 const struct netdev_class *class = netdev->netdev_class;
1852 return (class->dump_queue_stats
1853 ? class->dump_queue_stats(netdev, cb, aux)
1854 : EOPNOTSUPP);
1855 }
1856
1857 \f
1858 /* Returns the class type of 'netdev'.
1859 *
1860 * The caller must not free the returned value. */
1861 const char *
1862 netdev_get_type(const struct netdev *netdev)
1863 {
1864 return netdev->netdev_class->type;
1865 }
1866
1867 /* Returns the class associated with 'netdev'. */
1868 const struct netdev_class *
1869 netdev_get_class(const struct netdev *netdev)
1870 {
1871 return netdev->netdev_class;
1872 }
1873
1874 /* Returns the netdev with 'name' or NULL if there is none.
1875 *
1876 * The caller must free the returned netdev with netdev_close(). */
struct netdev *
netdev_from_name(const char *name)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev *netdev;

    ovs_mutex_lock(&netdev_mutex);
    netdev = shash_find_data(&netdev_shash, name);
    if (netdev) {
        /* Reference taken on the caller's behalf, released by the caller's
         * netdev_close(). */
        netdev->ref_cnt++;
    }
    ovs_mutex_unlock(&netdev_mutex);

    return netdev;
}
1892
1893 /* Fills 'device_list' with devices that match 'netdev_class'.
1894 *
1895 * The caller is responsible for initializing and destroying 'device_list' and
1896 * must close each device on the list. */
void
netdev_get_devices(const struct netdev_class *netdev_class,
                   struct shash *device_list)
    OVS_EXCLUDED(netdev_mutex)
{
    struct shash_node *node;

    ovs_mutex_lock(&netdev_mutex);
    SHASH_FOR_EACH (node, &netdev_shash) {
        struct netdev *dev = node->data;

        if (dev->netdev_class == netdev_class) {
            /* Reference taken on the caller's behalf; released when the
             * caller closes each device on 'device_list'. */
            dev->ref_cnt++;
            shash_add(device_list, node->name, node->data);
        }
    }
    ovs_mutex_unlock(&netdev_mutex);
}
1915
1916 /* Extracts pointers to all 'netdev-vports' into an array 'vports'
1917 * and returns it. Stores the size of the array into '*size'.
1918 *
1919 * The caller is responsible for freeing 'vports' and must close
1920 * each 'netdev-vport' in the list. */
struct netdev **
netdev_get_vports(size_t *size)
    OVS_EXCLUDED(netdev_mutex)
{
    struct netdev **vports;
    struct shash_node *node;
    size_t n = 0;

    /* Nowhere to report the count, so nothing useful can be returned. */
    if (!size) {
        return NULL;
    }

    /* Allocate for the worst case (every netdev is a vport) up front so the
     * array never has to grow while netdev_mutex is held. */
    ovs_mutex_lock(&netdev_mutex);
    vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
    SHASH_FOR_EACH (node, &netdev_shash) {
        struct netdev *dev = node->data;

        if (netdev_vport_is_vport_class(dev->netdev_class)) {
            /* Reference taken on the caller's behalf; the caller must close
             * each vport in the returned array. */
            dev->ref_cnt++;
            vports[n] = dev;
            n++;
        }
    }
    ovs_mutex_unlock(&netdev_mutex);
    *size = n;

    return vports;
}
1950
/* Returns the type of the device named 'name', preferring the vport type
 * registry and falling back to an open netdev with that name, or NULL if
 * neither knows the name. */
const char *
netdev_get_type_from_name(const char *name)
{
    const char *type = netdev_vport_type_from_name(name);

    if (!type) {
        struct netdev *dev = netdev_from_name(name);

        if (dev) {
            type = netdev_get_type(dev);
        }
        netdev_close(dev);      /* netdev_close() tolerates NULL. */
    }
    return type;
}
1964 \f
1965 struct netdev *
1966 netdev_rxq_get_netdev(const struct netdev_rxq *rx)
1967 {
1968 ovs_assert(rx->netdev->ref_cnt > 0);
1969 return rx->netdev;
1970 }
1971
/* Returns the name of the netdev that 'rx' belongs to. */
const char *
netdev_rxq_get_name(const struct netdev_rxq *rx)
{
    const struct netdev *netdev = netdev_rxq_get_netdev(rx);

    return netdev_get_name(netdev);
}
1977
1978 int
1979 netdev_rxq_get_queue_id(const struct netdev_rxq *rx)
1980 {
1981 return rx->queue_id;
1982 }
1983
/* Restores every netdev's device flags to their pre-OVS values, by folding
 * each netdev's stack of saved-flags records into a single (mask, values)
 * pair and applying it through the provider's update_flags() callback.
 * 'aux' is unused.  NOTE(review): presumably installed as a fatal-signal /
 * exit hook — confirm at the registration site. */
static void
restore_all_flags(void *aux OVS_UNUSED)
{
    struct shash_node *node;

    SHASH_FOR_EACH (node, &netdev_shash) {
        struct netdev *netdev = node->data;
        const struct netdev_saved_flags *sf;
        enum netdev_flags saved_values;
        enum netdev_flags saved_flags;

        saved_values = saved_flags = 0;
        /* The 'node' below names the list member inside each
         * netdev_saved_flags element, not the shash_node above.  Later
         * records override earlier ones bit by bit: a bit present in
         * sf->saved_flags replaces any value accumulated so far. */
        LIST_FOR_EACH (sf, node, &netdev->saved_flags_list) {
            saved_flags |= sf->saved_flags;
            saved_values &= ~sf->saved_flags;
            saved_values |= sf->saved_flags & sf->saved_values;
        }
        if (saved_flags) {
            enum netdev_flags old_flags;

            /* Turn on the saved-1 bits, turn off the saved-0 bits. */
            netdev->netdev_class->update_flags(netdev,
                                               saved_flags & saved_values,
                                               saved_flags & ~saved_values,
                                               &old_flags);
        }
    }
}
2011
2012 uint64_t
2013 netdev_get_change_seq(const struct netdev *netdev)
2014 {
2015 return netdev->change_seq;
2016 }
2017
#ifndef _WIN32
/* This implementation is shared by Linux and BSD. */

/* Cached result of getifaddrs(), lazily populated by netdev_get_addrs() and
 * discarded by netdev_get_addrs_list_flush().  Guarded by
 * 'if_addr_list_lock'. */
static struct ifaddrs *if_addr_list;
static struct ovs_mutex if_addr_list_lock = OVS_MUTEX_INITIALIZER;

/* Drops the cached interface-address list so that the next call to
 * netdev_get_addrs() re-reads it from the system. */
void
netdev_get_addrs_list_flush(void)
{
    ovs_mutex_lock(&if_addr_list_lock);
    if (if_addr_list) {
        freeifaddrs(if_addr_list);
        if_addr_list = NULL;
    }
    ovs_mutex_unlock(&if_addr_list_lock);
}
2034
/* Retrieves the IPv4/IPv6 addresses configured on interface 'dev' from the
 * cached getifaddrs() list.
 *
 * On success returns 0 and, if 'paddr' is nonnull, stores freshly allocated
 * arrays of addresses and netmasks in '*paddr' and '*pmask' (both
 * represented as struct in6_addr) and their count in '*n_in'; the caller
 * must free() both arrays.  If 'paddr' is null the results are discarded.
 * Returns EADDRNOTAVAIL if 'dev' has no IP address, or the negated
 * getifaddrs() return value if the system lookup fails. */
int
netdev_get_addrs(const char dev[], struct in6_addr **paddr,
                 struct in6_addr **pmask, int *n_in)
{
    struct in6_addr *addr_array, *mask_array;
    const struct ifaddrs *ifa;
    int cnt = 0, i = 0;

    ovs_mutex_lock(&if_addr_list_lock);
    if (!if_addr_list) {
        int err;

        /* Lazily (re)populate the cached list. */
        err = getifaddrs(&if_addr_list);
        if (err) {
            ovs_mutex_unlock(&if_addr_list_lock);
            return -err;
        }
    }

    /* First pass: count this device's IPv4/IPv6 entries so the arrays can
     * be sized up front. */
    for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
        if (ifa->ifa_addr && ifa->ifa_name && ifa->ifa_netmask) {
            int family;

            family = ifa->ifa_addr->sa_family;
            if (family == AF_INET || family == AF_INET6) {
                if (!strncmp(ifa->ifa_name, dev, IFNAMSIZ)) {
                    cnt++;
                }
            }
        }
    }

    if (!cnt) {
        ovs_mutex_unlock(&if_addr_list_lock);
        return EADDRNOTAVAIL;
    }
    /* Zero-filled so that any slots left unfilled by the second pass (whose
     * sa_is_ip() filter can be slightly stricter) read as all-zero
     * addresses. */
    addr_array = xzalloc(sizeof *addr_array * cnt);
    mask_array = xzalloc(sizeof *mask_array * cnt);
    /* Second pass: copy the matching addresses and netmasks. */
    for (ifa = if_addr_list; ifa; ifa = ifa->ifa_next) {
        if (ifa->ifa_name
            && ifa->ifa_addr
            && ifa->ifa_netmask
            && !strncmp(ifa->ifa_name, dev, IFNAMSIZ)
            && sa_is_ip(ifa->ifa_addr)) {
            addr_array[i] = sa_get_address(ifa->ifa_addr);
            mask_array[i] = sa_get_address(ifa->ifa_netmask);
            i++;
        }
    }
    ovs_mutex_unlock(&if_addr_list_lock);
    if (paddr) {
        *n_in = cnt;
        *paddr = addr_array;
        *pmask = mask_array;
    } else {
        free(addr_array);
        free(mask_array);
    }
    return 0;
}
2095 #endif
2096
2097 void
2098 netdev_wait_reconf_required(struct netdev *netdev)
2099 {
2100 seq_wait(netdev->reconfigure_seq, netdev->last_reconfigure_seq);
2101 }
2102
2103 bool
2104 netdev_is_reconf_required(struct netdev *netdev)
2105 {
2106 return seq_read(netdev->reconfigure_seq) != netdev->last_reconfigure_seq;
2107 }
2108
2109 /* Give a chance to 'netdev' to reconfigure some of its parameters.
2110 *
2111 * If a module uses netdev_send() and netdev_rxq_recv(), it must call this
2112 * function when netdev_is_reconf_required() returns true.
2113 *
2114 * Return 0 if successful, otherwise a positive errno value. If the
2115 * reconfiguration fails the netdev will not be able to send or receive
2116 * packets.
2117 *
2118 * When this function is called, no call to netdev_rxq_recv() or netdev_send()
2119 * must be issued. */
2120 int
2121 netdev_reconfigure(struct netdev *netdev)
2122 {
2123 const struct netdev_class *class = netdev->netdev_class;
2124
2125 netdev->last_reconfigure_seq = seq_read(netdev->reconfigure_seq);
2126
2127 return (class->reconfigure
2128 ? class->reconfigure(netdev)
2129 : EOPNOTSUPP);
2130 }
2131
2132 int
2133 netdev_flow_flush(struct netdev *netdev)
2134 {
2135 const struct netdev_class *class = netdev->netdev_class;
2136
2137 return (class->flow_flush
2138 ? class->flow_flush(netdev)
2139 : EOPNOTSUPP);
2140 }
2141
2142 int
2143 netdev_flow_dump_create(struct netdev *netdev, struct netdev_flow_dump **dump)
2144 {
2145 const struct netdev_class *class = netdev->netdev_class;
2146
2147 return (class->flow_dump_create
2148 ? class->flow_dump_create(netdev, dump)
2149 : EOPNOTSUPP);
2150 }
2151
2152 int
2153 netdev_flow_dump_destroy(struct netdev_flow_dump *dump)
2154 {
2155 const struct netdev_class *class = dump->netdev->netdev_class;
2156
2157 return (class->flow_dump_destroy
2158 ? class->flow_dump_destroy(dump)
2159 : EOPNOTSUPP);
2160 }
2161
2162 bool
2163 netdev_flow_dump_next(struct netdev_flow_dump *dump, struct match *match,
2164 struct nlattr **actions, struct dpif_flow_stats *stats,
2165 struct dpif_flow_attrs *attrs, ovs_u128 *ufid,
2166 struct ofpbuf *rbuffer, struct ofpbuf *wbuffer)
2167 {
2168 const struct netdev_class *class = dump->netdev->netdev_class;
2169
2170 return (class->flow_dump_next
2171 ? class->flow_dump_next(dump, match, actions, stats, attrs,
2172 ufid, rbuffer, wbuffer)
2173 : false);
2174 }
2175
2176 int
2177 netdev_flow_put(struct netdev *netdev, struct match *match,
2178 struct nlattr *actions, size_t act_len,
2179 const ovs_u128 *ufid, struct offload_info *info,
2180 struct dpif_flow_stats *stats)
2181 {
2182 const struct netdev_class *class = netdev->netdev_class;
2183
2184 return (class->flow_put
2185 ? class->flow_put(netdev, match, actions, act_len, ufid,
2186 info, stats)
2187 : EOPNOTSUPP);
2188 }
2189
2190 int
2191 netdev_flow_get(struct netdev *netdev, struct match *match,
2192 struct nlattr **actions, const ovs_u128 *ufid,
2193 struct dpif_flow_stats *stats,
2194 struct dpif_flow_attrs *attrs, struct ofpbuf *buf)
2195 {
2196 const struct netdev_class *class = netdev->netdev_class;
2197
2198 return (class->flow_get
2199 ? class->flow_get(netdev, match, actions, ufid, stats, attrs, buf)
2200 : EOPNOTSUPP);
2201 }
2202
2203 int
2204 netdev_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
2205 struct dpif_flow_stats *stats)
2206 {
2207 const struct netdev_class *class = netdev->netdev_class;
2208
2209 return (class->flow_del
2210 ? class->flow_del(netdev, ufid, stats)
2211 : EOPNOTSUPP);
2212 }
2213
2214 int
2215 netdev_init_flow_api(struct netdev *netdev)
2216 {
2217 const struct netdev_class *class = netdev->netdev_class;
2218
2219 if (!netdev_is_flow_api_enabled()) {
2220 return EOPNOTSUPP;
2221 }
2222
2223 return (class->init_flow_api
2224 ? class->init_flow_api(netdev)
2225 : EOPNOTSUPP);
2226 }
2227
2228 uint32_t
2229 netdev_get_block_id(struct netdev *netdev)
2230 {
2231 const struct netdev_class *class = netdev->netdev_class;
2232
2233 return (class->get_block_id
2234 ? class->get_block_id(netdev)
2235 : 0);
2236 }
2237
/* Returns true if hardware flow offload has been enabled (see
 * netdev_set_flow_api_enabled()). */
bool
netdev_is_flow_api_enabled(void)
{
    return netdev_flow_api_enabled;
}
2243
/* Protects below port hashmaps. */
static struct ovs_mutex netdev_hmap_mutex = OVS_MUTEX_INITIALIZER;

/* Maps datapath ports to their netdevs; entries are keyed by
 * (dpif_class, port_no) via netdev_ports_hash(). */
static struct hmap port_to_netdev OVS_GUARDED_BY(netdev_hmap_mutex)
    = HMAP_INITIALIZER(&port_to_netdev);
/* Reverse map used by netdev_ifindex_to_odp_port(); hashed by raw ifindex. */
static struct hmap ifindex_to_port OVS_GUARDED_BY(netdev_hmap_mutex)
    = HMAP_INITIALIZER(&ifindex_to_port);

/* One record per datapath port registered with the offload layer.  The same
 * record is linked into both hashmaps above. */
struct port_to_netdev_data {
    struct hmap_node portno_node; /* By (dpif_class, dpif_port.port_no). */
    struct hmap_node ifindex_node; /* By (dpif_class, ifindex). */
    struct netdev *netdev;        /* Holds a reference (see netdev_ref()). */
    struct dpif_port dpif_port;   /* Deep copy (see dpif_port_clone()). */
    const struct dpif_class *dpif_class;
    int ifindex;
};
2260
2261 static uint32_t
2262 netdev_ports_hash(odp_port_t port, const struct dpif_class *dpif_class)
2263 {
2264 return hash_int(odp_to_u32(port), hash_pointer(dpif_class, 0));
2265 }
2266
2267 static struct port_to_netdev_data *
2268 netdev_ports_lookup(odp_port_t port_no, const struct dpif_class *dpif_class)
2269 OVS_REQUIRES(netdev_hmap_mutex)
2270 {
2271 struct port_to_netdev_data *data;
2272
2273 HMAP_FOR_EACH_WITH_HASH (data, portno_node,
2274 netdev_ports_hash(port_no, dpif_class),
2275 &port_to_netdev) {
2276 if (data->dpif_class == dpif_class
2277 && data->dpif_port.port_no == port_no) {
2278 return data;
2279 }
2280 }
2281 return NULL;
2282 }
2283
2284 int
2285 netdev_ports_insert(struct netdev *netdev, const struct dpif_class *dpif_class,
2286 struct dpif_port *dpif_port)
2287 {
2288 struct port_to_netdev_data *data;
2289 int ifindex = netdev_get_ifindex(netdev);
2290
2291 if (ifindex < 0) {
2292 return ENODEV;
2293 }
2294
2295 ovs_mutex_lock(&netdev_hmap_mutex);
2296 if (netdev_ports_lookup(dpif_port->port_no, dpif_class)) {
2297 ovs_mutex_unlock(&netdev_hmap_mutex);
2298 return EEXIST;
2299 }
2300
2301 data = xzalloc(sizeof *data);
2302 data->netdev = netdev_ref(netdev);
2303 data->dpif_class = dpif_class;
2304 dpif_port_clone(&data->dpif_port, dpif_port);
2305 data->ifindex = ifindex;
2306
2307 hmap_insert(&port_to_netdev, &data->portno_node,
2308 netdev_ports_hash(dpif_port->port_no, dpif_class));
2309 hmap_insert(&ifindex_to_port, &data->ifindex_node, ifindex);
2310 ovs_mutex_unlock(&netdev_hmap_mutex);
2311
2312 netdev_init_flow_api(netdev);
2313
2314 return 0;
2315 }
2316
2317 struct netdev *
2318 netdev_ports_get(odp_port_t port_no, const struct dpif_class *dpif_class)
2319 {
2320 struct port_to_netdev_data *data;
2321 struct netdev *ret = NULL;
2322
2323 ovs_mutex_lock(&netdev_hmap_mutex);
2324 data = netdev_ports_lookup(port_no, dpif_class);
2325 if (data) {
2326 ret = netdev_ref(data->netdev);
2327 }
2328 ovs_mutex_unlock(&netdev_hmap_mutex);
2329
2330 return ret;
2331 }
2332
2333 int
2334 netdev_ports_remove(odp_port_t port_no, const struct dpif_class *dpif_class)
2335 {
2336 struct port_to_netdev_data *data;
2337 int ret = ENOENT;
2338
2339 ovs_mutex_lock(&netdev_hmap_mutex);
2340
2341 data = netdev_ports_lookup(port_no, dpif_class);
2342 if (data) {
2343 dpif_port_destroy(&data->dpif_port);
2344 netdev_close(data->netdev); /* unref and possibly close */
2345 hmap_remove(&port_to_netdev, &data->portno_node);
2346 hmap_remove(&ifindex_to_port, &data->ifindex_node);
2347 free(data);
2348 ret = 0;
2349 }
2350
2351 ovs_mutex_unlock(&netdev_hmap_mutex);
2352
2353 return ret;
2354 }
2355
2356 odp_port_t
2357 netdev_ifindex_to_odp_port(int ifindex)
2358 {
2359 struct port_to_netdev_data *data;
2360 odp_port_t ret = 0;
2361
2362 ovs_mutex_lock(&netdev_hmap_mutex);
2363 HMAP_FOR_EACH_WITH_HASH (data, ifindex_node, ifindex, &ifindex_to_port) {
2364 if (data->ifindex == ifindex) {
2365 ret = data->dpif_port.port_no;
2366 break;
2367 }
2368 }
2369 ovs_mutex_unlock(&netdev_hmap_mutex);
2370
2371 return ret;
2372 }
2373
2374 void
2375 netdev_ports_flow_flush(const struct dpif_class *dpif_class)
2376 {
2377 struct port_to_netdev_data *data;
2378
2379 ovs_mutex_lock(&netdev_hmap_mutex);
2380 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
2381 if (data->dpif_class == dpif_class) {
2382 netdev_flow_flush(data->netdev);
2383 }
2384 }
2385 ovs_mutex_unlock(&netdev_hmap_mutex);
2386 }
2387
2388 struct netdev_flow_dump **
2389 netdev_ports_flow_dump_create(const struct dpif_class *dpif_class, int *ports)
2390 {
2391 struct port_to_netdev_data *data;
2392 struct netdev_flow_dump **dumps;
2393 int count = 0;
2394 int i = 0;
2395
2396 ovs_mutex_lock(&netdev_hmap_mutex);
2397 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
2398 if (data->dpif_class == dpif_class) {
2399 count++;
2400 }
2401 }
2402
2403 dumps = count ? xzalloc(sizeof *dumps * count) : NULL;
2404
2405 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
2406 if (data->dpif_class == dpif_class) {
2407 if (netdev_flow_dump_create(data->netdev, &dumps[i])) {
2408 continue;
2409 }
2410
2411 dumps[i]->port = data->dpif_port.port_no;
2412 i++;
2413 }
2414 }
2415 ovs_mutex_unlock(&netdev_hmap_mutex);
2416
2417 *ports = i;
2418 return dumps;
2419 }
2420
2421 int
2422 netdev_ports_flow_del(const struct dpif_class *dpif_class,
2423 const ovs_u128 *ufid,
2424 struct dpif_flow_stats *stats)
2425 {
2426 struct port_to_netdev_data *data;
2427
2428 ovs_mutex_lock(&netdev_hmap_mutex);
2429 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
2430 if (data->dpif_class == dpif_class
2431 && !netdev_flow_del(data->netdev, ufid, stats)) {
2432 ovs_mutex_unlock(&netdev_hmap_mutex);
2433 return 0;
2434 }
2435 }
2436 ovs_mutex_unlock(&netdev_hmap_mutex);
2437
2438 return ENOENT;
2439 }
2440
2441 int
2442 netdev_ports_flow_get(const struct dpif_class *dpif_class, struct match *match,
2443 struct nlattr **actions, const ovs_u128 *ufid,
2444 struct dpif_flow_stats *stats,
2445 struct dpif_flow_attrs *attrs, struct ofpbuf *buf)
2446 {
2447 struct port_to_netdev_data *data;
2448
2449 ovs_mutex_lock(&netdev_hmap_mutex);
2450 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
2451 if (data->dpif_class == dpif_class
2452 && !netdev_flow_get(data->netdev, match, actions,
2453 ufid, stats, attrs, buf)) {
2454 ovs_mutex_unlock(&netdev_hmap_mutex);
2455 return 0;
2456 }
2457 }
2458 ovs_mutex_unlock(&netdev_hmap_mutex);
2459 return ENOENT;
2460 }
2461
2462 void
2463 netdev_free_custom_stats_counters(struct netdev_custom_stats *custom_stats)
2464 {
2465 if (custom_stats) {
2466 if (custom_stats->counters) {
2467 free(custom_stats->counters);
2468 custom_stats->counters = NULL;
2469 custom_stats->size = 0;
2470 }
2471 }
2472 }
2473
2474 #ifdef __linux__
2475 static void
2476 netdev_ports_flow_init(void)
2477 {
2478 struct port_to_netdev_data *data;
2479
2480 ovs_mutex_lock(&netdev_hmap_mutex);
2481 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
2482 netdev_init_flow_api(data->netdev);
2483 }
2484 ovs_mutex_unlock(&netdev_hmap_mutex);
2485 }
2486
2487 void
2488 netdev_set_flow_api_enabled(const struct smap *ovs_other_config)
2489 {
2490 if (smap_get_bool(ovs_other_config, "hw-offload", false)) {
2491 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2492
2493 if (ovsthread_once_start(&once)) {
2494 netdev_flow_api_enabled = true;
2495
2496 VLOG_INFO("netdev: Flow API Enabled");
2497
2498 tc_set_policy(smap_get_def(ovs_other_config, "tc-policy",
2499 TC_POLICY_DEFAULT));
2500
2501 netdev_ports_flow_init();
2502
2503 ovsthread_once_done(&once);
2504 }
2505 }
2506 }
#else
/* Stub for non-Linux platforms, where TC-based hardware offload is not
 * compiled in; "hw-offload" configuration is ignored. */
void
netdev_set_flow_api_enabled(const struct smap *ovs_other_config OVS_UNUSED)
{
}
#endif