2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "dpif-provider.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "openvswitch/dynamic-string.h"
34 #include "odp-execute.h"
36 #include "openvswitch/ofp-print.h"
37 #include "openvswitch/ofpbuf.h"
39 #include "openvswitch/poll-loop.h"
40 #include "route-table.h"
42 #include "openvswitch/shash.h"
45 #include "tnl-neigh-cache.h"
46 #include "tnl-ports.h"
50 #include "openvswitch/ofp-errors.h"
51 #include "openvswitch/vlog.h"
52 #include "lib/netdev-provider.h"
54 VLOG_DEFINE_THIS_MODULE(dpif
);
56 COVERAGE_DEFINE(dpif_destroy
);
57 COVERAGE_DEFINE(dpif_port_add
);
58 COVERAGE_DEFINE(dpif_port_del
);
59 COVERAGE_DEFINE(dpif_flow_flush
);
60 COVERAGE_DEFINE(dpif_flow_get
);
61 COVERAGE_DEFINE(dpif_flow_put
);
62 COVERAGE_DEFINE(dpif_flow_del
);
63 COVERAGE_DEFINE(dpif_execute
);
64 COVERAGE_DEFINE(dpif_purge
);
65 COVERAGE_DEFINE(dpif_execute_with_help
);
66 COVERAGE_DEFINE(dpif_meter_set
);
67 COVERAGE_DEFINE(dpif_meter_get
);
68 COVERAGE_DEFINE(dpif_meter_del
);
70 static const struct dpif_class
*base_dpif_classes
[] = {
71 #if defined(__linux__) || defined(_WIN32)
77 struct registered_dpif_class
{
78 const struct dpif_class
*dpif_class
;
81 static struct shash dpif_classes
= SHASH_INITIALIZER(&dpif_classes
);
82 static struct sset dpif_disallowed
= SSET_INITIALIZER(&dpif_disallowed
);
84 /* Protects 'dpif_classes', including the refcount, and 'dpif_disallowed'. */
85 static struct ovs_mutex dpif_mutex
= OVS_MUTEX_INITIALIZER
;
87 /* Rate limit for individual messages going to or from the datapath, output at
88 * DBG level. This is very high because, if these are enabled, it is because
89 * we really need to see them. */
90 static struct vlog_rate_limit dpmsg_rl
= VLOG_RATE_LIMIT_INIT(600, 600);
92 /* Not really much point in logging many dpif errors. */
93 static struct vlog_rate_limit error_rl
= VLOG_RATE_LIMIT_INIT(60, 5);
95 static void log_operation(const struct dpif
*, const char *operation
,
97 static bool should_log_flow_message(const struct vlog_module
*module
,
100 /* Incremented whenever tnl route, arp, etc changes. */
101 struct seq
*tnl_conf_seq
;
/* Returns true if 'type' names a tap device, i.e. the port type string is
 * exactly "tap". */
bool
dpif_is_tap_port(const char *type)
{
    return strcmp(type, "tap") == 0;
}
112 static struct ovsthread_once once
= OVSTHREAD_ONCE_INITIALIZER
;
114 if (ovsthread_once_start(&once
)) {
117 tnl_conf_seq
= seq_create();
118 dpctl_unixctl_register();
120 tnl_neigh_cache_init();
123 for (i
= 0; i
< ARRAY_SIZE(base_dpif_classes
); i
++) {
124 dp_register_provider(base_dpif_classes
[i
]);
127 ovsthread_once_done(&once
);
132 dp_register_provider__(const struct dpif_class
*new_class
)
134 struct registered_dpif_class
*registered_class
;
137 if (sset_contains(&dpif_disallowed
, new_class
->type
)) {
138 VLOG_DBG("attempted to register disallowed provider: %s",
143 if (shash_find(&dpif_classes
, new_class
->type
)) {
144 VLOG_WARN("attempted to register duplicate datapath provider: %s",
149 error
= new_class
->init
? new_class
->init() : 0;
151 VLOG_WARN("failed to initialize %s datapath class: %s",
152 new_class
->type
, ovs_strerror(error
));
156 registered_class
= xmalloc(sizeof *registered_class
);
157 registered_class
->dpif_class
= new_class
;
158 registered_class
->refcount
= 0;
160 shash_add(&dpif_classes
, new_class
->type
, registered_class
);
165 /* Registers a new datapath provider. After successful registration, new
166 * datapaths of that type can be opened using dpif_open(). */
168 dp_register_provider(const struct dpif_class
*new_class
)
172 ovs_mutex_lock(&dpif_mutex
);
173 error
= dp_register_provider__(new_class
);
174 ovs_mutex_unlock(&dpif_mutex
);
179 /* Unregisters a datapath provider. 'type' must have been previously
180 * registered and not currently be in use by any dpifs. After unregistration
181 * new datapaths of that type cannot be opened using dpif_open(). */
183 dp_unregister_provider__(const char *type
)
185 struct shash_node
*node
;
186 struct registered_dpif_class
*registered_class
;
188 node
= shash_find(&dpif_classes
, type
);
193 registered_class
= node
->data
;
194 if (registered_class
->refcount
) {
195 VLOG_WARN("attempted to unregister in use datapath provider: %s", type
);
199 shash_delete(&dpif_classes
, node
);
200 free(registered_class
);
205 /* Unregisters a datapath provider. 'type' must have been previously
206 * registered and not currently be in use by any dpifs. After unregistration
207 * new datapaths of that type cannot be opened using dpif_open(). */
209 dp_unregister_provider(const char *type
)
215 ovs_mutex_lock(&dpif_mutex
);
216 error
= dp_unregister_provider__(type
);
217 ovs_mutex_unlock(&dpif_mutex
);
222 /* Disallows a provider. Causes future calls of dp_register_provider() with
223 * a dpif_class which implements 'type' to fail. */
225 dp_disallow_provider(const char *type
)
227 ovs_mutex_lock(&dpif_mutex
);
228 sset_add(&dpif_disallowed
, type
);
229 ovs_mutex_unlock(&dpif_mutex
);
232 /* Adds the types of all currently registered datapath providers to 'types'.
233 * The caller must first initialize the sset. */
235 dp_enumerate_types(struct sset
*types
)
237 struct shash_node
*node
;
241 ovs_mutex_lock(&dpif_mutex
);
242 SHASH_FOR_EACH(node
, &dpif_classes
) {
243 const struct registered_dpif_class
*registered_class
= node
->data
;
244 sset_add(types
, registered_class
->dpif_class
->type
);
246 ovs_mutex_unlock(&dpif_mutex
);
250 dp_class_unref(struct registered_dpif_class
*rc
)
252 ovs_mutex_lock(&dpif_mutex
);
253 ovs_assert(rc
->refcount
);
255 ovs_mutex_unlock(&dpif_mutex
);
258 static struct registered_dpif_class
*
259 dp_class_lookup(const char *type
)
261 struct registered_dpif_class
*rc
;
263 ovs_mutex_lock(&dpif_mutex
);
264 rc
= shash_find_data(&dpif_classes
, type
);
268 ovs_mutex_unlock(&dpif_mutex
);
273 /* Clears 'names' and enumerates the names of all known created datapaths with
274 * the given 'type'. The caller must first initialize the sset. Returns 0 if
275 * successful, otherwise a positive errno value.
277 * Some kinds of datapaths might not be practically enumerable. This is not
278 * considered an error. */
280 dp_enumerate_names(const char *type
, struct sset
*names
)
282 struct registered_dpif_class
*registered_class
;
283 const struct dpif_class
*dpif_class
;
289 registered_class
= dp_class_lookup(type
);
290 if (!registered_class
) {
291 VLOG_WARN("could not enumerate unknown type: %s", type
);
295 dpif_class
= registered_class
->dpif_class
;
296 error
= (dpif_class
->enumerate
297 ? dpif_class
->enumerate(names
, dpif_class
)
300 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class
->type
,
301 ovs_strerror(error
));
303 dp_class_unref(registered_class
);
308 /* Parses 'datapath_name_', which is of the form [type@]name into its
309 * component pieces. 'name' and 'type' must be freed by the caller.
311 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
313 dp_parse_name(const char *datapath_name_
, char **name
, char **type
)
315 char *datapath_name
= xstrdup(datapath_name_
);
318 separator
= strchr(datapath_name
, '@');
321 *type
= datapath_name
;
322 *name
= xstrdup(dpif_normalize_type(separator
+ 1));
324 *name
= datapath_name
;
325 *type
= xstrdup(dpif_normalize_type(NULL
));
330 do_open(const char *name
, const char *type
, bool create
, struct dpif
**dpifp
)
332 struct dpif
*dpif
= NULL
;
334 struct registered_dpif_class
*registered_class
;
338 type
= dpif_normalize_type(type
);
339 registered_class
= dp_class_lookup(type
);
340 if (!registered_class
) {
341 VLOG_WARN("could not create datapath %s of unknown type %s", name
,
343 error
= EAFNOSUPPORT
;
347 error
= registered_class
->dpif_class
->open(registered_class
->dpif_class
,
348 name
, create
, &dpif
);
350 const char *dpif_type_str
= dpif_normalize_type(dpif_type(dpif
));
351 struct dpif_port_dump port_dump
;
352 struct dpif_port dpif_port
;
354 ovs_assert(dpif
->dpif_class
== registered_class
->dpif_class
);
356 DPIF_PORT_FOR_EACH(&dpif_port
, &port_dump
, dpif
) {
357 struct netdev
*netdev
;
360 if (dpif_is_tap_port(dpif_port
.type
)) {
364 err
= netdev_open(dpif_port
.name
, dpif_port
.type
, &netdev
);
367 netdev_ports_insert(netdev
, dpif_type_str
, &dpif_port
);
368 netdev_close(netdev
);
370 VLOG_WARN("could not open netdev %s type %s: %s",
371 dpif_port
.name
, dpif_port
.type
, ovs_strerror(err
));
375 dp_class_unref(registered_class
);
379 *dpifp
= error
? NULL
: dpif
;
383 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
384 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
385 * the empty string to specify the default system type. Returns 0 if
386 * successful, otherwise a positive errno value. On success stores a pointer
387 * to the datapath in '*dpifp', otherwise a null pointer. */
/* Opens an existing datapath; thin wrapper around do_open() with
 * create=false.  'type' may be NULL or "" for the default system type.
 * Returns 0 on success, a positive errno value on failure. */
int
dpif_open(const char *name, const char *type, struct dpif **dpifp)
{
    bool create = false;

    return do_open(name, type, create, dpifp);
}
394 /* Tries to create and open a new datapath with the given 'name' and 'type'.
395 * 'type' may be either NULL or the empty string to specify the default system
396 * type. Will fail if a datapath with 'name' and 'type' already exists.
397 * Returns 0 if successful, otherwise a positive errno value. On success
398 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
/* Creates and opens a new datapath; thin wrapper around do_open() with
 * create=true.  'type' may be NULL or "" for the default system type.
 * Returns 0 on success, a positive errno value on failure. */
int
dpif_create(const char *name, const char *type, struct dpif **dpifp)
{
    bool create = true;

    return do_open(name, type, create, dpifp);
}
405 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
406 * does not exist. 'type' may be either NULL or the empty string to specify
407 * the default system type. Returns 0 if successful, otherwise a positive
408 * errno value. On success stores a pointer to the datapath in '*dpifp',
409 * otherwise a null pointer. */
411 dpif_create_and_open(const char *name
, const char *type
, struct dpif
**dpifp
)
415 error
= dpif_create(name
, type
, dpifp
);
416 if (error
== EEXIST
|| error
== EBUSY
) {
417 error
= dpif_open(name
, type
, dpifp
);
419 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
420 name
, ovs_strerror(error
));
423 VLOG_WARN("failed to create datapath %s: %s",
424 name
, ovs_strerror(error
));
430 dpif_remove_netdev_ports(struct dpif
*dpif
) {
431 const char *dpif_type_str
= dpif_normalize_type(dpif_type(dpif
));
432 struct dpif_port_dump port_dump
;
433 struct dpif_port dpif_port
;
435 DPIF_PORT_FOR_EACH (&dpif_port
, &port_dump
, dpif
) {
436 if (!dpif_is_tap_port(dpif_port
.type
)) {
437 netdev_ports_remove(dpif_port
.port_no
, dpif_type_str
);
442 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
443 * itself; call dpif_delete() first, instead, if that is desirable. */
445 dpif_close(struct dpif
*dpif
)
448 struct registered_dpif_class
*rc
;
450 rc
= shash_find_data(&dpif_classes
, dpif
->dpif_class
->type
);
452 if (rc
->refcount
== 1) {
453 dpif_remove_netdev_ports(dpif
);
455 dpif_uninit(dpif
, true);
460 /* Performs periodic work needed by 'dpif'. */
462 dpif_run(struct dpif
*dpif
)
464 if (dpif
->dpif_class
->run
) {
465 return dpif
->dpif_class
->run(dpif
);
470 /* Arranges for poll_block() to wake up when dp_run() needs to be called for
473 dpif_wait(struct dpif
*dpif
)
475 if (dpif
->dpif_class
->wait
) {
476 dpif
->dpif_class
->wait(dpif
);
480 /* Returns the name of datapath 'dpif' prefixed with the type
481 * (for use in log messages). */
483 dpif_name(const struct dpif
*dpif
)
485 return dpif
->full_name
;
488 /* Returns the name of datapath 'dpif' without the type
489 * (for use in device names). */
491 dpif_base_name(const struct dpif
*dpif
)
493 return dpif
->base_name
;
496 /* Returns the type of datapath 'dpif'. */
498 dpif_type(const struct dpif
*dpif
)
500 return dpif
->dpif_class
->type
;
503 /* Checks if datapath 'dpif' requires cleanup. */
505 dpif_cleanup_required(const struct dpif
*dpif
)
507 return dpif
->dpif_class
->cleanup_required
;
510 /* Returns the fully spelled out name for the given datapath 'type'.
512 * Normalized type string can be compared with strcmp(). Unnormalized type
513 * string might be the same even if they have different spellings. */
/* Returns the fully spelled out name for the given datapath 'type': a NULL
 * or empty 'type' normalizes to "system", anything else is returned as-is.
 * Normalized type strings can be compared with strcmp(). */
const char *
dpif_normalize_type(const char *type)
{
    if (type && type[0] != '\0') {
        return type;
    }
    return "system";
}
520 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
521 * ports. After calling this function, it does not make sense to pass 'dpif'
522 * to any functions other than dpif_name() or dpif_close(). */
524 dpif_delete(struct dpif
*dpif
)
528 COVERAGE_INC(dpif_destroy
);
530 error
= dpif
->dpif_class
->destroy(dpif
);
531 log_operation(dpif
, "delete", error
);
535 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
536 * otherwise a positive errno value. */
538 dpif_get_dp_stats(const struct dpif
*dpif
, struct dpif_dp_stats
*stats
)
540 int error
= dpif
->dpif_class
->get_stats(dpif
, stats
);
542 memset(stats
, 0, sizeof *stats
);
544 log_operation(dpif
, "get_stats", error
);
549 dpif_set_features(struct dpif
*dpif
, uint32_t new_features
)
551 int error
= dpif
->dpif_class
->set_features(dpif
, new_features
);
553 log_operation(dpif
, "set_features", error
);
558 dpif_port_open_type(const char *datapath_type
, const char *port_type
)
560 struct registered_dpif_class
*rc
;
562 datapath_type
= dpif_normalize_type(datapath_type
);
564 ovs_mutex_lock(&dpif_mutex
);
565 rc
= shash_find_data(&dpif_classes
, datapath_type
);
566 if (rc
&& rc
->dpif_class
->port_open_type
) {
567 port_type
= rc
->dpif_class
->port_open_type(rc
->dpif_class
, port_type
);
569 ovs_mutex_unlock(&dpif_mutex
);
574 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
575 * non-null and its value is not ODPP_NONE, then attempts to use the
576 * value as the port number.
578 * If successful, returns 0 and sets '*port_nop' to the new port's port
579 * number (if 'port_nop' is non-null). On failure, returns a positive
580 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
583 dpif_port_add(struct dpif
*dpif
, struct netdev
*netdev
, odp_port_t
*port_nop
)
585 const char *netdev_name
= netdev_get_name(netdev
);
586 odp_port_t port_no
= ODPP_NONE
;
589 COVERAGE_INC(dpif_port_add
);
595 error
= dpif
->dpif_class
->port_add(dpif
, netdev
, &port_no
);
597 VLOG_DBG_RL(&dpmsg_rl
, "%s: added %s as port %"PRIu32
,
598 dpif_name(dpif
), netdev_name
, port_no
);
600 if (!dpif_is_tap_port(netdev_get_type(netdev
))) {
602 const char *dpif_type_str
= dpif_normalize_type(dpif_type(dpif
));
603 struct dpif_port dpif_port
;
605 dpif_port
.type
= CONST_CAST(char *, netdev_get_type(netdev
));
606 dpif_port
.name
= CONST_CAST(char *, netdev_name
);
607 dpif_port
.port_no
= port_no
;
608 netdev_ports_insert(netdev
, dpif_type_str
, &dpif_port
);
611 VLOG_WARN_RL(&error_rl
, "%s: failed to add %s as port: %s",
612 dpif_name(dpif
), netdev_name
, ovs_strerror(error
));
621 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
622 * otherwise a positive errno value. */
624 dpif_port_del(struct dpif
*dpif
, odp_port_t port_no
, bool local_delete
)
628 COVERAGE_INC(dpif_port_del
);
631 error
= dpif
->dpif_class
->port_del(dpif
, port_no
);
633 VLOG_DBG_RL(&dpmsg_rl
, "%s: port_del(%"PRIu32
")",
634 dpif_name(dpif
), port_no
);
636 log_operation(dpif
, "port_del", error
);
640 netdev_ports_remove(port_no
, dpif_normalize_type(dpif_type(dpif
)));
644 /* Makes a deep copy of 'src' into 'dst'. */
646 dpif_port_clone(struct dpif_port
*dst
, const struct dpif_port
*src
)
648 dst
->name
= xstrdup(src
->name
);
649 dst
->type
= xstrdup(src
->type
);
650 dst
->port_no
= src
->port_no
;
653 /* Frees memory allocated to members of 'dpif_port'.
655 * Do not call this function on a dpif_port obtained from
656 * dpif_port_dump_next(): that function retains ownership of the data in the
659 dpif_port_destroy(struct dpif_port
*dpif_port
)
661 free(dpif_port
->name
);
662 free(dpif_port
->type
);
665 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
666 * true; otherwise, returns false. */
668 dpif_port_exists(const struct dpif
*dpif
, const char *devname
)
670 int error
= dpif
->dpif_class
->port_query_by_name(dpif
, devname
, NULL
);
671 if (error
!= 0 && error
!= ENODEV
) {
672 VLOG_WARN_RL(&error_rl
, "%s: failed to query port %s: %s",
673 dpif_name(dpif
), devname
, ovs_strerror(error
));
679 /* Refreshes configuration of 'dpif's port. */
681 dpif_port_set_config(struct dpif
*dpif
, odp_port_t port_no
,
682 const struct smap
*cfg
)
686 if (dpif
->dpif_class
->port_set_config
) {
687 error
= dpif
->dpif_class
->port_set_config(dpif
, port_no
, cfg
);
689 log_operation(dpif
, "port_set_config", error
);
696 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
697 * initializes '*port' appropriately; on failure, returns a positive errno
700  * Returns ENODEV if the port doesn't exist.
702 * The caller owns the data in 'port' and must free it with
703 * dpif_port_destroy() when it is no longer needed. */
705 dpif_port_query_by_number(const struct dpif
*dpif
, odp_port_t port_no
,
706 struct dpif_port
*port
)
708 int error
= dpif
->dpif_class
->port_query_by_number(dpif
, port_no
, port
);
710 VLOG_DBG_RL(&dpmsg_rl
, "%s: port %"PRIu32
" is device %s",
711 dpif_name(dpif
), port_no
, port
->name
);
713 memset(port
, 0, sizeof *port
);
714 VLOG_WARN_RL(&error_rl
, "%s: failed to query port %"PRIu32
": %s",
715 dpif_name(dpif
), port_no
, ovs_strerror(error
));
720 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
721 * initializes '*port' appropriately; on failure, returns a positive errno
724  * Returns ENODEV if the port doesn't exist.
726 * The caller owns the data in 'port' and must free it with
727 * dpif_port_destroy() when it is no longer needed. */
729 dpif_port_query_by_name(const struct dpif
*dpif
, const char *devname
,
730 struct dpif_port
*port
)
732 int error
= dpif
->dpif_class
->port_query_by_name(dpif
, devname
, port
);
734 VLOG_DBG_RL(&dpmsg_rl
, "%s: device %s is on port %"PRIu32
,
735 dpif_name(dpif
), devname
, port
->port_no
);
737 memset(port
, 0, sizeof *port
);
739 /* For ENODEV we use DBG level because the caller is probably
740 * interested in whether 'dpif' actually has a port 'devname', so that
741 * it's not an issue worth logging if it doesn't. Other errors are
742 * uncommon and more likely to indicate a real problem. */
743 VLOG_RL(&error_rl
, error
== ENODEV
? VLL_DBG
: VLL_WARN
,
744 "%s: failed to query port %s: %s",
745 dpif_name(dpif
), devname
, ovs_strerror(error
));
750 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
751 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
752 * flows whose packets arrived on port 'port_no'.
754 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
755 * allocated to any port, that the client may use for special purposes.
757 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
758 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
759 * disabled and then re-enabled, so a client that does that must be prepared to
760 * update all of the flows that it installed that contain
761 * OVS_ACTION_ATTR_USERSPACE actions. */
763 dpif_port_get_pid(const struct dpif
*dpif
, odp_port_t port_no
)
765 return (dpif
->dpif_class
->port_get_pid
766 ? (dpif
->dpif_class
->port_get_pid
)(dpif
, port_no
)
770 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
771 * the port's name into the 'name_size' bytes in 'name', ensuring that the
772 * result is null-terminated. On failure, returns a positive errno value and
773 * makes 'name' the empty string. */
775 dpif_port_get_name(struct dpif
*dpif
, odp_port_t port_no
,
776 char *name
, size_t name_size
)
778 struct dpif_port port
;
781 ovs_assert(name_size
> 0);
783 error
= dpif_port_query_by_number(dpif
, port_no
, &port
);
785 ovs_strlcpy(name
, port
.name
, name_size
);
786 dpif_port_destroy(&port
);
793 /* Initializes 'dump' to begin dumping the ports in a dpif.
795 * This function provides no status indication. An error status for the entire
796 * dump operation is provided when it is completed by calling
797 * dpif_port_dump_done().
800 dpif_port_dump_start(struct dpif_port_dump
*dump
, const struct dpif
*dpif
)
803 dump
->error
= dpif
->dpif_class
->port_dump_start(dpif
, &dump
->state
);
804 log_operation(dpif
, "port_dump_start", dump
->error
);
807 /* Attempts to retrieve another port from 'dump', which must have been
808 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
809 * into 'port' and returns true. On failure, returns false.
811 * Failure might indicate an actual error or merely that the last port has been
812 * dumped. An error status for the entire dump operation is provided when it
813 * is completed by calling dpif_port_dump_done().
815 * The dpif owns the data stored in 'port'. It will remain valid until at
816 * least the next time 'dump' is passed to dpif_port_dump_next() or
817 * dpif_port_dump_done(). */
819 dpif_port_dump_next(struct dpif_port_dump
*dump
, struct dpif_port
*port
)
821 const struct dpif
*dpif
= dump
->dpif
;
827 dump
->error
= dpif
->dpif_class
->port_dump_next(dpif
, dump
->state
, port
);
828 if (dump
->error
== EOF
) {
829 VLOG_DBG_RL(&dpmsg_rl
, "%s: dumped all ports", dpif_name(dpif
));
831 log_operation(dpif
, "port_dump_next", dump
->error
);
835 dpif
->dpif_class
->port_dump_done(dpif
, dump
->state
);
841 /* Completes port table dump operation 'dump', which must have been initialized
842 * with dpif_port_dump_start(). Returns 0 if the dump operation was
843 * error-free, otherwise a positive errno value describing the problem. */
845 dpif_port_dump_done(struct dpif_port_dump
*dump
)
847 const struct dpif
*dpif
= dump
->dpif
;
849 dump
->error
= dpif
->dpif_class
->port_dump_done(dpif
, dump
->state
);
850 log_operation(dpif
, "port_dump_done", dump
->error
);
852 return dump
->error
== EOF
? 0 : dump
->error
;
855 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
856 * 'dpif' has changed, this function does one of the following:
858 * - Stores the name of the device that was added to or deleted from 'dpif' in
859 * '*devnamep' and returns 0. The caller is responsible for freeing
860 * '*devnamep' (with free()) when it no longer needs it.
862 * - Returns ENOBUFS and sets '*devnamep' to NULL.
864 * This function may also return 'false positives', where it returns 0 and
865 * '*devnamep' names a device that was not actually added or deleted or it
866 * returns ENOBUFS without any change.
868 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
869 * return other positive errno values to indicate that something has gone
872 dpif_port_poll(const struct dpif
*dpif
, char **devnamep
)
874 int error
= dpif
->dpif_class
->port_poll(dpif
, devnamep
);
881 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
882 * value other than EAGAIN. */
884 dpif_port_poll_wait(const struct dpif
*dpif
)
886 dpif
->dpif_class
->port_poll_wait(dpif
);
889 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
890 * arguments must have been initialized through a call to flow_extract().
891 * 'used' is stored into stats->used. */
893 dpif_flow_stats_extract(const struct flow
*flow
, const struct dp_packet
*packet
,
894 long long int used
, struct dpif_flow_stats
*stats
)
896 stats
->tcp_flags
= ntohs(flow
->tcp_flags
);
897 stats
->n_bytes
= dp_packet_size(packet
);
898 stats
->n_packets
= 1;
902 /* Appends a human-readable representation of 'stats' to 's'. */
904 dpif_flow_stats_format(const struct dpif_flow_stats
*stats
, struct ds
*s
)
906 ds_put_format(s
, "packets:%"PRIu64
", bytes:%"PRIu64
", used:",
907 stats
->n_packets
, stats
->n_bytes
);
909 ds_put_format(s
, "%.3fs", (time_msec() - stats
->used
) / 1000.0);
911 ds_put_format(s
, "never");
913 if (stats
->tcp_flags
) {
914 ds_put_cstr(s
, ", flags:");
915 packet_format_tcp_flags(s
, stats
->tcp_flags
);
919 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
920 * positive errno value. */
922 dpif_flow_flush(struct dpif
*dpif
)
926 COVERAGE_INC(dpif_flow_flush
);
928 error
= dpif
->dpif_class
->flow_flush(dpif
);
929 log_operation(dpif
, "flow_flush", error
);
933 /* Attempts to install 'key' into the datapath, fetches it, then deletes it.
934 * Returns true if the datapath supported installing 'flow', false otherwise.
937 dpif_probe_feature(struct dpif
*dpif
, const char *name
,
938 const struct ofpbuf
*key
, const struct ofpbuf
*actions
,
939 const ovs_u128
*ufid
)
941 struct dpif_flow flow
;
943 uint64_t stub
[DPIF_FLOW_BUFSIZE
/ 8];
944 bool enable_feature
= false;
946 const struct nlattr
*nl_actions
= actions
? actions
->data
: NULL
;
947 const size_t nl_actions_size
= actions
? actions
->size
: 0;
949 /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
950 * restarted) at just the right time such that feature probes from the
951 * previous run are still present in the datapath. */
952 error
= dpif_flow_put(dpif
, DPIF_FP_CREATE
| DPIF_FP_MODIFY
| DPIF_FP_PROBE
,
953 key
->data
, key
->size
, NULL
, 0,
954 nl_actions
, nl_actions_size
,
955 ufid
, NON_PMD_CORE_ID
, NULL
);
957 if (error
!= EINVAL
&& error
!= EOVERFLOW
) {
958 VLOG_WARN("%s: %s flow probe failed (%s)",
959 dpif_name(dpif
), name
, ovs_strerror(error
));
964 ofpbuf_use_stack(&reply
, &stub
, sizeof stub
);
965 error
= dpif_flow_get(dpif
, key
->data
, key
->size
, ufid
,
966 NON_PMD_CORE_ID
, &reply
, &flow
);
968 && (!ufid
|| (flow
.ufid_present
969 && ovs_u128_equals(*ufid
, flow
.ufid
)))) {
970 enable_feature
= true;
973 error
= dpif_flow_del(dpif
, key
->data
, key
->size
, ufid
,
974 NON_PMD_CORE_ID
, NULL
);
976 VLOG_WARN("%s: failed to delete %s feature probe flow",
977 dpif_name(dpif
), name
);
980 return enable_feature
;
983 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
985 dpif_flow_get(struct dpif
*dpif
,
986 const struct nlattr
*key
, size_t key_len
, const ovs_u128
*ufid
,
987 const unsigned pmd_id
, struct ofpbuf
*buf
, struct dpif_flow
*flow
)
992 op
.type
= DPIF_OP_FLOW_GET
;
993 op
.flow_get
.key
= key
;
994 op
.flow_get
.key_len
= key_len
;
995 op
.flow_get
.ufid
= ufid
;
996 op
.flow_get
.pmd_id
= pmd_id
;
997 op
.flow_get
.buffer
= buf
;
999 memset(flow
, 0, sizeof *flow
);
1000 op
.flow_get
.flow
= flow
;
1001 op
.flow_get
.flow
->key
= key
;
1002 op
.flow_get
.flow
->key_len
= key_len
;
1005 dpif_operate(dpif
, &opp
, 1, DPIF_OFFLOAD_AUTO
);
1010 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
1012 dpif_flow_put(struct dpif
*dpif
, enum dpif_flow_put_flags flags
,
1013 const struct nlattr
*key
, size_t key_len
,
1014 const struct nlattr
*mask
, size_t mask_len
,
1015 const struct nlattr
*actions
, size_t actions_len
,
1016 const ovs_u128
*ufid
, const unsigned pmd_id
,
1017 struct dpif_flow_stats
*stats
)
1019 struct dpif_op
*opp
;
1022 op
.type
= DPIF_OP_FLOW_PUT
;
1023 op
.flow_put
.flags
= flags
;
1024 op
.flow_put
.key
= key
;
1025 op
.flow_put
.key_len
= key_len
;
1026 op
.flow_put
.mask
= mask
;
1027 op
.flow_put
.mask_len
= mask_len
;
1028 op
.flow_put
.actions
= actions
;
1029 op
.flow_put
.actions_len
= actions_len
;
1030 op
.flow_put
.ufid
= ufid
;
1031 op
.flow_put
.pmd_id
= pmd_id
;
1032 op
.flow_put
.stats
= stats
;
1035 dpif_operate(dpif
, &opp
, 1, DPIF_OFFLOAD_AUTO
);
1040 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
1042 dpif_flow_del(struct dpif
*dpif
,
1043 const struct nlattr
*key
, size_t key_len
, const ovs_u128
*ufid
,
1044 const unsigned pmd_id
, struct dpif_flow_stats
*stats
)
1046 struct dpif_op
*opp
;
1049 op
.type
= DPIF_OP_FLOW_DEL
;
1050 op
.flow_del
.key
= key
;
1051 op
.flow_del
.key_len
= key_len
;
1052 op
.flow_del
.ufid
= ufid
;
1053 op
.flow_del
.pmd_id
= pmd_id
;
1054 op
.flow_del
.stats
= stats
;
1055 op
.flow_del
.terse
= false;
1058 dpif_operate(dpif
, &opp
, 1, DPIF_OFFLOAD_AUTO
);
1063 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1064 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1065 * be returned in the dump. Otherwise, all fields will be returned.
1067 * This function always successfully returns a dpif_flow_dump. Error
1068 * reporting is deferred to dpif_flow_dump_destroy(). */
1069 struct dpif_flow_dump
*
1070 dpif_flow_dump_create(const struct dpif
*dpif
, bool terse
,
1071 struct dpif_flow_dump_types
*types
)
1073 return dpif
->dpif_class
->flow_dump_create(dpif
, terse
, types
);
1076 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1077 * All dpif_flow_dump_thread structures previously created for 'dump' must
1078 * previously have been destroyed.
1080 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1081 * value describing the problem. */
1083 dpif_flow_dump_destroy(struct dpif_flow_dump
*dump
)
1085 const struct dpif
*dpif
= dump
->dpif
;
1086 int error
= dpif
->dpif_class
->flow_dump_destroy(dump
);
1087 log_operation(dpif
, "flow_dump_destroy", error
);
1088 return error
== EOF
? 0 : error
;
1091 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1092 struct dpif_flow_dump_thread
*
1093 dpif_flow_dump_thread_create(struct dpif_flow_dump
*dump
)
1095 return dump
->dpif
->dpif_class
->flow_dump_thread_create(dump
);
1098 /* Releases 'thread'. */
1100 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread
*thread
)
1102 thread
->dpif
->dpif_class
->flow_dump_thread_destroy(thread
);
/* Attempts to retrieve up to 'max_flows' more flows from 'thread'.  Returns 0
 * if and only if no flows remained to be retrieved, otherwise a positive
 * number reflecting the number of elements in 'flows[]' that were updated.
 * The number of flows returned might be less than 'max_flows' because
 * fewer than 'max_flows' remained, because this particular datapath does not
 * benefit from batching, or because an error occurred partway through
 * retrieval.  Thus, the caller should continue calling until a 0 return value,
 * even if intermediate return values are less than 'max_flows'.
 *
 * No error status is immediately provided.  An error status for the entire
 * dump operation is provided when it is completed by calling
 * dpif_flow_dump_destroy().
 *
 * All of the data stored into 'flows' is owned by the datapath, not by the
 * caller, and the caller must not modify or free it.  The datapath guarantees
 * that it remains accessible and unchanged until the first of:
 *  - The next call to dpif_flow_dump_next() for 'thread', or
 *  - The next rcu quiescent period. */
int
dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
                    struct dpif_flow *flows, int max_flows)
{
    struct dpif *dpif = thread->dpif;
    int n;

    ovs_assert(max_flows > 0);
    n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
    if (n > 0) {
        struct dpif_flow *f;

        /* Log each returned flow, but stop logging as soon as the
         * rate-limiter says to drop, to avoid formatting work that would be
         * discarded anyway. */
        for (f = flows; f < &flows[n]
             && should_log_flow_message(&this_module, 0); f++) {
            log_flow_message(dpif, 0, &this_module, "flow_dump",
                             f->key, f->key_len, f->mask, f->mask_len,
                             &f->ufid, &f->stats, f->actions, f->actions_len);
        }
    } else {
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
    }
    return n;
}
/* Context carried through odp_execute_actions() callbacks while helping a
 * dpif execute actions it cannot handle itself (see
 * dpif_execute_with_help()). */
struct dpif_execute_helper_aux {
    struct dpif *dpif;           /* Datapath to execute helped actions on. */
    const struct flow *flow;     /* Flow associated with the packet. */
    int error;                   /* First error from a helped execution. */
    const struct nlattr *meter_action; /* Non-NULL, if have a meter action. */
};
/* This is called for actions that need the context of the datapath to be
 * meaningfully executed: output, tunnel push/pop, userspace, recirc, ct, and
 * any meters collected since the previous datapath execution.  It builds a
 * single-action (plus collected meters / tunnel metadata) dpif_execute and
 * hands it to the datapath via dpif_execute(). */
static void
dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
                       const struct nlattr *action, bool should_steal)
{
    struct dpif_execute_helper_aux *aux = aux_;
    int type = nl_attr_type(action);
    struct dp_packet *packet = packets_->packets[0];

    /* The helper path only ever executes one packet at a time. */
    ovs_assert(dp_packet_batch_size(packets_) == 1);

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_METER:
        /* Maintain a pointer to the first meter action seen. */
        if (!aux->meter_action) {
            aux->meter_action = action;
        }
        break;

    case OVS_ACTION_ATTR_CT:
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_LB_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC: {
        struct dpif_execute execute;
        struct ofpbuf execute_actions;
        uint64_t stub[256 / 8];
        struct pkt_metadata *md = &packet->md;

        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_use_stub(&execute_actions, stub, sizeof stub);

            if (aux->meter_action) {
                const struct nlattr *a = aux->meter_action;

                /* XXX: This code collects meter actions since the last action
                 * execution via the datapath to be executed right before the
                 * current action that needs to be executed by the datapath.
                 * This is only an approximation, but better than nothing.
                 * Fundamentally, we should have a mechanism by which the
                 * datapath could return the result of the meter action so that
                 * we could execute them at the right order. */
                do {
                    ofpbuf_put(&execute_actions, a, NLA_ALIGN(a->nla_len));
                    /* Find next meter action before 'action', if any. */
                    do {
                        a = nl_attr_next(a);
                    } while (a != action &&
                             nl_attr_type(a) != OVS_ACTION_ATTR_METER);
                } while (a != action);
            }

            /* The Linux kernel datapath throws away the tunnel information
             * that we supply as metadata.  We have to use a "set" action to
             * supply it. */
            if (md->tunnel.ip_dst) {
                odp_put_tunnel_action(&md->tunnel, &execute_actions, NULL);
            }
            ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));

            execute.actions = execute_actions.data;
            execute.actions_len = execute_actions.size;
        } else {
            execute.actions = action;
            execute.actions_len = NLA_ALIGN(action->nla_len);
        }

        struct dp_packet *clone = NULL;
        uint32_t cutlen = dp_packet_get_cutlen(packet);
        /* Apply any pending truncation before handing the packet to the
         * datapath, cloning first if we do not own the packet. */
        if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
                       || type == OVS_ACTION_ATTR_LB_OUTPUT
                       || type == OVS_ACTION_ATTR_TUNNEL_PUSH
                       || type == OVS_ACTION_ATTR_TUNNEL_POP
                       || type == OVS_ACTION_ATTR_USERSPACE)) {
            dp_packet_reset_cutlen(packet);
            if (!should_steal) {
                packet = clone = dp_packet_clone(packet);
            }
            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
        }

        execute.packet = packet;
        execute.flow = aux->flow;
        execute.needs_help = false;
        execute.probe = false;
        execute.mtu = 0;
        aux->error = dpif_execute(aux->dpif, &execute);
        log_execute_message(aux->dpif, &this_module, &execute,
                            true, aux->error);

        dp_packet_delete(clone);

        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_uninit(&execute_actions);

            /* Do not re-use the same meters for later output actions. */
            aux->meter_action = NULL;
        }
        break;
    }

    /* Everything below is executed in userspace by odp-execute itself and
     * must never reach this callback. */
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_PUSH_ETH:
    case OVS_ACTION_ATTR_POP_ETH:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_PUSH_NSH:
    case OVS_ACTION_ATTR_POP_NSH:
    case OVS_ACTION_ATTR_CT_CLEAR:
    case OVS_ACTION_ATTR_UNSPEC:
    case OVS_ACTION_ATTR_CHECK_PKT_LEN:
    case OVS_ACTION_ATTR_DROP:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
    dp_packet_delete_batch(packets_, should_steal);
}
1282 /* Executes 'execute' by performing most of the actions in userspace and
1283 * passing the fully constructed packets to 'dpif' for output and userspace
1286 * This helps with actions that a given 'dpif' doesn't implement directly. */
1288 dpif_execute_with_help(struct dpif
*dpif
, struct dpif_execute
*execute
)
1290 struct dpif_execute_helper_aux aux
= {dpif
, execute
->flow
, 0, NULL
};
1291 struct dp_packet_batch pb
;
1293 COVERAGE_INC(dpif_execute_with_help
);
1295 dp_packet_batch_init_packet(&pb
, execute
->packet
);
1296 odp_execute_actions(&aux
, &pb
, false, execute
->actions
,
1297 execute
->actions_len
, dpif_execute_helper_cb
);
1301 /* Returns true if the datapath needs help executing 'execute'. */
1303 dpif_execute_needs_help(const struct dpif_execute
*execute
)
1305 return execute
->needs_help
|| nl_attr_oversized(execute
->actions_len
);
1308 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1310 dpif_execute(struct dpif
*dpif
, struct dpif_execute
*execute
)
1312 if (execute
->actions_len
) {
1313 struct dpif_op
*opp
;
1316 op
.type
= DPIF_OP_EXECUTE
;
1317 op
.execute
= *execute
;
1320 dpif_operate(dpif
, &opp
, 1, DPIF_OFFLOAD_AUTO
);
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
 * which they are specified.  Places each operation's results in the "output"
 * members documented in comments, and 0 in the 'error' member on success or a
 * positive errno on failure. */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
             enum dpif_offload_type offload_type)
{
    /* Offload was demanded but the flow API is off: fail every op. */
    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        size_t i;

        for (i = 0; i < n_ops; i++) {
            struct dpif_op *op = ops[i];

            op->error = EINVAL;
        }
        return;
    }

    while (n_ops > 0) {
        size_t chunk;

        /* Count 'chunk', the number of ops that can be executed without
         * needing any help.  Ops that need help should be rare, so we
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
        for (chunk = 0; chunk < n_ops; chunk++) {
            struct dpif_op *op = ops[chunk];

            if (op->type == DPIF_OP_EXECUTE
                && dpif_execute_needs_help(&op->execute)) {
                break;
            }
        }

        if (chunk) {
            /* Execute a chunk full of ops that the dpif provider can
             * handle itself, without help. */
            size_t i;

            dpif->dpif_class->operate(dpif, ops, chunk, offload_type);

            /* Post-process each completed op: bump coverage counters, log,
             * and zero out output stats/flows on error. */
            for (i = 0; i < chunk; i++) {
                struct dpif_op *op = ops[i];
                int error = op->error;

                switch (op->type) {
                case DPIF_OP_FLOW_PUT: {
                    struct dpif_flow_put *put = &op->flow_put;

                    COVERAGE_INC(dpif_flow_put);
                    log_flow_put_message(dpif, &this_module, put, error);
                    if (error && put->stats) {
                        memset(put->stats, 0, sizeof *put->stats);
                    }
                    break;
                }

                case DPIF_OP_FLOW_GET: {
                    struct dpif_flow_get *get = &op->flow_get;

                    COVERAGE_INC(dpif_flow_get);
                    if (error) {
                        memset(get->flow, 0, sizeof *get->flow);
                    }
                    log_flow_get_message(dpif, &this_module, get, error);

                    break;
                }

                case DPIF_OP_FLOW_DEL: {
                    struct dpif_flow_del *del = &op->flow_del;

                    COVERAGE_INC(dpif_flow_del);
                    log_flow_del_message(dpif, &this_module, del, error);
                    if (error && del->stats) {
                        memset(del->stats, 0, sizeof *del->stats);
                    }
                    break;
                }

                case DPIF_OP_EXECUTE:
                    COVERAGE_INC(dpif_execute);
                    log_execute_message(dpif, &this_module, &op->execute,
                                        false, error);
                    break;
                }
            }

            ops += chunk;
            n_ops -= chunk;
        } else {
            /* Help the dpif provider to execute one op. */
            struct dpif_op *op = ops[0];

            COVERAGE_INC(dpif_execute);
            op->error = dpif_execute_with_help(dpif, &op->execute);
            ops++;
            n_ops--;
        }
    }
}
1429 /* Returns a string that represents 'type', for use in log messages. */
1431 dpif_upcall_type_to_string(enum dpif_upcall_type type
)
1434 case DPIF_UC_MISS
: return "miss";
1435 case DPIF_UC_ACTION
: return "action";
1436 case DPIF_N_UC_TYPES
: default: return "<unknown>";
1440 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1441 * if successful, otherwise a positive errno value.
1443 * Turning packet receive off and then back on may change the Netlink PID
1444 * assignments returned by dpif_port_get_pid(). If the client does this, it
1445 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1446 * using the new PID assignment. */
1448 dpif_recv_set(struct dpif
*dpif
, bool enable
)
1452 if (dpif
->dpif_class
->recv_set
) {
1453 error
= dpif
->dpif_class
->recv_set(dpif
, enable
);
1454 log_operation(dpif
, "recv_set", error
);
1459 /* Refreshes the poll loops and Netlink sockets associated to each port,
1460 * when the number of upcall handlers (upcall receiving thread) is changed
1461 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1464 * Since multiple upcall handlers can read upcalls simultaneously from
1465 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1466 * handler. So, handlers_set() is responsible for the following tasks:
1468 * When receiving upcall is enabled, extends or creates the
1469 * configuration to support:
1471 * - 'n_handlers' Netlink sockets for each port.
1473 * - 'n_handlers' poll loops, one for each upcall handler.
1475 * - registering the Netlink sockets for the same upcall handler to
1476 * the corresponding poll loop.
1478 * Returns 0 if successful, otherwise a positive errno value. */
1480 dpif_handlers_set(struct dpif
*dpif
, uint32_t n_handlers
)
1484 if (dpif
->dpif_class
->handlers_set
) {
1485 error
= dpif
->dpif_class
->handlers_set(dpif
, n_handlers
);
1486 log_operation(dpif
, "handlers_set", error
);
1492 dpif_register_dp_purge_cb(struct dpif
*dpif
, dp_purge_callback
*cb
, void *aux
)
1494 if (dpif
->dpif_class
->register_dp_purge_cb
) {
1495 dpif
->dpif_class
->register_dp_purge_cb(dpif
, cb
, aux
);
1500 dpif_register_upcall_cb(struct dpif
*dpif
, upcall_callback
*cb
, void *aux
)
1502 if (dpif
->dpif_class
->register_upcall_cb
) {
1503 dpif
->dpif_class
->register_upcall_cb(dpif
, cb
, aux
);
1508 dpif_enable_upcall(struct dpif
*dpif
)
1510 if (dpif
->dpif_class
->enable_upcall
) {
1511 dpif
->dpif_class
->enable_upcall(dpif
);
1516 dpif_disable_upcall(struct dpif
*dpif
)
1518 if (dpif
->dpif_class
->disable_upcall
) {
1519 dpif
->dpif_class
->disable_upcall(dpif
);
/* Logs 'upcall' received on 'dpif' at debug level, including the packet
 * contents and the formatted flow key.  Subject to 'dpmsg_rl' rate
 * limiting. */
static void
dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
{
    if (!VLOG_DROP_DBG(&dpmsg_rl)) {
        struct ds flow;
        char *packet;

        /* 'packet' is heap-allocated by the formatter; freed below. */
        packet = ofp_dp_packet_to_string(&upcall->packet);

        ds_init(&flow);
        odp_flow_key_format(upcall->key, upcall->key_len, &flow);

        VLOG_DBG("%s: %s upcall:\n%s\n%s",
                 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
                 ds_cstr(&flow), packet);

        ds_destroy(&flow);
        free(packet);
    }
}
/* Pass custom configuration to the datapath implementation.  Some of the
 * changes can be postponed until dpif_run() is called.
 *
 * Returns 0 if successful (or if the provider has no set_config method),
 * otherwise a positive errno value. */
int
dpif_set_config(struct dpif *dpif, const struct smap *cfg)
{
    int error = 0;

    if (dpif->dpif_class->set_config) {
        error = dpif->dpif_class->set_config(dpif, cfg);
        if (error) {
            log_operation(dpif, "set_config", error);
        }
    }

    return error;
}
/* Polls for an upcall from 'dpif' for an upcall handler.  Since there can
 * be multiple poll loops, 'handler_id' is needed as index to identify the
 * corresponding poll loop.  If successful, stores the upcall into '*upcall',
 * using 'buf' for storage.  Should only be called if 'recv_set' has been used
 * to enable receiving packets from 'dpif'.
 *
 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
 * 'buf', so their memory cannot be freed separately from 'buf'.
 *
 * The caller owns the data of 'upcall->packet' and may modify it.  If
 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
 * will be reallocated.  This requires the data of 'upcall->packet' to be
 * released with ofpbuf_uninit() before 'upcall' is destroyed.  However,
 * when an error is returned, the 'upcall->packet' may be uninitialized
 * and should not be released.
 *
 * Returns 0 if successful, otherwise a positive errno value.  Returns EAGAIN
 * if no upcall is immediately available. */
int
dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
          struct ofpbuf *buf)
{
    int error = EAGAIN;

    if (dpif->dpif_class->recv) {
        error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
        if (!error) {
            dpif_print_packet(dpif, upcall);
        } else if (error != EAGAIN) {
            /* EAGAIN is the normal "nothing to read" result; only log real
             * failures. */
            log_operation(dpif, "recv", error);
        }
    }
    return error;
}
1596 /* Discards all messages that would otherwise be received by dpif_recv() on
1599 dpif_recv_purge(struct dpif
*dpif
)
1601 COVERAGE_INC(dpif_purge
);
1602 if (dpif
->dpif_class
->recv_purge
) {
1603 dpif
->dpif_class
->recv_purge(dpif
);
1607 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1608 * 'dpif' has a message queued to be received with the recv member
1609 * function. Since there can be multiple poll loops, 'handler_id' is
1610 * needed as index to identify the corresponding poll loop. */
1612 dpif_recv_wait(struct dpif
*dpif
, uint32_t handler_id
)
1614 if (dpif
->dpif_class
->recv_wait
) {
1615 dpif
->dpif_class
->recv_wait(dpif
, handler_id
);
1620 * Return the datapath version. Caller is responsible for freeing
1624 dpif_get_dp_version(const struct dpif
*dpif
)
1626 char *version
= NULL
;
1628 if (dpif
->dpif_class
->get_datapath_version
) {
1629 version
= dpif
->dpif_class
->get_datapath_version();
1635 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1636 * and '*engine_id', respectively. */
1638 dpif_get_netflow_ids(const struct dpif
*dpif
,
1639 uint8_t *engine_type
, uint8_t *engine_id
)
1641 *engine_type
= dpif
->netflow_engine_type
;
1642 *engine_id
= dpif
->netflow_engine_id
;
/* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
 * value used for setting packet priority.
 * On success, returns 0 and stores the priority into '*priority'.
 * On failure, returns a positive errno value and stores 0 into '*priority'. */
int
dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
                       uint32_t *priority)
{
    /* EOPNOTSUPP when the provider has no queue-to-priority mapping. */
    int error = (dpif->dpif_class->queue_to_priority
                 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
                                                       priority)
                 : EOPNOTSUPP);
    if (error) {
        *priority = 0;
    }
    log_operation(dpif, "queue_to_priority", error);
    return error;
}
/* Initializes the base 'dpif' structure on behalf of a dpif provider:
 * records the class, copies 'name' as the base name, builds the
 * "type@name" full name, and stores the NetFlow engine identifiers. */
void
dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
          const char *name,
          uint8_t netflow_engine_type, uint8_t netflow_engine_id)
{
    dpif->dpif_class = dpif_class;
    dpif->base_name = xstrdup(name);
    dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
    dpif->netflow_engine_type = netflow_engine_type;
    dpif->netflow_engine_id = netflow_engine_id;
}
/* Undoes the results of initialization.
 *
 * Normally this function only needs to be called from dpif_close().
 * However, it may be called by providers due to an error on opening
 * that occurs after initialization.  It this case dpif_close() would
 * never be called. */
void
dpif_uninit(struct dpif *dpif, bool close)
{
    /* Save the names before close() may free 'dpif' itself. */
    char *base_name = dpif->base_name;
    char *full_name = dpif->full_name;

    if (close) {
        dpif->dpif_class->close(dpif);
    }

    free(base_name);
    free(full_name);
}
/* Logs the outcome of 'operation' on 'dpif': success at debug level,
 * failure at warning level with the error decoded either as an OpenFlow
 * error or as an errno value. */
static void
log_operation(const struct dpif *dpif, const char *operation, int error)
{
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
    } else if (ofperr_is_valid(error)) {
        VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
                     dpif_name(dpif), operation, ofperr_get_name(error));
    } else {
        VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
                     dpif_name(dpif), operation, ovs_strerror(error));
    }
}
1710 static enum vlog_level
1711 flow_message_log_level(int error
)
1713 /* If flows arrive in a batch, userspace may push down multiple
1714 * unique flow definitions that overlap when wildcards are applied.
1715 * Kernels that support flow wildcarding will reject these flows as
1716 * duplicates (EEXIST), so lower the log level to debug for these
1717 * types of messages. */
1718 return (error
&& error
!= EEXIST
) ? VLL_WARN
: VLL_DBG
;
1722 should_log_flow_message(const struct vlog_module
*module
, int error
)
1724 return !vlog_should_drop(module
, flow_message_log_level(error
),
1725 error
? &error_rl
: &dpmsg_rl
);
/* Formats and logs one flow operation ('operation', e.g. "put" or
 * "flow_del") on 'dpif' that completed with 'error' (0 for success).
 * Any of 'ufid', 'stats', and 'actions' may be null/empty to omit that
 * portion of the message.  Logged through 'module' at the level chosen by
 * flow_message_log_level(). */
static void
log_flow_message(const struct dpif *dpif, int error,
                 const struct vlog_module *module,
                 const char *operation,
                 const struct nlattr *key, size_t key_len,
                 const struct nlattr *mask, size_t mask_len,
                 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
                 const struct nlattr *actions, size_t actions_len)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s: ", dpif_name(dpif));
    if (error) {
        ds_put_cstr(&ds, "failed to ");
    }
    ds_put_format(&ds, "%s ", operation);
    if (error) {
        ds_put_format(&ds, "(%s) ", ovs_strerror(error));
    }
    if (ufid) {
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
    }
    odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
    if (stats) {
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(stats, &ds);
    }
    if (actions || actions_len) {
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len, NULL);
    }
    vlog(module, flow_message_log_level(error), "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
/* Logs a flow "put" operation 'put' on 'dpif' that completed with 'error'
 * (0 for success).  Probe flows are never logged; the message is suppressed
 * by the usual flow-message rate limiting. */
static void
log_flow_put_message(const struct dpif *dpif,
                     const struct vlog_module *module,
                     const struct dpif_flow_put *put,
                     int error)
{
    if (should_log_flow_message(module, error)
        && !(put->flags & DPIF_FP_PROBE)) {
        struct ds s;

        ds_init(&s);
        /* Operation string reflects the put flags, e.g. "put[create]". */
        ds_put_cstr(&s, "put");
        if (put->flags & DPIF_FP_CREATE) {
            ds_put_cstr(&s, "[create]");
        }
        if (put->flags & DPIF_FP_MODIFY) {
            ds_put_cstr(&s, "[modify]");
        }
        if (put->flags & DPIF_FP_ZERO_STATS) {
            ds_put_cstr(&s, "[zero]");
        }
        log_flow_message(dpif, error, module, ds_cstr(&s),
                         put->key, put->key_len, put->mask, put->mask_len,
                         put->ufid, put->stats, put->actions,
                         put->actions_len);
        ds_destroy(&s);
    }
}
/* Logs a flow "del" operation 'del' on 'dpif' that completed with 'error'
 * (0 for success).  Stats are included only on success, since on failure
 * they were not retrieved. */
static void
log_flow_del_message(const struct dpif *dpif,
                     const struct vlog_module *module,
                     const struct dpif_flow_del *del,
                     int error)
{
    if (should_log_flow_message(module, error)) {
        log_flow_message(dpif, error, module, "flow_del",
                         del->key, del->key_len,
                         NULL, 0, del->ufid, !error ? del->stats : NULL,
                         NULL, 0);
    }
}
/* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
 * (0 for success).  'subexecute' should be true if the execution is a result
 * of breaking down a larger execution that needed help, false otherwise.
 *
 * XXX In theory, the log message could be deceptive because this function is
 * called after the dpif_provider's '->execute' function, which is allowed to
 * modify execute->packet and execute->md.  In practice, though:
 *
 *     - dpif-netlink doesn't modify execute->packet or execute->md.
 *
 *     - dpif-netdev does modify them but it is less likely to have problems
 *       because it is built into ovs-vswitchd and cannot have version skew,
 *       etc.
 *
 * It would still be better to avoid the potential problem.  I don't know of a
 * good way to do that, though, that isn't expensive. */
static void
log_execute_message(const struct dpif *dpif,
                    const struct vlog_module *module,
                    const struct dpif_execute *execute,
                    bool subexecute, int error)
{
    if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
        && !execute->probe) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        char *packet;
        uint64_t stub[1024 / 8];
        struct ofpbuf md = OFPBUF_STUB_INITIALIZER(stub);

        /* Heap-allocated by the formatter; freed below. */
        packet = ofp_packet_to_string(dp_packet_data(execute->packet),
                                      dp_packet_size(execute->packet),
                                      execute->packet->packet_type);
        odp_key_from_dp_packet(&md, execute->packet);
        ds_put_format(&ds, "%s: %sexecute ",
                      dpif_name(dpif),
                      (subexecute ? "sub-"
                       : dpif_execute_needs_help(execute) ? "super-"
                       : ""));
        format_odp_actions(&ds, execute->actions, execute->actions_len, NULL);
        if (error) {
            ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
        }
        ds_put_format(&ds, " on packet %s", packet);
        ds_put_format(&ds, " with metadata ");
        odp_flow_format(md.data, md.size, NULL, 0, NULL, &ds, true);
        ds_put_format(&ds, " mtu %d", execute->mtu);
        vlog(module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
        free(packet);
        ofpbuf_uninit(&md);
    }
}
/* Logs a flow "get" operation 'get' on 'dpif' that completed with 'error'
 * (0 for success).  Mask, stats, and actions come from the retrieved
 * 'get->flow'. */
static void
log_flow_get_message(const struct dpif *dpif,
                     const struct vlog_module *module,
                     const struct dpif_flow_get *get,
                     int error)
{
    if (should_log_flow_message(module, error)) {
        log_flow_message(dpif, error, module, "flow_get",
                         get->key, get->key_len,
                         get->flow->mask, get->flow->mask_len,
                         get->ufid, &get->flow->stats,
                         get->flow->actions, get->flow->actions_len);
    }
}
1876 dpif_supports_tnl_push_pop(const struct dpif
*dpif
)
1878 return dpif_is_netdev(dpif
);
1882 dpif_supports_explicit_drop_action(const struct dpif
*dpif
)
1884 return dpif_is_netdev(dpif
);
1888 dpif_supports_lb_output_action(const struct dpif
*dpif
)
1891 * Balance-tcp optimization is currently supported in netdev
1894 return dpif_is_netdev(dpif
);
1899 dpif_meter_get_features(const struct dpif
*dpif
,
1900 struct ofputil_meter_features
*features
)
1902 memset(features
, 0, sizeof *features
);
1903 if (dpif
->dpif_class
->meter_get_features
) {
1904 dpif
->dpif_class
->meter_get_features(dpif
, features
);
/* Adds or modifies the meter in 'dpif' with the given 'meter_id' and
 * the configuration in 'config'.
 *
 * The meter id specified through 'config->meter_id' is ignored.
 *
 * Returns 0 on success; EBADF for a missing or conflicting rate unit,
 * EINVAL for an empty band list, EDOM for a zero band rate, or the
 * provider's error. */
int
dpif_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
               struct ofputil_meter_config *config)
{
    COVERAGE_INC(dpif_meter_set);

    /* Exactly one of KBPS/PKTPS must be set. */
    if (!(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
        return EBADF; /* Rate unit type not set. */
    }

    if ((config->flags & OFPMF13_KBPS) && (config->flags & OFPMF13_PKTPS)) {
        return EBADF; /* Both rate units may not be set. */
    }

    if (config->n_bands == 0) {
        return EINVAL;
    }

    for (size_t i = 0; i < config->n_bands; i++) {
        if (config->bands[i].rate == 0) {
            return EDOM; /* Rate must be non-zero */
        }
    }

    int error = dpif->dpif_class->meter_set(dpif, meter_id, config);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
                    dpif_name(dpif), meter_id.uint32);
    } else {
        VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
                     dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
    }
    return error;
}
/* Queries 'dpif' for the statistics of the meter with the given 'meter_id',
 * storing up to 'n_bands' band statistics into '*stats'.  On failure the
 * aggregate counters are set to all-ones so stale values are not mistaken
 * for real data.  Returns 0 on success, otherwise a positive errno value. */
int
dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
               struct ofputil_meter_stats *stats, uint16_t n_bands)
{
    int error;

    COVERAGE_INC(dpif_meter_get);

    error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
                    dpif_name(dpif), meter_id.uint32);
    } else {
        VLOG_WARN_RL(&error_rl,
                     "%s: failed to get DPIF meter %"PRIu32" stats: %s",
                     dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
        /* Poison the output so a caller ignoring 'error' notices. */
        stats->packet_in_count = ~0;
        stats->byte_in_count = ~0;
        stats->n_bands = 0;
    }
    return error;
}
/* Deletes the meter with the given 'meter_id' from 'dpif', storing its final
 * statistics (up to 'n_bands' bands) into '*stats' if non-null.  On failure
 * the aggregate counters are set to all-ones so stale values are not mistaken
 * for real data.  Returns 0 on success, otherwise a positive errno value. */
int
dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
               struct ofputil_meter_stats *stats, uint16_t n_bands)
{
    int error;

    COVERAGE_INC(dpif_meter_del);

    error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
                    dpif_name(dpif), meter_id.uint32);
    } else {
        VLOG_WARN_RL(&error_rl,
                     "%s: failed to delete DPIF meter %"PRIu32": %s",
                     dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
        if (stats) {
            /* Poison the output so a caller ignoring 'error' notices. */
            stats->packet_in_count = ~0;
            stats->byte_in_count = ~0;
            stats->n_bands = 0;
        }
    }
    return error;
}
1996 dpif_bond_add(struct dpif
*dpif
, uint32_t bond_id
, odp_port_t
*member_map
)
1998 return dpif
->dpif_class
->bond_del
1999 ? dpif
->dpif_class
->bond_add(dpif
, bond_id
, member_map
)
2004 dpif_bond_del(struct dpif
*dpif
, uint32_t bond_id
)
2006 return dpif
->dpif_class
->bond_del
2007 ? dpif
->dpif_class
->bond_del(dpif
, bond_id
)
2012 dpif_bond_stats_get(struct dpif
*dpif
, uint32_t bond_id
,
2015 memset(n_bytes
, 0, BOND_BUCKETS
* sizeof *n_bytes
);
2017 return dpif
->dpif_class
->bond_stats_get
2018 ? dpif
->dpif_class
->bond_stats_get(dpif
, bond_id
, n_bytes
)