lib/dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "flow.h"
32 #include "netdev.h"
33 #include "netlink.h"
34 #include "odp-execute.h"
35 #include "odp-util.h"
36 #include "openvswitch/ofp-print.h"
37 #include "openvswitch/ofpbuf.h"
38 #include "packets.h"
39 #include "openvswitch/poll-loop.h"
40 #include "route-table.h"
41 #include "seq.h"
42 #include "openvswitch/shash.h"
43 #include "sset.h"
44 #include "timeval.h"
45 #include "tnl-neigh-cache.h"
46 #include "tnl-ports.h"
47 #include "util.h"
48 #include "uuid.h"
49 #include "valgrind.h"
50 #include "openvswitch/ofp-errors.h"
51 #include "openvswitch/vlog.h"
52 #include "lib/netdev-provider.h"
53
54 VLOG_DEFINE_THIS_MODULE(dpif);
55
56 COVERAGE_DEFINE(dpif_destroy);
57 COVERAGE_DEFINE(dpif_port_add);
58 COVERAGE_DEFINE(dpif_port_del);
59 COVERAGE_DEFINE(dpif_flow_flush);
60 COVERAGE_DEFINE(dpif_flow_get);
61 COVERAGE_DEFINE(dpif_flow_put);
62 COVERAGE_DEFINE(dpif_flow_del);
63 COVERAGE_DEFINE(dpif_execute);
64 COVERAGE_DEFINE(dpif_purge);
65 COVERAGE_DEFINE(dpif_execute_with_help);
66 COVERAGE_DEFINE(dpif_meter_set);
67 COVERAGE_DEFINE(dpif_meter_get);
68 COVERAGE_DEFINE(dpif_meter_del);
69
70 static const struct dpif_class *base_dpif_classes[] = {
71 #if defined(__linux__) || defined(_WIN32)
72 &dpif_netlink_class,
73 #endif
74 &dpif_netdev_class,
75 };
76
77 struct registered_dpif_class {
78 const struct dpif_class *dpif_class;
79 int refcount;
80 };
81 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
82 static struct sset dpif_disallowed = SSET_INITIALIZER(&dpif_disallowed);
83
84 /* Protects 'dpif_classes', including the refcount, and 'dpif_disallowed'. */
85 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
86
87 /* Rate limit for individual messages going to or from the datapath, output at
88 * DBG level. This is very high because, if these are enabled, it is because
89 * we really need to see them. */
90 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
91
92 /* Not really much point in logging many dpif errors. */
93 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
94
95 static void log_operation(const struct dpif *, const char *operation,
96 int error);
97 static bool should_log_flow_message(const struct vlog_module *module,
98 int error);
99
100 /* Incremented whenever tnl route, arp, etc changes. */
101 struct seq *tnl_conf_seq;
102
103 static bool
104 dpif_is_tap_port(const char *type)
105 {
106 return !strcmp(type, "tap");
107 }
108
109 static void
110 dp_initialize(void)
111 {
112 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
113
114 if (ovsthread_once_start(&once)) {
115 int i;
116
117 tnl_conf_seq = seq_create();
118 dpctl_unixctl_register();
119 tnl_port_map_init();
120 tnl_neigh_cache_init();
121 route_table_init();
122
123 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
124 dp_register_provider(base_dpif_classes[i]);
125 }
126
127 ovsthread_once_done(&once);
128 }
129 }
130
131 static int
132 dp_register_provider__(const struct dpif_class *new_class)
133 {
134 struct registered_dpif_class *registered_class;
135 int error;
136
137 if (sset_contains(&dpif_disallowed, new_class->type)) {
138 VLOG_DBG("attempted to register disallowed provider: %s",
139 new_class->type);
140 return EINVAL;
141 }
142
143 if (shash_find(&dpif_classes, new_class->type)) {
144 VLOG_WARN("attempted to register duplicate datapath provider: %s",
145 new_class->type);
146 return EEXIST;
147 }
148
149 error = new_class->init ? new_class->init() : 0;
150 if (error) {
151 VLOG_WARN("failed to initialize %s datapath class: %s",
152 new_class->type, ovs_strerror(error));
153 return error;
154 }
155
156 registered_class = xmalloc(sizeof *registered_class);
157 registered_class->dpif_class = new_class;
158 registered_class->refcount = 0;
159
160 shash_add(&dpif_classes, new_class->type, registered_class);
161
162 return 0;
163 }
164
165 /* Registers a new datapath provider. After successful registration, new
166 * datapaths of that type can be opened using dpif_open(). */
167 int
168 dp_register_provider(const struct dpif_class *new_class)
169 {
170 int error;
171
172 ovs_mutex_lock(&dpif_mutex);
173 error = dp_register_provider__(new_class);
174 ovs_mutex_unlock(&dpif_mutex);
175
176 return error;
177 }
178
179 /* Unregisters a datapath provider. 'type' must have been previously
180 * registered and not currently be in use by any dpifs. After unregistration
181 * new datapaths of that type cannot be opened using dpif_open(). */
182 static int
183 dp_unregister_provider__(const char *type)
184 {
185 struct shash_node *node;
186 struct registered_dpif_class *registered_class;
187
188 node = shash_find(&dpif_classes, type);
189 if (!node) {
190 return EAFNOSUPPORT;
191 }
192
193 registered_class = node->data;
194 if (registered_class->refcount) {
195 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
196 return EBUSY;
197 }
198
199 shash_delete(&dpif_classes, node);
200 free(registered_class);
201
202 return 0;
203 }
204
205 /* Unregisters a datapath provider. 'type' must have been previously
206 * registered and not currently be in use by any dpifs. After unregistration
207 * new datapaths of that type cannot be opened using dpif_open(). */
208 int
209 dp_unregister_provider(const char *type)
210 {
211 int error;
212
213 dp_initialize();
214
215 ovs_mutex_lock(&dpif_mutex);
216 error = dp_unregister_provider__(type);
217 ovs_mutex_unlock(&dpif_mutex);
218
219 return error;
220 }
221
222 /* Disallows a provider. Causes future calls of dp_register_provider() with
223 * a dpif_class which implements 'type' to fail. */
224 void
225 dp_disallow_provider(const char *type)
226 {
227 ovs_mutex_lock(&dpif_mutex);
228 sset_add(&dpif_disallowed, type);
229 ovs_mutex_unlock(&dpif_mutex);
230 }
231
232 /* Adds the types of all currently registered datapath providers to 'types'.
233 * The caller must first initialize the sset. */
234 void
235 dp_enumerate_types(struct sset *types)
236 {
237 struct shash_node *node;
238
239 dp_initialize();
240
241 ovs_mutex_lock(&dpif_mutex);
242 SHASH_FOR_EACH(node, &dpif_classes) {
243 const struct registered_dpif_class *registered_class = node->data;
244 sset_add(types, registered_class->dpif_class->type);
245 }
246 ovs_mutex_unlock(&dpif_mutex);
247 }
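
/* Illustrative sketch (not part of the original file): listing the registered
 * provider types with dp_enumerate_types(), typically "system" and/or
 * "netdev".  Logging is arbitrary:
 *
 *     struct sset types = SSET_INITIALIZER(&types);
 *     const char *type;
 *
 *     dp_enumerate_types(&types);
 *     SSET_FOR_EACH (type, &types) {
 *         VLOG_INFO("datapath type: %s", type);
 *     }
 *     sset_destroy(&types);
 */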
248
249 static void
250 dp_class_unref(struct registered_dpif_class *rc)
251 {
252 ovs_mutex_lock(&dpif_mutex);
253 ovs_assert(rc->refcount);
254 rc->refcount--;
255 ovs_mutex_unlock(&dpif_mutex);
256 }
257
258 static struct registered_dpif_class *
259 dp_class_lookup(const char *type)
260 {
261 struct registered_dpif_class *rc;
262
263 ovs_mutex_lock(&dpif_mutex);
264 rc = shash_find_data(&dpif_classes, type);
265 if (rc) {
266 rc->refcount++;
267 }
268 ovs_mutex_unlock(&dpif_mutex);
269
270 return rc;
271 }
272
273 /* Clears 'names' and enumerates the names of all known created datapaths with
274 * the given 'type'. The caller must first initialize the sset. Returns 0 if
275 * successful, otherwise a positive errno value.
276 *
277 * Some kinds of datapaths might not be practically enumerable. This is not
278 * considered an error. */
279 int
280 dp_enumerate_names(const char *type, struct sset *names)
281 {
282 struct registered_dpif_class *registered_class;
283 const struct dpif_class *dpif_class;
284 int error;
285
286 dp_initialize();
287 sset_clear(names);
288
289 registered_class = dp_class_lookup(type);
290 if (!registered_class) {
291 VLOG_WARN("could not enumerate unknown type: %s", type);
292 return EAFNOSUPPORT;
293 }
294
295 dpif_class = registered_class->dpif_class;
296 error = (dpif_class->enumerate
297 ? dpif_class->enumerate(names, dpif_class)
298 : 0);
299 if (error) {
300 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
301 ovs_strerror(error));
302 }
303 dp_class_unref(registered_class);
304
305 return error;
306 }
307
308 /* Parses 'datapath_name_', which is of the form [type@]name, into its
309 * component pieces. 'name' and 'type' must be freed by the caller.
310 *
311 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
312 void
313 dp_parse_name(const char *datapath_name_, char **name, char **type)
314 {
315 char *datapath_name = xstrdup(datapath_name_);
316 char *separator;
317
318 separator = strchr(datapath_name, '@');
319 if (separator) {
320 *separator = '\0';
321 *type = datapath_name;
322 *name = xstrdup(dpif_normalize_type(separator + 1));
323 } else {
324 *name = datapath_name;
325 *type = xstrdup(dpif_normalize_type(NULL));
326 }
327 }
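
/* Illustrative sketch (not part of the original file): splitting datapath
 * name strings with dp_parse_name().  The example strings are arbitrary:
 *
 *     char *name, *type;
 *
 *     dp_parse_name("netdev@br0", &name, &type);
 *         (name is "br0", type is "netdev")
 *     free(name);
 *     free(type);
 *
 *     dp_parse_name("br0", &name, &type);
 *         (name is "br0", type is the default "system")
 *     free(name);
 *     free(type);
 */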
328
329 static int
330 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
331 {
332 struct dpif *dpif = NULL;
333 int error;
334 struct registered_dpif_class *registered_class;
335
336 dp_initialize();
337
338 type = dpif_normalize_type(type);
339 registered_class = dp_class_lookup(type);
340 if (!registered_class) {
341 VLOG_WARN("could not create datapath %s of unknown type %s", name,
342 type);
343 error = EAFNOSUPPORT;
344 goto exit;
345 }
346
347 error = registered_class->dpif_class->open(registered_class->dpif_class,
348 name, create, &dpif);
349 if (!error) {
350 const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
351 struct dpif_port_dump port_dump;
352 struct dpif_port dpif_port;
353
354 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
355
356 DPIF_PORT_FOR_EACH(&dpif_port, &port_dump, dpif) {
357 struct netdev *netdev;
358 int err;
359
360 if (dpif_is_tap_port(dpif_port.type)) {
361 continue;
362 }
363
364 err = netdev_open(dpif_port.name, dpif_port.type, &netdev);
365
366 if (!err) {
367 netdev_ports_insert(netdev, dpif_type_str, &dpif_port);
368 netdev_close(netdev);
369 } else {
370 VLOG_WARN("could not open netdev %s type %s: %s",
371 dpif_port.name, dpif_port.type, ovs_strerror(err));
372 }
373 }
374 } else {
375 dp_class_unref(registered_class);
376 }
377
378 exit:
379 *dpifp = error ? NULL : dpif;
380 return error;
381 }
382
383 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
384 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
385 * the empty string to specify the default system type. Returns 0 if
386 * successful, otherwise a positive errno value. On success stores a pointer
387 * to the datapath in '*dpifp', otherwise a null pointer. */
388 int
389 dpif_open(const char *name, const char *type, struct dpif **dpifp)
390 {
391 return do_open(name, type, false, dpifp);
392 }
393
394 /* Tries to create and open a new datapath with the given 'name' and 'type'.
395 * 'type' may be either NULL or the empty string to specify the default system
396 * type. Will fail if a datapath with 'name' and 'type' already exists.
397 * Returns 0 if successful, otherwise a positive errno value. On success
398 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
399 int
400 dpif_create(const char *name, const char *type, struct dpif **dpifp)
401 {
402 return do_open(name, type, true, dpifp);
403 }
404
405 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
406 * does not exist. 'type' may be either NULL or the empty string to specify
407 * the default system type. Returns 0 if successful, otherwise a positive
408 * errno value. On success stores a pointer to the datapath in '*dpifp',
409 * otherwise a null pointer. */
410 int
411 dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
412 {
413 int error;
414
415 error = dpif_create(name, type, dpifp);
416 if (error == EEXIST || error == EBUSY) {
417 error = dpif_open(name, type, dpifp);
418 if (error) {
419 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
420 name, ovs_strerror(error));
421 }
422 } else if (error) {
423 VLOG_WARN("failed to create datapath %s: %s",
424 name, ovs_strerror(error));
425 }
426 return error;
427 }
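
/* Illustrative sketch (not part of the original file): opening a datapath,
 * creating it if needed, and closing it again.  The name "ovs-system" and
 * type "system" are assumptions for the example:
 *
 *     struct dpif *dpif;
 *     int error = dpif_create_and_open("ovs-system", "system", &dpif);
 *
 *     if (error) {
 *         VLOG_ERR("failed to open datapath (%s)", ovs_strerror(error));
 *     } else {
 *         (use 'dpif' here, e.g. dpif_run(dpif), then release it)
 *         dpif_close(dpif);
 *     }
 */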
428
429 static void
430 dpif_remove_netdev_ports(struct dpif *dpif) {
431 const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
432 struct dpif_port_dump port_dump;
433 struct dpif_port dpif_port;
434
435 DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
436 if (!dpif_is_tap_port(dpif_port.type)) {
437 netdev_ports_remove(dpif_port.port_no, dpif_type_str);
438 }
439 }
440 }
441
442 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
443 * itself; call dpif_delete() first, instead, if that is desirable. */
444 void
445 dpif_close(struct dpif *dpif)
446 {
447 if (dpif) {
448 struct registered_dpif_class *rc;
449
450 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
451
452 if (rc->refcount == 1) {
453 dpif_remove_netdev_ports(dpif);
454 }
455 dpif_uninit(dpif, true);
456 dp_class_unref(rc);
457 }
458 }
459
460 /* Performs periodic work needed by 'dpif'. */
461 bool
462 dpif_run(struct dpif *dpif)
463 {
464 if (dpif->dpif_class->run) {
465 return dpif->dpif_class->run(dpif);
466 }
467 return false;
468 }
469
470 /* Arranges for poll_block() to wake up when dp_run() needs to be called for
471 * 'dpif'. */
472 void
473 dpif_wait(struct dpif *dpif)
474 {
475 if (dpif->dpif_class->wait) {
476 dpif->dpif_class->wait(dpif);
477 }
478 }
479
480 /* Returns the name of datapath 'dpif' prefixed with the type
481 * (for use in log messages). */
482 const char *
483 dpif_name(const struct dpif *dpif)
484 {
485 return dpif->full_name;
486 }
487
488 /* Returns the name of datapath 'dpif' without the type
489 * (for use in device names). */
490 const char *
491 dpif_base_name(const struct dpif *dpif)
492 {
493 return dpif->base_name;
494 }
495
496 /* Returns the type of datapath 'dpif'. */
497 const char *
498 dpif_type(const struct dpif *dpif)
499 {
500 return dpif->dpif_class->type;
501 }
502
503 /* Checks if datapath 'dpif' requires cleanup. */
504 bool
505 dpif_cleanup_required(const struct dpif *dpif)
506 {
507 return dpif->dpif_class->cleanup_required;
508 }
509
510 /* Returns the fully spelled out name for the given datapath 'type'.
511 *
512 * Normalized type strings can be compared with strcmp(). Unnormalized type
513 * strings might name the same type even if they have different spellings.
514 const char *
515 dpif_normalize_type(const char *type)
516 {
517 return type && type[0] ? type : "system";
518 }
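
/* For example (not part of the original file):
 *
 *     dpif_normalize_type(NULL)     => "system"
 *     dpif_normalize_type("")       => "system"
 *     dpif_normalize_type("netdev") => "netdev"
 */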
519
520 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
521 * ports. After calling this function, it does not make sense to pass 'dpif'
522 * to any functions other than dpif_name() or dpif_close(). */
523 int
524 dpif_delete(struct dpif *dpif)
525 {
526 int error;
527
528 COVERAGE_INC(dpif_destroy);
529
530 error = dpif->dpif_class->destroy(dpif);
531 log_operation(dpif, "delete", error);
532 return error;
533 }
534
535 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
536 * otherwise a positive errno value. */
537 int
538 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
539 {
540 int error = dpif->dpif_class->get_stats(dpif, stats);
541 if (error) {
542 memset(stats, 0, sizeof *stats);
543 }
544 log_operation(dpif, "get_stats", error);
545 return error;
546 }
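
/* Illustrative sketch (not part of the original file): logging a few of the
 * counters that dpif_get_dp_stats() fills in, given an open 'dpif':
 *
 *     struct dpif_dp_stats stats;
 *
 *     if (!dpif_get_dp_stats(dpif, &stats)) {
 *         VLOG_INFO("%s: %"PRIu64" flows, hit:%"PRIu64" missed:%"PRIu64
 *                   " lost:%"PRIu64, dpif_name(dpif), stats.n_flows,
 *                   stats.n_hit, stats.n_missed, stats.n_lost);
 *     }
 */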
547
548 int
549 dpif_set_features(struct dpif *dpif, uint32_t new_features)
550 {
551 int error = dpif->dpif_class->set_features(dpif, new_features);
552
553 log_operation(dpif, "set_features", error);
554 return error;
555 }
556
557 const char *
558 dpif_port_open_type(const char *datapath_type, const char *port_type)
559 {
560 struct registered_dpif_class *rc;
561
562 datapath_type = dpif_normalize_type(datapath_type);
563
564 ovs_mutex_lock(&dpif_mutex);
565 rc = shash_find_data(&dpif_classes, datapath_type);
566 if (rc && rc->dpif_class->port_open_type) {
567 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
568 }
569 ovs_mutex_unlock(&dpif_mutex);
570
571 return port_type;
572 }
573
574 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
575 * non-null and its value is not ODPP_NONE, then attempts to use the
576 * value as the port number.
577 *
578 * If successful, returns 0 and sets '*port_nop' to the new port's port
579 * number (if 'port_nop' is non-null). On failure, returns a positive
580 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
581 * non-null). */
582 int
583 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
584 {
585 const char *netdev_name = netdev_get_name(netdev);
586 odp_port_t port_no = ODPP_NONE;
587 int error;
588
589 COVERAGE_INC(dpif_port_add);
590
591 if (port_nop) {
592 port_no = *port_nop;
593 }
594
595 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
596 if (!error) {
597 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
598 dpif_name(dpif), netdev_name, port_no);
599
600 if (!dpif_is_tap_port(netdev_get_type(netdev))) {
601
602 const char *dpif_type_str = dpif_normalize_type(dpif_type(dpif));
603 struct dpif_port dpif_port;
604
605 dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
606 dpif_port.name = CONST_CAST(char *, netdev_name);
607 dpif_port.port_no = port_no;
608 netdev_ports_insert(netdev, dpif_type_str, &dpif_port);
609 }
610 } else {
611 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
612 dpif_name(dpif), netdev_name, ovs_strerror(error));
613 port_no = ODPP_NONE;
614 }
615 if (port_nop) {
616 *port_nop = port_no;
617 }
618 return error;
619 }
620
621 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
622 * otherwise a positive errno value. */
623 int
624 dpif_port_del(struct dpif *dpif, odp_port_t port_no, bool local_delete)
625 {
626 int error = 0;
627
628 COVERAGE_INC(dpif_port_del);
629
630 if (!local_delete) {
631 error = dpif->dpif_class->port_del(dpif, port_no);
632 if (!error) {
633 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
634 dpif_name(dpif), port_no);
635 } else {
636 log_operation(dpif, "port_del", error);
637 }
638 }
639
640 netdev_ports_remove(port_no, dpif_normalize_type(dpif_type(dpif)));
641 return error;
642 }
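
/* Illustrative sketch (not part of the original file): adding a device as a
 * datapath port and removing it again.  The device name "eth0" and type
 * "system" are assumptions, and error handling is minimal:
 *
 *     struct netdev *netdev;
 *     odp_port_t port_no = ODPP_NONE;    (request an automatic port number)
 *
 *     if (!netdev_open("eth0", "system", &netdev)) {
 *         if (!dpif_port_add(dpif, netdev, &port_no)) {
 *             (use the port, then remove it)
 *             dpif_port_del(dpif, port_no, false);
 *         }
 *         netdev_close(netdev);
 *     }
 */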
643
644 /* Makes a deep copy of 'src' into 'dst'. */
645 void
646 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
647 {
648 dst->name = xstrdup(src->name);
649 dst->type = xstrdup(src->type);
650 dst->port_no = src->port_no;
651 }
652
653 /* Frees memory allocated to members of 'dpif_port'.
654 *
655 * Do not call this function on a dpif_port obtained from
656 * dpif_port_dump_next(): that function retains ownership of the data in the
657 * dpif_port. */
658 void
659 dpif_port_destroy(struct dpif_port *dpif_port)
660 {
661 free(dpif_port->name);
662 free(dpif_port->type);
663 }
664
665 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
666 * true; otherwise, returns false. */
667 bool
668 dpif_port_exists(const struct dpif *dpif, const char *devname)
669 {
670 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
671 if (error != 0 && error != ENODEV) {
672 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
673 dpif_name(dpif), devname, ovs_strerror(error));
674 }
675
676 return !error;
677 }
678
679 /* Refreshes configuration of 'dpif's port. */
680 int
681 dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
682 const struct smap *cfg)
683 {
684 int error = 0;
685
686 if (dpif->dpif_class->port_set_config) {
687 error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
688 if (error) {
689 log_operation(dpif, "port_set_config", error);
690 }
691 }
692
693 return error;
694 }
695
696 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
697 * initializes '*port' appropriately; on failure, returns a positive errno
698 * value.
699 *
700 * Returns ENODEV if the port doesn't exist.
701 *
702 * The caller owns the data in 'port' and must free it with
703 * dpif_port_destroy() when it is no longer needed. */
704 int
705 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
706 struct dpif_port *port)
707 {
708 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
709 if (!error) {
710 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
711 dpif_name(dpif), port_no, port->name);
712 } else {
713 memset(port, 0, sizeof *port);
714 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
715 dpif_name(dpif), port_no, ovs_strerror(error));
716 }
717 return error;
718 }
719
720 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
721 * initializes '*port' appropriately; on failure, returns a positive errno
722 * value.
723 *
724 * Returns ENODEV if the port doesn't exist.
725 *
726 * The caller owns the data in 'port' and must free it with
727 * dpif_port_destroy() when it is no longer needed. */
728 int
729 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
730 struct dpif_port *port)
731 {
732 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
733 if (!error) {
734 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
735 dpif_name(dpif), devname, port->port_no);
736 } else {
737 memset(port, 0, sizeof *port);
738
739 /* For ENODEV we use DBG level because the caller is probably
740 * interested in whether 'dpif' actually has a port 'devname', so that
741 * it's not an issue worth logging if it doesn't. Other errors are
742 * uncommon and more likely to indicate a real problem. */
743 VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
744 "%s: failed to query port %s: %s",
745 dpif_name(dpif), devname, ovs_strerror(error));
746 }
747 return error;
748 }
749
750 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
751 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
752 * flows whose packets arrived on port 'port_no'.
753 *
754 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
755 * allocated to any port, that the client may use for special purposes.
756 *
757 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
758 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
759 * disabled and then re-enabled, so a client that does that must be prepared to
760 * update all of the flows that it installed that contain
761 * OVS_ACTION_ATTR_USERSPACE actions. */
762 uint32_t
763 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no)
764 {
765 return (dpif->dpif_class->port_get_pid
766 ? (dpif->dpif_class->port_get_pid)(dpif, port_no)
767 : 0);
768 }
769
770 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
771 * the port's name into the 'name_size' bytes in 'name', ensuring that the
772 * result is null-terminated. On failure, returns a positive errno value and
773 * makes 'name' the empty string. */
774 int
775 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
776 char *name, size_t name_size)
777 {
778 struct dpif_port port;
779 int error;
780
781 ovs_assert(name_size > 0);
782
783 error = dpif_port_query_by_number(dpif, port_no, &port);
784 if (!error) {
785 ovs_strlcpy(name, port.name, name_size);
786 dpif_port_destroy(&port);
787 } else {
788 *name = '\0';
789 }
790 return error;
791 }
792
793 /* Initializes 'dump' to begin dumping the ports in a dpif.
794 *
795 * This function provides no status indication. An error status for the entire
796 * dump operation is provided when it is completed by calling
797 * dpif_port_dump_done().
798 */
799 void
800 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
801 {
802 dump->dpif = dpif;
803 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
804 log_operation(dpif, "port_dump_start", dump->error);
805 }
806
807 /* Attempts to retrieve another port from 'dump', which must have been
808 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
809 * into 'port' and returns true. On failure, returns false.
810 *
811 * Failure might indicate an actual error or merely that the last port has been
812 * dumped. An error status for the entire dump operation is provided when it
813 * is completed by calling dpif_port_dump_done().
814 *
815 * The dpif owns the data stored in 'port'. It will remain valid until at
816 * least the next time 'dump' is passed to dpif_port_dump_next() or
817 * dpif_port_dump_done(). */
818 bool
819 dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
820 {
821 const struct dpif *dpif = dump->dpif;
822
823 if (dump->error) {
824 return false;
825 }
826
827 dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
828 if (dump->error == EOF) {
829 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
830 } else {
831 log_operation(dpif, "port_dump_next", dump->error);
832 }
833
834 if (dump->error) {
835 dpif->dpif_class->port_dump_done(dpif, dump->state);
836 return false;
837 }
838 return true;
839 }
840
841 /* Completes port table dump operation 'dump', which must have been initialized
842 * with dpif_port_dump_start(). Returns 0 if the dump operation was
843 * error-free, otherwise a positive errno value describing the problem. */
844 int
845 dpif_port_dump_done(struct dpif_port_dump *dump)
846 {
847 const struct dpif *dpif = dump->dpif;
848 if (!dump->error) {
849 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
850 log_operation(dpif, "port_dump_done", dump->error);
851 }
852 return dump->error == EOF ? 0 : dump->error;
853 }
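
/* Illustrative sketch (not part of the original file): the three dump
 * functions above are normally used through the DPIF_PORT_FOR_EACH macro in
 * dpif.h.  Open-coded, with explicit error reporting, the pattern looks
 * roughly like this:
 *
 *     struct dpif_port_dump dump;
 *     struct dpif_port port;
 *     int error;
 *
 *     dpif_port_dump_start(&dump, dpif);
 *     while (dpif_port_dump_next(&dump, &port)) {
 *         VLOG_INFO("port %"PRIu32": %s (%s)",
 *                   port.port_no, port.name, port.type);
 *     }
 *     error = dpif_port_dump_done(&dump);
 *     if (error) {
 *         VLOG_WARN("port dump failed (%s)", ovs_strerror(error));
 *     }
 */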
854
855 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
856 * 'dpif' has changed, this function does one of the following:
857 *
858 * - Stores the name of the device that was added to or deleted from 'dpif' in
859 * '*devnamep' and returns 0. The caller is responsible for freeing
860 * '*devnamep' (with free()) when it no longer needs it.
861 *
862 * - Returns ENOBUFS and sets '*devnamep' to NULL.
863 *
864 * This function may also return 'false positives', where it returns 0 and
865 * '*devnamep' names a device that was not actually added or deleted, or it
866 * returns ENOBUFS without any change.
867 *
868 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
869 * return other positive errno values to indicate that something has gone
870 * wrong. */
871 int
872 dpif_port_poll(const struct dpif *dpif, char **devnamep)
873 {
874 int error = dpif->dpif_class->port_poll(dpif, devnamep);
875 if (error) {
876 *devnamep = NULL;
877 }
878 return error;
879 }
880
881 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
882 * value other than EAGAIN. */
883 void
884 dpif_port_poll_wait(const struct dpif *dpif)
885 {
886 dpif->dpif_class->port_poll_wait(dpif);
887 }
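
/* Illustrative sketch (not part of the original file): a monitoring loop
 * built on dpif_port_poll() and dpif_port_poll_wait().  Per the comments
 * above, EAGAIN means "no change yet" and ENOBUFS means "something changed
 * but the name was lost":
 *
 *     for (;;) {
 *         char *devname;
 *         int error;
 *
 *         while ((error = dpif_port_poll(dpif, &devname)) != EAGAIN) {
 *             if (!error) {
 *                 VLOG_INFO("port %s changed", devname);
 *                 free(devname);
 *             } else if (error == ENOBUFS) {
 *                 VLOG_INFO("some port changed");
 *             } else {
 *                 VLOG_WARN("port poll failed (%s)", ovs_strerror(error));
 *                 break;
 *             }
 *         }
 *         dpif_port_poll_wait(dpif);
 *         poll_block();
 *     }
 */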
888
889 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
890 * arguments must have been initialized through a call to flow_extract().
891 * 'used' is stored into stats->used. */
892 void
893 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
894 long long int used, struct dpif_flow_stats *stats)
895 {
896 stats->tcp_flags = ntohs(flow->tcp_flags);
897 stats->n_bytes = dp_packet_size(packet);
898 stats->n_packets = 1;
899 stats->used = used;
900 }
901
902 /* Appends a human-readable representation of 'stats' to 's'. */
903 void
904 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
905 {
906 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
907 stats->n_packets, stats->n_bytes);
908 if (stats->used) {
909 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
910 } else {
911 ds_put_format(s, "never");
912 }
913 if (stats->tcp_flags) {
914 ds_put_cstr(s, ", flags:");
915 packet_format_tcp_flags(s, stats->tcp_flags);
916 }
917 }
918
919 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
920 * positive errno value. */
921 int
922 dpif_flow_flush(struct dpif *dpif)
923 {
924 int error;
925
926 COVERAGE_INC(dpif_flow_flush);
927
928 error = dpif->dpif_class->flow_flush(dpif);
929 log_operation(dpif, "flow_flush", error);
930 return error;
931 }
932
933 /* Attempts to install 'key' into the datapath, fetches it, then deletes it.
934 * Returns true if the datapath supported installing 'flow', false otherwise.
935 */
936 bool
937 dpif_probe_feature(struct dpif *dpif, const char *name,
938 const struct ofpbuf *key, const struct ofpbuf *actions,
939 const ovs_u128 *ufid)
940 {
941 struct dpif_flow flow;
942 struct ofpbuf reply;
943 uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
944 bool enable_feature = false;
945 int error;
946 const struct nlattr *nl_actions = actions ? actions->data : NULL;
947 const size_t nl_actions_size = actions ? actions->size : 0;
948
949 /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
950 * restarted) at just the right time such that feature probes from the
951 * previous run are still present in the datapath. */
952 error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
953 key->data, key->size, NULL, 0,
954 nl_actions, nl_actions_size,
955 ufid, NON_PMD_CORE_ID, NULL);
956 if (error) {
957 if (error != EINVAL && error != EOVERFLOW) {
958 VLOG_WARN("%s: %s flow probe failed (%s)",
959 dpif_name(dpif), name, ovs_strerror(error));
960 }
961 return false;
962 }
963
964 ofpbuf_use_stack(&reply, &stub, sizeof stub);
965 error = dpif_flow_get(dpif, key->data, key->size, ufid,
966 NON_PMD_CORE_ID, &reply, &flow);
967 if (!error
968 && (!ufid || (flow.ufid_present
969 && ovs_u128_equals(*ufid, flow.ufid)))) {
970 enable_feature = true;
971 }
972
973 error = dpif_flow_del(dpif, key->data, key->size, ufid,
974 NON_PMD_CORE_ID, NULL);
975 if (error) {
976 VLOG_WARN("%s: failed to delete %s feature probe flow",
977 dpif_name(dpif), name);
978 }
979
980 return enable_feature;
981 }
982
983 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
984 int
985 dpif_flow_get(struct dpif *dpif,
986 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
987 const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
988 {
989 struct dpif_op *opp;
990 struct dpif_op op;
991
992 op.type = DPIF_OP_FLOW_GET;
993 op.flow_get.key = key;
994 op.flow_get.key_len = key_len;
995 op.flow_get.ufid = ufid;
996 op.flow_get.pmd_id = pmd_id;
997 op.flow_get.buffer = buf;
998
999 memset(flow, 0, sizeof *flow);
1000 op.flow_get.flow = flow;
1001 op.flow_get.flow->key = key;
1002 op.flow_get.flow->key_len = key_len;
1003
1004 opp = &op;
1005 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1006
1007 return op.error;
1008 }
1009
1010 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
1011 int
1012 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
1013 const struct nlattr *key, size_t key_len,
1014 const struct nlattr *mask, size_t mask_len,
1015 const struct nlattr *actions, size_t actions_len,
1016 const ovs_u128 *ufid, const unsigned pmd_id,
1017 struct dpif_flow_stats *stats)
1018 {
1019 struct dpif_op *opp;
1020 struct dpif_op op;
1021
1022 op.type = DPIF_OP_FLOW_PUT;
1023 op.flow_put.flags = flags;
1024 op.flow_put.key = key;
1025 op.flow_put.key_len = key_len;
1026 op.flow_put.mask = mask;
1027 op.flow_put.mask_len = mask_len;
1028 op.flow_put.actions = actions;
1029 op.flow_put.actions_len = actions_len;
1030 op.flow_put.ufid = ufid;
1031 op.flow_put.pmd_id = pmd_id;
1032 op.flow_put.stats = stats;
1033
1034 opp = &op;
1035 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1036
1037 return op.error;
1038 }
1039
1040 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
1041 int
1042 dpif_flow_del(struct dpif *dpif,
1043 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
1044 const unsigned pmd_id, struct dpif_flow_stats *stats)
1045 {
1046 struct dpif_op *opp;
1047 struct dpif_op op;
1048
1049 op.type = DPIF_OP_FLOW_DEL;
1050 op.flow_del.key = key;
1051 op.flow_del.key_len = key_len;
1052 op.flow_del.ufid = ufid;
1053 op.flow_del.pmd_id = pmd_id;
1054 op.flow_del.stats = stats;
1055 op.flow_del.terse = false;
1056
1057 opp = &op;
1058 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1059
1060 return op.error;
1061 }
1062
1063 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1064 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1065 * be returned in the dump. Otherwise, all fields will be returned.
1066 *
1067 * This function always successfully returns a dpif_flow_dump. Error
1068 * reporting is deferred to dpif_flow_dump_destroy(). */
1069 struct dpif_flow_dump *
1070 dpif_flow_dump_create(const struct dpif *dpif, bool terse,
1071 struct dpif_flow_dump_types *types)
1072 {
1073 return dpif->dpif_class->flow_dump_create(dpif, terse, types);
1074 }
1075
1076 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1077 * All dpif_flow_dump_thread structures previously created for 'dump' must
1078 * previously have been destroyed.
1079 *
1080 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1081 * value describing the problem. */
1082 int
1083 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1084 {
1085 const struct dpif *dpif = dump->dpif;
1086 int error = dpif->dpif_class->flow_dump_destroy(dump);
1087 log_operation(dpif, "flow_dump_destroy", error);
1088 return error == EOF ? 0 : error;
1089 }
1090
1091 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1092 struct dpif_flow_dump_thread *
1093 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1094 {
1095 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1096 }
1097
1098 /* Releases 'thread'. */
1099 void
1100 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1101 {
1102 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1103 }
1104
1105 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1106 * if and only if no flows remained to be retrieved, otherwise a positive
1107 * number reflecting the number of elements in 'flows[]' that were updated.
1108 * The number of flows returned might be less than 'max_flows' because
1109 * fewer than 'max_flows' remained, because this particular datapath does not
1110 * benefit from batching, or because an error occurred partway through
1111 * retrieval. Thus, the caller should continue calling until a 0 return value,
1112 * even if intermediate return values are less than 'max_flows'.
1113 *
1114 * No error status is immediately provided. An error status for the entire
1115 * dump operation is provided when it is completed by calling
1116 * dpif_flow_dump_destroy().
1117 *
1118 * All of the data stored into 'flows' is owned by the datapath, not by the
1119 * caller, and the caller must not modify or free it. The datapath guarantees
1120 * that it remains accessible and unchanged until the first of:
1121 * - The next call to dpif_flow_dump_next() for 'thread', or
1122 * - The next rcu quiescent period. */
1123 int
1124 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1125 struct dpif_flow *flows, int max_flows)
1126 {
1127 struct dpif *dpif = thread->dpif;
1128 int n;
1129
1130 ovs_assert(max_flows > 0);
1131 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1132 if (n > 0) {
1133 struct dpif_flow *f;
1134
1135 for (f = flows; f < &flows[n]
1136 && should_log_flow_message(&this_module, 0); f++) {
1137 log_flow_message(dpif, 0, &this_module, "flow_dump",
1138 f->key, f->key_len, f->mask, f->mask_len,
1139 &f->ufid, &f->stats, f->actions, f->actions_len);
1140 }
1141 } else {
1142 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1143 }
1144 return n;
1145 }
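
/* Illustrative sketch (not part of the original file): a single-threaded
 * flow dump using the functions above.  A multithreaded dumper would create
 * one dpif_flow_dump_thread per thread instead:
 *
 *     struct dpif_flow_dump *dump = dpif_flow_dump_create(dpif, false, NULL);
 *     struct dpif_flow_dump_thread *thread
 *         = dpif_flow_dump_thread_create(dump);
 *     struct dpif_flow flows[50];
 *     int n;
 *
 *     while ((n = dpif_flow_dump_next(thread, flows,
 *                                     ARRAY_SIZE(flows))) > 0) {
 *         for (int i = 0; i < n; i++) {
 *             (examine flows[i].key, flows[i].actions, flows[i].stats, ...)
 *         }
 *     }
 *     dpif_flow_dump_thread_destroy(thread);
 *     if (dpif_flow_dump_destroy(dump)) {
 *         VLOG_WARN("flow dump failed");
 *     }
 */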
1146
1147 struct dpif_execute_helper_aux {
1148 struct dpif *dpif;
1149 const struct flow *flow;
1150 int error;
1151 const struct nlattr *meter_action; /* Non-NULL, if have a meter action. */
1152 };
1153
1154 /* This is called for actions that need the context of the datapath to be
1155 * meaningful. */
1156 static void
1157 dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
1158 const struct nlattr *action, bool should_steal)
1159 {
1160 struct dpif_execute_helper_aux *aux = aux_;
1161 int type = nl_attr_type(action);
1162 struct dp_packet *packet = packets_->packets[0];
1163
1164 ovs_assert(dp_packet_batch_size(packets_) == 1);
1165
1166 switch ((enum ovs_action_attr)type) {
1167 case OVS_ACTION_ATTR_METER:
1168 /* Maintain a pointer to the first meter action seen. */
1169 if (!aux->meter_action) {
1170 aux->meter_action = action;
1171 }
1172 break;
1173
1174 case OVS_ACTION_ATTR_CT:
1175 case OVS_ACTION_ATTR_OUTPUT:
1176 case OVS_ACTION_ATTR_LB_OUTPUT:
1177 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1178 case OVS_ACTION_ATTR_TUNNEL_POP:
1179 case OVS_ACTION_ATTR_USERSPACE:
1180 case OVS_ACTION_ATTR_RECIRC: {
1181 struct dpif_execute execute;
1182 struct ofpbuf execute_actions;
1183 uint64_t stub[256 / 8];
1184 struct pkt_metadata *md = &packet->md;
1185
1186 if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
1187 ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
1188
1189 if (aux->meter_action) {
1190 const struct nlattr *a = aux->meter_action;
1191
1192 /* XXX: This code collects meter actions since the last action
1193 * execution via the datapath to be executed right before the
1194 * current action that needs to be executed by the datapath.
1195 * This is only an approximation, but better than nothing.
1196 * Fundamentally, we should have a mechanism by which the
1197 * datapath could return the result of the meter action so that
1198 * we could execute them at the right order. */
1199 do {
1200 ofpbuf_put(&execute_actions, a, NLA_ALIGN(a->nla_len));
1201 /* Find next meter action before 'action', if any. */
1202 do {
1203 a = nl_attr_next(a);
1204 } while (a != action &&
1205 nl_attr_type(a) != OVS_ACTION_ATTR_METER);
1206 } while (a != action);
1207 }
1208
1209 /* The Linux kernel datapath throws away the tunnel information
1210 * that we supply as metadata. We have to use a "set" action to
1211 * supply it. */
1212 if (md->tunnel.ip_dst) {
1213 odp_put_tunnel_action(&md->tunnel, &execute_actions, NULL);
1214 }
1215 ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
1216
1217 execute.actions = execute_actions.data;
1218 execute.actions_len = execute_actions.size;
1219 } else {
1220 execute.actions = action;
1221 execute.actions_len = NLA_ALIGN(action->nla_len);
1222 }
1223
1224 struct dp_packet *clone = NULL;
1225 uint32_t cutlen = dp_packet_get_cutlen(packet);
1226 if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
1227 || type == OVS_ACTION_ATTR_LB_OUTPUT
1228 || type == OVS_ACTION_ATTR_TUNNEL_PUSH
1229 || type == OVS_ACTION_ATTR_TUNNEL_POP
1230 || type == OVS_ACTION_ATTR_USERSPACE)) {
1231 dp_packet_reset_cutlen(packet);
1232 if (!should_steal) {
1233 packet = clone = dp_packet_clone(packet);
1234 }
1235 dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
1236 }
1237
1238 execute.packet = packet;
1239 execute.flow = aux->flow;
1240 execute.needs_help = false;
1241 execute.probe = false;
1242 execute.mtu = 0;
1243 aux->error = dpif_execute(aux->dpif, &execute);
1244 log_execute_message(aux->dpif, &this_module, &execute,
1245 true, aux->error);
1246
1247 dp_packet_delete(clone);
1248
1249 if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
1250 ofpbuf_uninit(&execute_actions);
1251
1252 /* Do not re-use the same meters for later output actions. */
1253 aux->meter_action = NULL;
1254 }
1255 break;
1256 }
1257
1258 case OVS_ACTION_ATTR_HASH:
1259 case OVS_ACTION_ATTR_PUSH_VLAN:
1260 case OVS_ACTION_ATTR_POP_VLAN:
1261 case OVS_ACTION_ATTR_PUSH_MPLS:
1262 case OVS_ACTION_ATTR_POP_MPLS:
1263 case OVS_ACTION_ATTR_SET:
1264 case OVS_ACTION_ATTR_SET_MASKED:
1265 case OVS_ACTION_ATTR_SAMPLE:
1266 case OVS_ACTION_ATTR_TRUNC:
1267 case OVS_ACTION_ATTR_PUSH_ETH:
1268 case OVS_ACTION_ATTR_POP_ETH:
1269 case OVS_ACTION_ATTR_CLONE:
1270 case OVS_ACTION_ATTR_PUSH_NSH:
1271 case OVS_ACTION_ATTR_POP_NSH:
1272 case OVS_ACTION_ATTR_CT_CLEAR:
1273 case OVS_ACTION_ATTR_UNSPEC:
1274 case OVS_ACTION_ATTR_CHECK_PKT_LEN:
1275 case OVS_ACTION_ATTR_DROP:
1276 case __OVS_ACTION_ATTR_MAX:
1277 OVS_NOT_REACHED();
1278 }
1279 dp_packet_delete_batch(packets_, should_steal);
1280 }
1281
1282 /* Executes 'execute' by performing most of the actions in userspace and
1283 * passing the fully constructed packets to 'dpif' for output and userspace
1284 * actions.
1285 *
1286 * This helps with actions that a given 'dpif' doesn't implement directly. */
1287 static int
1288 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1289 {
1290 struct dpif_execute_helper_aux aux = {dpif, execute->flow, 0, NULL};
1291 struct dp_packet_batch pb;
1292
1293 COVERAGE_INC(dpif_execute_with_help);
1294
1295 dp_packet_batch_init_packet(&pb, execute->packet);
1296 odp_execute_actions(&aux, &pb, false, execute->actions,
1297 execute->actions_len, dpif_execute_helper_cb);
1298 return aux.error;
1299 }
1300
1301 /* Returns true if the datapath needs help executing 'execute'. */
1302 static bool
1303 dpif_execute_needs_help(const struct dpif_execute *execute)
1304 {
1305 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1306 }
1307
1308 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1309 int
1310 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1311 {
1312 if (execute->actions_len) {
1313 struct dpif_op *opp;
1314 struct dpif_op op;
1315
1316 op.type = DPIF_OP_EXECUTE;
1317 op.execute = *execute;
1318
1319 opp = &op;
1320 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1321
1322 return op.error;
1323 } else {
1324 return 0;
1325 }
1326 }
1327
1328 /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1329 * which they are specified. Places each operation's results in the "output"
1330 * members documented in comments, and 0 in the 'error' member on success or a
1331 * positive errno on failure.
1332 */
1333 void
1334 dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
1335 enum dpif_offload_type offload_type)
1336 {
1337 if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
1338 size_t i;
1339 for (i = 0; i < n_ops; i++) {
1340 struct dpif_op *op = ops[i];
1341 op->error = EINVAL;
1342 }
1343 return;
1344 }
1345
1346 while (n_ops > 0) {
1347 size_t chunk;
1348
1349 /* Count 'chunk', the number of ops that can be executed without
1350 * needing any help. Ops that need help should be rare, so we
1351 * expect this to ordinarily be 'n_ops', that is, all the ops. */
1352 for (chunk = 0; chunk < n_ops; chunk++) {
1353 struct dpif_op *op = ops[chunk];
1354
1355 if (op->type == DPIF_OP_EXECUTE
1356 && dpif_execute_needs_help(&op->execute)) {
1357 break;
1358 }
1359 }
1360
1361 if (chunk) {
1362 /* Execute a chunk full of ops that the dpif provider can
1363 * handle itself, without help. */
1364 size_t i;
1365
1366 dpif->dpif_class->operate(dpif, ops, chunk, offload_type);
1367
1368 for (i = 0; i < chunk; i++) {
1369 struct dpif_op *op = ops[i];
1370 int error = op->error;
1371
1372 switch (op->type) {
1373 case DPIF_OP_FLOW_PUT: {
1374 struct dpif_flow_put *put = &op->flow_put;
1375
1376 COVERAGE_INC(dpif_flow_put);
1377 log_flow_put_message(dpif, &this_module, put, error);
1378 if (error && put->stats) {
1379 memset(put->stats, 0, sizeof *put->stats);
1380 }
1381 break;
1382 }
1383
1384 case DPIF_OP_FLOW_GET: {
1385 struct dpif_flow_get *get = &op->flow_get;
1386
1387 COVERAGE_INC(dpif_flow_get);
1388 if (error) {
1389 memset(get->flow, 0, sizeof *get->flow);
1390 }
1391 log_flow_get_message(dpif, &this_module, get, error);
1392
1393 break;
1394 }
1395
1396 case DPIF_OP_FLOW_DEL: {
1397 struct dpif_flow_del *del = &op->flow_del;
1398
1399 COVERAGE_INC(dpif_flow_del);
1400 log_flow_del_message(dpif, &this_module, del, error);
1401 if (error && del->stats) {
1402 memset(del->stats, 0, sizeof *del->stats);
1403 }
1404 break;
1405 }
1406
1407 case DPIF_OP_EXECUTE:
1408 COVERAGE_INC(dpif_execute);
1409 log_execute_message(dpif, &this_module, &op->execute,
1410 false, error);
1411 break;
1412 }
1413 }
1414
1415 ops += chunk;
1416 n_ops -= chunk;
1417 } else {
1418 /* Help the dpif provider to execute one op. */
1419 struct dpif_op *op = ops[0];
1420
1421 COVERAGE_INC(dpif_execute);
1422 op->error = dpif_execute_with_help(dpif, &op->execute);
1423 ops++;
1424 n_ops--;
1425 }
1426 }
1427 }
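
/* Illustrative sketch (not part of the original file): deleting two flows in
 * one batched dpif_operate() call instead of two dpif_flow_del() calls.
 * 'ufid1' and 'ufid2' are assumed to hold UFIDs of installed flows and the
 * datapath is assumed to support deletion by UFID alone:
 *
 *     struct dpif_op ops[2], *opp[2];
 *
 *     memset(ops, 0, sizeof ops);
 *     ops[0].type = DPIF_OP_FLOW_DEL;
 *     ops[0].flow_del.ufid = &ufid1;
 *     ops[0].flow_del.pmd_id = NON_PMD_CORE_ID;
 *     ops[1].type = DPIF_OP_FLOW_DEL;
 *     ops[1].flow_del.ufid = &ufid2;
 *     ops[1].flow_del.pmd_id = NON_PMD_CORE_ID;
 *
 *     opp[0] = &ops[0];
 *     opp[1] = &ops[1];
 *     dpif_operate(dpif, opp, 2, DPIF_OFFLOAD_AUTO);
 *
 *     for (int i = 0; i < 2; i++) {
 *         if (ops[i].error) {
 *             VLOG_WARN("flow_del failed (%s)", ovs_strerror(ops[i].error));
 *         }
 *     }
 */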
1428
1429 /* Returns a string that represents 'type', for use in log messages. */
1430 const char *
1431 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1432 {
1433 switch (type) {
1434 case DPIF_UC_MISS: return "miss";
1435 case DPIF_UC_ACTION: return "action";
1436 case DPIF_N_UC_TYPES: default: return "<unknown>";
1437 }
1438 }
1439
1440 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1441 * if successful, otherwise a positive errno value.
1442 *
1443 * Turning packet receive off and then back on may change the Netlink PID
1444 * assignments returned by dpif_port_get_pid(). If the client does this, it
1445 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1446 * using the new PID assignment. */
1447 int
1448 dpif_recv_set(struct dpif *dpif, bool enable)
1449 {
1450 int error = 0;
1451
1452 if (dpif->dpif_class->recv_set) {
1453 error = dpif->dpif_class->recv_set(dpif, enable);
1454 log_operation(dpif, "recv_set", error);
1455 }
1456 return error;
1457 }
1458
1459 /* Refreshes the poll loops and Netlink sockets associated with each port,
1460 * when the number of upcall handlers (upcall receiving threads) is changed
1461 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1462 * recv_set().
1463 *
1464 * Since multiple upcall handlers can read upcalls simultaneously from
1465 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1466 * handler. So, handlers_set() is responsible for the following tasks:
1467 *
1468 * When receiving upcalls is enabled, extends or creates the
1469 * configuration to support:
1470 *
1471 * - 'n_handlers' Netlink sockets for each port.
1472 *
1473 * - 'n_handlers' poll loops, one for each upcall handler.
1474 *
1475 * - registering the Netlink sockets for the same upcall handler to
1476 * the corresponding poll loop.
1477 *
1478 * Returns 0 if successful, otherwise a positive errno value. */
1479 int
1480 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1481 {
1482 int error = 0;
1483
1484 if (dpif->dpif_class->handlers_set) {
1485 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1486 log_operation(dpif, "handlers_set", error);
1487 }
1488 return error;
1489 }
1490
1491 void
1492 dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1493 {
1494 if (dpif->dpif_class->register_dp_purge_cb) {
1495 dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
1496 }
1497 }
1498
1499 void
1500 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1501 {
1502 if (dpif->dpif_class->register_upcall_cb) {
1503 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1504 }
1505 }
1506
1507 void
1508 dpif_enable_upcall(struct dpif *dpif)
1509 {
1510 if (dpif->dpif_class->enable_upcall) {
1511 dpif->dpif_class->enable_upcall(dpif);
1512 }
1513 }
1514
1515 void
1516 dpif_disable_upcall(struct dpif *dpif)
1517 {
1518 if (dpif->dpif_class->disable_upcall) {
1519 dpif->dpif_class->disable_upcall(dpif);
1520 }
1521 }
1522
1523 void
1524 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1525 {
1526 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1527 struct ds flow;
1528 char *packet;
1529
1530 packet = ofp_dp_packet_to_string(&upcall->packet);
1531
1532 ds_init(&flow);
1533 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1534
1535 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1536 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1537 ds_cstr(&flow), packet);
1538
1539 ds_destroy(&flow);
1540 free(packet);
1541 }
1542 }
1543
1544 /* Pass custom configuration to the datapath implementation. Some of the
1545 * changes can be postponed until dpif_run() is called. */
1546 int
1547 dpif_set_config(struct dpif *dpif, const struct smap *cfg)
1548 {
1549 int error = 0;
1550
1551 if (dpif->dpif_class->set_config) {
1552 error = dpif->dpif_class->set_config(dpif, cfg);
1553 if (error) {
1554 log_operation(dpif, "set_config", error);
1555 }
1556 }
1557
1558 return error;
1559 }
1560
1561 /* Polls for an upcall from 'dpif' for an upcall handler. Since there can
1562 * be multiple poll loops, 'handler_id' is needed as index to identify the
1563 * corresponding poll loop. If successful, stores the upcall into '*upcall',
1564 * using 'buf' for storage. Should only be called if 'recv_set' has been used
1565 * to enable receiving packets from 'dpif'.
1566 *
1567 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1568 * 'buf', so their memory cannot be freed separately from 'buf'.
1569 *
1570 * The caller owns the data of 'upcall->packet' and may modify it. If
1571 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1572 * will be reallocated. This requires the data of 'upcall->packet' to be
1573 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1574 * when an error is returned, the 'upcall->packet' may be uninitialized
1575 * and should not be released.
1576 *
1577 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1578 * if no upcall is immediately available. */
1579 int
1580 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1581 struct ofpbuf *buf)
1582 {
1583 int error = EAGAIN;
1584
1585 if (dpif->dpif_class->recv) {
1586 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1587 if (!error) {
1588 dpif_print_packet(dpif, upcall);
1589 } else if (error != EAGAIN) {
1590 log_operation(dpif, "recv", error);
1591 }
1592 }
1593 return error;
1594 }
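
/* Illustrative sketch (not part of the original file): a minimal receive
 * loop for upcall handler 0, assuming dpif_recv_set(dpif, true) and
 * dpif_handlers_set(dpif, 1) have already been called:
 *
 *     for (;;) {
 *         struct dpif_upcall upcall;
 *         struct ofpbuf buf;
 *         uint64_t stub[4096 / 8];
 *         int error;
 *
 *         ofpbuf_use_stub(&buf, stub, sizeof stub);
 *         error = dpif_recv(dpif, 0, &upcall, &buf);
 *         if (!error) {
 *             (process 'upcall'; its key points into 'buf')
 *             ofpbuf_uninit(&buf);
 *         } else if (error == EAGAIN) {
 *             dpif_recv_wait(dpif, 0);
 *             poll_block();
 *         }
 *     }
 */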
1595
1596 /* Discards all messages that would otherwise be received by dpif_recv() on
1597 * 'dpif'. */
1598 void
1599 dpif_recv_purge(struct dpif *dpif)
1600 {
1601 COVERAGE_INC(dpif_purge);
1602 if (dpif->dpif_class->recv_purge) {
1603 dpif->dpif_class->recv_purge(dpif);
1604 }
1605 }
1606
1607 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1608 * has a message queued to be received with the recv member
1609 * function. Since there can be multiple poll loops, 'handler_id' is
1610 * needed as index to identify the corresponding poll loop. */
1611 void
1612 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1613 {
1614 if (dpif->dpif_class->recv_wait) {
1615 dpif->dpif_class->recv_wait(dpif, handler_id);
1616 }
1617 }
1618
1619 /*
1620 * Return the datapath version. Caller is responsible for freeing
1621 * the string.
1622 */
1623 char *
1624 dpif_get_dp_version(const struct dpif *dpif)
1625 {
1626 char *version = NULL;
1627
1628 if (dpif->dpif_class->get_datapath_version) {
1629 version = dpif->dpif_class->get_datapath_version();
1630 }
1631
1632 return version;
1633 }
1634
1635 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1636 * and '*engine_id', respectively. */
1637 void
1638 dpif_get_netflow_ids(const struct dpif *dpif,
1639 uint8_t *engine_type, uint8_t *engine_id)
1640 {
1641 *engine_type = dpif->netflow_engine_type;
1642 *engine_id = dpif->netflow_engine_id;
1643 }
1644
1645 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1646 * value used for setting packet priority.
1647 * On success, returns 0 and stores the priority into '*priority'.
1648 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1649 int
1650 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1651 uint32_t *priority)
1652 {
1653 int error = (dpif->dpif_class->queue_to_priority
1654 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1655 priority)
1656 : EOPNOTSUPP);
1657 if (error) {
1658 *priority = 0;
1659 }
1660 log_operation(dpif, "queue_to_priority", error);
1661 return error;
1662 }
1663 \f
1664 void
1665 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1666 const char *name,
1667 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1668 {
1669 dpif->dpif_class = dpif_class;
1670 dpif->base_name = xstrdup(name);
1671 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1672 dpif->netflow_engine_type = netflow_engine_type;
1673 dpif->netflow_engine_id = netflow_engine_id;
1674 }
1675
1676 /* Undoes the results of initialization.
1677 *
1678 * Normally this function only needs to be called from dpif_close().
1679 * However, it may be called by providers due to an error on opening
1680 * that occurs after initialization. In this case dpif_close() would
1681 * never be called. */
1682 void
1683 dpif_uninit(struct dpif *dpif, bool close)
1684 {
1685 char *base_name = dpif->base_name;
1686 char *full_name = dpif->full_name;
1687
1688 if (close) {
1689 dpif->dpif_class->close(dpif);
1690 }
1691
1692 free(base_name);
1693 free(full_name);
1694 }
1695 \f
1696 static void
1697 log_operation(const struct dpif *dpif, const char *operation, int error)
1698 {
1699 if (!error) {
1700 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1701 } else if (ofperr_is_valid(error)) {
1702 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1703 dpif_name(dpif), operation, ofperr_get_name(error));
1704 } else {
1705 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1706 dpif_name(dpif), operation, ovs_strerror(error));
1707 }
1708 }
1709
1710 static enum vlog_level
1711 flow_message_log_level(int error)
1712 {
1713 /* If flows arrive in a batch, userspace may push down multiple
1714 * unique flow definitions that overlap when wildcards are applied.
1715 * Kernels that support flow wildcarding will reject these flows as
1716 * duplicates (EEXIST), so lower the log level to debug for these
1717 * types of messages. */
1718 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1719 }
1720
1721 static bool
1722 should_log_flow_message(const struct vlog_module *module, int error)
1723 {
1724 return !vlog_should_drop(module, flow_message_log_level(error),
1725 error ? &error_rl : &dpmsg_rl);
1726 }
1727
1728 void
1729 log_flow_message(const struct dpif *dpif, int error,
1730 const struct vlog_module *module,
1731 const char *operation,
1732 const struct nlattr *key, size_t key_len,
1733 const struct nlattr *mask, size_t mask_len,
1734 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
1735 const struct nlattr *actions, size_t actions_len)
1736 {
1737 struct ds ds = DS_EMPTY_INITIALIZER;
1738 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1739 if (error) {
1740 ds_put_cstr(&ds, "failed to ");
1741 }
1742 ds_put_format(&ds, "%s ", operation);
1743 if (error) {
1744 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1745 }
1746 if (ufid) {
1747 odp_format_ufid(ufid, &ds);
1748 ds_put_cstr(&ds, " ");
1749 }
1750 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1751 if (stats) {
1752 ds_put_cstr(&ds, ", ");
1753 dpif_flow_stats_format(stats, &ds);
1754 }
1755 if (actions || actions_len) {
1756 ds_put_cstr(&ds, ", actions:");
1757 format_odp_actions(&ds, actions, actions_len, NULL);
1758 }
1759 vlog(module, flow_message_log_level(error), "%s", ds_cstr(&ds));
1760 ds_destroy(&ds);
1761 }
1762
1763 void
1764 log_flow_put_message(const struct dpif *dpif,
1765 const struct vlog_module *module,
1766 const struct dpif_flow_put *put,
1767 int error)
1768 {
1769 if (should_log_flow_message(module, error)
1770 && !(put->flags & DPIF_FP_PROBE)) {
1771 struct ds s;
1772
1773 ds_init(&s);
1774 ds_put_cstr(&s, "put");
1775 if (put->flags & DPIF_FP_CREATE) {
1776 ds_put_cstr(&s, "[create]");
1777 }
1778 if (put->flags & DPIF_FP_MODIFY) {
1779 ds_put_cstr(&s, "[modify]");
1780 }
1781 if (put->flags & DPIF_FP_ZERO_STATS) {
1782 ds_put_cstr(&s, "[zero]");
1783 }
1784 log_flow_message(dpif, error, module, ds_cstr(&s),
1785 put->key, put->key_len, put->mask, put->mask_len,
1786 put->ufid, put->stats, put->actions,
1787 put->actions_len);
1788 ds_destroy(&s);
1789 }
1790 }
1791
1792 void
1793 log_flow_del_message(const struct dpif *dpif,
1794 const struct vlog_module *module,
1795 const struct dpif_flow_del *del,
1796 int error)
1797 {
1798 if (should_log_flow_message(module, error)) {
1799 log_flow_message(dpif, error, module, "flow_del",
1800 del->key, del->key_len,
1801 NULL, 0, del->ufid, !error ? del->stats : NULL,
1802 NULL, 0);
1803 }
1804 }
1805
1806 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1807 * (0 for success). 'subexecute' should be true if the execution is a result
1808 * of breaking down a larger execution that needed help, false otherwise.
1809 *
1810 *
1811 * XXX In theory, the log message could be deceptive because this function is
1812 * called after the dpif_provider's '->execute' function, which is allowed to
1813 * modify execute->packet and execute->md. In practice, though:
1814 *
1815 * - dpif-netlink doesn't modify execute->packet or execute->md.
1816 *
1817 * - dpif-netdev does modify them but it is less likely to have problems
1818 * because it is built into ovs-vswitchd and cannot have version skew,
1819 * etc.
1820 *
1821 * It would still be better to avoid the potential problem. I don't know of a
1822 * good way to do that, though, that isn't expensive. */
1823 void
1824 log_execute_message(const struct dpif *dpif,
1825 const struct vlog_module *module,
1826 const struct dpif_execute *execute,
1827 bool subexecute, int error)
1828 {
1829 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1830 && !execute->probe) {
1831 struct ds ds = DS_EMPTY_INITIALIZER;
1832 char *packet;
1833 uint64_t stub[1024 / 8];
1834 struct ofpbuf md = OFPBUF_STUB_INITIALIZER(stub);
1835
1836 packet = ofp_packet_to_string(dp_packet_data(execute->packet),
1837 dp_packet_size(execute->packet),
1838 execute->packet->packet_type);
1839 odp_key_from_dp_packet(&md, execute->packet);
1840 ds_put_format(&ds, "%s: %sexecute ",
1841 dpif_name(dpif),
1842 (subexecute ? "sub-"
1843 : dpif_execute_needs_help(execute) ? "super-"
1844 : ""));
1845 format_odp_actions(&ds, execute->actions, execute->actions_len, NULL);
1846 if (error) {
1847 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1848 }
1849 ds_put_format(&ds, " on packet %s", packet);
1850 ds_put_format(&ds, " with metadata ");
1851 odp_flow_format(md.data, md.size, NULL, 0, NULL, &ds, true);
1852 ds_put_format(&ds, " mtu %d", execute->mtu);
1853 vlog(module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1854 ds_destroy(&ds);
1855 free(packet);
1856 ofpbuf_uninit(&md);
1857 }
1858 }
1859
1860 void
1861 log_flow_get_message(const struct dpif *dpif,
1862 const struct vlog_module *module,
1863 const struct dpif_flow_get *get,
1864 int error)
1865 {
1866 if (should_log_flow_message(module, error)) {
1867 log_flow_message(dpif, error, module, "flow_get",
1868 get->key, get->key_len,
1869 get->flow->mask, get->flow->mask_len,
1870 get->ufid, &get->flow->stats,
1871 get->flow->actions, get->flow->actions_len);
1872 }
1873 }
1874
1875 bool
1876 dpif_supports_tnl_push_pop(const struct dpif *dpif)
1877 {
1878 return dpif_is_netdev(dpif);
1879 }
1880
1881 bool
1882 dpif_supports_explicit_drop_action(const struct dpif *dpif)
1883 {
1884 return dpif_is_netdev(dpif);
1885 }
1886
1887 bool
1888 dpif_supports_lb_output_action(const struct dpif *dpif)
1889 {
1890 /*
1891 * Balance-tcp optimization is currently supported in netdev
1892 * datapath only.
1893 */
1894 return dpif_is_netdev(dpif);
1895 }
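/* Illustrative sketch (not part of the upstream file): how a caller might
 * gate optional datapath features on the capability probes above.  The two
 * example_enable_*() helpers are hypothetical stand-ins for the real
 * ofproto-dpif plumbing. */
#if 0
static void
example_apply_optional_features(struct dpif *dpif)
{
    if (dpif_supports_explicit_drop_action(dpif)) {
        /* The datapath can express drops explicitly instead of by
         * omitting output actions. */
        example_enable_explicit_drops();
    }
    if (dpif_supports_lb_output_action(dpif)) {
        /* The datapath can offload balance-tcp bond output. */
        example_enable_lb_output();
    }
}
#endif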
1896
1897 /* Meters */
1898 void
1899 dpif_meter_get_features(const struct dpif *dpif,
1900 struct ofputil_meter_features *features)
1901 {
1902 memset(features, 0, sizeof *features);
1903 if (dpif->dpif_class->meter_get_features) {
1904 dpif->dpif_class->meter_get_features(dpif, features);
1905 }
1906 }
1907
1908 /* Adds or modifies the meter in 'dpif' with the given 'meter_id' and
1909 * the configuration in 'config'.
1910 *
1911 * The meter id specified through 'config->meter_id' is ignored. */
1912 int
1913 dpif_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
1914 struct ofputil_meter_config *config)
1915 {
1916 COVERAGE_INC(dpif_meter_set);
1917
1918 if (!(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
1919 return EBADF; /* Rate unit type not set. */
1920 }
1921
1922 if ((config->flags & OFPMF13_KBPS) && (config->flags & OFPMF13_PKTPS)) {
1923 return EBADF; /* Only one rate unit type may be set. */
1924 }
1925
1926 if (config->n_bands == 0) {
1927 return EINVAL;
1928 }
1929
1930 for (size_t i = 0; i < config->n_bands; i++) {
1931 if (config->bands[i].rate == 0) {
1932 return EDOM; /* Rate must be non-zero. */
1933 }
1934 }
1935
1936 int error = dpif->dpif_class->meter_set(dpif, meter_id, config);
1937 if (!error) {
1938 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
1939 dpif_name(dpif), meter_id.uint32);
1940 } else {
1941 VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
1942 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1943 }
1944 return error;
1945 }
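/* Illustrative sketch (not part of the upstream file): installing a
 * single-band, packet-rate drop meter through dpif_meter_set().  The
 * meter id (100) and the rate are arbitrary example values. */
#if 0
static int
example_install_drop_meter(struct dpif *dpif)
{
    struct ofputil_meter_band band = {
        .type = OFPMBT13_DROP,
        .rate = 1000,               /* Packets per second under OFPMF13_PKTPS. */
    };
    struct ofputil_meter_config config = {
        .flags = OFPMF13_PKTPS,     /* Exactly one rate unit must be set. */
        .n_bands = 1,
        .bands = &band,
    };
    ofproto_meter_id meter_id = { .uint32 = 100 };

    return dpif_meter_set(dpif, meter_id, &config);
}
#endif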
1946
1947 int
1948 dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
1949 struct ofputil_meter_stats *stats, uint16_t n_bands)
1950 {
1951 int error;
1952
1953 COVERAGE_INC(dpif_meter_get);
1954
1955 error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
1956 if (!error) {
1957 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
1958 dpif_name(dpif), meter_id.uint32);
1959 } else {
1960 VLOG_WARN_RL(&error_rl,
1961 "%s: failed to get DPIF meter %"PRIu32" stats: %s",
1962 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1963 stats->packet_in_count = ~0;
1964 stats->byte_in_count = ~0;
1965 stats->n_bands = 0;
1966 }
1967 return error;
1968 }
1969
1970 int
1971 dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
1972 struct ofputil_meter_stats *stats, uint16_t n_bands)
1973 {
1974 int error;
1975
1976 COVERAGE_INC(dpif_meter_del);
1977
1978 error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
1979 if (!error) {
1980 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
1981 dpif_name(dpif), meter_id.uint32);
1982 } else {
1983 VLOG_WARN_RL(&error_rl,
1984 "%s: failed to delete DPIF meter %"PRIu32": %s",
1985 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1986 if (stats) {
1987 stats->packet_in_count = ~0;
1988 stats->byte_in_count = ~0;
1989 stats->n_bands = 0;
1990 }
1991 }
1992 return error;
1993 }
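/* Illustrative sketch (not part of the upstream file): reading a meter's
 * stats and then deleting it.  As the functions above show, on failure the
 * counters are set to all-ones and n_bands to zero. */
#if 0
static void
example_read_and_remove_meter(struct dpif *dpif, ofproto_meter_id meter_id)
{
    struct ofputil_meter_stats stats = { .n_bands = 0 };

    if (!dpif_meter_get(dpif, meter_id, &stats, 0)) {
        VLOG_INFO("meter %"PRIu32": %"PRIu64" packets, %"PRIu64" bytes",
                  meter_id.uint32, stats.packet_in_count,
                  stats.byte_in_count);
    }
    dpif_meter_del(dpif, meter_id, NULL, 0);    /* Stats are optional here. */
}
#endif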
1994
1995 int
1996 dpif_bond_add(struct dpif *dpif, uint32_t bond_id, odp_port_t *slave_map)
1997 {
1998 return dpif->dpif_class->bond_add
1999 ? dpif->dpif_class->bond_add(dpif, bond_id, slave_map)
2000 : EOPNOTSUPP;
2001 }
2002
2003 int
2004 dpif_bond_del(struct dpif *dpif, uint32_t bond_id)
2005 {
2006 return dpif->dpif_class->bond_del
2007 ? dpif->dpif_class->bond_del(dpif, bond_id)
2008 : EOPNOTSUPP;
2009 }
2010
2011 int
2012 dpif_bond_stats_get(struct dpif *dpif, uint32_t bond_id,
2013 uint64_t *n_bytes)
2014 {
2015 memset(n_bytes, 0, BOND_BUCKETS * sizeof *n_bytes);
2016
2017 return dpif->dpif_class->bond_stats_get
2018 ? dpif->dpif_class->bond_stats_get(dpif, bond_id, n_bytes)
2019 : EOPNOTSUPP;
2020 }
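/* Illustrative sketch (not part of the upstream file): the expected call
 * sequence for the bond helpers above.  'slave_map' must hold BOND_BUCKETS
 * entries mapping each hash bucket to a datapath port; 'bond_id' is an
 * arbitrary caller-chosen identifier. */
#if 0
static void
example_bond_round_trip(struct dpif *dpif, uint32_t bond_id,
                        odp_port_t *slave_map)
{
    uint64_t n_bytes[BOND_BUCKETS];

    if (!dpif_bond_add(dpif, bond_id, slave_map)) {
        dpif_bond_stats_get(dpif, bond_id, n_bytes);    /* Per-bucket bytes. */
        dpif_bond_del(dpif, bond_id);
    }
}
#endif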