git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif.c
Commit: "dpif: Remove support for multiple queues per port."
[mirror_ovs.git] / lib / dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "flow.h"
32 #include "netdev.h"
33 #include "netlink.h"
34 #include "odp-execute.h"
35 #include "odp-util.h"
36 #include "openvswitch/ofp-print.h"
37 #include "openvswitch/ofpbuf.h"
38 #include "packets.h"
39 #include "openvswitch/poll-loop.h"
40 #include "route-table.h"
41 #include "seq.h"
42 #include "openvswitch/shash.h"
43 #include "sset.h"
44 #include "timeval.h"
45 #include "tnl-neigh-cache.h"
46 #include "tnl-ports.h"
47 #include "util.h"
48 #include "uuid.h"
49 #include "valgrind.h"
50 #include "openvswitch/ofp-errors.h"
51 #include "openvswitch/vlog.h"
52
53 VLOG_DEFINE_THIS_MODULE(dpif);
54
55 COVERAGE_DEFINE(dpif_destroy);
56 COVERAGE_DEFINE(dpif_port_add);
57 COVERAGE_DEFINE(dpif_port_del);
58 COVERAGE_DEFINE(dpif_flow_flush);
59 COVERAGE_DEFINE(dpif_flow_get);
60 COVERAGE_DEFINE(dpif_flow_put);
61 COVERAGE_DEFINE(dpif_flow_del);
62 COVERAGE_DEFINE(dpif_execute);
63 COVERAGE_DEFINE(dpif_purge);
64 COVERAGE_DEFINE(dpif_execute_with_help);
65 COVERAGE_DEFINE(dpif_meter_set);
66 COVERAGE_DEFINE(dpif_meter_get);
67 COVERAGE_DEFINE(dpif_meter_del);
68
69 static const struct dpif_class *base_dpif_classes[] = {
70 #if defined(__linux__) || defined(_WIN32)
71 &dpif_netlink_class,
72 #endif
73 &dpif_netdev_class,
74 };
75
76 struct registered_dpif_class {
77 const struct dpif_class *dpif_class;
78 int refcount;
79 };
80 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
81 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
82
83 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
84 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
85
86 /* Rate limit for individual messages going to or from the datapath, output at
87 * DBG level. This is very high because, if these are enabled, it is because
88 * we really need to see them. */
89 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
90
91 /* Not really much point in logging many dpif errors. */
92 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
93
94 static void log_operation(const struct dpif *, const char *operation,
95 int error);
96 static bool should_log_flow_message(const struct vlog_module *module,
97 int error);
98
99 /* Incremented whenever tnl route, arp, etc changes. */
100 struct seq *tnl_conf_seq;
101
/* Returns true if datapath ports of type 'type' are backed by internal
 * devices.  In the kernel datapath these are "internal" ports; in the
 * userspace datapath, tap devices play the same role, so both type strings
 * count as internal here. */
static bool
dpif_is_internal_port(const char *type)
{
    if (!strcmp(type, "internal")) {
        return true;
    }
    return !strcmp(type, "tap");
}
110
/* Performs one-time, process-wide initialization of the dpif layer:
 * creates the tunnel-configuration change sequence, registers the dpctl
 * unixctl commands, initializes the tunnel port map, tunnel neighbor cache,
 * and route table, and registers all built-in datapath providers.
 *
 * Safe to call from multiple threads; only the first caller does the work. */
static void
dp_initialize(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        int i;

        tnl_conf_seq = seq_create();
        dpctl_unixctl_register();
        tnl_port_map_init();
        tnl_neigh_cache_init();
        route_table_init();

        /* Register every compiled-in provider (netlink on Linux/Windows,
         * plus the userspace netdev datapath). */
        for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
            dp_register_provider(base_dpif_classes[i]);
        }

        ovsthread_once_done(&once);
    }
}
132
133 static int
134 dp_register_provider__(const struct dpif_class *new_class)
135 {
136 struct registered_dpif_class *registered_class;
137 int error;
138
139 if (sset_contains(&dpif_blacklist, new_class->type)) {
140 VLOG_DBG("attempted to register blacklisted provider: %s",
141 new_class->type);
142 return EINVAL;
143 }
144
145 if (shash_find(&dpif_classes, new_class->type)) {
146 VLOG_WARN("attempted to register duplicate datapath provider: %s",
147 new_class->type);
148 return EEXIST;
149 }
150
151 error = new_class->init ? new_class->init() : 0;
152 if (error) {
153 VLOG_WARN("failed to initialize %s datapath class: %s",
154 new_class->type, ovs_strerror(error));
155 return error;
156 }
157
158 registered_class = xmalloc(sizeof *registered_class);
159 registered_class->dpif_class = new_class;
160 registered_class->refcount = 0;
161
162 shash_add(&dpif_classes, new_class->type, registered_class);
163
164 return 0;
165 }
166
167 /* Registers a new datapath provider. After successful registration, new
168 * datapaths of that type can be opened using dpif_open(). */
169 int
170 dp_register_provider(const struct dpif_class *new_class)
171 {
172 int error;
173
174 ovs_mutex_lock(&dpif_mutex);
175 error = dp_register_provider__(new_class);
176 ovs_mutex_unlock(&dpif_mutex);
177
178 return error;
179 }
180
181 /* Unregisters a datapath provider. 'type' must have been previously
182 * registered and not currently be in use by any dpifs. After unregistration
183 * new datapaths of that type cannot be opened using dpif_open(). */
184 static int
185 dp_unregister_provider__(const char *type)
186 {
187 struct shash_node *node;
188 struct registered_dpif_class *registered_class;
189
190 node = shash_find(&dpif_classes, type);
191 if (!node) {
192 return EAFNOSUPPORT;
193 }
194
195 registered_class = node->data;
196 if (registered_class->refcount) {
197 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
198 return EBUSY;
199 }
200
201 shash_delete(&dpif_classes, node);
202 free(registered_class);
203
204 return 0;
205 }
206
207 /* Unregisters a datapath provider. 'type' must have been previously
208 * registered and not currently be in use by any dpifs. After unregistration
209 * new datapaths of that type cannot be opened using dpif_open(). */
210 int
211 dp_unregister_provider(const char *type)
212 {
213 int error;
214
215 dp_initialize();
216
217 ovs_mutex_lock(&dpif_mutex);
218 error = dp_unregister_provider__(type);
219 ovs_mutex_unlock(&dpif_mutex);
220
221 return error;
222 }
223
224 /* Blacklists a provider. Causes future calls of dp_register_provider() with
225 * a dpif_class which implements 'type' to fail. */
226 void
227 dp_blacklist_provider(const char *type)
228 {
229 ovs_mutex_lock(&dpif_mutex);
230 sset_add(&dpif_blacklist, type);
231 ovs_mutex_unlock(&dpif_mutex);
232 }
233
234 /* Adds the types of all currently registered datapath providers to 'types'.
235 * The caller must first initialize the sset. */
236 void
237 dp_enumerate_types(struct sset *types)
238 {
239 struct shash_node *node;
240
241 dp_initialize();
242
243 ovs_mutex_lock(&dpif_mutex);
244 SHASH_FOR_EACH(node, &dpif_classes) {
245 const struct registered_dpif_class *registered_class = node->data;
246 sset_add(types, registered_class->dpif_class->type);
247 }
248 ovs_mutex_unlock(&dpif_mutex);
249 }
250
251 static void
252 dp_class_unref(struct registered_dpif_class *rc)
253 {
254 ovs_mutex_lock(&dpif_mutex);
255 ovs_assert(rc->refcount);
256 rc->refcount--;
257 ovs_mutex_unlock(&dpif_mutex);
258 }
259
260 static struct registered_dpif_class *
261 dp_class_lookup(const char *type)
262 {
263 struct registered_dpif_class *rc;
264
265 ovs_mutex_lock(&dpif_mutex);
266 rc = shash_find_data(&dpif_classes, type);
267 if (rc) {
268 rc->refcount++;
269 }
270 ovs_mutex_unlock(&dpif_mutex);
271
272 return rc;
273 }
274
275 /* Clears 'names' and enumerates the names of all known created datapaths with
276 * the given 'type'. The caller must first initialize the sset. Returns 0 if
277 * successful, otherwise a positive errno value.
278 *
279 * Some kinds of datapaths might not be practically enumerable. This is not
280 * considered an error. */
281 int
282 dp_enumerate_names(const char *type, struct sset *names)
283 {
284 struct registered_dpif_class *registered_class;
285 const struct dpif_class *dpif_class;
286 int error;
287
288 dp_initialize();
289 sset_clear(names);
290
291 registered_class = dp_class_lookup(type);
292 if (!registered_class) {
293 VLOG_WARN("could not enumerate unknown type: %s", type);
294 return EAFNOSUPPORT;
295 }
296
297 dpif_class = registered_class->dpif_class;
298 error = (dpif_class->enumerate
299 ? dpif_class->enumerate(names, dpif_class)
300 : 0);
301 if (error) {
302 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
303 ovs_strerror(error));
304 }
305 dp_class_unref(registered_class);
306
307 return error;
308 }
309
/* Parses 'datapath_name_', which is of the form [type@]name, into its
 * component pieces.  'name' and 'type' must be freed by the caller.
 *
 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
void
dp_parse_name(const char *datapath_name_, char **name, char **type)
{
    char *copy = xstrdup(datapath_name_);
    char *at = strchr(copy, '@');

    if (at) {
        /* "type@name": split at the '@'. */
        *at = '\0';
        *type = copy;
        *name = xstrdup(dpif_normalize_type(at + 1));
    } else {
        /* Bare "name": the type defaults to the system datapath type. */
        *name = copy;
        *type = xstrdup(dpif_normalize_type(NULL));
    }
}
330
/* Shared implementation of dpif_open() and dpif_create(): opens (or, if
 * 'create' is true, creates) the datapath named 'name' of datapath type
 * 'type'.  On success stores the new dpif in '*dpifp' and returns 0; on
 * failure stores NULL and returns a positive errno value.
 *
 * On success this also walks the datapath's existing ports and records every
 * non-internal one in the global netdev port map (netdev_ports_insert()),
 * so that lookups by port number work for ports created in a previous run. */
static int
do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
{
    struct dpif *dpif = NULL;
    int error;
    struct registered_dpif_class *registered_class;

    dp_initialize();

    /* NULL or "" means the default "system" type. */
    type = dpif_normalize_type(type);
    /* Takes a reference on the class; released below on failure, otherwise
     * held until dpif_close(). */
    registered_class = dp_class_lookup(type);
    if (!registered_class) {
        VLOG_WARN("could not create datapath %s of unknown type %s", name,
                  type);
        error = EAFNOSUPPORT;
        goto exit;
    }

    error = registered_class->dpif_class->open(registered_class->dpif_class,
                                               name, create, &dpif);
    if (!error) {
        struct dpif_port_dump port_dump;
        struct dpif_port dpif_port;

        /* The provider must hand back a dpif of its own class. */
        ovs_assert(dpif->dpif_class == registered_class->dpif_class);

        DPIF_PORT_FOR_EACH(&dpif_port, &port_dump, dpif) {
            struct netdev *netdev;
            int err;

            /* Internal/tap ports are not tracked in the netdev port map. */
            if (dpif_is_internal_port(dpif_port.type)) {
                continue;
            }

            err = netdev_open(dpif_port.name, dpif_port.type, &netdev);

            if (!err) {
                netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
                /* The map keeps its own reference; drop ours. */
                netdev_close(netdev);
            } else {
                /* Best effort: a port we cannot open is logged and skipped,
                 * not treated as a fatal error for the whole open. */
                VLOG_WARN("could not open netdev %s type %s: %s",
                          dpif_port.name, dpif_port.type, ovs_strerror(err));
            }
        }
    } else {
        dp_class_unref(registered_class);
    }

exit:
    *dpifp = error ? NULL : dpif;
    return error;
}
383
384 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
385 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
386 * the empty string to specify the default system type. Returns 0 if
387 * successful, otherwise a positive errno value. On success stores a pointer
388 * to the datapath in '*dpifp', otherwise a null pointer. */
389 int
390 dpif_open(const char *name, const char *type, struct dpif **dpifp)
391 {
392 return do_open(name, type, false, dpifp);
393 }
394
395 /* Tries to create and open a new datapath with the given 'name' and 'type'.
396 * 'type' may be either NULL or the empty string to specify the default system
397 * type. Will fail if a datapath with 'name' and 'type' already exists.
398 * Returns 0 if successful, otherwise a positive errno value. On success
399 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
400 int
401 dpif_create(const char *name, const char *type, struct dpif **dpifp)
402 {
403 return do_open(name, type, true, dpifp);
404 }
405
/* Tries to open a datapath with the given 'name' and 'type', creating it if
 * it does not exist.  'type' may be either NULL or the empty string to
 * specify the default system type.  Returns 0 if successful, otherwise a
 * positive errno value.  On success stores a pointer to the datapath in
 * '*dpifp', otherwise a null pointer. */
int
dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
{
    int error = dpif_create(name, type, dpifp);

    if (!error) {
        return 0;
    }

    if (error == EEXIST || error == EBUSY) {
        /* The datapath already exists; fall back to opening it. */
        error = dpif_open(name, type, dpifp);
        if (error) {
            VLOG_WARN("datapath %s already exists but cannot be opened: %s",
                      name, ovs_strerror(error));
        }
    } else {
        VLOG_WARN("failed to create datapath %s: %s",
                  name, ovs_strerror(error));
    }
    return error;
}
429
430 static void
431 dpif_remove_netdev_ports(struct dpif *dpif) {
432 struct dpif_port_dump port_dump;
433 struct dpif_port dpif_port;
434
435 DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
436 if (!dpif_is_internal_port(dpif_port.type)) {
437 netdev_ports_remove(dpif_port.port_no, dpif->dpif_class);
438 }
439 }
440 }
441
442 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
443 * itself; call dpif_delete() first, instead, if that is desirable. */
444 void
445 dpif_close(struct dpif *dpif)
446 {
447 if (dpif) {
448 struct registered_dpif_class *rc;
449
450 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
451
452 if (rc->refcount == 1) {
453 dpif_remove_netdev_ports(dpif);
454 }
455 dpif_uninit(dpif, true);
456 dp_class_unref(rc);
457 }
458 }
459
460 /* Performs periodic work needed by 'dpif'. */
461 bool
462 dpif_run(struct dpif *dpif)
463 {
464 if (dpif->dpif_class->run) {
465 return dpif->dpif_class->run(dpif);
466 }
467 return false;
468 }
469
470 /* Arranges for poll_block() to wake up when dp_run() needs to be called for
471 * 'dpif'. */
472 void
473 dpif_wait(struct dpif *dpif)
474 {
475 if (dpif->dpif_class->wait) {
476 dpif->dpif_class->wait(dpif);
477 }
478 }
479
480 /* Returns the name of datapath 'dpif' prefixed with the type
481 * (for use in log messages). */
482 const char *
483 dpif_name(const struct dpif *dpif)
484 {
485 return dpif->full_name;
486 }
487
488 /* Returns the name of datapath 'dpif' without the type
489 * (for use in device names). */
490 const char *
491 dpif_base_name(const struct dpif *dpif)
492 {
493 return dpif->base_name;
494 }
495
496 /* Returns the type of datapath 'dpif'. */
497 const char *
498 dpif_type(const struct dpif *dpif)
499 {
500 return dpif->dpif_class->type;
501 }
502
/* Returns the fully spelled out name for the given datapath 'type': a NULL
 * or empty 'type' maps to "system", anything else is returned unchanged.
 *
 * Normalized type strings can be compared with strcmp(); unnormalized ones
 * might differ in spelling yet mean the same type. */
const char *
dpif_normalize_type(const char *type)
{
    if (type == NULL || type[0] == '\0') {
        return "system";
    }
    return type;
}
512
513 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
514 * ports. After calling this function, it does not make sense to pass 'dpif'
515 * to any functions other than dpif_name() or dpif_close(). */
516 int
517 dpif_delete(struct dpif *dpif)
518 {
519 int error;
520
521 COVERAGE_INC(dpif_destroy);
522
523 error = dpif->dpif_class->destroy(dpif);
524 log_operation(dpif, "delete", error);
525 return error;
526 }
527
528 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
529 * otherwise a positive errno value. */
530 int
531 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
532 {
533 int error = dpif->dpif_class->get_stats(dpif, stats);
534 if (error) {
535 memset(stats, 0, sizeof *stats);
536 }
537 log_operation(dpif, "get_stats", error);
538 return error;
539 }
540
541 const char *
542 dpif_port_open_type(const char *datapath_type, const char *port_type)
543 {
544 struct registered_dpif_class *rc;
545
546 datapath_type = dpif_normalize_type(datapath_type);
547
548 ovs_mutex_lock(&dpif_mutex);
549 rc = shash_find_data(&dpif_classes, datapath_type);
550 if (rc && rc->dpif_class->port_open_type) {
551 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
552 }
553 ovs_mutex_unlock(&dpif_mutex);
554
555 return port_type;
556 }
557
/* Attempts to add 'netdev' as a port on 'dpif'.  If 'port_nop' is
 * non-null and its value is not ODPP_NONE, then attempts to use the
 * value as the port number.
 *
 * If successful, returns 0 and sets '*port_nop' to the new port's port
 * number (if 'port_nop' is non-null).  On failure, returns a positive
 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
 * non-null).
 *
 * On success, non-internal ports are also recorded in the global netdev
 * port map so that later lookups by port number can find the netdev. */
int
dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
{
    const char *netdev_name = netdev_get_name(netdev);
    odp_port_t port_no = ODPP_NONE;
    int error;

    COVERAGE_INC(dpif_port_add);

    /* Honor a caller-requested port number, if any. */
    if (port_nop) {
        port_no = *port_nop;
    }

    error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
                    dpif_name(dpif), netdev_name, port_no);

        /* Internal/tap ports are not tracked in the netdev port map. */
        if (!dpif_is_internal_port(netdev_get_type(netdev))) {

            struct dpif_port dpif_port;

            /* netdev_ports_insert() copies the port data, so stack-local
             * pointers into the netdev's strings are fine here. */
            dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
            dpif_port.name = CONST_CAST(char *, netdev_name);
            dpif_port.port_no = port_no;
            netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
        }
    } else {
        if (error != EEXIST) {
            VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
                         dpif_name(dpif), netdev_name, ovs_strerror(error));
        } else {
            /* It's fairly common for upper layers to try to add a duplicate
             * port, and they know how to handle it properly. */
        }
        port_no = ODPP_NONE;
    }
    if (port_nop) {
        *port_nop = port_no;
    }
    return error;
}
608
609 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
610 * otherwise a positive errno value. */
611 int
612 dpif_port_del(struct dpif *dpif, odp_port_t port_no, bool local_delete)
613 {
614 int error = 0;
615
616 COVERAGE_INC(dpif_port_del);
617
618 if (!local_delete) {
619 error = dpif->dpif_class->port_del(dpif, port_no);
620 if (!error) {
621 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
622 dpif_name(dpif), port_no);
623 } else {
624 log_operation(dpif, "port_del", error);
625 }
626 }
627
628 netdev_ports_remove(port_no, dpif->dpif_class);
629 return error;
630 }
631
632 /* Makes a deep copy of 'src' into 'dst'. */
633 void
634 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
635 {
636 dst->name = xstrdup(src->name);
637 dst->type = xstrdup(src->type);
638 dst->port_no = src->port_no;
639 }
640
641 /* Frees memory allocated to members of 'dpif_port'.
642 *
643 * Do not call this function on a dpif_port obtained from
644 * dpif_port_dump_next(): that function retains ownership of the data in the
645 * dpif_port. */
646 void
647 dpif_port_destroy(struct dpif_port *dpif_port)
648 {
649 free(dpif_port->name);
650 free(dpif_port->type);
651 }
652
653 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
654 * true; otherwise, returns false. */
655 bool
656 dpif_port_exists(const struct dpif *dpif, const char *devname)
657 {
658 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
659 if (error != 0 && error != ENODEV) {
660 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
661 dpif_name(dpif), devname, ovs_strerror(error));
662 }
663
664 return !error;
665 }
666
667 /* Refreshes configuration of 'dpif's port. */
668 int
669 dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
670 const struct smap *cfg)
671 {
672 int error = 0;
673
674 if (dpif->dpif_class->port_set_config) {
675 error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
676 if (error) {
677 log_operation(dpif, "port_set_config", error);
678 }
679 }
680
681 return error;
682 }
683
684 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
685 * initializes '*port' appropriately; on failure, returns a positive errno
686 * value.
687 *
688 * Retuns ENODEV if the port doesn't exist.
689 *
690 * The caller owns the data in 'port' and must free it with
691 * dpif_port_destroy() when it is no longer needed. */
692 int
693 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
694 struct dpif_port *port)
695 {
696 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
697 if (!error) {
698 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
699 dpif_name(dpif), port_no, port->name);
700 } else {
701 memset(port, 0, sizeof *port);
702 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
703 dpif_name(dpif), port_no, ovs_strerror(error));
704 }
705 return error;
706 }
707
708 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
709 * initializes '*port' appropriately; on failure, returns a positive errno
710 * value.
711 *
712 * Retuns ENODEV if the port doesn't exist.
713 *
714 * The caller owns the data in 'port' and must free it with
715 * dpif_port_destroy() when it is no longer needed. */
716 int
717 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
718 struct dpif_port *port)
719 {
720 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
721 if (!error) {
722 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
723 dpif_name(dpif), devname, port->port_no);
724 } else {
725 memset(port, 0, sizeof *port);
726
727 /* For ENODEV we use DBG level because the caller is probably
728 * interested in whether 'dpif' actually has a port 'devname', so that
729 * it's not an issue worth logging if it doesn't. Other errors are
730 * uncommon and more likely to indicate a real problem. */
731 VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
732 "%s: failed to query port %s: %s",
733 dpif_name(dpif), devname, ovs_strerror(error));
734 }
735 return error;
736 }
737
738 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
739 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
740 * flows whose packets arrived on port 'port_no'.
741 *
742 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
743 * allocated to any port, that the client may use for special purposes.
744 *
745 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
746 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
747 * disabled and then re-enabled, so a client that does that must be prepared to
748 * update all of the flows that it installed that contain
749 * OVS_ACTION_ATTR_USERSPACE actions. */
750 uint32_t
751 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no)
752 {
753 return (dpif->dpif_class->port_get_pid
754 ? (dpif->dpif_class->port_get_pid)(dpif, port_no)
755 : 0);
756 }
757
758 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
759 * the port's name into the 'name_size' bytes in 'name', ensuring that the
760 * result is null-terminated. On failure, returns a positive errno value and
761 * makes 'name' the empty string. */
762 int
763 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
764 char *name, size_t name_size)
765 {
766 struct dpif_port port;
767 int error;
768
769 ovs_assert(name_size > 0);
770
771 error = dpif_port_query_by_number(dpif, port_no, &port);
772 if (!error) {
773 ovs_strlcpy(name, port.name, name_size);
774 dpif_port_destroy(&port);
775 } else {
776 *name = '\0';
777 }
778 return error;
779 }
780
781 /* Initializes 'dump' to begin dumping the ports in a dpif.
782 *
783 * This function provides no status indication. An error status for the entire
784 * dump operation is provided when it is completed by calling
785 * dpif_port_dump_done().
786 */
787 void
788 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
789 {
790 dump->dpif = dpif;
791 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
792 log_operation(dpif, "port_dump_start", dump->error);
793 }
794
/* Attempts to retrieve another port from 'dump', which must have been
 * initialized with dpif_port_dump_start().  On success, stores a new
 * dpif_port into 'port' and returns true.  On failure, returns false.
 *
 * Failure might indicate an actual error or merely that the last port has
 * been dumped.  An error status for the entire dump operation is provided
 * when it is completed by calling dpif_port_dump_done().
 *
 * The dpif owns the data stored in 'port'.  It will remain valid until at
 * least the next time 'dump' is passed to dpif_port_dump_next() or
 * dpif_port_dump_done(). */
bool
dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
{
    const struct dpif *dpif = dump->dpif;

    /* A previous error (or EOF) is sticky: the dump is already over. */
    if (dump->error) {
        return false;
    }

    dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
    if (dump->error == EOF) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
    } else {
        log_operation(dpif, "port_dump_next", dump->error);
    }

    /* On EOF or a real error, release the provider's dump state now;
     * dpif_port_dump_done() will not call port_dump_done() again because
     * 'dump->error' is nonzero. */
    if (dump->error) {
        dpif->dpif_class->port_dump_done(dpif, dump->state);
        return false;
    }
    return true;
}
828
829 /* Completes port table dump operation 'dump', which must have been initialized
830 * with dpif_port_dump_start(). Returns 0 if the dump operation was
831 * error-free, otherwise a positive errno value describing the problem. */
832 int
833 dpif_port_dump_done(struct dpif_port_dump *dump)
834 {
835 const struct dpif *dpif = dump->dpif;
836 if (!dump->error) {
837 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
838 log_operation(dpif, "port_dump_done", dump->error);
839 }
840 return dump->error == EOF ? 0 : dump->error;
841 }
842
843 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
844 * 'dpif' has changed, this function does one of the following:
845 *
846 * - Stores the name of the device that was added to or deleted from 'dpif' in
847 * '*devnamep' and returns 0. The caller is responsible for freeing
848 * '*devnamep' (with free()) when it no longer needs it.
849 *
850 * - Returns ENOBUFS and sets '*devnamep' to NULL.
851 *
852 * This function may also return 'false positives', where it returns 0 and
853 * '*devnamep' names a device that was not actually added or deleted or it
854 * returns ENOBUFS without any change.
855 *
856 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
857 * return other positive errno values to indicate that something has gone
858 * wrong. */
859 int
860 dpif_port_poll(const struct dpif *dpif, char **devnamep)
861 {
862 int error = dpif->dpif_class->port_poll(dpif, devnamep);
863 if (error) {
864 *devnamep = NULL;
865 }
866 return error;
867 }
868
869 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
870 * value other than EAGAIN. */
871 void
872 dpif_port_poll_wait(const struct dpif *dpif)
873 {
874 dpif->dpif_class->port_poll_wait(dpif);
875 }
876
877 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
878 * arguments must have been initialized through a call to flow_extract().
879 * 'used' is stored into stats->used. */
880 void
881 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
882 long long int used, struct dpif_flow_stats *stats)
883 {
884 stats->tcp_flags = ntohs(flow->tcp_flags);
885 stats->n_bytes = dp_packet_size(packet);
886 stats->n_packets = 1;
887 stats->used = used;
888 }
889
890 /* Appends a human-readable representation of 'stats' to 's'. */
891 void
892 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
893 {
894 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
895 stats->n_packets, stats->n_bytes);
896 if (stats->used) {
897 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
898 } else {
899 ds_put_format(s, "never");
900 }
901 if (stats->tcp_flags) {
902 ds_put_cstr(s, ", flags:");
903 packet_format_tcp_flags(s, stats->tcp_flags);
904 }
905 }
906
/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
 *
 * The hash is salted with a per-process random secret (initialized once,
 * thread-safely), so hash values differ between runs.  The result is then
 * stamped with UUID version-4 bits, making it usable as a UFID. */
void
dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
               const void *key, size_t key_len, ovs_u128 *hash)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static uint32_t secret;

    if (ovsthread_once_start(&once)) {
        secret = random_uint32();
        ovsthread_once_done(&once);
    }
    hash_bytes128(key, key_len, secret, hash);
    uuid_set_bits_v4((struct uuid *)hash);
}
922
923 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
924 * positive errno value. */
925 int
926 dpif_flow_flush(struct dpif *dpif)
927 {
928 int error;
929
930 COVERAGE_INC(dpif_flow_flush);
931
932 error = dpif->dpif_class->flow_flush(dpif);
933 log_operation(dpif, "flow_flush", error);
934 return error;
935 }
936
/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
 * Returns true if the datapath supported installing 'flow', false otherwise.
 * 'name' is used only in log messages to identify the probed feature. */
bool
dpif_probe_feature(struct dpif *dpif, const char *name,
                   const struct ofpbuf *key, const struct ofpbuf *actions,
                   const ovs_u128 *ufid)
{
    struct dpif_flow flow;
    struct ofpbuf reply;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    bool enable_feature = false;
    int error;
    /* 'actions' is optional: probe with an empty action list if absent. */
    const struct nlattr *nl_actions = actions ? actions->data : NULL;
    const size_t nl_actions_size = actions ? actions->size : 0;

    /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
     * restarted) at just the right time such that feature probes from the
     * previous run are still present in the datapath. */
    error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
                          key->data, key->size, NULL, 0,
                          nl_actions, nl_actions_size,
                          ufid, NON_PMD_CORE_ID, NULL);
    if (error) {
        /* EINVAL and EOVERFLOW are the expected "not supported" answers and
         * are not worth a warning; anything else is logged. */
        if (error != EINVAL && error != EOVERFLOW) {
            VLOG_WARN("%s: %s flow probe failed (%s)",
                      dpif_name(dpif), name, ovs_strerror(error));
        }
        return false;
    }

    /* Fetch the flow back; the feature counts as supported only if the flow
     * round-trips (and, when a UFID was supplied, comes back with the same
     * UFID). */
    ofpbuf_use_stack(&reply, &stub, sizeof stub);
    error = dpif_flow_get(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, &reply, &flow);
    if (!error
        && (!ufid || (flow.ufid_present
                      && ovs_u128_equals(*ufid, flow.ufid)))) {
        enable_feature = true;
    }

    /* Always clean up the probe flow, whatever the outcome of the get. */
    error = dpif_flow_del(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, NULL);
    if (error) {
        VLOG_WARN("%s: failed to delete %s feature probe flow",
                  dpif_name(dpif), name);
    }

    return enable_feature;
}
986
987 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
988 int
989 dpif_flow_get(struct dpif *dpif,
990 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
991 const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
992 {
993 struct dpif_op *opp;
994 struct dpif_op op;
995
996 op.type = DPIF_OP_FLOW_GET;
997 op.flow_get.key = key;
998 op.flow_get.key_len = key_len;
999 op.flow_get.ufid = ufid;
1000 op.flow_get.pmd_id = pmd_id;
1001 op.flow_get.buffer = buf;
1002
1003 memset(flow, 0, sizeof *flow);
1004 op.flow_get.flow = flow;
1005 op.flow_get.flow->key = key;
1006 op.flow_get.flow->key_len = key_len;
1007
1008 opp = &op;
1009 dpif_operate(dpif, &opp, 1);
1010
1011 return op.error;
1012 }
1013
1014 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
1015 int
1016 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
1017 const struct nlattr *key, size_t key_len,
1018 const struct nlattr *mask, size_t mask_len,
1019 const struct nlattr *actions, size_t actions_len,
1020 const ovs_u128 *ufid, const unsigned pmd_id,
1021 struct dpif_flow_stats *stats)
1022 {
1023 struct dpif_op *opp;
1024 struct dpif_op op;
1025
1026 op.type = DPIF_OP_FLOW_PUT;
1027 op.flow_put.flags = flags;
1028 op.flow_put.key = key;
1029 op.flow_put.key_len = key_len;
1030 op.flow_put.mask = mask;
1031 op.flow_put.mask_len = mask_len;
1032 op.flow_put.actions = actions;
1033 op.flow_put.actions_len = actions_len;
1034 op.flow_put.ufid = ufid;
1035 op.flow_put.pmd_id = pmd_id;
1036 op.flow_put.stats = stats;
1037
1038 opp = &op;
1039 dpif_operate(dpif, &opp, 1);
1040
1041 return op.error;
1042 }
1043
1044 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
1045 int
1046 dpif_flow_del(struct dpif *dpif,
1047 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
1048 const unsigned pmd_id, struct dpif_flow_stats *stats)
1049 {
1050 struct dpif_op *opp;
1051 struct dpif_op op;
1052
1053 op.type = DPIF_OP_FLOW_DEL;
1054 op.flow_del.key = key;
1055 op.flow_del.key_len = key_len;
1056 op.flow_del.ufid = ufid;
1057 op.flow_del.pmd_id = pmd_id;
1058 op.flow_del.stats = stats;
1059 op.flow_del.terse = false;
1060
1061 opp = &op;
1062 dpif_operate(dpif, &opp, 1);
1063
1064 return op.error;
1065 }
1066
1067 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1068 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1069 * be returned in the dump. Otherwise, all fields will be returned.
1070 *
1071 * This function always successfully returns a dpif_flow_dump. Error
1072 * reporting is deferred to dpif_flow_dump_destroy(). */
1073 struct dpif_flow_dump *
1074 dpif_flow_dump_create(const struct dpif *dpif, bool terse,
1075 struct dpif_flow_dump_types *types)
1076 {
1077 return dpif->dpif_class->flow_dump_create(dpif, terse, types);
1078 }
1079
1080 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1081 * All dpif_flow_dump_thread structures previously created for 'dump' must
1082 * previously have been destroyed.
1083 *
1084 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1085 * value describing the problem. */
1086 int
1087 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1088 {
1089 const struct dpif *dpif = dump->dpif;
1090 int error = dpif->dpif_class->flow_dump_destroy(dump);
1091 log_operation(dpif, "flow_dump_destroy", error);
1092 return error == EOF ? 0 : error;
1093 }
1094
1095 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1096 struct dpif_flow_dump_thread *
1097 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1098 {
1099 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1100 }
1101
1102 /* Releases 'thread'. */
1103 void
1104 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1105 {
1106 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1107 }
1108
1109 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1110 * if and only if no flows remained to be retrieved, otherwise a positive
1111 * number reflecting the number of elements in 'flows[]' that were updated.
1112 * The number of flows returned might be less than 'max_flows' because
1113 * fewer than 'max_flows' remained, because this particular datapath does not
1114 * benefit from batching, or because an error occurred partway through
1115 * retrieval. Thus, the caller should continue calling until a 0 return value,
1116 * even if intermediate return values are less than 'max_flows'.
1117 *
1118 * No error status is immediately provided. An error status for the entire
1119 * dump operation is provided when it is completed by calling
1120 * dpif_flow_dump_destroy().
1121 *
1122 * All of the data stored into 'flows' is owned by the datapath, not by the
1123 * caller, and the caller must not modify or free it. The datapath guarantees
1124 * that it remains accessible and unchanged until the first of:
1125 * - The next call to dpif_flow_dump_next() for 'thread', or
1126 * - The next rcu quiescent period. */
1127 int
1128 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1129 struct dpif_flow *flows, int max_flows)
1130 {
1131 struct dpif *dpif = thread->dpif;
1132 int n;
1133
1134 ovs_assert(max_flows > 0);
1135 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1136 if (n > 0) {
1137 struct dpif_flow *f;
1138
1139 for (f = flows; f < &flows[n]
1140 && should_log_flow_message(&this_module, 0); f++) {
1141 log_flow_message(dpif, 0, &this_module, "flow_dump",
1142 f->key, f->key_len, f->mask, f->mask_len,
1143 &f->ufid, &f->stats, f->actions, f->actions_len);
1144 }
1145 } else {
1146 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1147 }
1148 return n;
1149 }
1150
/* Context threaded through odp_execute_actions() into
 * dpif_execute_helper_cb() by dpif_execute_with_help(). */
struct dpif_execute_helper_aux {
    struct dpif *dpif;        /* Datapath used for the helper sub-executes. */
    const struct flow *flow;  /* 'flow' from the original dpif_execute. */
    int error;                /* Errno from the most recent sub-execute
                               * (overwritten on each one); 0 if none ran
                               * or the last one succeeded. */
    const struct nlattr *meter_action; /* Non-NULL, if have a meter action. */
};
1157
/* This is called for actions that need the context of the datapath to be
 * meaningful.  Invoked by odp_execute_actions() (see
 * dpif_execute_with_help()) once per action it cannot execute in
 * userspace; such actions are pushed back down to the datapath via
 * dpif_execute(). */
static void
dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
                       const struct nlattr *action, bool should_steal)
{
    struct dpif_execute_helper_aux *aux = aux_;
    int type = nl_attr_type(action);
    struct dp_packet *packet = packets_->packets[0];

    /* dpif_execute_with_help() always builds a single-packet batch. */
    ovs_assert(packets_->count == 1);

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_METER:
        /* Maintain a pointer to the first meter action seen.  It is not
         * executed here; it is prepended to the next datapath-executed
         * action below. */
        if (!aux->meter_action) {
            aux->meter_action = action;
        }
        break;

    case OVS_ACTION_ATTR_CT:
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC: {
        struct dpif_execute execute;
        struct ofpbuf execute_actions;
        uint64_t stub[256 / 8];
        struct pkt_metadata *md = &packet->md;

        /* If there is tunnel metadata or a pending meter action, the single
         * action must be wrapped into a composite action list. */
        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_use_stub(&execute_actions, stub, sizeof stub);

            if (aux->meter_action) {
                const struct nlattr *a = aux->meter_action;

                /* XXX: This code collects meter actions since the last action
                 * execution via the datapath to be executed right before the
                 * current action that needs to be executed by the datapath.
                 * This is only an approximation, but better than nothing.
                 * Fundamentally, we should have a mechanism by which the
                 * datapath could return the result of the meter action so that
                 * we could execute them at the right order. */
                do {
                    ofpbuf_put(&execute_actions, a, NLA_ALIGN(a->nla_len));
                    /* Find next meter action before 'action', if any. */
                    do {
                        a = nl_attr_next(a);
                    } while (a != action &&
                             nl_attr_type(a) != OVS_ACTION_ATTR_METER);
                } while (a != action);
            }

            /* The Linux kernel datapath throws away the tunnel information
             * that we supply as metadata.  We have to use a "set" action to
             * supply it. */
            if (md->tunnel.ip_dst) {
                odp_put_tunnel_action(&md->tunnel, &execute_actions, NULL);
            }
            ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));

            execute.actions = execute_actions.data;
            execute.actions_len = execute_actions.size;
        } else {
            /* No wrapping needed: hand the single action down as-is. */
            execute.actions = action;
            execute.actions_len = NLA_ALIGN(action->nla_len);
        }

        /* Apply any pending truncation before the packet leaves userspace.
         * When the packet is not ours to consume ('should_steal' false),
         * truncate a clone so the caller's packet stays intact. */
        struct dp_packet *clone = NULL;
        uint32_t cutlen = dp_packet_get_cutlen(packet);
        if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
                        || type == OVS_ACTION_ATTR_TUNNEL_PUSH
                        || type == OVS_ACTION_ATTR_TUNNEL_POP
                        || type == OVS_ACTION_ATTR_USERSPACE)) {
            dp_packet_reset_cutlen(packet);
            if (!should_steal) {
                packet = clone = dp_packet_clone(packet);
            }
            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
        }

        execute.packet = packet;
        execute.flow = aux->flow;
        execute.needs_help = false;
        execute.probe = false;
        execute.mtu = 0;
        aux->error = dpif_execute(aux->dpif, &execute);
        log_execute_message(aux->dpif, &this_module, &execute,
                            true, aux->error);

        /* No-op when no clone was made (dp_packet_delete(NULL) is safe). */
        dp_packet_delete(clone);

        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_uninit(&execute_actions);

            /* Do not re-use the same meters for later output actions. */
            aux->meter_action = NULL;
        }
        break;
    }

    /* All remaining action types are executable in userspace by
     * odp_execute_actions() itself, so they must never reach this
     * callback. */
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_PUSH_ETH:
    case OVS_ACTION_ATTR_POP_ETH:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_PUSH_NSH:
    case OVS_ACTION_ATTR_POP_NSH:
    case OVS_ACTION_ATTR_CT_CLEAR:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
    dp_packet_delete_batch(packets_, should_steal);
}
1281
1282 /* Executes 'execute' by performing most of the actions in userspace and
1283 * passing the fully constructed packets to 'dpif' for output and userspace
1284 * actions.
1285 *
1286 * This helps with actions that a given 'dpif' doesn't implement directly. */
1287 static int
1288 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1289 {
1290 struct dpif_execute_helper_aux aux = {dpif, execute->flow, 0, NULL};
1291 struct dp_packet_batch pb;
1292
1293 COVERAGE_INC(dpif_execute_with_help);
1294
1295 dp_packet_batch_init_packet(&pb, execute->packet);
1296 odp_execute_actions(&aux, &pb, false, execute->actions,
1297 execute->actions_len, dpif_execute_helper_cb);
1298 return aux.error;
1299 }
1300
1301 /* Returns true if the datapath needs help executing 'execute'. */
1302 static bool
1303 dpif_execute_needs_help(const struct dpif_execute *execute)
1304 {
1305 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1306 }
1307
1308 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1309 int
1310 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1311 {
1312 if (execute->actions_len) {
1313 struct dpif_op *opp;
1314 struct dpif_op op;
1315
1316 op.type = DPIF_OP_EXECUTE;
1317 op.execute = *execute;
1318
1319 opp = &op;
1320 dpif_operate(dpif, &opp, 1);
1321
1322 return op.error;
1323 } else {
1324 return 0;
1325 }
1326 }
1327
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
 * which they are specified.  Places each operation's results in the "output"
 * members documented in comments, and 0 in the 'error' member on success or a
 * positive errno on failure.
 *
 * Ops that the provider cannot execute by itself (see
 * dpif_execute_needs_help()) are peeled off and executed one at a time via
 * dpif_execute_with_help(); everything else is batched down to the
 * provider's operate() callback. */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    while (n_ops > 0) {
        size_t chunk;

        /* Count 'chunk', the number of ops that can be executed without
         * needing any help. Ops that need help should be rare, so we
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
        for (chunk = 0; chunk < n_ops; chunk++) {
            struct dpif_op *op = ops[chunk];

            if (op->type == DPIF_OP_EXECUTE
                && dpif_execute_needs_help(&op->execute)) {
                break;
            }
        }

        if (chunk) {
            /* Execute a chunk full of ops that the dpif provider can
             * handle itself, without help. */
            size_t i;

            dpif->dpif_class->operate(dpif, ops, chunk);

            /* Post-process each completed op: bump coverage counters, log,
             * and zero out any "output" stats/flow the provider did not
             * fill in because of an error. */
            for (i = 0; i < chunk; i++) {
                struct dpif_op *op = ops[i];
                int error = op->error;

                switch (op->type) {
                case DPIF_OP_FLOW_PUT: {
                    struct dpif_flow_put *put = &op->flow_put;

                    COVERAGE_INC(dpif_flow_put);
                    log_flow_put_message(dpif, &this_module, put, error);
                    if (error && put->stats) {
                        memset(put->stats, 0, sizeof *put->stats);
                    }
                    break;
                }

                case DPIF_OP_FLOW_GET: {
                    struct dpif_flow_get *get = &op->flow_get;

                    COVERAGE_INC(dpif_flow_get);
                    /* Zero the flow before logging so the log reflects what
                     * the caller will actually see. */
                    if (error) {
                        memset(get->flow, 0, sizeof *get->flow);
                    }
                    log_flow_get_message(dpif, &this_module, get, error);

                    break;
                }

                case DPIF_OP_FLOW_DEL: {
                    struct dpif_flow_del *del = &op->flow_del;

                    COVERAGE_INC(dpif_flow_del);
                    log_flow_del_message(dpif, &this_module, del, error);
                    if (error && del->stats) {
                        memset(del->stats, 0, sizeof *del->stats);
                    }
                    break;
                }

                case DPIF_OP_EXECUTE:
                    COVERAGE_INC(dpif_execute);
                    log_execute_message(dpif, &this_module, &op->execute,
                                        false, error);
                    break;
                }
            }

            ops += chunk;
            n_ops -= chunk;
        } else {
            /* Help the dpif provider to execute one op.  (chunk == 0 means
             * the first remaining op is an execute that needs help.) */
            struct dpif_op *op = ops[0];

            COVERAGE_INC(dpif_execute);
            op->error = dpif_execute_with_help(dpif, &op->execute);
            ops++;
            n_ops--;
        }
    }
}
1417
1418 /* Returns a string that represents 'type', for use in log messages. */
1419 const char *
1420 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1421 {
1422 switch (type) {
1423 case DPIF_UC_MISS: return "miss";
1424 case DPIF_UC_ACTION: return "action";
1425 case DPIF_N_UC_TYPES: default: return "<unknown>";
1426 }
1427 }
1428
1429 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1430 * if successful, otherwise a positive errno value.
1431 *
1432 * Turning packet receive off and then back on may change the Netlink PID
1433 * assignments returned by dpif_port_get_pid(). If the client does this, it
1434 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1435 * using the new PID assignment. */
1436 int
1437 dpif_recv_set(struct dpif *dpif, bool enable)
1438 {
1439 int error = 0;
1440
1441 if (dpif->dpif_class->recv_set) {
1442 error = dpif->dpif_class->recv_set(dpif, enable);
1443 log_operation(dpif, "recv_set", error);
1444 }
1445 return error;
1446 }
1447
1448 /* Refreshes the poll loops and Netlink sockets associated to each port,
1449 * when the number of upcall handlers (upcall receiving thread) is changed
1450 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1451 * recv_set().
1452 *
1453 * Since multiple upcall handlers can read upcalls simultaneously from
1454 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1455 * handler. So, handlers_set() is responsible for the following tasks:
1456 *
1457 * When receiving upcall is enabled, extends or creates the
1458 * configuration to support:
1459 *
1460 * - 'n_handlers' Netlink sockets for each port.
1461 *
1462 * - 'n_handlers' poll loops, one for each upcall handler.
1463 *
1464 * - registering the Netlink sockets for the same upcall handler to
1465 * the corresponding poll loop.
1466 *
1467 * Returns 0 if successful, otherwise a positive errno value. */
1468 int
1469 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1470 {
1471 int error = 0;
1472
1473 if (dpif->dpif_class->handlers_set) {
1474 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1475 log_operation(dpif, "handlers_set", error);
1476 }
1477 return error;
1478 }
1479
1480 void
1481 dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1482 {
1483 if (dpif->dpif_class->register_dp_purge_cb) {
1484 dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
1485 }
1486 }
1487
1488 void
1489 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1490 {
1491 if (dpif->dpif_class->register_upcall_cb) {
1492 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1493 }
1494 }
1495
1496 void
1497 dpif_enable_upcall(struct dpif *dpif)
1498 {
1499 if (dpif->dpif_class->enable_upcall) {
1500 dpif->dpif_class->enable_upcall(dpif);
1501 }
1502 }
1503
1504 void
1505 dpif_disable_upcall(struct dpif *dpif)
1506 {
1507 if (dpif->dpif_class->disable_upcall) {
1508 dpif->dpif_class->disable_upcall(dpif);
1509 }
1510 }
1511
1512 void
1513 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1514 {
1515 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1516 struct ds flow;
1517 char *packet;
1518
1519 packet = ofp_dp_packet_to_string(&upcall->packet);
1520
1521 ds_init(&flow);
1522 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1523
1524 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1525 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1526 ds_cstr(&flow), packet);
1527
1528 ds_destroy(&flow);
1529 free(packet);
1530 }
1531 }
1532
1533 /* Pass custom configuration to the datapath implementation. Some of the
1534 * changes can be postponed until dpif_run() is called. */
1535 int
1536 dpif_set_config(struct dpif *dpif, const struct smap *cfg)
1537 {
1538 int error = 0;
1539
1540 if (dpif->dpif_class->set_config) {
1541 error = dpif->dpif_class->set_config(dpif, cfg);
1542 if (error) {
1543 log_operation(dpif, "set_config", error);
1544 }
1545 }
1546
1547 return error;
1548 }
1549
1550 /* Polls for an upcall from 'dpif' for an upcall handler. Since there can
1551 * be multiple poll loops, 'handler_id' is needed as index to identify the
1552 * corresponding poll loop. If successful, stores the upcall into '*upcall',
1553 * using 'buf' for storage. Should only be called if 'recv_set' has been used
1554 * to enable receiving packets from 'dpif'.
1555 *
1556 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1557 * 'buf', so their memory cannot be freed separately from 'buf'.
1558 *
1559 * The caller owns the data of 'upcall->packet' and may modify it. If
1560 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1561 * will be reallocated. This requires the data of 'upcall->packet' to be
1562 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1563 * when an error is returned, the 'upcall->packet' may be uninitialized
1564 * and should not be released.
1565 *
1566 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1567 * if no upcall is immediately available. */
1568 int
1569 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1570 struct ofpbuf *buf)
1571 {
1572 int error = EAGAIN;
1573
1574 if (dpif->dpif_class->recv) {
1575 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1576 if (!error) {
1577 dpif_print_packet(dpif, upcall);
1578 } else if (error != EAGAIN) {
1579 log_operation(dpif, "recv", error);
1580 }
1581 }
1582 return error;
1583 }
1584
1585 /* Discards all messages that would otherwise be received by dpif_recv() on
1586 * 'dpif'. */
1587 void
1588 dpif_recv_purge(struct dpif *dpif)
1589 {
1590 COVERAGE_INC(dpif_purge);
1591 if (dpif->dpif_class->recv_purge) {
1592 dpif->dpif_class->recv_purge(dpif);
1593 }
1594 }
1595
1596 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1597 * 'dpif' has a message queued to be received with the recv member
1598 * function. Since there can be multiple poll loops, 'handler_id' is
1599 * needed as index to identify the corresponding poll loop. */
1600 void
1601 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1602 {
1603 if (dpif->dpif_class->recv_wait) {
1604 dpif->dpif_class->recv_wait(dpif, handler_id);
1605 }
1606 }
1607
1608 /*
1609 * Return the datapath version. Caller is responsible for freeing
1610 * the string.
1611 */
1612 char *
1613 dpif_get_dp_version(const struct dpif *dpif)
1614 {
1615 char *version = NULL;
1616
1617 if (dpif->dpif_class->get_datapath_version) {
1618 version = dpif->dpif_class->get_datapath_version();
1619 }
1620
1621 return version;
1622 }
1623
1624 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1625 * and '*engine_id', respectively. */
1626 void
1627 dpif_get_netflow_ids(const struct dpif *dpif,
1628 uint8_t *engine_type, uint8_t *engine_id)
1629 {
1630 *engine_type = dpif->netflow_engine_type;
1631 *engine_id = dpif->netflow_engine_id;
1632 }
1633
1634 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1635 * value used for setting packet priority.
1636 * On success, returns 0 and stores the priority into '*priority'.
1637 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1638 int
1639 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1640 uint32_t *priority)
1641 {
1642 int error = (dpif->dpif_class->queue_to_priority
1643 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1644 priority)
1645 : EOPNOTSUPP);
1646 if (error) {
1647 *priority = 0;
1648 }
1649 log_operation(dpif, "queue_to_priority", error);
1650 return error;
1651 }
1652 \f
1653 void
1654 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1655 const char *name,
1656 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1657 {
1658 dpif->dpif_class = dpif_class;
1659 dpif->base_name = xstrdup(name);
1660 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1661 dpif->netflow_engine_type = netflow_engine_type;
1662 dpif->netflow_engine_id = netflow_engine_id;
1663 }
1664
1665 /* Undoes the results of initialization.
1666 *
1667 * Normally this function only needs to be called from dpif_close().
1668 * However, it may be called by providers due to an error on opening
1669 * that occurs after initialization. It this case dpif_close() would
1670 * never be called. */
1671 void
1672 dpif_uninit(struct dpif *dpif, bool close)
1673 {
1674 char *base_name = dpif->base_name;
1675 char *full_name = dpif->full_name;
1676
1677 if (close) {
1678 dpif->dpif_class->close(dpif);
1679 }
1680
1681 free(base_name);
1682 free(full_name);
1683 }
1684 \f
1685 static void
1686 log_operation(const struct dpif *dpif, const char *operation, int error)
1687 {
1688 if (!error) {
1689 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1690 } else if (ofperr_is_valid(error)) {
1691 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1692 dpif_name(dpif), operation, ofperr_get_name(error));
1693 } else {
1694 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1695 dpif_name(dpif), operation, ovs_strerror(error));
1696 }
1697 }
1698
1699 static enum vlog_level
1700 flow_message_log_level(int error)
1701 {
1702 /* If flows arrive in a batch, userspace may push down multiple
1703 * unique flow definitions that overlap when wildcards are applied.
1704 * Kernels that support flow wildcarding will reject these flows as
1705 * duplicates (EEXIST), so lower the log level to debug for these
1706 * types of messages. */
1707 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1708 }
1709
1710 static bool
1711 should_log_flow_message(const struct vlog_module *module, int error)
1712 {
1713 return !vlog_should_drop(module, flow_message_log_level(error),
1714 error ? &error_rl : &dpmsg_rl);
1715 }
1716
1717 void
1718 log_flow_message(const struct dpif *dpif, int error,
1719 const struct vlog_module *module,
1720 const char *operation,
1721 const struct nlattr *key, size_t key_len,
1722 const struct nlattr *mask, size_t mask_len,
1723 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
1724 const struct nlattr *actions, size_t actions_len)
1725 {
1726 struct ds ds = DS_EMPTY_INITIALIZER;
1727 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1728 if (error) {
1729 ds_put_cstr(&ds, "failed to ");
1730 }
1731 ds_put_format(&ds, "%s ", operation);
1732 if (error) {
1733 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1734 }
1735 if (ufid) {
1736 odp_format_ufid(ufid, &ds);
1737 ds_put_cstr(&ds, " ");
1738 }
1739 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1740 if (stats) {
1741 ds_put_cstr(&ds, ", ");
1742 dpif_flow_stats_format(stats, &ds);
1743 }
1744 if (actions || actions_len) {
1745 ds_put_cstr(&ds, ", actions:");
1746 format_odp_actions(&ds, actions, actions_len, NULL);
1747 }
1748 vlog(module, flow_message_log_level(error), "%s", ds_cstr(&ds));
1749 ds_destroy(&ds);
1750 }
1751
1752 void
1753 log_flow_put_message(const struct dpif *dpif,
1754 const struct vlog_module *module,
1755 const struct dpif_flow_put *put,
1756 int error)
1757 {
1758 if (should_log_flow_message(module, error)
1759 && !(put->flags & DPIF_FP_PROBE)) {
1760 struct ds s;
1761
1762 ds_init(&s);
1763 ds_put_cstr(&s, "put");
1764 if (put->flags & DPIF_FP_CREATE) {
1765 ds_put_cstr(&s, "[create]");
1766 }
1767 if (put->flags & DPIF_FP_MODIFY) {
1768 ds_put_cstr(&s, "[modify]");
1769 }
1770 if (put->flags & DPIF_FP_ZERO_STATS) {
1771 ds_put_cstr(&s, "[zero]");
1772 }
1773 log_flow_message(dpif, error, module, ds_cstr(&s),
1774 put->key, put->key_len, put->mask, put->mask_len,
1775 put->ufid, put->stats, put->actions,
1776 put->actions_len);
1777 ds_destroy(&s);
1778 }
1779 }
1780
1781 void
1782 log_flow_del_message(const struct dpif *dpif,
1783 const struct vlog_module *module,
1784 const struct dpif_flow_del *del,
1785 int error)
1786 {
1787 if (should_log_flow_message(module, error)) {
1788 log_flow_message(dpif, error, module, "flow_del",
1789 del->key, del->key_len,
1790 NULL, 0, del->ufid, !error ? del->stats : NULL,
1791 NULL, 0);
1792 }
1793 }
1794
1795 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1796 * (0 for success). 'subexecute' should be true if the execution is a result
1797 * of breaking down a larger execution that needed help, false otherwise.
1798 *
1799 *
1800 * XXX In theory, the log message could be deceptive because this function is
1801 * called after the dpif_provider's '->execute' function, which is allowed to
1802 * modify execute->packet and execute->md. In practice, though:
1803 *
1804 * - dpif-netlink doesn't modify execute->packet or execute->md.
1805 *
1806 * - dpif-netdev does modify them but it is less likely to have problems
1807 * because it is built into ovs-vswitchd and cannot have version skew,
1808 * etc.
1809 *
1810 * It would still be better to avoid the potential problem. I don't know of a
1811 * good way to do that, though, that isn't expensive. */
1812 void
1813 log_execute_message(const struct dpif *dpif,
1814 const struct vlog_module *module,
1815 const struct dpif_execute *execute,
1816 bool subexecute, int error)
1817 {
1818 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1819 && !execute->probe) {
1820 struct ds ds = DS_EMPTY_INITIALIZER;
1821 char *packet;
1822 uint64_t stub[1024 / 8];
1823 struct ofpbuf md = OFPBUF_STUB_INITIALIZER(stub);
1824
1825 packet = ofp_packet_to_string(dp_packet_data(execute->packet),
1826 dp_packet_size(execute->packet),
1827 execute->packet->packet_type);
1828 odp_key_from_dp_packet(&md, execute->packet);
1829 ds_put_format(&ds, "%s: %sexecute ",
1830 dpif_name(dpif),
1831 (subexecute ? "sub-"
1832 : dpif_execute_needs_help(execute) ? "super-"
1833 : ""));
1834 format_odp_actions(&ds, execute->actions, execute->actions_len, NULL);
1835 if (error) {
1836 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1837 }
1838 ds_put_format(&ds, " on packet %s", packet);
1839 ds_put_format(&ds, " with metadata ");
1840 odp_flow_format(md.data, md.size, NULL, 0, NULL, &ds, true);
1841 ds_put_format(&ds, " mtu %d", execute->mtu);
1842 vlog(module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1843 ds_destroy(&ds);
1844 free(packet);
1845 ofpbuf_uninit(&md);
1846 }
1847 }
1848
1849 void
1850 log_flow_get_message(const struct dpif *dpif,
1851 const struct vlog_module *module,
1852 const struct dpif_flow_get *get,
1853 int error)
1854 {
1855 if (should_log_flow_message(module, error)) {
1856 log_flow_message(dpif, error, module, "flow_get",
1857 get->key, get->key_len,
1858 get->flow->mask, get->flow->mask_len,
1859 get->ufid, &get->flow->stats,
1860 get->flow->actions, get->flow->actions_len);
1861 }
1862 }
1863
/* Returns true if 'dpif' supports tunnel push/pop actions; currently that is
 * exactly the netdev (userspace) datapath. */
bool
dpif_supports_tnl_push_pop(const struct dpif *dpif)
{
    const bool is_userspace_dp = dpif_is_netdev(dpif);

    return is_userspace_dp;
}
1869
1870 /* Meters */
1871 void
1872 dpif_meter_get_features(const struct dpif *dpif,
1873 struct ofputil_meter_features *features)
1874 {
1875 memset(features, 0, sizeof *features);
1876 if (dpif->dpif_class->meter_get_features) {
1877 dpif->dpif_class->meter_get_features(dpif, features);
1878 }
1879 }
1880
1881 /* Adds or modifies the meter in 'dpif' with the given 'meter_id' and
1882 * the configuration in 'config'.
1883 *
1884 * The meter id specified through 'config->meter_id' is ignored. */
1885 int
1886 dpif_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
1887 struct ofputil_meter_config *config)
1888 {
1889 COVERAGE_INC(dpif_meter_set);
1890
1891 if (!(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
1892 return EBADF; /* Rate unit type not set. */
1893 }
1894
1895 if ((config->flags & OFPMF13_KBPS) && (config->flags & OFPMF13_PKTPS)) {
1896 return EBADF; /* Both rate units may not be set. */
1897 }
1898
1899 if (config->n_bands == 0) {
1900 return EINVAL;
1901 }
1902
1903 for (size_t i = 0; i < config->n_bands; i++) {
1904 if (config->bands[i].rate == 0) {
1905 return EDOM; /* Rate must be non-zero */
1906 }
1907 }
1908
1909 int error = dpif->dpif_class->meter_set(dpif, meter_id, config);
1910 if (!error) {
1911 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
1912 dpif_name(dpif), meter_id.uint32);
1913 } else {
1914 VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
1915 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1916 }
1917 return error;
1918 }
1919
1920 int
1921 dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
1922 struct ofputil_meter_stats *stats, uint16_t n_bands)
1923 {
1924 int error;
1925
1926 COVERAGE_INC(dpif_meter_get);
1927
1928 error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
1929 if (!error) {
1930 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
1931 dpif_name(dpif), meter_id.uint32);
1932 } else {
1933 VLOG_WARN_RL(&error_rl,
1934 "%s: failed to get DPIF meter %"PRIu32" stats: %s",
1935 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1936 stats->packet_in_count = ~0;
1937 stats->byte_in_count = ~0;
1938 stats->n_bands = 0;
1939 }
1940 return error;
1941 }
1942
1943 int
1944 dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
1945 struct ofputil_meter_stats *stats, uint16_t n_bands)
1946 {
1947 int error;
1948
1949 COVERAGE_INC(dpif_meter_del);
1950
1951 error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
1952 if (!error) {
1953 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
1954 dpif_name(dpif), meter_id.uint32);
1955 } else {
1956 VLOG_WARN_RL(&error_rl,
1957 "%s: failed to delete DPIF meter %"PRIu32": %s",
1958 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1959 if (stats) {
1960 stats->packet_in_count = ~0;
1961 stats->byte_in_count = ~0;
1962 stats->n_bands = 0;
1963 }
1964 }
1965 return error;
1966 }