]> git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif.c
dpif-netdev: Pass Openvswitch other_config smap to dpif.
[mirror_ovs.git] / lib / dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "flow.h"
32 #include "netdev.h"
33 #include "netlink.h"
34 #include "odp-execute.h"
35 #include "odp-util.h"
36 #include "openvswitch/ofp-print.h"
37 #include "openvswitch/ofp-util.h"
38 #include "openvswitch/ofpbuf.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
42 #include "seq.h"
43 #include "openvswitch/shash.h"
44 #include "sset.h"
45 #include "timeval.h"
46 #include "tnl-neigh-cache.h"
47 #include "tnl-ports.h"
48 #include "util.h"
49 #include "uuid.h"
50 #include "valgrind.h"
51 #include "openvswitch/ofp-errors.h"
52 #include "openvswitch/vlog.h"
53
VLOG_DEFINE_THIS_MODULE(dpif);

/* Coverage counters, one per dpif operation; each is incremented in the
 * corresponding wrapper function below. */
COVERAGE_DEFINE(dpif_destroy);
COVERAGE_DEFINE(dpif_port_add);
COVERAGE_DEFINE(dpif_port_del);
COVERAGE_DEFINE(dpif_flow_flush);
COVERAGE_DEFINE(dpif_flow_get);
COVERAGE_DEFINE(dpif_flow_put);
COVERAGE_DEFINE(dpif_flow_del);
COVERAGE_DEFINE(dpif_execute);
COVERAGE_DEFINE(dpif_purge);
COVERAGE_DEFINE(dpif_execute_with_help);
66
/* Datapath providers registered at startup by dp_initialize().  The netlink
 * provider is only built on Linux and Windows (see the #if below); the
 * userspace netdev provider is always available. */
static const struct dpif_class *base_dpif_classes[] = {
#if defined(__linux__) || defined(_WIN32)
    &dpif_netlink_class,
#endif
    &dpif_netdev_class,
};
73
/* A dpif provider class plus the number of dpifs currently open with that
 * class.  A nonzero refcount prevents unregistration (see
 * dp_unregister_provider__()). */
struct registered_dpif_class {
    const struct dpif_class *dpif_class;
    int refcount;               /* Number of open dpifs using this class. */
};

/* All registered provider classes, keyed by dpif_class->type. */
static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);

/* Provider types that dp_register_provider() must refuse to register. */
static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);

/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;

/* Rate limit for individual messages going to or from the datapath, output at
 * DBG level.  This is very high because, if these are enabled, it is because
 * we really need to see them. */
static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);

/* Not really much point in logging many dpif errors. */
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
91
/* Logging helpers, defined later in this file. */
static void log_flow_message(const struct dpif *dpif, int error,
                             const char *operation,
                             const struct nlattr *key, size_t key_len,
                             const struct nlattr *mask, size_t mask_len,
                             const ovs_u128 *ufid,
                             const struct dpif_flow_stats *stats,
                             const struct nlattr *actions, size_t actions_len);
static void log_operation(const struct dpif *, const char *operation,
                          int error);
static bool should_log_flow_message(int error);
static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
                                 int error);
static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
                                 int error);
static void log_execute_message(struct dpif *, const struct dpif_execute *,
                                bool subexecute, int error);
static void log_flow_get_message(const struct dpif *,
                                 const struct dpif_flow_get *, int error);

/* Incremented whenever tnl route, arp, etc changes. */
struct seq *tnl_conf_seq;
113
114 static void
115 dp_initialize(void)
116 {
117 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
118
119 if (ovsthread_once_start(&once)) {
120 int i;
121
122 tnl_conf_seq = seq_create();
123 dpctl_unixctl_register();
124 tnl_port_map_init();
125 tnl_neigh_cache_init();
126 route_table_init();
127
128 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
129 dp_register_provider(base_dpif_classes[i]);
130 }
131
132 ovsthread_once_done(&once);
133 }
134 }
135
136 static int
137 dp_register_provider__(const struct dpif_class *new_class)
138 {
139 struct registered_dpif_class *registered_class;
140 int error;
141
142 if (sset_contains(&dpif_blacklist, new_class->type)) {
143 VLOG_DBG("attempted to register blacklisted provider: %s",
144 new_class->type);
145 return EINVAL;
146 }
147
148 if (shash_find(&dpif_classes, new_class->type)) {
149 VLOG_WARN("attempted to register duplicate datapath provider: %s",
150 new_class->type);
151 return EEXIST;
152 }
153
154 error = new_class->init ? new_class->init() : 0;
155 if (error) {
156 VLOG_WARN("failed to initialize %s datapath class: %s",
157 new_class->type, ovs_strerror(error));
158 return error;
159 }
160
161 registered_class = xmalloc(sizeof *registered_class);
162 registered_class->dpif_class = new_class;
163 registered_class->refcount = 0;
164
165 shash_add(&dpif_classes, new_class->type, registered_class);
166
167 return 0;
168 }
169
170 /* Registers a new datapath provider. After successful registration, new
171 * datapaths of that type can be opened using dpif_open(). */
172 int
173 dp_register_provider(const struct dpif_class *new_class)
174 {
175 int error;
176
177 ovs_mutex_lock(&dpif_mutex);
178 error = dp_register_provider__(new_class);
179 ovs_mutex_unlock(&dpif_mutex);
180
181 return error;
182 }
183
184 /* Unregisters a datapath provider. 'type' must have been previously
185 * registered and not currently be in use by any dpifs. After unregistration
186 * new datapaths of that type cannot be opened using dpif_open(). */
187 static int
188 dp_unregister_provider__(const char *type)
189 {
190 struct shash_node *node;
191 struct registered_dpif_class *registered_class;
192
193 node = shash_find(&dpif_classes, type);
194 if (!node) {
195 return EAFNOSUPPORT;
196 }
197
198 registered_class = node->data;
199 if (registered_class->refcount) {
200 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
201 return EBUSY;
202 }
203
204 shash_delete(&dpif_classes, node);
205 free(registered_class);
206
207 return 0;
208 }
209
210 /* Unregisters a datapath provider. 'type' must have been previously
211 * registered and not currently be in use by any dpifs. After unregistration
212 * new datapaths of that type cannot be opened using dpif_open(). */
213 int
214 dp_unregister_provider(const char *type)
215 {
216 int error;
217
218 dp_initialize();
219
220 ovs_mutex_lock(&dpif_mutex);
221 error = dp_unregister_provider__(type);
222 ovs_mutex_unlock(&dpif_mutex);
223
224 return error;
225 }
226
/* Blacklists a provider.  Causes future calls of dp_register_provider() with
 * a dpif_class which implements 'type' to fail with EINVAL.  Does not affect
 * a provider of that type that is already registered. */
void
dp_blacklist_provider(const char *type)
{
    ovs_mutex_lock(&dpif_mutex);
    sset_add(&dpif_blacklist, type);
    ovs_mutex_unlock(&dpif_mutex);
}
236
237 /* Adds the types of all currently registered datapath providers to 'types'.
238 * The caller must first initialize the sset. */
239 void
240 dp_enumerate_types(struct sset *types)
241 {
242 struct shash_node *node;
243
244 dp_initialize();
245
246 ovs_mutex_lock(&dpif_mutex);
247 SHASH_FOR_EACH(node, &dpif_classes) {
248 const struct registered_dpif_class *registered_class = node->data;
249 sset_add(types, registered_class->dpif_class->type);
250 }
251 ovs_mutex_unlock(&dpif_mutex);
252 }
253
/* Releases one reference on 'rc' taken by dp_class_lookup().  The refcount
 * must be nonzero (asserted); when it drops to zero the class becomes
 * eligible for dp_unregister_provider(). */
static void
dp_class_unref(struct registered_dpif_class *rc)
{
    ovs_mutex_lock(&dpif_mutex);
    ovs_assert(rc->refcount);
    rc->refcount--;
    ovs_mutex_unlock(&dpif_mutex);
}
262
/* Looks up the registered class for datapath 'type'.  On success returns the
 * class with its refcount incremented (release with dp_class_unref());
 * returns NULL if no such type is registered. */
static struct registered_dpif_class *
dp_class_lookup(const char *type)
{
    struct registered_dpif_class *rc;

    ovs_mutex_lock(&dpif_mutex);
    rc = shash_find_data(&dpif_classes, type);
    if (rc) {
        rc->refcount++;
    }
    ovs_mutex_unlock(&dpif_mutex);

    return rc;
}
277
278 /* Clears 'names' and enumerates the names of all known created datapaths with
279 * the given 'type'. The caller must first initialize the sset. Returns 0 if
280 * successful, otherwise a positive errno value.
281 *
282 * Some kinds of datapaths might not be practically enumerable. This is not
283 * considered an error. */
284 int
285 dp_enumerate_names(const char *type, struct sset *names)
286 {
287 struct registered_dpif_class *registered_class;
288 const struct dpif_class *dpif_class;
289 int error;
290
291 dp_initialize();
292 sset_clear(names);
293
294 registered_class = dp_class_lookup(type);
295 if (!registered_class) {
296 VLOG_WARN("could not enumerate unknown type: %s", type);
297 return EAFNOSUPPORT;
298 }
299
300 dpif_class = registered_class->dpif_class;
301 error = (dpif_class->enumerate
302 ? dpif_class->enumerate(names, dpif_class)
303 : 0);
304 if (error) {
305 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
306 ovs_strerror(error));
307 }
308 dp_class_unref(registered_class);
309
310 return error;
311 }
312
/* Parses 'datapath_name_', which is of the form [type@]name into its
 * component pieces.  'name' and 'type' must be freed by the caller.
 *
 * The returned 'type' is normalized, as if by dpif_normalize_type().
 *
 * Fix: the previous code normalized the *name* half and returned the type
 * raw, contradicting the contract above ("@br0" yielded type "" instead of
 * "system", and "netdev@" turned the empty name into "system"). */
void
dp_parse_name(const char *datapath_name_, char **name, char **type)
{
    char *datapath_name = xstrdup(datapath_name_);
    char *separator;

    separator = strchr(datapath_name, '@');
    if (separator) {
        /* "type@name": split at the '@', normalize the type half. */
        *separator = '\0';
        *name = xstrdup(separator + 1);
        *type = xstrdup(dpif_normalize_type(datapath_name));
        free(datapath_name);
    } else {
        /* Bare "name": the type defaults to the normalized empty type. */
        *name = datapath_name;
        *type = xstrdup(dpif_normalize_type(NULL));
    }
}
333
334 static int
335 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
336 {
337 struct dpif *dpif = NULL;
338 int error;
339 struct registered_dpif_class *registered_class;
340
341 dp_initialize();
342
343 type = dpif_normalize_type(type);
344 registered_class = dp_class_lookup(type);
345 if (!registered_class) {
346 VLOG_WARN("could not create datapath %s of unknown type %s", name,
347 type);
348 error = EAFNOSUPPORT;
349 goto exit;
350 }
351
352 error = registered_class->dpif_class->open(registered_class->dpif_class,
353 name, create, &dpif);
354 if (!error) {
355 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
356 } else {
357 dp_class_unref(registered_class);
358 }
359
360 exit:
361 *dpifp = error ? NULL : dpif;
362 return error;
363 }
364
/* Tries to open an existing datapath named 'name' and type 'type'.  Will fail
 * if no datapath with 'name' and 'type' exists.  'type' may be either NULL or
 * the empty string to specify the default system type.  Returns 0 if
 * successful, otherwise a positive errno value.  On success stores a pointer
 * to the datapath in '*dpifp', otherwise a null pointer. */
int
dpif_open(const char *name, const char *type, struct dpif **dpifp)
{
    return do_open(name, type, false, dpifp);
}
375
/* Tries to create and open a new datapath with the given 'name' and 'type'.
 * 'type' may be either NULL or the empty string to specify the default system
 * type.  Will fail if a datapath with 'name' and 'type' already exists.
 * Returns 0 if successful, otherwise a positive errno value.  On success
 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
int
dpif_create(const char *name, const char *type, struct dpif **dpifp)
{
    return do_open(name, type, true, dpifp);
}
386
/* Tries to open a datapath with the given 'name' and 'type', creating it if
 * it does not exist.  'type' may be either NULL or the empty string to
 * specify the default system type.  Returns 0 if successful, otherwise a
 * positive errno value.  On success stores a pointer to the datapath in
 * '*dpifp', otherwise a null pointer. */
int
dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
{
    int error = dpif_create(name, type, dpifp);

    if (error == EEXIST || error == EBUSY) {
        /* The datapath already exists (or is busy being torn down), so fall
         * back to opening it. */
        error = dpif_open(name, type, dpifp);
        if (error) {
            VLOG_WARN("datapath %s already exists but cannot be opened: %s",
                      name, ovs_strerror(error));
        }
    } else if (error) {
        VLOG_WARN("failed to create datapath %s: %s",
                  name, ovs_strerror(error));
    }
    return error;
}
410
411 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
412 * itself; call dpif_delete() first, instead, if that is desirable. */
413 void
414 dpif_close(struct dpif *dpif)
415 {
416 if (dpif) {
417 struct registered_dpif_class *rc;
418
419 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
420 dpif_uninit(dpif, true);
421 dp_class_unref(rc);
422 }
423 }
424
425 /* Performs periodic work needed by 'dpif'. */
426 bool
427 dpif_run(struct dpif *dpif)
428 {
429 if (dpif->dpif_class->run) {
430 return dpif->dpif_class->run(dpif);
431 }
432 return false;
433 }
434
/* Arranges for poll_block() to wake up when dp_run() needs to be called for
 * 'dpif'.  A no-op for providers without a wait callback. */
void
dpif_wait(struct dpif *dpif)
{
    if (dpif->dpif_class->wait) {
        dpif->dpif_class->wait(dpif);
    }
}
444
/* Returns the name of datapath 'dpif' prefixed with the type
 * (for use in log messages). */
const char *
dpif_name(const struct dpif *dpif)
{
    return dpif->full_name;
}
452
/* Returns the name of datapath 'dpif' without the type
 * (for use in device names). */
const char *
dpif_base_name(const struct dpif *dpif)
{
    return dpif->base_name;
}
460
/* Returns the type of datapath 'dpif' (e.g. as passed to dpif_open()). */
const char *
dpif_type(const struct dpif *dpif)
{
    return dpif->dpif_class->type;
}
467
/* Returns the fully spelled out name for the given datapath 'type': 'type'
 * itself if it is a nonempty string, otherwise the default "system".
 *
 * Normalized type strings can be compared with strcmp().  Unnormalized type
 * strings might be the same even if they have different spellings. */
const char *
dpif_normalize_type(const char *type)
{
    if (type != NULL && type[0] != '\0') {
        return type;
    }
    return "system";
}
477
/* Destroys the datapath that 'dpif' is connected to, first removing all of its
 * ports.  After calling this function, it does not make sense to pass 'dpif'
 * to any functions other than dpif_name() or dpif_close().  Returns 0 if
 * successful, otherwise a positive errno value. */
int
dpif_delete(struct dpif *dpif)
{
    int error;

    COVERAGE_INC(dpif_destroy);

    error = dpif->dpif_class->destroy(dpif);
    log_operation(dpif, "delete", error);
    return error;
}
492
/* Retrieves statistics for 'dpif' into 'stats'.  Returns 0 if successful,
 * otherwise a positive errno value.  On error, 'stats' is zeroed so the
 * caller never sees stale or uninitialized counters. */
int
dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    int error = dpif->dpif_class->get_stats(dpif, stats);
    if (error) {
        memset(stats, 0, sizeof *stats);
    }
    log_operation(dpif, "get_stats", error);
    return error;
}
505
506 const char *
507 dpif_port_open_type(const char *datapath_type, const char *port_type)
508 {
509 struct registered_dpif_class *rc;
510
511 datapath_type = dpif_normalize_type(datapath_type);
512
513 ovs_mutex_lock(&dpif_mutex);
514 rc = shash_find_data(&dpif_classes, datapath_type);
515 if (rc && rc->dpif_class->port_open_type) {
516 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
517 }
518 ovs_mutex_unlock(&dpif_mutex);
519
520 return port_type;
521 }
522
523 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
524 * non-null and its value is not ODPP_NONE, then attempts to use the
525 * value as the port number.
526 *
527 * If successful, returns 0 and sets '*port_nop' to the new port's port
528 * number (if 'port_nop' is non-null). On failure, returns a positive
529 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
530 * non-null). */
531 int
532 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
533 {
534 const char *netdev_name = netdev_get_name(netdev);
535 odp_port_t port_no = ODPP_NONE;
536 int error;
537
538 COVERAGE_INC(dpif_port_add);
539
540 if (port_nop) {
541 port_no = *port_nop;
542 }
543
544 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
545 if (!error) {
546 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
547 dpif_name(dpif), netdev_name, port_no);
548 } else {
549 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
550 dpif_name(dpif), netdev_name, ovs_strerror(error));
551 port_no = ODPP_NONE;
552 }
553 if (port_nop) {
554 *port_nop = port_no;
555 }
556 return error;
557 }
558
559 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
560 * otherwise a positive errno value. */
561 int
562 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
563 {
564 int error;
565
566 COVERAGE_INC(dpif_port_del);
567
568 error = dpif->dpif_class->port_del(dpif, port_no);
569 if (!error) {
570 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
571 dpif_name(dpif), port_no);
572 } else {
573 log_operation(dpif, "port_del", error);
574 }
575 return error;
576 }
577
578 /* Makes a deep copy of 'src' into 'dst'. */
579 void
580 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
581 {
582 dst->name = xstrdup(src->name);
583 dst->type = xstrdup(src->type);
584 dst->port_no = src->port_no;
585 }
586
/* Frees memory allocated to members of 'dpif_port' (but not 'dpif_port'
 * itself).
 *
 * Do not call this function on a dpif_port obtained from
 * dpif_port_dump_next(): that function retains ownership of the data in the
 * dpif_port. */
void
dpif_port_destroy(struct dpif_port *dpif_port)
{
    free(dpif_port->name);
    free(dpif_port->type);
}
598
599 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
600 * true; otherwise, returns false. */
601 bool
602 dpif_port_exists(const struct dpif *dpif, const char *devname)
603 {
604 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
605 if (error != 0 && error != ENODEV) {
606 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
607 dpif_name(dpif), devname, ovs_strerror(error));
608 }
609
610 return !error;
611 }
612
613 /* Refreshes configuration of 'dpif's port. */
614 int
615 dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
616 const struct smap *cfg)
617 {
618 int error = 0;
619
620 if (dpif->dpif_class->port_set_config) {
621 error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
622 if (error) {
623 log_operation(dpif, "port_set_config", error);
624 }
625 }
626
627 return error;
628 }
629
630 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
631 * initializes '*port' appropriately; on failure, returns a positive errno
632 * value.
633 *
634 * Retuns ENODEV if the port doesn't exist.
635 *
636 * The caller owns the data in 'port' and must free it with
637 * dpif_port_destroy() when it is no longer needed. */
638 int
639 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
640 struct dpif_port *port)
641 {
642 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
643 if (!error) {
644 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
645 dpif_name(dpif), port_no, port->name);
646 } else {
647 memset(port, 0, sizeof *port);
648 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
649 dpif_name(dpif), port_no, ovs_strerror(error));
650 }
651 return error;
652 }
653
/* Looks up port named 'devname' in 'dpif'.  On success, returns 0 and
 * initializes '*port' appropriately; on failure, returns a positive errno
 * value and zeroes '*port'.
 *
 * Retuns ENODEV if the port doesn't exist.
 *
 * The caller owns the data in 'port' and must free it with
 * dpif_port_destroy() when it is no longer needed. */
int
dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
                        struct dpif_port *port)
{
    int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
                    dpif_name(dpif), devname, port->port_no);
    } else {
        memset(port, 0, sizeof *port);

        /* For ENODEV we use DBG level because the caller is probably
         * interested in whether 'dpif' actually has a port 'devname', so that
         * it's not an issue worth logging if it doesn't.  Other errors are
         * uncommon and more likely to indicate a real problem. */
        VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
                "%s: failed to query port %s: %s",
                dpif_name(dpif), devname, ovs_strerror(error));
    }
    return error;
}
683
684 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
685 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
686 * flows whose packets arrived on port 'port_no'. In the case where the
687 * provider allocates multiple Netlink PIDs to a single port, it may use
688 * 'hash' to spread load among them. The caller need not use a particular
689 * hash function; a 5-tuple hash is suitable.
690 *
691 * (The datapath implementation might use some different hash function for
692 * distributing packets received via flow misses among PIDs. This means
693 * that packets received via flow misses might be reordered relative to
694 * packets received via userspace actions. This is not ordinarily a
695 * problem.)
696 *
697 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
698 * allocated to any port, that the client may use for special purposes.
699 *
700 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
701 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
702 * disabled and then re-enabled, so a client that does that must be prepared to
703 * update all of the flows that it installed that contain
704 * OVS_ACTION_ATTR_USERSPACE actions. */
705 uint32_t
706 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
707 {
708 return (dpif->dpif_class->port_get_pid
709 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
710 : 0);
711 }
712
713 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
714 * the port's name into the 'name_size' bytes in 'name', ensuring that the
715 * result is null-terminated. On failure, returns a positive errno value and
716 * makes 'name' the empty string. */
717 int
718 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
719 char *name, size_t name_size)
720 {
721 struct dpif_port port;
722 int error;
723
724 ovs_assert(name_size > 0);
725
726 error = dpif_port_query_by_number(dpif, port_no, &port);
727 if (!error) {
728 ovs_strlcpy(name, port.name, name_size);
729 dpif_port_destroy(&port);
730 } else {
731 *name = '\0';
732 }
733 return error;
734 }
735
/* Initializes 'dump' to begin dumping the ports in a dpif.
 *
 * This function provides no status indication.  An error status for the
 * entire dump operation is provided when it is completed by calling
 * dpif_port_dump_done().  Any error here is stashed in 'dump->error', which
 * makes the first dpif_port_dump_next() call return false immediately. */
void
dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
{
    dump->dpif = dpif;
    dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
    log_operation(dpif, "port_dump_start", dump->error);
}
749
/* Attempts to retrieve another port from 'dump', which must have been
 * initialized with dpif_port_dump_start().  On success, stores a new dpif_port
 * into 'port' and returns true.  On failure, returns false.
 *
 * Failure might indicate an actual error or merely that the last port has been
 * dumped.  An error status for the entire dump operation is provided when it
 * is completed by calling dpif_port_dump_done().
 *
 * The dpif owns the data stored in 'port'.  It will remain valid until at
 * least the next time 'dump' is passed to dpif_port_dump_next() or
 * dpif_port_dump_done(). */
bool
dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
{
    const struct dpif *dpif = dump->dpif;

    /* A prior error (including EOF) permanently ends the dump. */
    if (dump->error) {
        return false;
    }

    dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
    if (dump->error == EOF) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
    } else {
        log_operation(dpif, "port_dump_next", dump->error);
    }

    /* On EOF or a real error, release the provider's dump state now; the
     * sticky 'dump->error' keeps dpif_port_dump_done() from doing so again. */
    if (dump->error) {
        dpif->dpif_class->port_dump_done(dpif, dump->state);
        return false;
    }
    return true;
}
783
/* Completes port table dump operation 'dump', which must have been initialized
 * with dpif_port_dump_start().  Returns 0 if the dump operation was
 * error-free, otherwise a positive errno value describing the problem.
 * (EOF recorded by dpif_port_dump_next() means a clean end of dump, so it
 * is reported as success.) */
int
dpif_port_dump_done(struct dpif_port_dump *dump)
{
    const struct dpif *dpif = dump->dpif;
    /* Only finish the provider dump if dpif_port_dump_next() did not already
     * do so (it sets 'dump->error' whenever it calls port_dump_done()). */
    if (!dump->error) {
        dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
        log_operation(dpif, "port_dump_done", dump->error);
    }
    return dump->error == EOF ? 0 : dump->error;
}
797
/* Polls for changes in the set of ports in 'dpif'.  If the set of ports in
 * 'dpif' has changed, this function does one of the following:
 *
 * - Stores the name of the device that was added to or deleted from 'dpif' in
 *   '*devnamep' and returns 0.  The caller is responsible for freeing
 *   '*devnamep' (with free()) when it no longer needs it.
 *
 * - Returns ENOBUFS and sets '*devnamep' to NULL.
 *
 * This function may also return 'false positives', where it returns 0 and
 * '*devnamep' names a device that was not actually added or deleted or it
 * returns ENOBUFS without any change.
 *
 * Returns EAGAIN if the set of ports in 'dpif' has not changed.  May also
 * return other positive errno values to indicate that something has gone
 * wrong. */
int
dpif_port_poll(const struct dpif *dpif, char **devnamep)
{
    int error = dpif->dpif_class->port_poll(dpif, devnamep);
    if (error) {
        /* Guarantee '*devnamep' is NULL on every error path. */
        *devnamep = NULL;
    }
    return error;
}
823
/* Arranges for the poll loop to wake up when port_poll(dpif) will return a
 * value other than EAGAIN. */
void
dpif_port_poll_wait(const struct dpif *dpif)
{
    dpif->dpif_class->port_poll_wait(dpif);
}
831
832 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
833 * arguments must have been initialized through a call to flow_extract().
834 * 'used' is stored into stats->used. */
835 void
836 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
837 long long int used, struct dpif_flow_stats *stats)
838 {
839 stats->tcp_flags = ntohs(flow->tcp_flags);
840 stats->n_bytes = dp_packet_size(packet);
841 stats->n_packets = 1;
842 stats->used = used;
843 }
844
845 /* Appends a human-readable representation of 'stats' to 's'. */
846 void
847 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
848 {
849 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
850 stats->n_packets, stats->n_bytes);
851 if (stats->used) {
852 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
853 } else {
854 ds_put_format(s, "never");
855 }
856 if (stats->tcp_flags) {
857 ds_put_cstr(s, ", flags:");
858 packet_format_tcp_flags(s, stats->tcp_flags);
859 }
860 }
861
/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
 * The hash is salted with a per-process random secret (chosen on first call)
 * and then masked into a version-4 UUID bit pattern, so hashes are stable
 * within one process run but differ across runs. */
void
dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
               const void *key, size_t key_len, ovs_u128 *hash)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static uint32_t secret;

    if (ovsthread_once_start(&once)) {
        secret = random_uint32();
        ovsthread_once_done(&once);
    }
    hash_bytes128(key, key_len, secret, hash);
    uuid_set_bits_v4((struct uuid *)hash);
}
877
/* Deletes all flows from 'dpif'.  Returns 0 if successful, otherwise a
 * positive errno value. */
int
dpif_flow_flush(struct dpif *dpif)
{
    int error;

    COVERAGE_INC(dpif_flow_flush);

    error = dpif->dpif_class->flow_flush(dpif);
    log_operation(dpif, "flow_flush", error);
    return error;
}
891
/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
 * Returns true if the datapath supported installing 'flow', false otherwise.
 * 'name' is used only for log messages identifying the feature probed. */
bool
dpif_probe_feature(struct dpif *dpif, const char *name,
                   const struct ofpbuf *key, const ovs_u128 *ufid)
{
    struct dpif_flow flow;
    struct ofpbuf reply;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    bool enable_feature = false;
    int error;

    /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
     * restarted) at just the right time such that feature probes from the
     * previous run are still present in the datapath. */
    error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
                          key->data, key->size, NULL, 0, NULL, 0,
                          ufid, NON_PMD_CORE_ID, NULL);
    if (error) {
        /* EINVAL is the expected "not supported" answer, so only other
         * errors are worth a warning. */
        if (error != EINVAL) {
            VLOG_WARN("%s: %s flow probe failed (%s)",
                      dpif_name(dpif), name, ovs_strerror(error));
        }
        return false;
    }

    /* Read the flow back; the feature counts as supported only if the flow is
     * present and (when a UFID was supplied) comes back with the same UFID. */
    ofpbuf_use_stack(&reply, &stub, sizeof stub);
    error = dpif_flow_get(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, &reply, &flow);
    if (!error
        && (!ufid || (flow.ufid_present
                      && ovs_u128_equals(*ufid, flow.ufid)))) {
        enable_feature = true;
    }

    /* Always clean up the probe flow, whatever the outcome above. */
    error = dpif_flow_del(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, NULL);
    if (error) {
        VLOG_WARN("%s: failed to delete %s feature probe flow",
                  dpif_name(dpif), name);
    }

    return enable_feature;
}
937
938 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
939 int
940 dpif_flow_get(struct dpif *dpif,
941 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
942 const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
943 {
944 struct dpif_op *opp;
945 struct dpif_op op;
946
947 op.type = DPIF_OP_FLOW_GET;
948 op.u.flow_get.key = key;
949 op.u.flow_get.key_len = key_len;
950 op.u.flow_get.ufid = ufid;
951 op.u.flow_get.pmd_id = pmd_id;
952 op.u.flow_get.buffer = buf;
953
954 memset(flow, 0, sizeof *flow);
955 op.u.flow_get.flow = flow;
956 op.u.flow_get.flow->key = key;
957 op.u.flow_get.flow->key_len = key_len;
958
959 opp = &op;
960 dpif_operate(dpif, &opp, 1);
961
962 return op.error;
963 }
964
965 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
966 int
967 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
968 const struct nlattr *key, size_t key_len,
969 const struct nlattr *mask, size_t mask_len,
970 const struct nlattr *actions, size_t actions_len,
971 const ovs_u128 *ufid, const unsigned pmd_id,
972 struct dpif_flow_stats *stats)
973 {
974 struct dpif_op *opp;
975 struct dpif_op op;
976
977 op.type = DPIF_OP_FLOW_PUT;
978 op.u.flow_put.flags = flags;
979 op.u.flow_put.key = key;
980 op.u.flow_put.key_len = key_len;
981 op.u.flow_put.mask = mask;
982 op.u.flow_put.mask_len = mask_len;
983 op.u.flow_put.actions = actions;
984 op.u.flow_put.actions_len = actions_len;
985 op.u.flow_put.ufid = ufid;
986 op.u.flow_put.pmd_id = pmd_id;
987 op.u.flow_put.stats = stats;
988
989 opp = &op;
990 dpif_operate(dpif, &opp, 1);
991
992 return op.error;
993 }
994
995 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
996 int
997 dpif_flow_del(struct dpif *dpif,
998 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
999 const unsigned pmd_id, struct dpif_flow_stats *stats)
1000 {
1001 struct dpif_op *opp;
1002 struct dpif_op op;
1003
1004 op.type = DPIF_OP_FLOW_DEL;
1005 op.u.flow_del.key = key;
1006 op.u.flow_del.key_len = key_len;
1007 op.u.flow_del.ufid = ufid;
1008 op.u.flow_del.pmd_id = pmd_id;
1009 op.u.flow_del.stats = stats;
1010 op.u.flow_del.terse = false;
1011
1012 opp = &op;
1013 dpif_operate(dpif, &opp, 1);
1014
1015 return op.error;
1016 }
1017
1018 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1019 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1020 * be returned in the dump. Otherwise, all fields will be returned.
1021 *
1022 * This function always successfully returns a dpif_flow_dump. Error
1023 * reporting is deferred to dpif_flow_dump_destroy(). */
1024 struct dpif_flow_dump *
1025 dpif_flow_dump_create(const struct dpif *dpif, bool terse)
1026 {
1027 return dpif->dpif_class->flow_dump_create(dpif, terse);
1028 }
1029
1030 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1031 * All dpif_flow_dump_thread structures previously created for 'dump' must
1032 * previously have been destroyed.
1033 *
1034 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1035 * value describing the problem. */
1036 int
1037 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1038 {
1039 const struct dpif *dpif = dump->dpif;
1040 int error = dpif->dpif_class->flow_dump_destroy(dump);
1041 log_operation(dpif, "flow_dump_destroy", error);
1042 return error == EOF ? 0 : error;
1043 }
1044
1045 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1046 struct dpif_flow_dump_thread *
1047 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1048 {
1049 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1050 }
1051
1052 /* Releases 'thread'. */
1053 void
1054 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1055 {
1056 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1057 }
1058
1059 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1060 * if and only if no flows remained to be retrieved, otherwise a positive
1061 * number reflecting the number of elements in 'flows[]' that were updated.
1062 * The number of flows returned might be less than 'max_flows' because
1063 * fewer than 'max_flows' remained, because this particular datapath does not
1064 * benefit from batching, or because an error occurred partway through
1065 * retrieval. Thus, the caller should continue calling until a 0 return value,
1066 * even if intermediate return values are less than 'max_flows'.
1067 *
1068 * No error status is immediately provided. An error status for the entire
1069 * dump operation is provided when it is completed by calling
1070 * dpif_flow_dump_destroy().
1071 *
1072 * All of the data stored into 'flows' is owned by the datapath, not by the
1073 * caller, and the caller must not modify or free it. The datapath guarantees
1074 * that it remains accessible and unchanged until the first of:
1075 * - The next call to dpif_flow_dump_next() for 'thread', or
1076 * - The next rcu quiescent period. */
1077 int
1078 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1079 struct dpif_flow *flows, int max_flows)
1080 {
1081 struct dpif *dpif = thread->dpif;
1082 int n;
1083
1084 ovs_assert(max_flows > 0);
1085 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1086 if (n > 0) {
1087 struct dpif_flow *f;
1088
1089 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
1090 log_flow_message(dpif, 0, "flow_dump",
1091 f->key, f->key_len, f->mask, f->mask_len,
1092 &f->ufid, &f->stats, f->actions, f->actions_len);
1093 }
1094 } else {
1095 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1096 }
1097 return n;
1098 }
1099
/* State threaded through odp_execute_actions() to dpif_execute_helper_cb(). */
struct dpif_execute_helper_aux {
    struct dpif *dpif;       /* Datapath that "hard" actions are handed to. */
    const struct flow *flow; /* Flow of the packet being executed. */
    int error;               /* Result of the most recent dpif_execute(). */
};
1105
/* This is called for actions that need the context of the datapath to be
 * meaningful: output, tunnel push/pop, userspace, recirculation, and
 * conntrack.  Wraps the single packet in 'packets_' in a 'struct
 * dpif_execute' and hands it back to the datapath via dpif_execute(),
 * recording the result in 'aux_' (a struct dpif_execute_helper_aux). */
static void
dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
                       const struct nlattr *action, bool may_steal)
{
    struct dpif_execute_helper_aux *aux = aux_;
    int type = nl_attr_type(action);
    struct dp_packet *packet = packets_->packets[0];

    /* This helper only ever operates on one packet at a time. */
    ovs_assert(packets_->count == 1);

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_CT:
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC: {
        struct dpif_execute execute;
        struct ofpbuf execute_actions;
        uint64_t stub[256 / 8];      /* Stack storage for 'execute_actions'. */
        struct pkt_metadata *md = &packet->md;
        bool dst_set;

        dst_set = flow_tnl_dst_is_set(&md->tunnel);
        if (dst_set) {
            /* The Linux kernel datapath throws away the tunnel information
             * that we supply as metadata.  We have to use a "set" action to
             * supply it. */
            ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
            odp_put_tunnel_action(&md->tunnel, &execute_actions);
            ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));

            execute.actions = execute_actions.data;
            execute.actions_len = execute_actions.size;
        } else {
            /* Execute just this one action, padded to netlink alignment. */
            execute.actions = action;
            execute.actions_len = NLA_ALIGN(action->nla_len);
        }

        /* Apply any pending truncation before the packet leaves userspace.
         * If we may not modify the caller's packet, truncate a clone
         * instead and delete it afterward. */
        struct dp_packet *clone = NULL;
        uint32_t cutlen = dp_packet_get_cutlen(packet);
        if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
                        || type == OVS_ACTION_ATTR_TUNNEL_PUSH
                        || type == OVS_ACTION_ATTR_TUNNEL_POP
                        || type == OVS_ACTION_ATTR_USERSPACE)) {
            dp_packet_reset_cutlen(packet);
            if (!may_steal) {
                packet = clone = dp_packet_clone(packet);
            }
            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
        }

        execute.packet = packet;
        execute.flow = aux->flow;
        execute.needs_help = false;  /* Avoid recursing back into the helper. */
        execute.probe = false;
        execute.mtu = 0;
        aux->error = dpif_execute(aux->dpif, &execute);
        /* 'true' marks this as a sub-execution in the log. */
        log_execute_message(aux->dpif, &execute, true, aux->error);

        dp_packet_delete(clone);     /* No-op when 'clone' is NULL. */

        if (dst_set) {
            ofpbuf_uninit(&execute_actions);
        }
        break;
    }

    /* All remaining actions are handled directly by odp_execute_actions()
     * and must never reach this callback. */
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
}
1191
1192 /* Executes 'execute' by performing most of the actions in userspace and
1193 * passing the fully constructed packets to 'dpif' for output and userspace
1194 * actions.
1195 *
1196 * This helps with actions that a given 'dpif' doesn't implement directly. */
1197 static int
1198 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1199 {
1200 struct dpif_execute_helper_aux aux = {dpif, execute->flow, 0};
1201 struct dp_packet_batch pb;
1202
1203 COVERAGE_INC(dpif_execute_with_help);
1204
1205 dp_packet_batch_init_packet(&pb, execute->packet);
1206 odp_execute_actions(&aux, &pb, false, execute->actions,
1207 execute->actions_len, dpif_execute_helper_cb);
1208 return aux.error;
1209 }
1210
1211 /* Returns true if the datapath needs help executing 'execute'. */
1212 static bool
1213 dpif_execute_needs_help(const struct dpif_execute *execute)
1214 {
1215 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1216 }
1217
1218 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1219 int
1220 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1221 {
1222 if (execute->actions_len) {
1223 struct dpif_op *opp;
1224 struct dpif_op op;
1225
1226 op.type = DPIF_OP_EXECUTE;
1227 op.u.execute = *execute;
1228
1229 opp = &op;
1230 dpif_operate(dpif, &opp, 1);
1231
1232 return op.error;
1233 } else {
1234 return 0;
1235 }
1236 }
1237
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
 * which they are specified.  Places each operation's results in the "output"
 * members documented in comments, and 0 in the 'error' member on success or a
 * positive errno on failure. */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    while (n_ops > 0) {
        size_t chunk;

        /* Count 'chunk', the number of ops that can be executed without
         * needing any help.  Ops that need help should be rare, so we
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
        for (chunk = 0; chunk < n_ops; chunk++) {
            struct dpif_op *op = ops[chunk];

            if (op->type == DPIF_OP_EXECUTE
                && dpif_execute_needs_help(&op->u.execute)) {
                break;
            }
        }

        if (chunk) {
            /* Execute a chunk full of ops that the dpif provider can
             * handle itself, without help. */
            size_t i;

            dpif->dpif_class->operate(dpif, ops, chunk);

            /* Post-process each completed op: bump the coverage counter,
             * log, and zero any caller-visible output struct that the
             * provider may have left stale on error. */
            for (i = 0; i < chunk; i++) {
                struct dpif_op *op = ops[i];
                int error = op->error;

                switch (op->type) {
                case DPIF_OP_FLOW_PUT: {
                    struct dpif_flow_put *put = &op->u.flow_put;

                    COVERAGE_INC(dpif_flow_put);
                    log_flow_put_message(dpif, put, error);
                    if (error && put->stats) {
                        /* Don't hand stale stats back to the caller. */
                        memset(put->stats, 0, sizeof *put->stats);
                    }
                    break;
                }

                case DPIF_OP_FLOW_GET: {
                    struct dpif_flow_get *get = &op->u.flow_get;

                    COVERAGE_INC(dpif_flow_get);
                    if (error) {
                        /* Zero before logging: the log routine reads the
                         * (now well-defined) flow fields. */
                        memset(get->flow, 0, sizeof *get->flow);
                    }
                    log_flow_get_message(dpif, get, error);

                    break;
                }

                case DPIF_OP_FLOW_DEL: {
                    struct dpif_flow_del *del = &op->u.flow_del;

                    COVERAGE_INC(dpif_flow_del);
                    log_flow_del_message(dpif, del, error);
                    if (error && del->stats) {
                        memset(del->stats, 0, sizeof *del->stats);
                    }
                    break;
                }

                case DPIF_OP_EXECUTE:
                    COVERAGE_INC(dpif_execute);
                    log_execute_message(dpif, &op->u.execute, false, error);
                    break;
                }
            }

            /* Advance past the chunk we just completed. */
            ops += chunk;
            n_ops -= chunk;
        } else {
            /* Help the dpif provider to execute one op. */
            struct dpif_op *op = ops[0];

            COVERAGE_INC(dpif_execute);
            op->error = dpif_execute_with_help(dpif, &op->u.execute);
            ops++;
            n_ops--;
        }
    }
}
1326
1327 /* Returns a string that represents 'type', for use in log messages. */
1328 const char *
1329 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1330 {
1331 switch (type) {
1332 case DPIF_UC_MISS: return "miss";
1333 case DPIF_UC_ACTION: return "action";
1334 case DPIF_N_UC_TYPES: default: return "<unknown>";
1335 }
1336 }
1337
1338 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1339 * if successful, otherwise a positive errno value.
1340 *
1341 * Turning packet receive off and then back on may change the Netlink PID
1342 * assignments returned by dpif_port_get_pid(). If the client does this, it
1343 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1344 * using the new PID assignment. */
1345 int
1346 dpif_recv_set(struct dpif *dpif, bool enable)
1347 {
1348 int error = 0;
1349
1350 if (dpif->dpif_class->recv_set) {
1351 error = dpif->dpif_class->recv_set(dpif, enable);
1352 log_operation(dpif, "recv_set", error);
1353 }
1354 return error;
1355 }
1356
1357 /* Refreshes the poll loops and Netlink sockets associated to each port,
1358 * when the number of upcall handlers (upcall receiving thread) is changed
1359 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1360 * recv_set().
1361 *
1362 * Since multiple upcall handlers can read upcalls simultaneously from
1363 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1364 * handler. So, handlers_set() is responsible for the following tasks:
1365 *
1366 * When receiving upcall is enabled, extends or creates the
1367 * configuration to support:
1368 *
1369 * - 'n_handlers' Netlink sockets for each port.
1370 *
1371 * - 'n_handlers' poll loops, one for each upcall handler.
1372 *
1373 * - registering the Netlink sockets for the same upcall handler to
1374 * the corresponding poll loop.
1375 *
1376 * Returns 0 if successful, otherwise a positive errno value. */
1377 int
1378 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1379 {
1380 int error = 0;
1381
1382 if (dpif->dpif_class->handlers_set) {
1383 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1384 log_operation(dpif, "handlers_set", error);
1385 }
1386 return error;
1387 }
1388
1389 void
1390 dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1391 {
1392 if (dpif->dpif_class->register_dp_purge_cb) {
1393 dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
1394 }
1395 }
1396
1397 void
1398 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1399 {
1400 if (dpif->dpif_class->register_upcall_cb) {
1401 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1402 }
1403 }
1404
1405 void
1406 dpif_enable_upcall(struct dpif *dpif)
1407 {
1408 if (dpif->dpif_class->enable_upcall) {
1409 dpif->dpif_class->enable_upcall(dpif);
1410 }
1411 }
1412
1413 void
1414 dpif_disable_upcall(struct dpif *dpif)
1415 {
1416 if (dpif->dpif_class->disable_upcall) {
1417 dpif->dpif_class->disable_upcall(dpif);
1418 }
1419 }
1420
1421 void
1422 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1423 {
1424 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1425 struct ds flow;
1426 char *packet;
1427
1428 packet = ofp_packet_to_string(dp_packet_data(&upcall->packet),
1429 dp_packet_size(&upcall->packet));
1430
1431 ds_init(&flow);
1432 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1433
1434 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1435 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1436 ds_cstr(&flow), packet);
1437
1438 ds_destroy(&flow);
1439 free(packet);
1440 }
1441 }
1442
1443 /* Pass custom configuration to the datapath implementation. Some of the
1444 * changes can be postponed until dpif_run() is called. */
1445 int
1446 dpif_set_config(struct dpif *dpif, const struct smap *cfg)
1447 {
1448 int error = 0;
1449
1450 if (dpif->dpif_class->set_config) {
1451 error = dpif->dpif_class->set_config(dpif, cfg);
1452 if (error) {
1453 log_operation(dpif, "set_config", error);
1454 }
1455 }
1456
1457 return error;
1458 }
1459
1460 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1461 * there can be multiple poll loops, 'handler_id' is needed as index to
1462 * identify the corresponding poll loop. If successful, stores the upcall
1463 * into '*upcall', using 'buf' for storage. Should only be called if
1464 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1465 *
1466 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1467 * 'buf', so their memory cannot be freed separately from 'buf'.
1468 *
1469 * The caller owns the data of 'upcall->packet' and may modify it. If
1470 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1471 * will be reallocated. This requires the data of 'upcall->packet' to be
1472 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1473 * when an error is returned, the 'upcall->packet' may be uninitialized
1474 * and should not be released.
1475 *
1476 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1477 * if no upcall is immediately available. */
1478 int
1479 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1480 struct ofpbuf *buf)
1481 {
1482 int error = EAGAIN;
1483
1484 if (dpif->dpif_class->recv) {
1485 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1486 if (!error) {
1487 dpif_print_packet(dpif, upcall);
1488 } else if (error != EAGAIN) {
1489 log_operation(dpif, "recv", error);
1490 }
1491 }
1492 return error;
1493 }
1494
1495 /* Discards all messages that would otherwise be received by dpif_recv() on
1496 * 'dpif'. */
1497 void
1498 dpif_recv_purge(struct dpif *dpif)
1499 {
1500 COVERAGE_INC(dpif_purge);
1501 if (dpif->dpif_class->recv_purge) {
1502 dpif->dpif_class->recv_purge(dpif);
1503 }
1504 }
1505
1506 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1507 * 'dpif' has a message queued to be received with the recv member
1508 * function. Since there can be multiple poll loops, 'handler_id' is
1509 * needed as index to identify the corresponding poll loop. */
1510 void
1511 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1512 {
1513 if (dpif->dpif_class->recv_wait) {
1514 dpif->dpif_class->recv_wait(dpif, handler_id);
1515 }
1516 }
1517
1518 /*
1519 * Return the datapath version. Caller is responsible for freeing
1520 * the string.
1521 */
1522 char *
1523 dpif_get_dp_version(const struct dpif *dpif)
1524 {
1525 char *version = NULL;
1526
1527 if (dpif->dpif_class->get_datapath_version) {
1528 version = dpif->dpif_class->get_datapath_version();
1529 }
1530
1531 return version;
1532 }
1533
1534 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1535 * and '*engine_id', respectively. */
1536 void
1537 dpif_get_netflow_ids(const struct dpif *dpif,
1538 uint8_t *engine_type, uint8_t *engine_id)
1539 {
1540 *engine_type = dpif->netflow_engine_type;
1541 *engine_id = dpif->netflow_engine_id;
1542 }
1543
1544 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1545 * value used for setting packet priority.
1546 * On success, returns 0 and stores the priority into '*priority'.
1547 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1548 int
1549 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1550 uint32_t *priority)
1551 {
1552 int error = (dpif->dpif_class->queue_to_priority
1553 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1554 priority)
1555 : EOPNOTSUPP);
1556 if (error) {
1557 *priority = 0;
1558 }
1559 log_operation(dpif, "queue_to_priority", error);
1560 return error;
1561 }
1562 \f
1563 void
1564 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1565 const char *name,
1566 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1567 {
1568 dpif->dpif_class = dpif_class;
1569 dpif->base_name = xstrdup(name);
1570 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1571 dpif->netflow_engine_type = netflow_engine_type;
1572 dpif->netflow_engine_id = netflow_engine_id;
1573 }
1574
1575 /* Undoes the results of initialization.
1576 *
1577 * Normally this function only needs to be called from dpif_close().
1578 * However, it may be called by providers due to an error on opening
1579 * that occurs after initialization. It this case dpif_close() would
1580 * never be called. */
1581 void
1582 dpif_uninit(struct dpif *dpif, bool close)
1583 {
1584 char *base_name = dpif->base_name;
1585 char *full_name = dpif->full_name;
1586
1587 if (close) {
1588 dpif->dpif_class->close(dpif);
1589 }
1590
1591 free(base_name);
1592 free(full_name);
1593 }
1594 \f
1595 static void
1596 log_operation(const struct dpif *dpif, const char *operation, int error)
1597 {
1598 if (!error) {
1599 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1600 } else if (ofperr_is_valid(error)) {
1601 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1602 dpif_name(dpif), operation, ofperr_get_name(error));
1603 } else {
1604 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1605 dpif_name(dpif), operation, ovs_strerror(error));
1606 }
1607 }
1608
1609 static enum vlog_level
1610 flow_message_log_level(int error)
1611 {
1612 /* If flows arrive in a batch, userspace may push down multiple
1613 * unique flow definitions that overlap when wildcards are applied.
1614 * Kernels that support flow wildcarding will reject these flows as
1615 * duplicates (EEXIST), so lower the log level to debug for these
1616 * types of messages. */
1617 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1618 }
1619
1620 static bool
1621 should_log_flow_message(int error)
1622 {
1623 return !vlog_should_drop(&this_module, flow_message_log_level(error),
1624 error ? &error_rl : &dpmsg_rl);
1625 }
1626
/* Logs a flow operation on 'dpif' at a level chosen by
 * flow_message_log_level(error).  The message takes the form:
 *
 *     <dpif>: [failed to ]<operation> [(<error>)] [<ufid> ]<flow>
 *             [, <stats>][, actions:<actions>]
 *
 * 'ufid' and 'stats' may be NULL and 'actions'/'actions_len' may be
 * NULL/zero, in which case the corresponding part is omitted. */
static void
log_flow_message(const struct dpif *dpif, int error, const char *operation,
                 const struct nlattr *key, size_t key_len,
                 const struct nlattr *mask, size_t mask_len,
                 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
                 const struct nlattr *actions, size_t actions_len)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s: ", dpif_name(dpif));
    if (error) {
        ds_put_cstr(&ds, "failed to ");
    }
    ds_put_format(&ds, "%s ", operation);
    if (error) {
        ds_put_format(&ds, "(%s) ", ovs_strerror(error));
    }
    if (ufid) {
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
    }
    odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
    if (stats) {
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(stats, &ds);
    }
    /* Either a non-NULL pointer or a nonzero length is enough to emit the
     * actions section. */
    if (actions || actions_len) {
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len);
    }
    vlog(&this_module, flow_message_log_level(error), "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
1659
1660 static void
1661 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1662 int error)
1663 {
1664 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
1665 struct ds s;
1666
1667 ds_init(&s);
1668 ds_put_cstr(&s, "put");
1669 if (put->flags & DPIF_FP_CREATE) {
1670 ds_put_cstr(&s, "[create]");
1671 }
1672 if (put->flags & DPIF_FP_MODIFY) {
1673 ds_put_cstr(&s, "[modify]");
1674 }
1675 if (put->flags & DPIF_FP_ZERO_STATS) {
1676 ds_put_cstr(&s, "[zero]");
1677 }
1678 log_flow_message(dpif, error, ds_cstr(&s),
1679 put->key, put->key_len, put->mask, put->mask_len,
1680 put->ufid, put->stats, put->actions,
1681 put->actions_len);
1682 ds_destroy(&s);
1683 }
1684 }
1685
1686 static void
1687 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1688 int error)
1689 {
1690 if (should_log_flow_message(error)) {
1691 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1692 NULL, 0, del->ufid, !error ? del->stats : NULL,
1693 NULL, 0);
1694 }
1695 }
1696
/* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
 * (0 for success).  'subexecute' should be true if the execution is a result
 * of breaking down a larger execution that needed help, false otherwise.
 *
 * XXX In theory, the log message could be deceptive because this function is
 * called after the dpif_provider's '->execute' function, which is allowed to
 * modify execute->packet and execute->md.  In practice, though:
 *
 *     - dpif-netlink doesn't modify execute->packet or execute->md.
 *
 *     - dpif-netdev does modify them but it is less likely to have problems
 *       because it is built into ovs-vswitchd and cannot have version skew,
 *       etc.
 *
 * It would still be better to avoid the potential problem.  I don't know of a
 * good way to do that, though, that isn't expensive. */
static void
log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
                    bool subexecute, int error)
{
    /* Skip rate-limited messages and probe executions entirely. */
    if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
        && !execute->probe) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        char *packet;

        packet = ofp_packet_to_string(dp_packet_data(execute->packet),
                                      dp_packet_size(execute->packet));
        /* "sub-" marks a broken-down piece of a helped execution; "super-"
         * marks an execution that will itself need help. */
        ds_put_format(&ds, "%s: %sexecute ",
                      dpif_name(dpif),
                      (subexecute ? "sub-"
                       : dpif_execute_needs_help(execute) ? "super-"
                       : ""));
        format_odp_actions(&ds, execute->actions, execute->actions_len);
        if (error) {
            ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
        }
        ds_put_format(&ds, " on packet %s", packet);
        ds_put_format(&ds, " mtu %d", execute->mtu);
        vlog(&this_module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
        free(packet);
    }
}
1741
1742 static void
1743 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1744 int error)
1745 {
1746 if (should_log_flow_message(error)) {
1747 log_flow_message(dpif, error, "flow_get",
1748 get->key, get->key_len,
1749 get->flow->mask, get->flow->mask_len,
1750 get->ufid, &get->flow->stats,
1751 get->flow->actions, get->flow->actions_len);
1752 }
1753 }
1754
/* Returns true if 'dpif' supports the OVS_ACTION_ATTR_TUNNEL_PUSH/POP
 * actions, which is currently the case exactly for the userspace (netdev)
 * datapath. */
bool
dpif_supports_tnl_push_pop(const struct dpif *dpif)
{
    return dpif_is_netdev(dpif);
}