]> git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif.c
ofproto: Probe for sample nesting level.
[mirror_ovs.git] / lib / dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "flow.h"
32 #include "netdev.h"
33 #include "netlink.h"
34 #include "odp-execute.h"
35 #include "odp-util.h"
36 #include "openvswitch/ofp-print.h"
37 #include "openvswitch/ofp-util.h"
38 #include "openvswitch/ofpbuf.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
42 #include "seq.h"
43 #include "openvswitch/shash.h"
44 #include "sset.h"
45 #include "timeval.h"
46 #include "tnl-neigh-cache.h"
47 #include "tnl-ports.h"
48 #include "util.h"
49 #include "uuid.h"
50 #include "valgrind.h"
51 #include "openvswitch/ofp-errors.h"
52 #include "openvswitch/vlog.h"
53
54 VLOG_DEFINE_THIS_MODULE(dpif);
55
56 COVERAGE_DEFINE(dpif_destroy);
57 COVERAGE_DEFINE(dpif_port_add);
58 COVERAGE_DEFINE(dpif_port_del);
59 COVERAGE_DEFINE(dpif_flow_flush);
60 COVERAGE_DEFINE(dpif_flow_get);
61 COVERAGE_DEFINE(dpif_flow_put);
62 COVERAGE_DEFINE(dpif_flow_del);
63 COVERAGE_DEFINE(dpif_execute);
64 COVERAGE_DEFINE(dpif_purge);
65 COVERAGE_DEFINE(dpif_execute_with_help);
66 COVERAGE_DEFINE(dpif_meter_set);
67 COVERAGE_DEFINE(dpif_meter_get);
68 COVERAGE_DEFINE(dpif_meter_del);
69
70 static const struct dpif_class *base_dpif_classes[] = {
71 #if defined(__linux__) || defined(_WIN32)
72 &dpif_netlink_class,
73 #endif
74 &dpif_netdev_class,
75 };
76
77 struct registered_dpif_class {
78 const struct dpif_class *dpif_class;
79 int refcount;
80 };
81 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
82 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
83
84 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
85 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
86
87 /* Rate limit for individual messages going to or from the datapath, output at
88 * DBG level. This is very high because, if these are enabled, it is because
89 * we really need to see them. */
90 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
91
92 /* Not really much point in logging many dpif errors. */
93 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
94
95 static void log_flow_message(const struct dpif *dpif, int error,
96 const char *operation,
97 const struct nlattr *key, size_t key_len,
98 const struct nlattr *mask, size_t mask_len,
99 const ovs_u128 *ufid,
100 const struct dpif_flow_stats *stats,
101 const struct nlattr *actions, size_t actions_len);
102 static void log_operation(const struct dpif *, const char *operation,
103 int error);
104 static bool should_log_flow_message(int error);
105 static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
106 int error);
107 static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
108 int error);
109 static void log_execute_message(struct dpif *, const struct dpif_execute *,
110 bool subexecute, int error);
111 static void log_flow_get_message(const struct dpif *,
112 const struct dpif_flow_get *, int error);
113
114 /* Incremented whenever tnl route, arp, etc changes. */
115 struct seq *tnl_conf_seq;
116
117 static void
118 dp_initialize(void)
119 {
120 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
121
122 if (ovsthread_once_start(&once)) {
123 int i;
124
125 tnl_conf_seq = seq_create();
126 dpctl_unixctl_register();
127 tnl_port_map_init();
128 tnl_neigh_cache_init();
129 route_table_init();
130
131 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
132 dp_register_provider(base_dpif_classes[i]);
133 }
134
135 ovsthread_once_done(&once);
136 }
137 }
138
/* Registers 'new_class' as a datapath provider.  Returns 0 on success,
 * EINVAL if the class is blacklisted, EEXIST if a provider of the same type
 * is already registered, or the provider's own init() error code.
 *
 * The caller must hold 'dpif_mutex' (see dp_register_provider()). */
static int
dp_register_provider__(const struct dpif_class *new_class)
{
    struct registered_dpif_class *registered_class;
    int error;

    if (sset_contains(&dpif_blacklist, new_class->type)) {
        VLOG_DBG("attempted to register blacklisted provider: %s",
                 new_class->type);
        return EINVAL;
    }

    if (shash_find(&dpif_classes, new_class->type)) {
        VLOG_WARN("attempted to register duplicate datapath provider: %s",
                  new_class->type);
        return EEXIST;
    }

    /* 'init' is optional; providers without global setup leave it NULL. */
    error = new_class->init ? new_class->init() : 0;
    if (error) {
        VLOG_WARN("failed to initialize %s datapath class: %s",
                  new_class->type, ovs_strerror(error));
        return error;
    }

    /* Refcount starts at zero; dp_class_lookup() bumps it per open dpif. */
    registered_class = xmalloc(sizeof *registered_class);
    registered_class->dpif_class = new_class;
    registered_class->refcount = 0;

    shash_add(&dpif_classes, new_class->type, registered_class);

    return 0;
}
172
173 /* Registers a new datapath provider. After successful registration, new
174 * datapaths of that type can be opened using dpif_open(). */
175 int
176 dp_register_provider(const struct dpif_class *new_class)
177 {
178 int error;
179
180 ovs_mutex_lock(&dpif_mutex);
181 error = dp_register_provider__(new_class);
182 ovs_mutex_unlock(&dpif_mutex);
183
184 return error;
185 }
186
/* Unregisters a datapath provider.  'type' must have been previously
 * registered and not currently be in use by any dpifs.  After unregistration
 * new datapaths of that type cannot be opened using dpif_open().
 *
 * Returns 0 on success, EAFNOSUPPORT if no provider of 'type' is registered,
 * or EBUSY if the provider is still referenced by an open dpif.
 *
 * The caller must hold 'dpif_mutex'. */
static int
dp_unregister_provider__(const char *type)
{
    struct shash_node *node;
    struct registered_dpif_class *registered_class;

    node = shash_find(&dpif_classes, type);
    if (!node) {
        return EAFNOSUPPORT;
    }

    registered_class = node->data;
    if (registered_class->refcount) {
        /* At least one dpif of this type is still open; refuse. */
        VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
        return EBUSY;
    }

    shash_delete(&dpif_classes, node);
    free(registered_class);

    return 0;
}
212
213 /* Unregisters a datapath provider. 'type' must have been previously
214 * registered and not currently be in use by any dpifs. After unregistration
215 * new datapaths of that type cannot be opened using dpif_open(). */
216 int
217 dp_unregister_provider(const char *type)
218 {
219 int error;
220
221 dp_initialize();
222
223 ovs_mutex_lock(&dpif_mutex);
224 error = dp_unregister_provider__(type);
225 ovs_mutex_unlock(&dpif_mutex);
226
227 return error;
228 }
229
/* Blacklists a provider.  Causes future calls of dp_register_provider() with
 * a dpif_class which implements 'type' to fail with EINVAL.
 *
 * Thread-safe: 'dpif_blacklist' is protected by 'dpif_mutex'. */
void
dp_blacklist_provider(const char *type)
{
    ovs_mutex_lock(&dpif_mutex);
    sset_add(&dpif_blacklist, type);
    ovs_mutex_unlock(&dpif_mutex);
}
239
240 /* Adds the types of all currently registered datapath providers to 'types'.
241 * The caller must first initialize the sset. */
242 void
243 dp_enumerate_types(struct sset *types)
244 {
245 struct shash_node *node;
246
247 dp_initialize();
248
249 ovs_mutex_lock(&dpif_mutex);
250 SHASH_FOR_EACH(node, &dpif_classes) {
251 const struct registered_dpif_class *registered_class = node->data;
252 sset_add(types, registered_class->dpif_class->type);
253 }
254 ovs_mutex_unlock(&dpif_mutex);
255 }
256
/* Releases one reference to 'rc'.  Must pair with a prior dp_class_lookup();
 * the refcount is asserted to be nonzero. */
static void
dp_class_unref(struct registered_dpif_class *rc)
{
    ovs_mutex_lock(&dpif_mutex);
    ovs_assert(rc->refcount);
    rc->refcount--;
    ovs_mutex_unlock(&dpif_mutex);
}
265
266 static struct registered_dpif_class *
267 dp_class_lookup(const char *type)
268 {
269 struct registered_dpif_class *rc;
270
271 ovs_mutex_lock(&dpif_mutex);
272 rc = shash_find_data(&dpif_classes, type);
273 if (rc) {
274 rc->refcount++;
275 }
276 ovs_mutex_unlock(&dpif_mutex);
277
278 return rc;
279 }
280
/* Clears 'names' and enumerates the names of all known created datapaths with
 * the given 'type'.  The caller must first initialize the sset.  Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Some kinds of datapaths might not be practically enumerable.  This is not
 * considered an error. */
int
dp_enumerate_names(const char *type, struct sset *names)
{
    struct registered_dpif_class *registered_class;
    const struct dpif_class *dpif_class;
    int error;

    dp_initialize();
    sset_clear(names);

    registered_class = dp_class_lookup(type);
    if (!registered_class) {
        VLOG_WARN("could not enumerate unknown type: %s", type);
        return EAFNOSUPPORT;
    }

    /* 'enumerate' is optional; lacking it is "not enumerable", not an
     * error. */
    dpif_class = registered_class->dpif_class;
    error = (dpif_class->enumerate
             ? dpif_class->enumerate(names, dpif_class)
             : 0);
    if (error) {
        VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
                  ovs_strerror(error));
    }
    /* Drop the reference taken by dp_class_lookup(). */
    dp_class_unref(registered_class);

    return error;
}
315
/* Parses 'datapath_name_', which is of the form [type@]name into its
 * component pieces.  'name' and 'type' must be freed by the caller.
 *
 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
void
dp_parse_name(const char *datapath_name_, char **name, char **type)
{
    char *datapath_name = xstrdup(datapath_name_);
    char *separator;

    separator = strchr(datapath_name, '@');
    if (separator) {
        /* "type@name": overwrite the '@' so 'datapath_name' itself becomes
         * the type string; the remainder is the name.
         * NOTE(review): dpif_normalize_type() is applied here to the part
         * after '@' (the name), not to 'type' -- e.g. "foo@" yields the name
         * "system".  Confirm against callers before changing. */
        *separator = '\0';
        *type = datapath_name;
        *name = xstrdup(dpif_normalize_type(separator + 1));
    } else {
        /* Bare "name": the type defaults to "system". */
        *name = datapath_name;
        *type = xstrdup(dpif_normalize_type(NULL));
    }
}
336
/* Opens (and, if 'create' is true, first creates) the datapath 'name' of the
 * given 'type'.  On success stores the datapath in '*dpifp' and returns 0;
 * on failure returns a positive errno value and sets '*dpifp' to NULL.
 *
 * On success, the provider-class reference taken by dp_class_lookup() is
 * retained until dpif_close() releases it. */
static int
do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
{
    struct dpif *dpif = NULL;
    int error;
    struct registered_dpif_class *registered_class;

    dp_initialize();

    type = dpif_normalize_type(type);
    registered_class = dp_class_lookup(type);
    if (!registered_class) {
        VLOG_WARN("could not create datapath %s of unknown type %s", name,
                  type);
        error = EAFNOSUPPORT;
        goto exit;
    }

    error = registered_class->dpif_class->open(registered_class->dpif_class,
                                               name, create, &dpif);
    if (!error) {
        /* The provider must have initialized 'dpif' with the class we asked
         * it to open. */
        ovs_assert(dpif->dpif_class == registered_class->dpif_class);
    } else {
        /* Open failed: drop the reference taken by dp_class_lookup(). */
        dp_class_unref(registered_class);
    }

exit:
    *dpifp = error ? NULL : dpif;
    return error;
}
367
/* Tries to open an existing datapath named 'name' and type 'type'.  Will fail
 * if no datapath with 'name' and 'type' exists.  'type' may be either NULL or
 * the empty string to specify the default system type.  Returns 0 if
 * successful, otherwise a positive errno value.  On success stores a pointer
 * to the datapath in '*dpifp', otherwise a null pointer.
 *
 * Thin wrapper around do_open() with 'create' == false. */
int
dpif_open(const char *name, const char *type, struct dpif **dpifp)
{
    return do_open(name, type, false, dpifp);
}
378
/* Tries to create and open a new datapath with the given 'name' and 'type'.
 * 'type' may be either NULL or the empty string to specify the default system
 * type.  Will fail if a datapath with 'name' and 'type' already exists.
 * Returns 0 if successful, otherwise a positive errno value.  On success
 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer.
 *
 * Thin wrapper around do_open() with 'create' == true. */
int
dpif_create(const char *name, const char *type, struct dpif **dpifp)
{
    return do_open(name, type, true, dpifp);
}
389
390 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
391 * does not exist. 'type' may be either NULL or the empty string to specify
392 * the default system type. Returns 0 if successful, otherwise a positive
393 * errno value. On success stores a pointer to the datapath in '*dpifp',
394 * otherwise a null pointer. */
395 int
396 dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
397 {
398 int error;
399
400 error = dpif_create(name, type, dpifp);
401 if (error == EEXIST || error == EBUSY) {
402 error = dpif_open(name, type, dpifp);
403 if (error) {
404 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
405 name, ovs_strerror(error));
406 }
407 } else if (error) {
408 VLOG_WARN("failed to create datapath %s: %s",
409 name, ovs_strerror(error));
410 }
411 return error;
412 }
413
414 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
415 * itself; call dpif_delete() first, instead, if that is desirable. */
416 void
417 dpif_close(struct dpif *dpif)
418 {
419 if (dpif) {
420 struct registered_dpif_class *rc;
421
422 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
423 dpif_uninit(dpif, true);
424 dp_class_unref(rc);
425 }
426 }
427
428 /* Performs periodic work needed by 'dpif'. */
429 bool
430 dpif_run(struct dpif *dpif)
431 {
432 if (dpif->dpif_class->run) {
433 return dpif->dpif_class->run(dpif);
434 }
435 return false;
436 }
437
/* Arranges for poll_block() to wake up when dp_run() needs to be called for
 * 'dpif'.  The 'wait' hook is optional; providers without one need no
 * wakeup. */
void
dpif_wait(struct dpif *dpif)
{
    if (dpif->dpif_class->wait) {
        dpif->dpif_class->wait(dpif);
    }
}
447
/* Returns the name of datapath 'dpif' prefixed with the type
 * (for use in log messages).  The returned string is owned by 'dpif'. */
const char *
dpif_name(const struct dpif *dpif)
{
    return dpif->full_name;
}
455
/* Returns the name of datapath 'dpif' without the type
 * (for use in device names).  The returned string is owned by 'dpif'. */
const char *
dpif_base_name(const struct dpif *dpif)
{
    return dpif->base_name;
}
463
/* Returns the type of datapath 'dpif' (e.g. "system" or "netdev"). */
const char *
dpif_type(const struct dpif *dpif)
{
    return dpif->dpif_class->type;
}
470
/* Returns the fully spelled out name for the given datapath 'type'.
 *
 * A NULL or empty 'type' maps to the default, "system".  Normalized type
 * strings can be compared with strcmp(); unnormalized ones might be spelled
 * differently yet mean the same thing. */
const char *
dpif_normalize_type(const char *type)
{
    return (type == NULL || type[0] == '\0') ? "system" : type;
}
480
/* Destroys the datapath that 'dpif' is connected to, first removing all of its
 * ports.  After calling this function, it does not make sense to pass 'dpif'
 * to any functions other than dpif_name() or dpif_close().
 *
 * Returns 0 if successful, otherwise a positive errno value. */
int
dpif_delete(struct dpif *dpif)
{
    int error;

    COVERAGE_INC(dpif_destroy);

    error = dpif->dpif_class->destroy(dpif);
    log_operation(dpif, "delete", error);
    return error;
}
495
/* Retrieves statistics for 'dpif' into 'stats'.  Returns 0 if successful,
 * otherwise a positive errno value, in which case '*stats' is zeroed so the
 * caller never reads uninitialized counters. */
int
dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
    int error = dpif->dpif_class->get_stats(dpif, stats);
    if (error) {
        memset(stats, 0, sizeof *stats);
    }
    log_operation(dpif, "get_stats", error);
    return error;
}
508
/* Translates 'port_type' for use on a datapath of type 'datapath_type'.
 * If the provider defines a port_open_type hook, its result is returned;
 * otherwise 'port_type' is returned unchanged.  An unknown 'datapath_type'
 * also returns 'port_type' unchanged. */
const char *
dpif_port_open_type(const char *datapath_type, const char *port_type)
{
    struct registered_dpif_class *rc;

    datapath_type = dpif_normalize_type(datapath_type);

    ovs_mutex_lock(&dpif_mutex);
    rc = shash_find_data(&dpif_classes, datapath_type);
    if (rc && rc->dpif_class->port_open_type) {
        port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
    }
    ovs_mutex_unlock(&dpif_mutex);

    return port_type;
}
525
/* Attempts to add 'netdev' as a port on 'dpif'.  If 'port_nop' is
 * non-null and its value is not ODPP_NONE, then attempts to use the
 * value as the port number.
 *
 * If successful, returns 0 and sets '*port_nop' to the new port's port
 * number (if 'port_nop' is non-null).  On failure, returns a positive
 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
 * non-null). */
int
dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
{
    const char *netdev_name = netdev_get_name(netdev);
    odp_port_t port_no = ODPP_NONE;
    int error;

    COVERAGE_INC(dpif_port_add);

    /* ODPP_NONE asks the provider to pick a free port number itself. */
    if (port_nop) {
        port_no = *port_nop;
    }

    error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
                    dpif_name(dpif), netdev_name, port_no);
    } else {
        VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
                     dpif_name(dpif), netdev_name, ovs_strerror(error));
        port_no = ODPP_NONE;
    }
    if (port_nop) {
        *port_nop = port_no;
    }
    return error;
}
561
/* Attempts to remove 'dpif''s port number 'port_no'.  Returns 0 if
 * successful, otherwise a positive errno value. */
int
dpif_port_del(struct dpif *dpif, odp_port_t port_no)
{
    int error;

    COVERAGE_INC(dpif_port_del);

    error = dpif->dpif_class->port_del(dpif, port_no);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
                    dpif_name(dpif), port_no);
    } else {
        log_operation(dpif, "port_del", error);
    }
    return error;
}
580
581 /* Makes a deep copy of 'src' into 'dst'. */
582 void
583 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
584 {
585 dst->name = xstrdup(src->name);
586 dst->type = xstrdup(src->type);
587 dst->port_no = src->port_no;
588 }
589
/* Frees memory allocated to members of 'dpif_port' (the 'name' and 'type'
 * strings).  Does not free 'dpif_port' itself.
 *
 * Do not call this function on a dpif_port obtained from
 * dpif_port_dump_next(): that function retains ownership of the data in the
 * dpif_port. */
void
dpif_port_destroy(struct dpif_port *dpif_port)
{
    free(dpif_port->name);
    free(dpif_port->type);
}
601
/* Checks if port named 'devname' exists in 'dpif'.  If so, returns
 * true; otherwise, returns false.
 *
 * ENODEV simply means "no such port" and is not logged; any other error is
 * logged and also reported as "does not exist". */
bool
dpif_port_exists(const struct dpif *dpif, const char *devname)
{
    int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
    if (error != 0 && error != ENODEV) {
        VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
                     dpif_name(dpif), devname, ovs_strerror(error));
    }

    return !error;
}
615
616 /* Refreshes configuration of 'dpif's port. */
617 int
618 dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
619 const struct smap *cfg)
620 {
621 int error = 0;
622
623 if (dpif->dpif_class->port_set_config) {
624 error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
625 if (error) {
626 log_operation(dpif, "port_set_config", error);
627 }
628 }
629
630 return error;
631 }
632
/* Looks up port number 'port_no' in 'dpif'.  On success, returns 0 and
 * initializes '*port' appropriately; on failure, returns a positive errno
 * value and zeroes '*port'.
 *
 * Retuns ENODEV if the port doesn't exist.
 *
 * The caller owns the data in 'port' and must free it with
 * dpif_port_destroy() when it is no longer needed. */
int
dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                          struct dpif_port *port)
{
    int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
                    dpif_name(dpif), port_no, port->name);
    } else {
        /* Zero '*port' so the caller never frees garbage pointers. */
        memset(port, 0, sizeof *port);
        VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
                     dpif_name(dpif), port_no, ovs_strerror(error));
    }
    return error;
}
656
/* Looks up port named 'devname' in 'dpif'.  On success, returns 0 and
 * initializes '*port' appropriately; on failure, returns a positive errno
 * value and zeroes '*port'.
 *
 * Retuns ENODEV if the port doesn't exist.
 *
 * The caller owns the data in 'port' and must free it with
 * dpif_port_destroy() when it is no longer needed. */
int
dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
                        struct dpif_port *port)
{
    int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
                    dpif_name(dpif), devname, port->port_no);
    } else {
        /* Zero '*port' so the caller never frees garbage pointers. */
        memset(port, 0, sizeof *port);

        /* For ENODEV we use DBG level because the caller is probably
         * interested in whether 'dpif' actually has a port 'devname', so that
         * it's not an issue worth logging if it doesn't.  Other errors are
         * uncommon and more likely to indicate a real problem. */
        VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
                "%s: failed to query port %s: %s",
                dpif_name(dpif), devname, ovs_strerror(error));
    }
    return error;
}
686
687 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
688 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
689 * flows whose packets arrived on port 'port_no'. In the case where the
690 * provider allocates multiple Netlink PIDs to a single port, it may use
691 * 'hash' to spread load among them. The caller need not use a particular
692 * hash function; a 5-tuple hash is suitable.
693 *
694 * (The datapath implementation might use some different hash function for
695 * distributing packets received via flow misses among PIDs. This means
696 * that packets received via flow misses might be reordered relative to
697 * packets received via userspace actions. This is not ordinarily a
698 * problem.)
699 *
700 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
701 * allocated to any port, that the client may use for special purposes.
702 *
703 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
704 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
705 * disabled and then re-enabled, so a client that does that must be prepared to
706 * update all of the flows that it installed that contain
707 * OVS_ACTION_ATTR_USERSPACE actions. */
uint32_t
dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
{
    /* Optional provider hook; 0 is the fallback for providers that do not
     * implement port_get_pid. */
    return (dpif->dpif_class->port_get_pid
            ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
            : 0);
}
715
716 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
717 * the port's name into the 'name_size' bytes in 'name', ensuring that the
718 * result is null-terminated. On failure, returns a positive errno value and
719 * makes 'name' the empty string. */
720 int
721 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
722 char *name, size_t name_size)
723 {
724 struct dpif_port port;
725 int error;
726
727 ovs_assert(name_size > 0);
728
729 error = dpif_port_query_by_number(dpif, port_no, &port);
730 if (!error) {
731 ovs_strlcpy(name, port.name, name_size);
732 dpif_port_destroy(&port);
733 } else {
734 *name = '\0';
735 }
736 return error;
737 }
738
/* Initializes 'dump' to begin dumping the ports in a dpif.
 *
 * This function provides no status indication.  An error status for the
 * entire dump operation is provided when it is completed by calling
 * dpif_port_dump_done().
 */
void
dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
{
    dump->dpif = dpif;
    /* A start-time error is latched in 'dump->error' and makes the first
     * dpif_port_dump_next() return false immediately. */
    dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
    log_operation(dpif, "port_dump_start", dump->error);
}
752
/* Attempts to retrieve another port from 'dump', which must have been
 * initialized with dpif_port_dump_start().  On success, stores a new dpif_port
 * into 'port' and returns true.  On failure, returns false.
 *
 * Failure might indicate an actual error or merely that the last port has been
 * dumped.  An error status for the entire dump operation is provided when it
 * is completed by calling dpif_port_dump_done().
 *
 * The dpif owns the data stored in 'port'.  It will remain valid until at
 * least the next time 'dump' is passed to dpif_port_dump_next() or
 * dpif_port_dump_done(). */
bool
dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
{
    const struct dpif *dpif = dump->dpif;

    /* A prior error (including EOF) ends the iteration permanently. */
    if (dump->error) {
        return false;
    }

    dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
    if (dump->error == EOF) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
    } else {
        log_operation(dpif, "port_dump_next", dump->error);
    }

    if (dump->error) {
        /* EOF or real error: release the provider's dump state now.
         * dpif_port_dump_done() sees a nonzero 'dump->error' and will not
         * call port_dump_done() a second time. */
        dpif->dpif_class->port_dump_done(dpif, dump->state);
        return false;
    }
    return true;
}
786
/* Completes port table dump operation 'dump', which must have been initialized
 * with dpif_port_dump_start().  Returns 0 if the dump operation was
 * error-free, otherwise a positive errno value describing the problem. */
int
dpif_port_dump_done(struct dpif_port_dump *dump)
{
    const struct dpif *dpif = dump->dpif;
    /* If 'error' is already set, dpif_port_dump_next() has already released
     * the provider's dump state; don't call port_dump_done() twice. */
    if (!dump->error) {
        dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
        log_operation(dpif, "port_dump_done", dump->error);
    }
    /* EOF just means a complete, successful dump. */
    return dump->error == EOF ? 0 : dump->error;
}
800
801 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
802 * 'dpif' has changed, this function does one of the following:
803 *
804 * - Stores the name of the device that was added to or deleted from 'dpif' in
805 * '*devnamep' and returns 0. The caller is responsible for freeing
806 * '*devnamep' (with free()) when it no longer needs it.
807 *
808 * - Returns ENOBUFS and sets '*devnamep' to NULL.
809 *
810 * This function may also return 'false positives', where it returns 0 and
811 * '*devnamep' names a device that was not actually added or deleted or it
812 * returns ENOBUFS without any change.
813 *
814 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
815 * return other positive errno values to indicate that something has gone
816 * wrong. */
int
dpif_port_poll(const struct dpif *dpif, char **devnamep)
{
    int error = dpif->dpif_class->port_poll(dpif, devnamep);
    if (error) {
        /* Per the contract above, '*devnamep' is NULL on any error. */
        *devnamep = NULL;
    }
    return error;
}
826
/* Arranges for the poll loop to wake up when port_poll(dpif) will return a
 * value other than EAGAIN.  This hook is mandatory for providers. */
void
dpif_port_poll_wait(const struct dpif *dpif)
{
    dpif->dpif_class->port_poll_wait(dpif);
}
834
835 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
836 * arguments must have been initialized through a call to flow_extract().
837 * 'used' is stored into stats->used. */
838 void
839 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
840 long long int used, struct dpif_flow_stats *stats)
841 {
842 stats->tcp_flags = ntohs(flow->tcp_flags);
843 stats->n_bytes = dp_packet_size(packet);
844 stats->n_packets = 1;
845 stats->used = used;
846 }
847
/* Appends a human-readable representation of 'stats' to 's', e.g.
 * "packets:1, bytes:64, used:0.001s, flags:S". */
void
dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
{
    ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
                  stats->n_packets, stats->n_bytes);
    if (stats->used) {
        /* 'used' is a time_msec() timestamp; print the age in seconds. */
        ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
    } else {
        ds_put_format(s, "never");
    }
    if (stats->tcp_flags) {
        ds_put_cstr(s, ", flags:");
        packet_format_tcp_flags(s, stats->tcp_flags);
    }
}
864
/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
 *
 * The hash is salted with a per-process random secret, so values are stable
 * within one process lifetime but differ across restarts.  The result is
 * stamped with UUID version-4 bits so it can double as a UFID. */
void
dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
               const void *key, size_t key_len, ovs_u128 *hash)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static uint32_t secret;

    if (ovsthread_once_start(&once)) {
        secret = random_uint32();
        ovsthread_once_done(&once);
    }
    hash_bytes128(key, key_len, secret, hash);
    uuid_set_bits_v4((struct uuid *)hash);
}
880
/* Deletes all flows from 'dpif'.  Returns 0 if successful, otherwise a
 * positive errno value. */
int
dpif_flow_flush(struct dpif *dpif)
{
    int error;

    COVERAGE_INC(dpif_flow_flush);

    error = dpif->dpif_class->flow_flush(dpif);
    log_operation(dpif, "flow_flush", error);
    return error;
}
894
/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
 * Returns true if the datapath supported installing 'flow', false otherwise.
 * 'name' is used only for log messages.  'actions' and 'ufid' may be NULL. */
bool
dpif_probe_feature(struct dpif *dpif, const char *name,
                   const struct ofpbuf *key, const struct ofpbuf *actions,
                   const ovs_u128 *ufid)
{
    struct dpif_flow flow;
    struct ofpbuf reply;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    bool enable_feature = false;
    int error;
    const struct nlattr *nl_actions = actions ? actions->data : NULL;
    const size_t nl_actions_size = actions ? actions->size : 0;

    /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
     * restarted) at just the right time such that feature probes from the
     * previous run are still present in the datapath. */
    error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
                          key->data, key->size, NULL, 0,
                          nl_actions, nl_actions_size,
                          ufid, NON_PMD_CORE_ID, NULL);
    if (error) {
        /* EINVAL and EOVERFLOW are the expected "unsupported" answers, so
         * they are not worth a warning. */
        if (error != EINVAL && error != EOVERFLOW) {
            VLOG_WARN("%s: %s flow probe failed (%s)",
                      dpif_name(dpif), name, ovs_strerror(error));
        }
        return false;
    }

    /* Read the flow back; the feature counts as supported only if the flow
     * is present and (when a UFID was supplied) the UFID round-trips. */
    ofpbuf_use_stack(&reply, &stub, sizeof stub);
    error = dpif_flow_get(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, &reply, &flow);
    if (!error
        && (!ufid || (flow.ufid_present
                      && ovs_u128_equals(*ufid, flow.ufid)))) {
        enable_feature = true;
    }

    /* Always clean up the probe flow, whatever the outcome. */
    error = dpif_flow_del(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, NULL);
    if (error) {
        VLOG_WARN("%s: failed to delete %s feature probe flow",
                  dpif_name(dpif), name);
    }

    return enable_feature;
}
944
/* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET.
 * On success '*flow' describes the flow; attribute data may be stored in
 * 'buf'.  Returns the operation's error code (0 on success). */
int
dpif_flow_get(struct dpif *dpif,
              const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
              const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
{
    struct dpif_op *opp;
    struct dpif_op op;

    op.type = DPIF_OP_FLOW_GET;
    op.u.flow_get.key = key;
    op.u.flow_get.key_len = key_len;
    op.u.flow_get.ufid = ufid;
    op.u.flow_get.pmd_id = pmd_id;
    op.u.flow_get.buffer = buf;

    /* Zero '*flow' first, then pre-populate its key fields with the caller's
     * key so they are valid even if the provider does not fill them in. */
    memset(flow, 0, sizeof *flow);
    op.u.flow_get.flow = flow;
    op.u.flow_get.flow->key = key;
    op.u.flow_get.flow->key_len = key_len;

    opp = &op;
    dpif_operate(dpif, &opp, 1);

    return op.error;
}
971
972 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
973 int
974 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
975 const struct nlattr *key, size_t key_len,
976 const struct nlattr *mask, size_t mask_len,
977 const struct nlattr *actions, size_t actions_len,
978 const ovs_u128 *ufid, const unsigned pmd_id,
979 struct dpif_flow_stats *stats)
980 {
981 struct dpif_op *opp;
982 struct dpif_op op;
983
984 op.type = DPIF_OP_FLOW_PUT;
985 op.u.flow_put.flags = flags;
986 op.u.flow_put.key = key;
987 op.u.flow_put.key_len = key_len;
988 op.u.flow_put.mask = mask;
989 op.u.flow_put.mask_len = mask_len;
990 op.u.flow_put.actions = actions;
991 op.u.flow_put.actions_len = actions_len;
992 op.u.flow_put.ufid = ufid;
993 op.u.flow_put.pmd_id = pmd_id;
994 op.u.flow_put.stats = stats;
995
996 opp = &op;
997 dpif_operate(dpif, &opp, 1);
998
999 return op.error;
1000 }
1001
1002 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
1003 int
1004 dpif_flow_del(struct dpif *dpif,
1005 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
1006 const unsigned pmd_id, struct dpif_flow_stats *stats)
1007 {
1008 struct dpif_op *opp;
1009 struct dpif_op op;
1010
1011 op.type = DPIF_OP_FLOW_DEL;
1012 op.u.flow_del.key = key;
1013 op.u.flow_del.key_len = key_len;
1014 op.u.flow_del.ufid = ufid;
1015 op.u.flow_del.pmd_id = pmd_id;
1016 op.u.flow_del.stats = stats;
1017 op.u.flow_del.terse = false;
1018
1019 opp = &op;
1020 dpif_operate(dpif, &opp, 1);
1021
1022 return op.error;
1023 }
1024
1025 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1026 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1027 * be returned in the dump. Otherwise, all fields will be returned.
1028 *
1029 * This function always successfully returns a dpif_flow_dump. Error
1030 * reporting is deferred to dpif_flow_dump_destroy(). */
1031 struct dpif_flow_dump *
1032 dpif_flow_dump_create(const struct dpif *dpif, bool terse)
1033 {
1034 return dpif->dpif_class->flow_dump_create(dpif, terse);
1035 }
1036
1037 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1038 * All dpif_flow_dump_thread structures previously created for 'dump' must
1039 * previously have been destroyed.
1040 *
1041 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1042 * value describing the problem. */
1043 int
1044 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1045 {
1046 const struct dpif *dpif = dump->dpif;
1047 int error = dpif->dpif_class->flow_dump_destroy(dump);
1048 log_operation(dpif, "flow_dump_destroy", error);
1049 return error == EOF ? 0 : error;
1050 }
1051
1052 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1053 struct dpif_flow_dump_thread *
1054 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1055 {
1056 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1057 }
1058
1059 /* Releases 'thread'. */
1060 void
1061 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1062 {
1063 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1064 }
1065
1066 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1067 * if and only if no flows remained to be retrieved, otherwise a positive
1068 * number reflecting the number of elements in 'flows[]' that were updated.
1069 * The number of flows returned might be less than 'max_flows' because
1070 * fewer than 'max_flows' remained, because this particular datapath does not
1071 * benefit from batching, or because an error occurred partway through
1072 * retrieval. Thus, the caller should continue calling until a 0 return value,
1073 * even if intermediate return values are less than 'max_flows'.
1074 *
1075 * No error status is immediately provided. An error status for the entire
1076 * dump operation is provided when it is completed by calling
1077 * dpif_flow_dump_destroy().
1078 *
1079 * All of the data stored into 'flows' is owned by the datapath, not by the
1080 * caller, and the caller must not modify or free it. The datapath guarantees
1081 * that it remains accessible and unchanged until the first of:
1082 * - The next call to dpif_flow_dump_next() for 'thread', or
1083 * - The next rcu quiescent period. */
1084 int
1085 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1086 struct dpif_flow *flows, int max_flows)
1087 {
1088 struct dpif *dpif = thread->dpif;
1089 int n;
1090
1091 ovs_assert(max_flows > 0);
1092 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1093 if (n > 0) {
1094 struct dpif_flow *f;
1095
1096 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
1097 log_flow_message(dpif, 0, "flow_dump",
1098 f->key, f->key_len, f->mask, f->mask_len,
1099 &f->ufid, &f->stats, f->actions, f->actions_len);
1100 }
1101 } else {
1102 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1103 }
1104 return n;
1105 }
1106
/* State threaded through odp_execute_actions() into dpif_execute_helper_cb()
 * while dpif_execute_with_help() breaks an execute request into pieces. */
struct dpif_execute_helper_aux {
    struct dpif *dpif;        /* Datapath to hand individual actions to. */
    const struct flow *flow;  /* Flow passed through to dpif_execute(). */
    int error;                /* Error from the most recent dpif_execute(). */
    const struct nlattr *meter_action; /* Non-NULL, if have a meter action. */
};
1113
/* This is called for actions that need the context of the datapath to be
 * meaningful.
 *
 * Collects any meter actions seen so far, then re-submits the current action
 * (plus collected meters and, if needed, tunnel metadata) to the datapath via
 * dpif_execute().  The first error encountered is recorded in aux->error. */
static void
dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
                       const struct nlattr *action, bool may_steal)
{
    struct dpif_execute_helper_aux *aux = aux_;
    int type = nl_attr_type(action);
    struct dp_packet *packet = packets_->packets[0];

    /* Helped executions operate on exactly one packet at a time. */
    ovs_assert(packets_->count == 1);

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_METER:
        /* Maintain a pointer to the first meter action seen. */
        if (!aux->meter_action) {
            aux->meter_action = action;
        }
        break;

    case OVS_ACTION_ATTR_CT:
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC: {
        struct dpif_execute execute;
        struct ofpbuf execute_actions;
        uint64_t stub[256 / 8];
        struct pkt_metadata *md = &packet->md;

        /* If tunnel metadata or pending meters must accompany this action,
         * build a composite action list in 'execute_actions'; otherwise the
         * single action can be passed through directly (the 'else' below). */
        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_use_stub(&execute_actions, stub, sizeof stub);

            if (aux->meter_action) {
                const struct nlattr *a = aux->meter_action;

                /* XXX: This code collects meter actions since the last action
                 * execution via the datapath to be executed right before the
                 * current action that needs to be executed by the datapath.
                 * This is only an approximation, but better than nothing.
                 * Fundamentally, we should have a mechanism by which the
                 * datapath could return the result of the meter action so that
                 * we could execute them at the right order. */
                do {
                    ofpbuf_put(&execute_actions, a, NLA_ALIGN(a->nla_len));
                    /* Find next meter action before 'action', if any. */
                    do {
                        a = nl_attr_next(a);
                    } while (a != action &&
                             nl_attr_type(a) != OVS_ACTION_ATTR_METER);
                } while (a != action);
            }

            /* The Linux kernel datapath throws away the tunnel information
             * that we supply as metadata.  We have to use a "set" action to
             * supply it. */
            if (md->tunnel.ip_dst) {
                odp_put_tunnel_action(&md->tunnel, &execute_actions);
            }
            ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));

            execute.actions = execute_actions.data;
            execute.actions_len = execute_actions.size;
        } else {
            execute.actions = action;
            execute.actions_len = NLA_ALIGN(action->nla_len);
        }

        /* Apply any pending truncation before output-like actions.  When the
         * packet cannot be stolen, work on a clone so the caller's packet is
         * left intact. */
        struct dp_packet *clone = NULL;
        uint32_t cutlen = dp_packet_get_cutlen(packet);
        if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
                        || type == OVS_ACTION_ATTR_TUNNEL_PUSH
                        || type == OVS_ACTION_ATTR_TUNNEL_POP
                        || type == OVS_ACTION_ATTR_USERSPACE)) {
            dp_packet_reset_cutlen(packet);
            if (!may_steal) {
                packet = clone = dp_packet_clone(packet);
            }
            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
        }

        execute.packet = packet;
        execute.flow = aux->flow;
        execute.needs_help = false;
        execute.probe = false;
        execute.mtu = 0;
        aux->error = dpif_execute(aux->dpif, &execute);
        log_execute_message(aux->dpif, &execute, true, aux->error);

        /* No-op when 'clone' is NULL. */
        dp_packet_delete(clone);

        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_uninit(&execute_actions);

            /* Do not re-use the same meters for later output actions. */
            aux->meter_action = NULL;
        }
        break;
    }

    /* All remaining action types are executed entirely in userspace by
     * odp_execute_actions() and must never reach this helper. */
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_PUSH_ETH:
    case OVS_ACTION_ATTR_POP_ETH:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
}
1232
1233 /* Executes 'execute' by performing most of the actions in userspace and
1234 * passing the fully constructed packets to 'dpif' for output and userspace
1235 * actions.
1236 *
1237 * This helps with actions that a given 'dpif' doesn't implement directly. */
1238 static int
1239 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1240 {
1241 struct dpif_execute_helper_aux aux = {dpif, execute->flow, 0, NULL};
1242 struct dp_packet_batch pb;
1243
1244 COVERAGE_INC(dpif_execute_with_help);
1245
1246 dp_packet_batch_init_packet(&pb, execute->packet);
1247 odp_execute_actions(&aux, &pb, false, execute->actions,
1248 execute->actions_len, dpif_execute_helper_cb);
1249 return aux.error;
1250 }
1251
1252 /* Returns true if the datapath needs help executing 'execute'. */
1253 static bool
1254 dpif_execute_needs_help(const struct dpif_execute *execute)
1255 {
1256 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1257 }
1258
1259 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1260 int
1261 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1262 {
1263 if (execute->actions_len) {
1264 struct dpif_op *opp;
1265 struct dpif_op op;
1266
1267 op.type = DPIF_OP_EXECUTE;
1268 op.u.execute = *execute;
1269
1270 opp = &op;
1271 dpif_operate(dpif, &opp, 1);
1272
1273 return op.error;
1274 } else {
1275 return 0;
1276 }
1277 }
1278
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
 * which they are specified.  Places each operation's results in the "output"
 * members documented in comments, and 0 in the 'error' member on success or a
 * positive errno on failure. */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    while (n_ops > 0) {
        size_t chunk;

        /* Count 'chunk', the number of ops that can be executed without
         * needing any help.  Ops that need help should be rare, so we
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
        for (chunk = 0; chunk < n_ops; chunk++) {
            struct dpif_op *op = ops[chunk];

            if (op->type == DPIF_OP_EXECUTE
                && dpif_execute_needs_help(&op->u.execute)) {
                break;
            }
        }

        if (chunk) {
            /* Execute a chunk full of ops that the dpif provider can
             * handle itself, without help. */
            size_t i;

            dpif->dpif_class->operate(dpif, ops, chunk);

            /* Post-process each completed op: bump coverage counters, log
             * the result, and clear "output" fields on failure so callers
             * never see stale data. */
            for (i = 0; i < chunk; i++) {
                struct dpif_op *op = ops[i];
                int error = op->error;

                switch (op->type) {
                case DPIF_OP_FLOW_PUT: {
                    struct dpif_flow_put *put = &op->u.flow_put;

                    COVERAGE_INC(dpif_flow_put);
                    log_flow_put_message(dpif, put, error);
                    if (error && put->stats) {
                        memset(put->stats, 0, sizeof *put->stats);
                    }
                    break;
                }

                case DPIF_OP_FLOW_GET: {
                    struct dpif_flow_get *get = &op->u.flow_get;

                    COVERAGE_INC(dpif_flow_get);
                    /* Zero the flow before logging it so a failed get never
                     * logs garbage. */
                    if (error) {
                        memset(get->flow, 0, sizeof *get->flow);
                    }
                    log_flow_get_message(dpif, get, error);

                    break;
                }

                case DPIF_OP_FLOW_DEL: {
                    struct dpif_flow_del *del = &op->u.flow_del;

                    COVERAGE_INC(dpif_flow_del);
                    log_flow_del_message(dpif, del, error);
                    if (error && del->stats) {
                        memset(del->stats, 0, sizeof *del->stats);
                    }
                    break;
                }

                case DPIF_OP_EXECUTE:
                    COVERAGE_INC(dpif_execute);
                    log_execute_message(dpif, &op->u.execute, false, error);
                    break;
                }
            }

            /* Advance past the chunk just executed. */
            ops += chunk;
            n_ops -= chunk;
        } else {
            /* Help the dpif provider to execute one op. */
            struct dpif_op *op = ops[0];

            COVERAGE_INC(dpif_execute);
            op->error = dpif_execute_with_help(dpif, &op->u.execute);
            ops++;
            n_ops--;
        }
    }
}
1367
1368 /* Returns a string that represents 'type', for use in log messages. */
1369 const char *
1370 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1371 {
1372 switch (type) {
1373 case DPIF_UC_MISS: return "miss";
1374 case DPIF_UC_ACTION: return "action";
1375 case DPIF_N_UC_TYPES: default: return "<unknown>";
1376 }
1377 }
1378
1379 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1380 * if successful, otherwise a positive errno value.
1381 *
1382 * Turning packet receive off and then back on may change the Netlink PID
1383 * assignments returned by dpif_port_get_pid(). If the client does this, it
1384 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1385 * using the new PID assignment. */
1386 int
1387 dpif_recv_set(struct dpif *dpif, bool enable)
1388 {
1389 int error = 0;
1390
1391 if (dpif->dpif_class->recv_set) {
1392 error = dpif->dpif_class->recv_set(dpif, enable);
1393 log_operation(dpif, "recv_set", error);
1394 }
1395 return error;
1396 }
1397
1398 /* Refreshes the poll loops and Netlink sockets associated to each port,
1399 * when the number of upcall handlers (upcall receiving thread) is changed
1400 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1401 * recv_set().
1402 *
1403 * Since multiple upcall handlers can read upcalls simultaneously from
1404 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1405 * handler. So, handlers_set() is responsible for the following tasks:
1406 *
1407 * When receiving upcall is enabled, extends or creates the
1408 * configuration to support:
1409 *
1410 * - 'n_handlers' Netlink sockets for each port.
1411 *
1412 * - 'n_handlers' poll loops, one for each upcall handler.
1413 *
1414 * - registering the Netlink sockets for the same upcall handler to
1415 * the corresponding poll loop.
1416 *
1417 * Returns 0 if successful, otherwise a positive errno value. */
1418 int
1419 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1420 {
1421 int error = 0;
1422
1423 if (dpif->dpif_class->handlers_set) {
1424 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1425 log_operation(dpif, "handlers_set", error);
1426 }
1427 return error;
1428 }
1429
1430 void
1431 dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1432 {
1433 if (dpif->dpif_class->register_dp_purge_cb) {
1434 dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
1435 }
1436 }
1437
1438 void
1439 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1440 {
1441 if (dpif->dpif_class->register_upcall_cb) {
1442 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1443 }
1444 }
1445
1446 void
1447 dpif_enable_upcall(struct dpif *dpif)
1448 {
1449 if (dpif->dpif_class->enable_upcall) {
1450 dpif->dpif_class->enable_upcall(dpif);
1451 }
1452 }
1453
1454 void
1455 dpif_disable_upcall(struct dpif *dpif)
1456 {
1457 if (dpif->dpif_class->disable_upcall) {
1458 dpif->dpif_class->disable_upcall(dpif);
1459 }
1460 }
1461
1462 void
1463 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1464 {
1465 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1466 struct ds flow;
1467 char *packet;
1468
1469 packet = ofp_packet_to_string(dp_packet_data(&upcall->packet),
1470 dp_packet_size(&upcall->packet));
1471
1472 ds_init(&flow);
1473 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1474
1475 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1476 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1477 ds_cstr(&flow), packet);
1478
1479 ds_destroy(&flow);
1480 free(packet);
1481 }
1482 }
1483
1484 /* Pass custom configuration to the datapath implementation. Some of the
1485 * changes can be postponed until dpif_run() is called. */
1486 int
1487 dpif_set_config(struct dpif *dpif, const struct smap *cfg)
1488 {
1489 int error = 0;
1490
1491 if (dpif->dpif_class->set_config) {
1492 error = dpif->dpif_class->set_config(dpif, cfg);
1493 if (error) {
1494 log_operation(dpif, "set_config", error);
1495 }
1496 }
1497
1498 return error;
1499 }
1500
1501 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1502 * there can be multiple poll loops, 'handler_id' is needed as index to
1503 * identify the corresponding poll loop. If successful, stores the upcall
1504 * into '*upcall', using 'buf' for storage. Should only be called if
1505 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1506 *
1507 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1508 * 'buf', so their memory cannot be freed separately from 'buf'.
1509 *
1510 * The caller owns the data of 'upcall->packet' and may modify it. If
1511 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1512 * will be reallocated. This requires the data of 'upcall->packet' to be
1513 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1514 * when an error is returned, the 'upcall->packet' may be uninitialized
1515 * and should not be released.
1516 *
1517 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1518 * if no upcall is immediately available. */
1519 int
1520 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1521 struct ofpbuf *buf)
1522 {
1523 int error = EAGAIN;
1524
1525 if (dpif->dpif_class->recv) {
1526 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1527 if (!error) {
1528 dpif_print_packet(dpif, upcall);
1529 } else if (error != EAGAIN) {
1530 log_operation(dpif, "recv", error);
1531 }
1532 }
1533 return error;
1534 }
1535
1536 /* Discards all messages that would otherwise be received by dpif_recv() on
1537 * 'dpif'. */
1538 void
1539 dpif_recv_purge(struct dpif *dpif)
1540 {
1541 COVERAGE_INC(dpif_purge);
1542 if (dpif->dpif_class->recv_purge) {
1543 dpif->dpif_class->recv_purge(dpif);
1544 }
1545 }
1546
1547 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1548 * 'dpif' has a message queued to be received with the recv member
1549 * function. Since there can be multiple poll loops, 'handler_id' is
1550 * needed as index to identify the corresponding poll loop. */
1551 void
1552 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1553 {
1554 if (dpif->dpif_class->recv_wait) {
1555 dpif->dpif_class->recv_wait(dpif, handler_id);
1556 }
1557 }
1558
1559 /*
1560 * Return the datapath version. Caller is responsible for freeing
1561 * the string.
1562 */
1563 char *
1564 dpif_get_dp_version(const struct dpif *dpif)
1565 {
1566 char *version = NULL;
1567
1568 if (dpif->dpif_class->get_datapath_version) {
1569 version = dpif->dpif_class->get_datapath_version();
1570 }
1571
1572 return version;
1573 }
1574
1575 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1576 * and '*engine_id', respectively. */
1577 void
1578 dpif_get_netflow_ids(const struct dpif *dpif,
1579 uint8_t *engine_type, uint8_t *engine_id)
1580 {
1581 *engine_type = dpif->netflow_engine_type;
1582 *engine_id = dpif->netflow_engine_id;
1583 }
1584
1585 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1586 * value used for setting packet priority.
1587 * On success, returns 0 and stores the priority into '*priority'.
1588 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1589 int
1590 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1591 uint32_t *priority)
1592 {
1593 int error = (dpif->dpif_class->queue_to_priority
1594 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1595 priority)
1596 : EOPNOTSUPP);
1597 if (error) {
1598 *priority = 0;
1599 }
1600 log_operation(dpif, "queue_to_priority", error);
1601 return error;
1602 }
1603 \f
1604 void
1605 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1606 const char *name,
1607 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1608 {
1609 dpif->dpif_class = dpif_class;
1610 dpif->base_name = xstrdup(name);
1611 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1612 dpif->netflow_engine_type = netflow_engine_type;
1613 dpif->netflow_engine_id = netflow_engine_id;
1614 }
1615
1616 /* Undoes the results of initialization.
1617 *
1618 * Normally this function only needs to be called from dpif_close().
1619 * However, it may be called by providers due to an error on opening
1620 * that occurs after initialization. It this case dpif_close() would
1621 * never be called. */
1622 void
1623 dpif_uninit(struct dpif *dpif, bool close)
1624 {
1625 char *base_name = dpif->base_name;
1626 char *full_name = dpif->full_name;
1627
1628 if (close) {
1629 dpif->dpif_class->close(dpif);
1630 }
1631
1632 free(base_name);
1633 free(full_name);
1634 }
1635 \f
1636 static void
1637 log_operation(const struct dpif *dpif, const char *operation, int error)
1638 {
1639 if (!error) {
1640 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1641 } else if (ofperr_is_valid(error)) {
1642 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1643 dpif_name(dpif), operation, ofperr_get_name(error));
1644 } else {
1645 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1646 dpif_name(dpif), operation, ovs_strerror(error));
1647 }
1648 }
1649
1650 static enum vlog_level
1651 flow_message_log_level(int error)
1652 {
1653 /* If flows arrive in a batch, userspace may push down multiple
1654 * unique flow definitions that overlap when wildcards are applied.
1655 * Kernels that support flow wildcarding will reject these flows as
1656 * duplicates (EEXIST), so lower the log level to debug for these
1657 * types of messages. */
1658 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1659 }
1660
1661 static bool
1662 should_log_flow_message(int error)
1663 {
1664 return !vlog_should_drop(&this_module, flow_message_log_level(error),
1665 error ? &error_rl : &dpmsg_rl);
1666 }
1667
1668 static void
1669 log_flow_message(const struct dpif *dpif, int error, const char *operation,
1670 const struct nlattr *key, size_t key_len,
1671 const struct nlattr *mask, size_t mask_len,
1672 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
1673 const struct nlattr *actions, size_t actions_len)
1674 {
1675 struct ds ds = DS_EMPTY_INITIALIZER;
1676 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1677 if (error) {
1678 ds_put_cstr(&ds, "failed to ");
1679 }
1680 ds_put_format(&ds, "%s ", operation);
1681 if (error) {
1682 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1683 }
1684 if (ufid) {
1685 odp_format_ufid(ufid, &ds);
1686 ds_put_cstr(&ds, " ");
1687 }
1688 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1689 if (stats) {
1690 ds_put_cstr(&ds, ", ");
1691 dpif_flow_stats_format(stats, &ds);
1692 }
1693 if (actions || actions_len) {
1694 ds_put_cstr(&ds, ", actions:");
1695 format_odp_actions(&ds, actions, actions_len);
1696 }
1697 vlog(&this_module, flow_message_log_level(error), "%s", ds_cstr(&ds));
1698 ds_destroy(&ds);
1699 }
1700
1701 static void
1702 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1703 int error)
1704 {
1705 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
1706 struct ds s;
1707
1708 ds_init(&s);
1709 ds_put_cstr(&s, "put");
1710 if (put->flags & DPIF_FP_CREATE) {
1711 ds_put_cstr(&s, "[create]");
1712 }
1713 if (put->flags & DPIF_FP_MODIFY) {
1714 ds_put_cstr(&s, "[modify]");
1715 }
1716 if (put->flags & DPIF_FP_ZERO_STATS) {
1717 ds_put_cstr(&s, "[zero]");
1718 }
1719 log_flow_message(dpif, error, ds_cstr(&s),
1720 put->key, put->key_len, put->mask, put->mask_len,
1721 put->ufid, put->stats, put->actions,
1722 put->actions_len);
1723 ds_destroy(&s);
1724 }
1725 }
1726
1727 static void
1728 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1729 int error)
1730 {
1731 if (should_log_flow_message(error)) {
1732 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1733 NULL, 0, del->ufid, !error ? del->stats : NULL,
1734 NULL, 0);
1735 }
1736 }
1737
1738 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1739 * (0 for success). 'subexecute' should be true if the execution is a result
1740 * of breaking down a larger execution that needed help, false otherwise.
1741 *
1742 *
1743 * XXX In theory, the log message could be deceptive because this function is
1744 * called after the dpif_provider's '->execute' function, which is allowed to
1745 * modify execute->packet and execute->md. In practice, though:
1746 *
1747 * - dpif-netlink doesn't modify execute->packet or execute->md.
1748 *
1749 * - dpif-netdev does modify them but it is less likely to have problems
1750 * because it is built into ovs-vswitchd and cannot have version skew,
1751 * etc.
1752 *
1753 * It would still be better to avoid the potential problem. I don't know of a
1754 * good way to do that, though, that isn't expensive. */
1755 static void
1756 log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
1757 bool subexecute, int error)
1758 {
1759 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1760 && !execute->probe) {
1761 struct ds ds = DS_EMPTY_INITIALIZER;
1762 char *packet;
1763
1764 packet = ofp_packet_to_string(dp_packet_data(execute->packet),
1765 dp_packet_size(execute->packet));
1766 ds_put_format(&ds, "%s: %sexecute ",
1767 dpif_name(dpif),
1768 (subexecute ? "sub-"
1769 : dpif_execute_needs_help(execute) ? "super-"
1770 : ""));
1771 format_odp_actions(&ds, execute->actions, execute->actions_len);
1772 if (error) {
1773 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1774 }
1775 ds_put_format(&ds, " on packet %s", packet);
1776 ds_put_format(&ds, " mtu %d", execute->mtu);
1777 vlog(&this_module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1778 ds_destroy(&ds);
1779 free(packet);
1780 }
1781 }
1782
1783 static void
1784 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1785 int error)
1786 {
1787 if (should_log_flow_message(error)) {
1788 log_flow_message(dpif, error, "flow_get",
1789 get->key, get->key_len,
1790 get->flow->mask, get->flow->mask_len,
1791 get->ufid, &get->flow->stats,
1792 get->flow->actions, get->flow->actions_len);
1793 }
1794 }
1795
/* Returns true if 'dpif' can execute tunnel push/pop actions, which per this
 * implementation is the case exactly for netdev (userspace) datapaths. */
bool
dpif_supports_tnl_push_pop(const struct dpif *dpif)
{
    bool is_netdev = dpif_is_netdev(dpif);

    return is_netdev;
}
1801
1802 /* Meters */
1803 void
1804 dpif_meter_get_features(const struct dpif *dpif,
1805 struct ofputil_meter_features *features)
1806 {
1807 memset(features, 0, sizeof *features);
1808 if (dpif->dpif_class->meter_get_features) {
1809 dpif->dpif_class->meter_get_features(dpif, features);
1810 }
1811 }
1812
1813 /* Adds or modifies meter identified by 'meter_id' in 'dpif'. If '*meter_id'
1814 * is UINT32_MAX, adds a new meter, otherwise modifies an existing meter.
1815 *
1816 * If meter is successfully added, sets '*meter_id' to the new meter's
1817 * meter number. */
1818 int
1819 dpif_meter_set(struct dpif *dpif, ofproto_meter_id *meter_id,
1820 struct ofputil_meter_config *config)
1821 {
1822 int error;
1823
1824 COVERAGE_INC(dpif_meter_set);
1825
1826 error = dpif->dpif_class->meter_set(dpif, meter_id, config);
1827 if (!error) {
1828 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
1829 dpif_name(dpif), meter_id->uint32);
1830 } else {
1831 VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
1832 dpif_name(dpif), meter_id->uint32, ovs_strerror(error));
1833 meter_id->uint32 = UINT32_MAX;
1834 }
1835 return error;
1836 }
1837
1838 int
1839 dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
1840 struct ofputil_meter_stats *stats, uint16_t n_bands)
1841 {
1842 int error;
1843
1844 COVERAGE_INC(dpif_meter_get);
1845
1846 error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
1847 if (!error) {
1848 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
1849 dpif_name(dpif), meter_id.uint32);
1850 } else {
1851 VLOG_WARN_RL(&error_rl,
1852 "%s: failed to get DPIF meter %"PRIu32" stats: %s",
1853 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1854 stats->packet_in_count = ~0;
1855 stats->byte_in_count = ~0;
1856 stats->n_bands = 0;
1857 }
1858 return error;
1859 }
1860
1861 int
1862 dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
1863 struct ofputil_meter_stats *stats, uint16_t n_bands)
1864 {
1865 int error;
1866
1867 COVERAGE_INC(dpif_meter_del);
1868
1869 error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
1870 if (!error) {
1871 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
1872 dpif_name(dpif), meter_id.uint32);
1873 } else {
1874 VLOG_WARN_RL(&error_rl,
1875 "%s: failed to delete DPIF meter %"PRIu32": %s",
1876 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1877 if (stats) {
1878 stats->packet_in_count = ~0;
1879 stats->byte_in_count = ~0;
1880 stats->n_bands = 0;
1881 }
1882 }
1883 return error;
1884 }