]> git.proxmox.com Git - ovs.git/blob - lib/dpif.c
lockfile: Support \-delimited file names in lockfile_name().
[ovs.git] / lib / dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dynamic-string.h"
30 #include "flow.h"
31 #include "netdev.h"
32 #include "netlink.h"
33 #include "odp-execute.h"
34 #include "odp-util.h"
35 #include "ofp-errors.h"
36 #include "ofp-print.h"
37 #include "ofp-util.h"
38 #include "ofpbuf.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
42 #include "seq.h"
43 #include "shash.h"
44 #include "sset.h"
45 #include "timeval.h"
46 #include "tnl-arp-cache.h"
47 #include "tnl-ports.h"
48 #include "util.h"
49 #include "valgrind.h"
50 #include "openvswitch/vlog.h"
51
VLOG_DEFINE_THIS_MODULE(dpif);

/* Coverage counters: each is incremented once per corresponding dpif
 * operation in the wrappers below, so "ovs-appctl coverage/show" can report
 * how often each datapath operation runs. */
COVERAGE_DEFINE(dpif_destroy);
COVERAGE_DEFINE(dpif_port_add);
COVERAGE_DEFINE(dpif_port_del);
COVERAGE_DEFINE(dpif_flow_flush);
COVERAGE_DEFINE(dpif_flow_get);
COVERAGE_DEFINE(dpif_flow_put);
COVERAGE_DEFINE(dpif_flow_del);
COVERAGE_DEFINE(dpif_execute);
COVERAGE_DEFINE(dpif_purge);
COVERAGE_DEFINE(dpif_execute_with_help);
64
/* Providers compiled into this build; dp_initialize() registers each one.
 * The netlink (kernel) provider is only available on Linux and Windows. */
static const struct dpif_class *base_dpif_classes[] = {
#if defined(__linux__) || defined(_WIN32)
    &dpif_netlink_class,
#endif
    &dpif_netdev_class,
};

/* A registered provider together with the number of dpifs currently open
 * against it; a provider cannot be unregistered while 'refcount' is
 * nonzero (see dp_unregister_provider__()). */
struct registered_dpif_class {
    const struct dpif_class *dpif_class;
    int refcount;
};

/* All registered providers, keyed by their type string. */
static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
/* Type strings that dp_register_provider() must refuse to register. */
static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);

/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;

/* Rate limit for individual messages going to or from the datapath, output at
 * DBG level.  This is very high because, if these are enabled, it is because
 * we really need to see them. */
static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);

/* Not really much point in logging many dpif errors. */
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
89
/* Logging helpers defined later in this file; each formats and emits a
 * rate-limited description of one dpif operation and its outcome. */
static void log_flow_message(const struct dpif *dpif, int error,
                             const char *operation,
                             const struct nlattr *key, size_t key_len,
                             const struct nlattr *mask, size_t mask_len,
                             const ovs_u128 *ufid,
                             const struct dpif_flow_stats *stats,
                             const struct nlattr *actions, size_t actions_len);
static void log_operation(const struct dpif *, const char *operation,
                          int error);
static bool should_log_flow_message(int error);
static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
                                 int error);
static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
                                 int error);
static void log_execute_message(struct dpif *, const struct dpif_execute *,
                                bool subexecute, int error);
static void log_flow_get_message(const struct dpif *,
                                 const struct dpif_flow_get *, int error);

/* Incremented whenever tnl route, arp, etc changes. */
struct seq *tnl_conf_seq;
111
112 static void
113 dp_initialize(void)
114 {
115 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
116
117 if (ovsthread_once_start(&once)) {
118 int i;
119
120 tnl_conf_seq = seq_create();
121 dpctl_unixctl_register();
122 tnl_port_map_init();
123 tnl_arp_cache_init();
124 route_table_init();
125
126 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
127 dp_register_provider(base_dpif_classes[i]);
128 }
129
130 ovsthread_once_done(&once);
131 }
132 }
133
134 static int
135 dp_register_provider__(const struct dpif_class *new_class)
136 {
137 struct registered_dpif_class *registered_class;
138
139 if (sset_contains(&dpif_blacklist, new_class->type)) {
140 VLOG_DBG("attempted to register blacklisted provider: %s",
141 new_class->type);
142 return EINVAL;
143 }
144
145 if (shash_find(&dpif_classes, new_class->type)) {
146 VLOG_WARN("attempted to register duplicate datapath provider: %s",
147 new_class->type);
148 return EEXIST;
149 }
150
151 registered_class = xmalloc(sizeof *registered_class);
152 registered_class->dpif_class = new_class;
153 registered_class->refcount = 0;
154
155 shash_add(&dpif_classes, new_class->type, registered_class);
156
157 return 0;
158 }
159
160 /* Registers a new datapath provider. After successful registration, new
161 * datapaths of that type can be opened using dpif_open(). */
162 int
163 dp_register_provider(const struct dpif_class *new_class)
164 {
165 int error;
166
167 ovs_mutex_lock(&dpif_mutex);
168 error = dp_register_provider__(new_class);
169 ovs_mutex_unlock(&dpif_mutex);
170
171 return error;
172 }
173
174 /* Unregisters a datapath provider. 'type' must have been previously
175 * registered and not currently be in use by any dpifs. After unregistration
176 * new datapaths of that type cannot be opened using dpif_open(). */
177 static int
178 dp_unregister_provider__(const char *type)
179 {
180 struct shash_node *node;
181 struct registered_dpif_class *registered_class;
182
183 node = shash_find(&dpif_classes, type);
184 if (!node) {
185 VLOG_WARN("attempted to unregister a datapath provider that is not "
186 "registered: %s", type);
187 return EAFNOSUPPORT;
188 }
189
190 registered_class = node->data;
191 if (registered_class->refcount) {
192 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
193 return EBUSY;
194 }
195
196 shash_delete(&dpif_classes, node);
197 free(registered_class);
198
199 return 0;
200 }
201
202 /* Unregisters a datapath provider. 'type' must have been previously
203 * registered and not currently be in use by any dpifs. After unregistration
204 * new datapaths of that type cannot be opened using dpif_open(). */
205 int
206 dp_unregister_provider(const char *type)
207 {
208 int error;
209
210 dp_initialize();
211
212 ovs_mutex_lock(&dpif_mutex);
213 error = dp_unregister_provider__(type);
214 ovs_mutex_unlock(&dpif_mutex);
215
216 return error;
217 }
218
219 /* Blacklists a provider. Causes future calls of dp_register_provider() with
220 * a dpif_class which implements 'type' to fail. */
221 void
222 dp_blacklist_provider(const char *type)
223 {
224 ovs_mutex_lock(&dpif_mutex);
225 sset_add(&dpif_blacklist, type);
226 ovs_mutex_unlock(&dpif_mutex);
227 }
228
229 /* Adds the types of all currently registered datapath providers to 'types'.
230 * The caller must first initialize the sset. */
231 void
232 dp_enumerate_types(struct sset *types)
233 {
234 struct shash_node *node;
235
236 dp_initialize();
237
238 ovs_mutex_lock(&dpif_mutex);
239 SHASH_FOR_EACH(node, &dpif_classes) {
240 const struct registered_dpif_class *registered_class = node->data;
241 sset_add(types, registered_class->dpif_class->type);
242 }
243 ovs_mutex_unlock(&dpif_mutex);
244 }
245
246 static void
247 dp_class_unref(struct registered_dpif_class *rc)
248 {
249 ovs_mutex_lock(&dpif_mutex);
250 ovs_assert(rc->refcount);
251 rc->refcount--;
252 ovs_mutex_unlock(&dpif_mutex);
253 }
254
255 static struct registered_dpif_class *
256 dp_class_lookup(const char *type)
257 {
258 struct registered_dpif_class *rc;
259
260 ovs_mutex_lock(&dpif_mutex);
261 rc = shash_find_data(&dpif_classes, type);
262 if (rc) {
263 rc->refcount++;
264 }
265 ovs_mutex_unlock(&dpif_mutex);
266
267 return rc;
268 }
269
270 /* Clears 'names' and enumerates the names of all known created datapaths with
271 * the given 'type'. The caller must first initialize the sset. Returns 0 if
272 * successful, otherwise a positive errno value.
273 *
274 * Some kinds of datapaths might not be practically enumerable. This is not
275 * considered an error. */
276 int
277 dp_enumerate_names(const char *type, struct sset *names)
278 {
279 struct registered_dpif_class *registered_class;
280 const struct dpif_class *dpif_class;
281 int error;
282
283 dp_initialize();
284 sset_clear(names);
285
286 registered_class = dp_class_lookup(type);
287 if (!registered_class) {
288 VLOG_WARN("could not enumerate unknown type: %s", type);
289 return EAFNOSUPPORT;
290 }
291
292 dpif_class = registered_class->dpif_class;
293 error = (dpif_class->enumerate
294 ? dpif_class->enumerate(names, dpif_class)
295 : 0);
296 if (error) {
297 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
298 ovs_strerror(error));
299 }
300 dp_class_unref(registered_class);
301
302 return error;
303 }
304
/* Parses 'datapath_name_', which is of the form [type@]name into its
 * component pieces.  'name' and 'type' must be freed by the caller.
 *
 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
void
dp_parse_name(const char *datapath_name_, char **name, char **type)
{
    char *datapath_name = xstrdup(datapath_name_);
    char *separator;

    separator = strchr(datapath_name, '@');
    if (separator) {
        /* "type@name": the part before '@' is the type, the rest the name.
         * Ownership of the duplicated buffer transfers via '*type'. */
        *separator = '\0';
        *type = datapath_name;
        /* NOTE(review): dpif_normalize_type() is applied to the NAME half
         * here, although the comment above promises that 'type' is the
         * normalized string.  As written, an explicit empty type ("@name")
         * stays empty while an empty name ("type@") becomes "system".
         * Confirm against callers whether this is intended before changing. */
        *name = xstrdup(dpif_normalize_type(separator + 1));
    } else {
        /* No '@' present: the whole string is the name and the type takes
         * the default ("system"). */
        *name = datapath_name;
        *type = xstrdup(dpif_normalize_type(NULL));
    }
}
325
326 static int
327 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
328 {
329 struct dpif *dpif = NULL;
330 int error;
331 struct registered_dpif_class *registered_class;
332
333 dp_initialize();
334
335 type = dpif_normalize_type(type);
336 registered_class = dp_class_lookup(type);
337 if (!registered_class) {
338 VLOG_WARN("could not create datapath %s of unknown type %s", name,
339 type);
340 error = EAFNOSUPPORT;
341 goto exit;
342 }
343
344 error = registered_class->dpif_class->open(registered_class->dpif_class,
345 name, create, &dpif);
346 if (!error) {
347 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
348 } else {
349 dp_class_unref(registered_class);
350 }
351
352 exit:
353 *dpifp = error ? NULL : dpif;
354 return error;
355 }
356
357 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
358 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
359 * the empty string to specify the default system type. Returns 0 if
360 * successful, otherwise a positive errno value. On success stores a pointer
361 * to the datapath in '*dpifp', otherwise a null pointer. */
362 int
363 dpif_open(const char *name, const char *type, struct dpif **dpifp)
364 {
365 return do_open(name, type, false, dpifp);
366 }
367
368 /* Tries to create and open a new datapath with the given 'name' and 'type'.
369 * 'type' may be either NULL or the empty string to specify the default system
370 * type. Will fail if a datapath with 'name' and 'type' already exists.
371 * Returns 0 if successful, otherwise a positive errno value. On success
372 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
373 int
374 dpif_create(const char *name, const char *type, struct dpif **dpifp)
375 {
376 return do_open(name, type, true, dpifp);
377 }
378
/* Opens the datapath 'name' of type 'type', creating it first if it does not
 * already exist.  'type' may be NULL or "" for the default system type.
 * Returns 0 and stores the dpif in '*dpifp' on success; otherwise returns a
 * positive errno value and stores NULL. */
int
dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
{
    int error = dpif_create(name, type, dpifp);

    if (!error) {
        return 0;
    }

    if (error == EEXIST || error == EBUSY) {
        /* Someone else created it (or it survived a restart); fall back to
         * opening the existing datapath. */
        error = dpif_open(name, type, dpifp);
        if (error) {
            VLOG_WARN("datapath %s already exists but cannot be opened: %s",
                      name, ovs_strerror(error));
        }
    } else {
        VLOG_WARN("failed to create datapath %s: %s",
                  name, ovs_strerror(error));
    }
    return error;
}
402
/* Closes and frees the connection to 'dpif'.  Does not destroy the datapath
 * itself; call dpif_delete() first, instead, if that is desirable. */
void
dpif_close(struct dpif *dpif)
{
    if (dpif) {
        struct registered_dpif_class *rc;

        /* Look up the provider BEFORE uninit: dpif_uninit(dpif, true) frees
         * 'dpif', so 'dpif->dpif_class' cannot be read afterward.
         * NOTE(review): this read of 'dpif_classes' is done without holding
         * 'dpif_mutex'; presumably safe because the class cannot be
         * unregistered while this dpif holds a reference — confirm. */
        rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
        dpif_uninit(dpif, true);
        dp_class_unref(rc);
    }
}
416
417 /* Performs periodic work needed by 'dpif'. */
418 bool
419 dpif_run(struct dpif *dpif)
420 {
421 if (dpif->dpif_class->run) {
422 return dpif->dpif_class->run(dpif);
423 }
424 return false;
425 }
426
/* Arranges for poll_block() to wake up when dp_run() needs to be called for
 * 'dpif'.  A no-op for providers without a wait hook. */
void
dpif_wait(struct dpif *dpif)
{
    if (dpif->dpif_class->wait) {
        dpif->dpif_class->wait(dpif);
    }
}
436
/* Returns the name of datapath 'dpif' prefixed with the type
 * (for use in log messages).  The string is owned by 'dpif'. */
const char *
dpif_name(const struct dpif *dpif)
{
    return dpif->full_name;
}

/* Returns the name of datapath 'dpif' without the type
 * (for use in device names).  The string is owned by 'dpif'. */
const char *
dpif_base_name(const struct dpif *dpif)
{
    return dpif->base_name;
}

/* Returns the type of datapath 'dpif', e.g. "system" or "netdev". */
const char *
dpif_type(const struct dpif *dpif)
{
    return dpif->dpif_class->type;
}
459
/* Returns the fully spelled out name for the given datapath 'type'.
 *
 * A NULL or empty 'type' maps to the default "system" type; any other
 * string is returned unchanged.  Normalized type strings can therefore be
 * compared with strcmp(), whereas unnormalized ones might differ in
 * spelling yet mean the same thing. */
const char *
dpif_normalize_type(const char *type)
{
    if (type == NULL || type[0] == '\0') {
        return "system";
    }
    return type;
}
469
470 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
471 * ports. After calling this function, it does not make sense to pass 'dpif'
472 * to any functions other than dpif_name() or dpif_close(). */
473 int
474 dpif_delete(struct dpif *dpif)
475 {
476 int error;
477
478 COVERAGE_INC(dpif_destroy);
479
480 error = dpif->dpif_class->destroy(dpif);
481 log_operation(dpif, "delete", error);
482 return error;
483 }
484
485 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
486 * otherwise a positive errno value. */
487 int
488 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
489 {
490 int error = dpif->dpif_class->get_stats(dpif, stats);
491 if (error) {
492 memset(stats, 0, sizeof *stats);
493 }
494 log_operation(dpif, "get_stats", error);
495 return error;
496 }
497
498 const char *
499 dpif_port_open_type(const char *datapath_type, const char *port_type)
500 {
501 struct registered_dpif_class *rc;
502
503 datapath_type = dpif_normalize_type(datapath_type);
504
505 ovs_mutex_lock(&dpif_mutex);
506 rc = shash_find_data(&dpif_classes, datapath_type);
507 if (rc && rc->dpif_class->port_open_type) {
508 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
509 }
510 ovs_mutex_unlock(&dpif_mutex);
511
512 return port_type;
513 }
514
515 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
516 * non-null and its value is not ODPP_NONE, then attempts to use the
517 * value as the port number.
518 *
519 * If successful, returns 0 and sets '*port_nop' to the new port's port
520 * number (if 'port_nop' is non-null). On failure, returns a positive
521 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
522 * non-null). */
523 int
524 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
525 {
526 const char *netdev_name = netdev_get_name(netdev);
527 odp_port_t port_no = ODPP_NONE;
528 int error;
529
530 COVERAGE_INC(dpif_port_add);
531
532 if (port_nop) {
533 port_no = *port_nop;
534 }
535
536 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
537 if (!error) {
538 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
539 dpif_name(dpif), netdev_name, port_no);
540 } else {
541 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
542 dpif_name(dpif), netdev_name, ovs_strerror(error));
543 port_no = ODPP_NONE;
544 }
545 if (port_nop) {
546 *port_nop = port_no;
547 }
548 return error;
549 }
550
551 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
552 * otherwise a positive errno value. */
553 int
554 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
555 {
556 int error;
557
558 COVERAGE_INC(dpif_port_del);
559
560 error = dpif->dpif_class->port_del(dpif, port_no);
561 if (!error) {
562 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
563 dpif_name(dpif), port_no);
564 } else {
565 log_operation(dpif, "port_del", error);
566 }
567 return error;
568 }
569
570 /* Makes a deep copy of 'src' into 'dst'. */
571 void
572 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
573 {
574 dst->name = xstrdup(src->name);
575 dst->type = xstrdup(src->type);
576 dst->port_no = src->port_no;
577 }
578
579 /* Frees memory allocated to members of 'dpif_port'.
580 *
581 * Do not call this function on a dpif_port obtained from
582 * dpif_port_dump_next(): that function retains ownership of the data in the
583 * dpif_port. */
584 void
585 dpif_port_destroy(struct dpif_port *dpif_port)
586 {
587 free(dpif_port->name);
588 free(dpif_port->type);
589 }
590
591 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
592 * true; otherwise, returns false. */
593 bool
594 dpif_port_exists(const struct dpif *dpif, const char *devname)
595 {
596 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
597 if (error != 0 && error != ENOENT && error != ENODEV) {
598 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
599 dpif_name(dpif), devname, ovs_strerror(error));
600 }
601
602 return !error;
603 }
604
605 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
606 * initializes '*port' appropriately; on failure, returns a positive errno
607 * value.
608 *
609 * The caller owns the data in 'port' and must free it with
610 * dpif_port_destroy() when it is no longer needed. */
611 int
612 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
613 struct dpif_port *port)
614 {
615 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
616 if (!error) {
617 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
618 dpif_name(dpif), port_no, port->name);
619 } else {
620 memset(port, 0, sizeof *port);
621 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
622 dpif_name(dpif), port_no, ovs_strerror(error));
623 }
624 return error;
625 }
626
627 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
628 * initializes '*port' appropriately; on failure, returns a positive errno
629 * value.
630 *
631 * The caller owns the data in 'port' and must free it with
632 * dpif_port_destroy() when it is no longer needed. */
633 int
634 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
635 struct dpif_port *port)
636 {
637 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
638 if (!error) {
639 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
640 dpif_name(dpif), devname, port->port_no);
641 } else {
642 memset(port, 0, sizeof *port);
643
644 /* For ENOENT or ENODEV we use DBG level because the caller is probably
645 * interested in whether 'dpif' actually has a port 'devname', so that
646 * it's not an issue worth logging if it doesn't. Other errors are
647 * uncommon and more likely to indicate a real problem. */
648 VLOG_RL(&error_rl,
649 error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
650 "%s: failed to query port %s: %s",
651 dpif_name(dpif), devname, ovs_strerror(error));
652 }
653 return error;
654 }
655
656 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
657 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
658 * flows whose packets arrived on port 'port_no'. In the case where the
659 * provider allocates multiple Netlink PIDs to a single port, it may use
660 * 'hash' to spread load among them. The caller need not use a particular
661 * hash function; a 5-tuple hash is suitable.
662 *
663 * (The datapath implementation might use some different hash function for
664 * distributing packets received via flow misses among PIDs. This means
665 * that packets received via flow misses might be reordered relative to
666 * packets received via userspace actions. This is not ordinarily a
667 * problem.)
668 *
669 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
670 * allocated to any port, that the client may use for special purposes.
671 *
672 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
673 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
674 * disabled and then re-enabled, so a client that does that must be prepared to
675 * update all of the flows that it installed that contain
676 * OVS_ACTION_ATTR_USERSPACE actions. */
677 uint32_t
678 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
679 {
680 return (dpif->dpif_class->port_get_pid
681 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
682 : 0);
683 }
684
685 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
686 * the port's name into the 'name_size' bytes in 'name', ensuring that the
687 * result is null-terminated. On failure, returns a positive errno value and
688 * makes 'name' the empty string. */
689 int
690 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
691 char *name, size_t name_size)
692 {
693 struct dpif_port port;
694 int error;
695
696 ovs_assert(name_size > 0);
697
698 error = dpif_port_query_by_number(dpif, port_no, &port);
699 if (!error) {
700 ovs_strlcpy(name, port.name, name_size);
701 dpif_port_destroy(&port);
702 } else {
703 *name = '\0';
704 }
705 return error;
706 }
707
708 /* Initializes 'dump' to begin dumping the ports in a dpif.
709 *
710 * This function provides no status indication. An error status for the entire
711 * dump operation is provided when it is completed by calling
712 * dpif_port_dump_done().
713 */
714 void
715 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
716 {
717 dump->dpif = dpif;
718 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
719 log_operation(dpif, "port_dump_start", dump->error);
720 }
721
/* Attempts to retrieve another port from 'dump', which must have been
 * initialized with dpif_port_dump_start().  On success, stores a new dpif_port
 * into 'port' and returns true.  On failure, returns false.
 *
 * Failure might indicate an actual error or merely that the last port has been
 * dumped.  An error status for the entire dump operation is provided when it
 * is completed by calling dpif_port_dump_done().
 *
 * The dpif owns the data stored in 'port'.  It will remain valid until at
 * least the next time 'dump' is passed to dpif_port_dump_next() or
 * dpif_port_dump_done(). */
bool
dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
{
    const struct dpif *dpif = dump->dpif;

    /* A dump that already failed (or finished) stays failed; never call back
     * into the provider after that. */
    if (dump->error) {
        return false;
    }

    dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
    if (dump->error == EOF) {
        /* EOF is the normal "no more ports" signal, not a real error. */
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
    } else {
        log_operation(dpif, "port_dump_next", dump->error);
    }

    if (dump->error) {
        /* Release provider state now; dpif_port_dump_done() will see the
         * nonzero error and skip its own port_dump_done() call. */
        dpif->dpif_class->port_dump_done(dpif, dump->state);
        return false;
    }
    return true;
}
755
756 /* Completes port table dump operation 'dump', which must have been initialized
757 * with dpif_port_dump_start(). Returns 0 if the dump operation was
758 * error-free, otherwise a positive errno value describing the problem. */
759 int
760 dpif_port_dump_done(struct dpif_port_dump *dump)
761 {
762 const struct dpif *dpif = dump->dpif;
763 if (!dump->error) {
764 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
765 log_operation(dpif, "port_dump_done", dump->error);
766 }
767 return dump->error == EOF ? 0 : dump->error;
768 }
769
770 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
771 * 'dpif' has changed, this function does one of the following:
772 *
773 * - Stores the name of the device that was added to or deleted from 'dpif' in
774 * '*devnamep' and returns 0. The caller is responsible for freeing
775 * '*devnamep' (with free()) when it no longer needs it.
776 *
777 * - Returns ENOBUFS and sets '*devnamep' to NULL.
778 *
779 * This function may also return 'false positives', where it returns 0 and
780 * '*devnamep' names a device that was not actually added or deleted or it
781 * returns ENOBUFS without any change.
782 *
783 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
784 * return other positive errno values to indicate that something has gone
785 * wrong. */
786 int
787 dpif_port_poll(const struct dpif *dpif, char **devnamep)
788 {
789 int error = dpif->dpif_class->port_poll(dpif, devnamep);
790 if (error) {
791 *devnamep = NULL;
792 }
793 return error;
794 }
795
796 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
797 * value other than EAGAIN. */
798 void
799 dpif_port_poll_wait(const struct dpif *dpif)
800 {
801 dpif->dpif_class->port_poll_wait(dpif);
802 }
803
/* Extracts the flow stats for a packet.  The 'flow' and 'packet'
 * arguments must have been initialized through a call to flow_extract().
 * 'used' is stored into stats->used. */
void
dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
                        long long int used, struct dpif_flow_stats *stats)
{
    /* tcp_flags is network byte order in 'flow'; stats keep host order. */
    stats->tcp_flags = ntohs(flow->tcp_flags);
    stats->n_bytes = dp_packet_size(packet);
    /* A single packet always contributes exactly one packet's worth. */
    stats->n_packets = 1;
    stats->used = used;
}
816
817 /* Appends a human-readable representation of 'stats' to 's'. */
818 void
819 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
820 {
821 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
822 stats->n_packets, stats->n_bytes);
823 if (stats->used) {
824 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
825 } else {
826 ds_put_format(s, "never");
827 }
828 if (stats->tcp_flags) {
829 ds_put_cstr(s, ", flags:");
830 packet_format_tcp_flags(s, stats->tcp_flags);
831 }
832 }
833
/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'. */
void
dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
               const void *key, size_t key_len, ovs_u128 *hash)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static uint32_t secret;

    /* Seed the hash once per process so the mapping from keys to hashes is
     * stable within a run but differs between runs. */
    if (ovsthread_once_start(&once)) {
        secret = random_uint32();
        ovsthread_once_done(&once);
    }
    hash_bytes128(key, key_len, secret, hash);
}
848
849 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
850 * positive errno value. */
851 int
852 dpif_flow_flush(struct dpif *dpif)
853 {
854 int error;
855
856 COVERAGE_INC(dpif_flow_flush);
857
858 error = dpif->dpif_class->flow_flush(dpif);
859 log_operation(dpif, "flow_flush", error);
860 return error;
861 }
862
/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
 * Returns true if the datapath supported installing 'flow', false otherwise.
 *
 * Used at startup to discover optional datapath features: a feature is
 * considered supported only when the probe flow both installs cleanly and
 * reads back with the expected UFID. */
bool
dpif_probe_feature(struct dpif *dpif, const char *name,
                   const struct ofpbuf *key, const ovs_u128 *ufid)
{
    struct dpif_flow flow;
    struct ofpbuf reply;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    bool enable_feature = false;
    int error;

    /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
     * restarted) at just the right time such that feature probes from the
     * previous run are still present in the datapath. */
    error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
                          key->data, key->size, NULL, 0, NULL, 0,
                          ufid, PMD_ID_NULL, NULL);
    if (error) {
        /* EINVAL is the expected "datapath does not understand this key"
         * answer, so only louder failures get a warning. */
        if (error != EINVAL) {
            VLOG_WARN("%s: %s flow probe failed (%s)",
                      dpif_name(dpif), name, ovs_strerror(error));
        }
        return false;
    }

    /* Read the flow back; a matching UFID (when one was supplied) confirms
     * the datapath really stored what we installed. */
    ofpbuf_use_stack(&reply, &stub, sizeof stub);
    error = dpif_flow_get(dpif, key->data, key->size, ufid,
                          PMD_ID_NULL, &reply, &flow);
    if (!error
        && (!ufid || (flow.ufid_present && ovs_u128_equal(ufid, &flow.ufid)))) {
        enable_feature = true;
    }

    /* Always clean up the probe flow, even if the get failed. */
    error = dpif_flow_del(dpif, key->data, key->size, ufid,
                          PMD_ID_NULL, NULL);
    if (error) {
        VLOG_WARN("%s: failed to delete %s feature probe flow",
                  dpif_name(dpif), name);
    }

    return enable_feature;
}
907
908 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
909 int
910 dpif_flow_get(struct dpif *dpif,
911 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
912 const int pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
913 {
914 struct dpif_op *opp;
915 struct dpif_op op;
916
917 op.type = DPIF_OP_FLOW_GET;
918 op.u.flow_get.key = key;
919 op.u.flow_get.key_len = key_len;
920 op.u.flow_get.ufid = ufid;
921 op.u.flow_get.pmd_id = pmd_id;
922 op.u.flow_get.buffer = buf;
923
924 memset(flow, 0, sizeof *flow);
925 op.u.flow_get.flow = flow;
926 op.u.flow_get.flow->key = key;
927 op.u.flow_get.flow->key_len = key_len;
928
929 opp = &op;
930 dpif_operate(dpif, &opp, 1);
931
932 return op.error;
933 }
934
935 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
936 int
937 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
938 const struct nlattr *key, size_t key_len,
939 const struct nlattr *mask, size_t mask_len,
940 const struct nlattr *actions, size_t actions_len,
941 const ovs_u128 *ufid, const int pmd_id,
942 struct dpif_flow_stats *stats)
943 {
944 struct dpif_op *opp;
945 struct dpif_op op;
946
947 op.type = DPIF_OP_FLOW_PUT;
948 op.u.flow_put.flags = flags;
949 op.u.flow_put.key = key;
950 op.u.flow_put.key_len = key_len;
951 op.u.flow_put.mask = mask;
952 op.u.flow_put.mask_len = mask_len;
953 op.u.flow_put.actions = actions;
954 op.u.flow_put.actions_len = actions_len;
955 op.u.flow_put.ufid = ufid;
956 op.u.flow_put.pmd_id = pmd_id;
957 op.u.flow_put.stats = stats;
958
959 opp = &op;
960 dpif_operate(dpif, &opp, 1);
961
962 return op.error;
963 }
964
965 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
966 int
967 dpif_flow_del(struct dpif *dpif,
968 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
969 const int pmd_id, struct dpif_flow_stats *stats)
970 {
971 struct dpif_op *opp;
972 struct dpif_op op;
973
974 op.type = DPIF_OP_FLOW_DEL;
975 op.u.flow_del.key = key;
976 op.u.flow_del.key_len = key_len;
977 op.u.flow_del.ufid = ufid;
978 op.u.flow_del.pmd_id = pmd_id;
979 op.u.flow_del.stats = stats;
980 op.u.flow_del.terse = false;
981
982 opp = &op;
983 dpif_operate(dpif, &opp, 1);
984
985 return op.error;
986 }
987
/* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
 * flows in 'dpif'.  If 'terse' is true, then only UFID and statistics will
 * be returned in the dump.  Otherwise, all fields will be returned.
 *
 * This function always successfully returns a dpif_flow_dump.  Error
 * reporting is deferred to dpif_flow_dump_destroy(). */
struct dpif_flow_dump *
dpif_flow_dump_create(const struct dpif *dpif, bool terse)
{
    return dpif->dpif_class->flow_dump_create(dpif, terse);
}
999
1000 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1001 * All dpif_flow_dump_thread structures previously created for 'dump' must
1002 * previously have been destroyed.
1003 *
1004 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1005 * value describing the problem. */
1006 int
1007 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1008 {
1009 const struct dpif *dpif = dump->dpif;
1010 int error = dpif->dpif_class->flow_dump_destroy(dump);
1011 log_operation(dpif, "flow_dump_destroy", error);
1012 return error == EOF ? 0 : error;
1013 }
1014
1015 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1016 struct dpif_flow_dump_thread *
1017 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1018 {
1019 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1020 }
1021
1022 /* Releases 'thread'. */
1023 void
1024 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1025 {
1026 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1027 }
1028
/* Attempts to retrieve up to 'max_flows' more flows from 'thread'.  Returns 0
 * if and only if no flows remained to be retrieved, otherwise a positive
 * number reflecting the number of elements in 'flows[]' that were updated.
 * The number of flows returned might be less than 'max_flows' because
 * fewer than 'max_flows' remained, because this particular datapath does not
 * benefit from batching, or because an error occurred partway through
 * retrieval.  Thus, the caller should continue calling until a 0 return value,
 * even if intermediate return values are less than 'max_flows'.
 *
 * No error status is immediately provided.  An error status for the entire
 * dump operation is provided when it is completed by calling
 * dpif_flow_dump_destroy().
 *
 * All of the data stored into 'flows' is owned by the datapath, not by the
 * caller, and the caller must not modify or free it.  The datapath guarantees
 * that it remains accessible and unchanged until the first of:
 *  - The next call to dpif_flow_dump_next() for 'thread', or
 *  - The next rcu quiescent period. */
int
dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
                    struct dpif_flow *flows, int max_flows)
{
    struct dpif *dpif = thread->dpif;
    int n;

    ovs_assert(max_flows > 0);
    n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
    if (n > 0) {
        struct dpif_flow *f;

        /* Log each dumped flow.  should_log_flow_message() is re-evaluated
         * for every flow (not hoisted out of the loop) because it consults
         * the log rate limiter; once the limiter starts dropping, the loop
         * stops formatting messages early. */
        for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
            log_flow_message(dpif, 0, "flow_dump",
                             f->key, f->key_len, f->mask, f->mask_len,
                             &f->ufid, &f->stats, f->actions, f->actions_len);
        }
    } else {
        /* n == 0: the dump is complete (errors are reported later, by
         * dpif_flow_dump_destroy()). */
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
    }
    return n;
}
1069
/* Auxiliary state threaded through odp_execute_actions() into
 * dpif_execute_helper_cb() during dpif_execute_with_help(). */
struct dpif_execute_helper_aux {
    struct dpif *dpif;          /* Datapath that executes the helped actions. */
    int error;                  /* Error from the most recent helped
                                 * dpif_execute() call, 0 on success. */
};
1074
/* This is called for actions that need the context of the datapath to be
 * meaningful.  Invoked by odp_execute_actions() (see
 * dpif_execute_with_help()); 'aux_' is a struct dpif_execute_helper_aux. */
static void
dpif_execute_helper_cb(void *aux_, struct dp_packet **packets, int cnt,
                       const struct nlattr *action, bool may_steal OVS_UNUSED)
{
    struct dpif_execute_helper_aux *aux = aux_;
    int type = nl_attr_type(action);
    struct dp_packet *packet = *packets;

    /* The helper path processes exactly one packet per invocation. */
    ovs_assert(cnt == 1);

    switch ((enum ovs_action_attr)type) {
    /* These action types require the datapath's help: re-submit them to the
     * dpif as a single-action execute request. */
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC: {
        struct dpif_execute execute;
        struct ofpbuf execute_actions;
        uint64_t stub[256 / 8];    /* Stack storage for the common case. */
        struct pkt_metadata *md = &packet->md;

        if (md->tunnel.ip_dst) {
            /* The Linux kernel datapath throws away the tunnel information
             * that we supply as metadata.  We have to use a "set" action to
             * supply it. */
            ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
            odp_put_tunnel_action(&md->tunnel, &execute_actions);
            ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));

            execute.actions = execute_actions.data;
            execute.actions_len = execute_actions.size;
        } else {
            /* No tunnel metadata: execute just this one action in place. */
            execute.actions = action;
            execute.actions_len = NLA_ALIGN(action->nla_len);
        }

        execute.packet = packet;
        execute.needs_help = false;
        execute.probe = false;
        /* Record the outcome in 'aux'; dpif_execute_with_help() reads it
         * after odp_execute_actions() returns. */
        aux->error = dpif_execute(aux->dpif, &execute);
        log_execute_message(aux->dpif, &execute, true, aux->error);

        if (md->tunnel.ip_dst) {
            /* Only initialized on the tunnel branch above. */
            ofpbuf_uninit(&execute_actions);
        }
        break;
    }

    /* Every other action type is executed directly by
     * odp_execute_actions() and must never reach this callback. */
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
}
1138
1139 /* Executes 'execute' by performing most of the actions in userspace and
1140 * passing the fully constructed packets to 'dpif' for output and userspace
1141 * actions.
1142 *
1143 * This helps with actions that a given 'dpif' doesn't implement directly. */
1144 static int
1145 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1146 {
1147 struct dpif_execute_helper_aux aux = {dpif, 0};
1148 struct dp_packet *pp;
1149
1150 COVERAGE_INC(dpif_execute_with_help);
1151
1152 pp = execute->packet;
1153 odp_execute_actions(&aux, &pp, 1, false, execute->actions,
1154 execute->actions_len, dpif_execute_helper_cb);
1155 return aux.error;
1156 }
1157
1158 /* Returns true if the datapath needs help executing 'execute'. */
1159 static bool
1160 dpif_execute_needs_help(const struct dpif_execute *execute)
1161 {
1162 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1163 }
1164
1165 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1166 int
1167 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1168 {
1169 if (execute->actions_len) {
1170 struct dpif_op *opp;
1171 struct dpif_op op;
1172
1173 op.type = DPIF_OP_EXECUTE;
1174 op.u.execute = *execute;
1175
1176 opp = &op;
1177 dpif_operate(dpif, &opp, 1);
1178
1179 return op.error;
1180 } else {
1181 return 0;
1182 }
1183 }
1184
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
 * which they are specified.  Places each operation's results in the "output"
 * members documented in comments, and 0 in the 'error' member on success or a
 * positive errno on failure. */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
    while (n_ops > 0) {
        size_t chunk;

        /* Count 'chunk', the number of ops that can be executed without
         * needing any help.  Ops that need help should be rare, so we
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
        for (chunk = 0; chunk < n_ops; chunk++) {
            struct dpif_op *op = ops[chunk];

            if (op->type == DPIF_OP_EXECUTE
                && dpif_execute_needs_help(&op->u.execute)) {
                break;
            }
        }

        if (chunk) {
            /* Execute a chunk full of ops that the dpif provider can
             * handle itself, without help. */
            size_t i;

            dpif->dpif_class->operate(dpif, ops, chunk);

            /* Post-process each completed op: bump coverage counters, log,
             * and zero out any caller-provided stats on failure so callers
             * never read stale statistics. */
            for (i = 0; i < chunk; i++) {
                struct dpif_op *op = ops[i];
                int error = op->error;

                switch (op->type) {
                case DPIF_OP_FLOW_PUT: {
                    struct dpif_flow_put *put = &op->u.flow_put;

                    COVERAGE_INC(dpif_flow_put);
                    log_flow_put_message(dpif, put, error);
                    if (error && put->stats) {
                        memset(put->stats, 0, sizeof *put->stats);
                    }
                    break;
                }

                case DPIF_OP_FLOW_GET: {
                    struct dpif_flow_get *get = &op->u.flow_get;

                    COVERAGE_INC(dpif_flow_get);
                    /* Clear the output flow before logging so a failed get
                     * never exposes uninitialized data. */
                    if (error) {
                        memset(get->flow, 0, sizeof *get->flow);
                    }
                    log_flow_get_message(dpif, get, error);

                    break;
                }

                case DPIF_OP_FLOW_DEL: {
                    struct dpif_flow_del *del = &op->u.flow_del;

                    COVERAGE_INC(dpif_flow_del);
                    log_flow_del_message(dpif, del, error);
                    if (error && del->stats) {
                        memset(del->stats, 0, sizeof *del->stats);
                    }
                    break;
                }

                case DPIF_OP_EXECUTE:
                    COVERAGE_INC(dpif_execute);
                    log_execute_message(dpif, &op->u.execute, false, error);
                    break;
                }
            }

            /* Advance past the processed chunk and loop for the rest. */
            ops += chunk;
            n_ops -= chunk;
        } else {
            /* Help the dpif provider to execute one op.  chunk == 0 means
             * ops[0] is an execute that needs help, so hand exactly that one
             * to the userspace helper path and continue with the rest. */
            struct dpif_op *op = ops[0];

            COVERAGE_INC(dpif_execute);
            op->error = dpif_execute_with_help(dpif, &op->u.execute);
            ops++;
            n_ops--;
        }
    }
}
1273
1274 /* Returns a string that represents 'type', for use in log messages. */
1275 const char *
1276 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1277 {
1278 switch (type) {
1279 case DPIF_UC_MISS: return "miss";
1280 case DPIF_UC_ACTION: return "action";
1281 case DPIF_N_UC_TYPES: default: return "<unknown>";
1282 }
1283 }
1284
1285 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1286 * if successful, otherwise a positive errno value.
1287 *
1288 * Turning packet receive off and then back on may change the Netlink PID
1289 * assignments returned by dpif_port_get_pid(). If the client does this, it
1290 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1291 * using the new PID assignment. */
1292 int
1293 dpif_recv_set(struct dpif *dpif, bool enable)
1294 {
1295 int error = 0;
1296
1297 if (dpif->dpif_class->recv_set) {
1298 error = dpif->dpif_class->recv_set(dpif, enable);
1299 log_operation(dpif, "recv_set", error);
1300 }
1301 return error;
1302 }
1303
1304 /* Refreshes the poll loops and Netlink sockets associated to each port,
1305 * when the number of upcall handlers (upcall receiving thread) is changed
1306 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1307 * recv_set().
1308 *
1309 * Since multiple upcall handlers can read upcalls simultaneously from
1310 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1311 * handler. So, handlers_set() is responsible for the following tasks:
1312 *
1313 * When receiving upcall is enabled, extends or creates the
1314 * configuration to support:
1315 *
1316 * - 'n_handlers' Netlink sockets for each port.
1317 *
1318 * - 'n_handlers' poll loops, one for each upcall handler.
1319 *
1320 * - registering the Netlink sockets for the same upcall handler to
1321 * the corresponding poll loop.
1322 *
1323 * Returns 0 if successful, otherwise a positive errno value. */
1324 int
1325 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1326 {
1327 int error = 0;
1328
1329 if (dpif->dpif_class->handlers_set) {
1330 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1331 log_operation(dpif, "handlers_set", error);
1332 }
1333 return error;
1334 }
1335
1336 void
1337 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1338 {
1339 if (dpif->dpif_class->register_upcall_cb) {
1340 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1341 }
1342 }
1343
1344 void
1345 dpif_enable_upcall(struct dpif *dpif)
1346 {
1347 if (dpif->dpif_class->enable_upcall) {
1348 dpif->dpif_class->enable_upcall(dpif);
1349 }
1350 }
1351
1352 void
1353 dpif_disable_upcall(struct dpif *dpif)
1354 {
1355 if (dpif->dpif_class->disable_upcall) {
1356 dpif->dpif_class->disable_upcall(dpif);
1357 }
1358 }
1359
1360 void
1361 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1362 {
1363 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1364 struct ds flow;
1365 char *packet;
1366
1367 packet = ofp_packet_to_string(dp_packet_data(&upcall->packet),
1368 dp_packet_size(&upcall->packet));
1369
1370 ds_init(&flow);
1371 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1372
1373 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1374 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1375 ds_cstr(&flow), packet);
1376
1377 ds_destroy(&flow);
1378 free(packet);
1379 }
1380 }
1381
1382 /* If 'dpif' creates its own I/O polling threads, refreshes poll threads
1383 * configuration. */
1384 int
1385 dpif_poll_threads_set(struct dpif *dpif, unsigned int n_rxqs,
1386 const char *cmask)
1387 {
1388 int error = 0;
1389
1390 if (dpif->dpif_class->poll_threads_set) {
1391 error = dpif->dpif_class->poll_threads_set(dpif, n_rxqs, cmask);
1392 if (error) {
1393 log_operation(dpif, "poll_threads_set", error);
1394 }
1395 }
1396
1397 return error;
1398 }
1399
1400 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1401 * there can be multiple poll loops, 'handler_id' is needed as index to
1402 * identify the corresponding poll loop. If successful, stores the upcall
1403 * into '*upcall', using 'buf' for storage. Should only be called if
1404 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1405 *
1406 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1407 * 'buf', so their memory cannot be freed separately from 'buf'.
1408 *
1409 * The caller owns the data of 'upcall->packet' and may modify it. If
1410 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1411 * will be reallocated. This requires the data of 'upcall->packet' to be
1412 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1413 * when an error is returned, the 'upcall->packet' may be uninitialized
1414 * and should not be released.
1415 *
1416 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1417 * if no upcall is immediately available. */
1418 int
1419 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1420 struct ofpbuf *buf)
1421 {
1422 int error = EAGAIN;
1423
1424 if (dpif->dpif_class->recv) {
1425 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1426 if (!error) {
1427 dpif_print_packet(dpif, upcall);
1428 } else if (error != EAGAIN) {
1429 log_operation(dpif, "recv", error);
1430 }
1431 }
1432 return error;
1433 }
1434
1435 /* Discards all messages that would otherwise be received by dpif_recv() on
1436 * 'dpif'. */
1437 void
1438 dpif_recv_purge(struct dpif *dpif)
1439 {
1440 COVERAGE_INC(dpif_purge);
1441 if (dpif->dpif_class->recv_purge) {
1442 dpif->dpif_class->recv_purge(dpif);
1443 }
1444 }
1445
1446 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1447 * 'dpif' has a message queued to be received with the recv member
1448 * function. Since there can be multiple poll loops, 'handler_id' is
1449 * needed as index to identify the corresponding poll loop. */
1450 void
1451 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1452 {
1453 if (dpif->dpif_class->recv_wait) {
1454 dpif->dpif_class->recv_wait(dpif, handler_id);
1455 }
1456 }
1457
1458 /*
1459 * Return the datapath version. Caller is responsible for freeing
1460 * the string.
1461 */
1462 char *
1463 dpif_get_dp_version(const struct dpif *dpif)
1464 {
1465 char *version = NULL;
1466
1467 if (dpif->dpif_class->get_datapath_version) {
1468 version = dpif->dpif_class->get_datapath_version();
1469 }
1470
1471 return version;
1472 }
1473
1474 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1475 * and '*engine_id', respectively. */
1476 void
1477 dpif_get_netflow_ids(const struct dpif *dpif,
1478 uint8_t *engine_type, uint8_t *engine_id)
1479 {
1480 *engine_type = dpif->netflow_engine_type;
1481 *engine_id = dpif->netflow_engine_id;
1482 }
1483
1484 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1485 * value used for setting packet priority.
1486 * On success, returns 0 and stores the priority into '*priority'.
1487 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1488 int
1489 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1490 uint32_t *priority)
1491 {
1492 int error = (dpif->dpif_class->queue_to_priority
1493 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1494 priority)
1495 : EOPNOTSUPP);
1496 if (error) {
1497 *priority = 0;
1498 }
1499 log_operation(dpif, "queue_to_priority", error);
1500 return error;
1501 }
1502 \f
1503 void
1504 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1505 const char *name,
1506 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1507 {
1508 dpif->dpif_class = dpif_class;
1509 dpif->base_name = xstrdup(name);
1510 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1511 dpif->netflow_engine_type = netflow_engine_type;
1512 dpif->netflow_engine_id = netflow_engine_id;
1513 }
1514
/* Undoes the results of initialization.
 *
 * Normally this function only needs to be called from dpif_close().
 * However, it may be called by providers due to an error on opening
 * that occurs after initialization.  It this case dpif_close() would
 * never be called. */
void
dpif_uninit(struct dpif *dpif, bool close)
{
    /* Save the name pointers before calling close().  NOTE(review): this
     * ordering suggests the provider's close() releases the structure that
     * contains 'dpif', so 'dpif' must not be dereferenced afterward —
     * confirm against the providers' close() implementations. */
    char *base_name = dpif->base_name;
    char *full_name = dpif->full_name;

    if (close) {
        dpif->dpif_class->close(dpif);
    }

    /* Free the copies made by dpif_init(). */
    free(base_name);
    free(full_name);
}
1534 \f
1535 static void
1536 log_operation(const struct dpif *dpif, const char *operation, int error)
1537 {
1538 if (!error) {
1539 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1540 } else if (ofperr_is_valid(error)) {
1541 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1542 dpif_name(dpif), operation, ofperr_get_name(error));
1543 } else {
1544 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1545 dpif_name(dpif), operation, ovs_strerror(error));
1546 }
1547 }
1548
1549 static enum vlog_level
1550 flow_message_log_level(int error)
1551 {
1552 /* If flows arrive in a batch, userspace may push down multiple
1553 * unique flow definitions that overlap when wildcards are applied.
1554 * Kernels that support flow wildcarding will reject these flows as
1555 * duplicates (EEXIST), so lower the log level to debug for these
1556 * types of messages. */
1557 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1558 }
1559
1560 static bool
1561 should_log_flow_message(int error)
1562 {
1563 return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
1564 error ? &error_rl : &dpmsg_rl);
1565 }
1566
/* Formats and logs one message describing flow 'operation' on 'dpif', which
 * completed with 'error' (0 on success).  The message includes the flow key
 * and mask, and optionally the UFID, statistics, and actions; 'ufid',
 * 'stats', and 'actions' may each be null to omit that piece.  The log level
 * comes from flow_message_log_level(). */
static void
log_flow_message(const struct dpif *dpif, int error, const char *operation,
                 const struct nlattr *key, size_t key_len,
                 const struct nlattr *mask, size_t mask_len,
                 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
                 const struct nlattr *actions, size_t actions_len)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s: ", dpif_name(dpif));
    if (error) {
        /* Produces e.g. "failed to put (File exists) ...". */
        ds_put_cstr(&ds, "failed to ");
    }
    ds_put_format(&ds, "%s ", operation);
    if (error) {
        ds_put_format(&ds, "(%s) ", ovs_strerror(error));
    }
    if (ufid) {
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
    }
    odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
    if (stats) {
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(stats, &ds);
    }
    if (actions || actions_len) {
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len);
    }
    vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
1599
1600 static void
1601 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1602 int error)
1603 {
1604 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
1605 struct ds s;
1606
1607 ds_init(&s);
1608 ds_put_cstr(&s, "put");
1609 if (put->flags & DPIF_FP_CREATE) {
1610 ds_put_cstr(&s, "[create]");
1611 }
1612 if (put->flags & DPIF_FP_MODIFY) {
1613 ds_put_cstr(&s, "[modify]");
1614 }
1615 if (put->flags & DPIF_FP_ZERO_STATS) {
1616 ds_put_cstr(&s, "[zero]");
1617 }
1618 log_flow_message(dpif, error, ds_cstr(&s),
1619 put->key, put->key_len, put->mask, put->mask_len,
1620 put->ufid, put->stats, put->actions,
1621 put->actions_len);
1622 ds_destroy(&s);
1623 }
1624 }
1625
1626 static void
1627 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1628 int error)
1629 {
1630 if (should_log_flow_message(error)) {
1631 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1632 NULL, 0, del->ufid, !error ? del->stats : NULL,
1633 NULL, 0);
1634 }
1635 }
1636
/* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
 * (0 for success).  'subexecute' should be true if the execution is a result
 * of breaking down a larger execution that needed help, false otherwise.
 *
 *
 * XXX In theory, the log message could be deceptive because this function is
 * called after the dpif_provider's '->execute' function, which is allowed to
 * modify execute->packet and execute->md.  In practice, though:
 *
 *     - dpif-netlink doesn't modify execute->packet or execute->md.
 *
 *     - dpif-netdev does modify them but it is less likely to have problems
 *       because it is built into ovs-vswitchd and cannot have version skew,
 *       etc.
 *
 * It would still be better to avoid the potential problem.  I don't know of a
 * good way to do that, though, that isn't expensive. */
static void
log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
                    bool subexecute, int error)
{
    /* Rate-limit check first (warnings on error, debug otherwise); probe
     * executions are internal and never logged. */
    if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
        && !execute->probe) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        char *packet;

        packet = ofp_packet_to_string(dp_packet_data(execute->packet),
                                      dp_packet_size(execute->packet));
        /* "sub-" marks a helped sub-execution, "super-" an execution that
         * will itself be broken down for help. */
        ds_put_format(&ds, "%s: %sexecute ",
                      dpif_name(dpif),
                      (subexecute ? "sub-"
                       : dpif_execute_needs_help(execute) ? "super-"
                       : ""));
        format_odp_actions(&ds, execute->actions, execute->actions_len);
        if (error) {
            ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
        }
        ds_put_format(&ds, " on packet %s", packet);
        vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
        free(packet);
    }
}
1680
1681 static void
1682 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1683 int error)
1684 {
1685 if (should_log_flow_message(error)) {
1686 log_flow_message(dpif, error, "flow_get",
1687 get->key, get->key_len,
1688 get->flow->mask, get->flow->mask_len,
1689 get->ufid, &get->flow->stats,
1690 get->flow->actions, get->flow->actions_len);
1691 }
1692 }
1693
1694 bool
1695 dpif_supports_tnl_push_pop(const struct dpif *dpif)
1696 {
1697 return !strcmp(dpif->dpif_class->type, "netdev") ||
1698 !strcmp(dpif->dpif_class->type, "dummy");
1699 }