1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dynamic-string.h"
30 #include "flow.h"
31 #include "netdev.h"
32 #include "netlink.h"
33 #include "odp-execute.h"
34 #include "odp-util.h"
35 #include "ofp-errors.h"
36 #include "ofp-print.h"
37 #include "ofp-util.h"
38 #include "ofpbuf.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
42 #include "seq.h"
43 #include "shash.h"
44 #include "sset.h"
45 #include "timeval.h"
46 #include "tnl-arp-cache.h"
47 #include "tnl-ports.h"
48 #include "util.h"
49 #include "uuid.h"
50 #include "valgrind.h"
51 #include "openvswitch/vlog.h"
52
53 VLOG_DEFINE_THIS_MODULE(dpif);
54
55 COVERAGE_DEFINE(dpif_destroy);
56 COVERAGE_DEFINE(dpif_port_add);
57 COVERAGE_DEFINE(dpif_port_del);
58 COVERAGE_DEFINE(dpif_flow_flush);
59 COVERAGE_DEFINE(dpif_flow_get);
60 COVERAGE_DEFINE(dpif_flow_put);
61 COVERAGE_DEFINE(dpif_flow_del);
62 COVERAGE_DEFINE(dpif_execute);
63 COVERAGE_DEFINE(dpif_purge);
64 COVERAGE_DEFINE(dpif_execute_with_help);
65
66 static const struct dpif_class *base_dpif_classes[] = {
67 #if defined(__linux__) || defined(_WIN32)
68 &dpif_netlink_class,
69 #endif
70 &dpif_netdev_class,
71 };
72
73 struct registered_dpif_class {
74 const struct dpif_class *dpif_class;
75 int refcount;
76 };
77 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
78 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
79
80 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
81 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
82
83 /* Rate limit for individual messages going to or from the datapath, output at
84 * DBG level. This is very high because, if these are enabled, it is because
85 * we really need to see them. */
86 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
87
88 /* Not really much point in logging many dpif errors. */
89 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
90
91 static void log_flow_message(const struct dpif *dpif, int error,
92 const char *operation,
93 const struct nlattr *key, size_t key_len,
94 const struct nlattr *mask, size_t mask_len,
95 const ovs_u128 *ufid,
96 const struct dpif_flow_stats *stats,
97 const struct nlattr *actions, size_t actions_len);
98 static void log_operation(const struct dpif *, const char *operation,
99 int error);
100 static bool should_log_flow_message(int error);
101 static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
102 int error);
103 static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
104 int error);
105 static void log_execute_message(struct dpif *, const struct dpif_execute *,
106 bool subexecute, int error);
107 static void log_flow_get_message(const struct dpif *,
108 const struct dpif_flow_get *, int error);
109
110 /* Incremented whenever tnl route, arp, etc changes. */
111 struct seq *tnl_conf_seq;
112
113 static void
114 dp_initialize(void)
115 {
116 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
117
118 if (ovsthread_once_start(&once)) {
119 int i;
120
121 tnl_conf_seq = seq_create();
122 dpctl_unixctl_register();
123 tnl_port_map_init();
124 tnl_arp_cache_init();
125 route_table_init();
126
127 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
128 dp_register_provider(base_dpif_classes[i]);
129 }
130
131 ovsthread_once_done(&once);
132 }
133 }
134
135 static int
136 dp_register_provider__(const struct dpif_class *new_class)
137 {
138 struct registered_dpif_class *registered_class;
139 int error;
140
141 if (sset_contains(&dpif_blacklist, new_class->type)) {
142 VLOG_DBG("attempted to register blacklisted provider: %s",
143 new_class->type);
144 return EINVAL;
145 }
146
147 if (shash_find(&dpif_classes, new_class->type)) {
148 VLOG_WARN("attempted to register duplicate datapath provider: %s",
149 new_class->type);
150 return EEXIST;
151 }
152
153 error = new_class->init ? new_class->init() : 0;
154 if (error) {
155 VLOG_WARN("failed to initialize %s datapath class: %s",
156 new_class->type, ovs_strerror(error));
157 return error;
158 }
159
160 registered_class = xmalloc(sizeof *registered_class);
161 registered_class->dpif_class = new_class;
162 registered_class->refcount = 0;
163
164 shash_add(&dpif_classes, new_class->type, registered_class);
165
166 return 0;
167 }
168
169 /* Registers a new datapath provider. After successful registration, new
170 * datapaths of that type can be opened using dpif_open(). */
171 int
172 dp_register_provider(const struct dpif_class *new_class)
173 {
174 int error;
175
176 ovs_mutex_lock(&dpif_mutex);
177 error = dp_register_provider__(new_class);
178 ovs_mutex_unlock(&dpif_mutex);
179
180 return error;
181 }
182
183 /* Unregisters a datapath provider. 'type' must have been previously
184 * registered and not currently be in use by any dpifs. After unregistration
185 * new datapaths of that type cannot be opened using dpif_open(). */
186 static int
187 dp_unregister_provider__(const char *type)
188 {
189 struct shash_node *node;
190 struct registered_dpif_class *registered_class;
191
192 node = shash_find(&dpif_classes, type);
193 if (!node) {
194 VLOG_WARN("attempted to unregister a datapath provider that is not "
195 "registered: %s", type);
196 return EAFNOSUPPORT;
197 }
198
199 registered_class = node->data;
200 if (registered_class->refcount) {
201 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
202 return EBUSY;
203 }
204
205 shash_delete(&dpif_classes, node);
206 free(registered_class);
207
208 return 0;
209 }
210
211 /* Unregisters a datapath provider. 'type' must have been previously
212 * registered and not currently be in use by any dpifs. After unregistration
213 * new datapaths of that type cannot be opened using dpif_open(). */
214 int
215 dp_unregister_provider(const char *type)
216 {
217 int error;
218
219 dp_initialize();
220
221 ovs_mutex_lock(&dpif_mutex);
222 error = dp_unregister_provider__(type);
223 ovs_mutex_unlock(&dpif_mutex);
224
225 return error;
226 }
227
228 /* Blacklists a provider. Causes future calls of dp_register_provider() with
229 * a dpif_class which implements 'type' to fail. */
230 void
231 dp_blacklist_provider(const char *type)
232 {
233 ovs_mutex_lock(&dpif_mutex);
234 sset_add(&dpif_blacklist, type);
235 ovs_mutex_unlock(&dpif_mutex);
236 }
237
238 /* Adds the types of all currently registered datapath providers to 'types'.
239 * The caller must first initialize the sset. */
240 void
241 dp_enumerate_types(struct sset *types)
242 {
243 struct shash_node *node;
244
245 dp_initialize();
246
247 ovs_mutex_lock(&dpif_mutex);
248 SHASH_FOR_EACH(node, &dpif_classes) {
249 const struct registered_dpif_class *registered_class = node->data;
250 sset_add(types, registered_class->dpif_class->type);
251 }
252 ovs_mutex_unlock(&dpif_mutex);
253 }
254
255 static void
256 dp_class_unref(struct registered_dpif_class *rc)
257 {
258 ovs_mutex_lock(&dpif_mutex);
259 ovs_assert(rc->refcount);
260 rc->refcount--;
261 ovs_mutex_unlock(&dpif_mutex);
262 }
263
264 static struct registered_dpif_class *
265 dp_class_lookup(const char *type)
266 {
267 struct registered_dpif_class *rc;
268
269 ovs_mutex_lock(&dpif_mutex);
270 rc = shash_find_data(&dpif_classes, type);
271 if (rc) {
272 rc->refcount++;
273 }
274 ovs_mutex_unlock(&dpif_mutex);
275
276 return rc;
277 }
278
279 /* Clears 'names' and enumerates the names of all known created datapaths with
280 * the given 'type'. The caller must first initialize the sset. Returns 0 if
281 * successful, otherwise a positive errno value.
282 *
283 * Some kinds of datapaths might not be practically enumerable. This is not
284 * considered an error. */
285 int
286 dp_enumerate_names(const char *type, struct sset *names)
287 {
288 struct registered_dpif_class *registered_class;
289 const struct dpif_class *dpif_class;
290 int error;
291
292 dp_initialize();
293 sset_clear(names);
294
295 registered_class = dp_class_lookup(type);
296 if (!registered_class) {
297 VLOG_WARN("could not enumerate unknown type: %s", type);
298 return EAFNOSUPPORT;
299 }
300
301 dpif_class = registered_class->dpif_class;
302 error = (dpif_class->enumerate
303 ? dpif_class->enumerate(names, dpif_class)
304 : 0);
305 if (error) {
306 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
307 ovs_strerror(error));
308 }
309 dp_class_unref(registered_class);
310
311 return error;
312 }
313
314 /* Parses 'datapath_name_', which is of the form [type@]name into its
315 * component pieces. 'name' and 'type' must be freed by the caller.
316 *
317 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
318 void
319 dp_parse_name(const char *datapath_name_, char **name, char **type)
320 {
321 char *datapath_name = xstrdup(datapath_name_);
322 char *separator;
323
324 separator = strchr(datapath_name, '@');
325 if (separator) {
326 *separator = '\0';
327 *type = datapath_name;
328 *name = xstrdup(dpif_normalize_type(separator + 1));
329 } else {
330 *name = datapath_name;
331 *type = xstrdup(dpif_normalize_type(NULL));
332 }
333 }
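
/* Illustrative usage sketch (not part of the upstream file): splitting a
 * datapath name with dp_parse_name().  The "netdev@br0" string is only an
 * example; both output strings belong to the caller. */
static void OVS_UNUSED
example_parse_datapath_name(void)
{
    char *name, *type;

    dp_parse_name("netdev@br0", &name, &type);
    /* 'type' is now "netdev" and 'name' is "br0"; a bare "br0" would have
     * produced the default type "system". */
    free(name);
    free(type);
}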
334
335 static int
336 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
337 {
338 struct dpif *dpif = NULL;
339 int error;
340 struct registered_dpif_class *registered_class;
341
342 dp_initialize();
343
344 type = dpif_normalize_type(type);
345 registered_class = dp_class_lookup(type);
346 if (!registered_class) {
347 VLOG_WARN("could not create datapath %s of unknown type %s", name,
348 type);
349 error = EAFNOSUPPORT;
350 goto exit;
351 }
352
353 error = registered_class->dpif_class->open(registered_class->dpif_class,
354 name, create, &dpif);
355 if (!error) {
356 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
357 } else {
358 dp_class_unref(registered_class);
359 }
360
361 exit:
362 *dpifp = error ? NULL : dpif;
363 return error;
364 }
365
366 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
367 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
368 * the empty string to specify the default system type. Returns 0 if
369 * successful, otherwise a positive errno value. On success stores a pointer
370 * to the datapath in '*dpifp', otherwise a null pointer. */
371 int
372 dpif_open(const char *name, const char *type, struct dpif **dpifp)
373 {
374 return do_open(name, type, false, dpifp);
375 }
376
377 /* Tries to create and open a new datapath with the given 'name' and 'type'.
378 * 'type' may be either NULL or the empty string to specify the default system
379 * type. Will fail if a datapath with 'name' and 'type' already exists.
380 * Returns 0 if successful, otherwise a positive errno value. On success
381 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
382 int
383 dpif_create(const char *name, const char *type, struct dpif **dpifp)
384 {
385 return do_open(name, type, true, dpifp);
386 }
387
388 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
389 * does not exist. 'type' may be either NULL or the empty string to specify
390 * the default system type. Returns 0 if successful, otherwise a positive
391 * errno value. On success stores a pointer to the datapath in '*dpifp',
392 * otherwise a null pointer. */
393 int
394 dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
395 {
396 int error;
397
398 error = dpif_create(name, type, dpifp);
399 if (error == EEXIST || error == EBUSY) {
400 error = dpif_open(name, type, dpifp);
401 if (error) {
402 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
403 name, ovs_strerror(error));
404 }
405 } else if (error) {
406 VLOG_WARN("failed to create datapath %s: %s",
407 name, ovs_strerror(error));
408 }
409 return error;
410 }
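
/* Illustrative usage sketch (not part of the upstream file): the usual
 * open-or-create pattern from a client's point of view.  The name "br0" and
 * the NULL (default "system") type are assumptions for illustration. */
static void OVS_UNUSED
example_open_or_create_datapath(void)
{
    struct dpif *dpif;
    int error;

    error = dpif_create_and_open("br0", NULL, &dpif);
    if (error) {
        VLOG_ERR("br0: could not open datapath (%s)", ovs_strerror(error));
        return;
    }

    /* ... use 'dpif' ... */

    dpif_close(dpif);
}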
411
412 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
413 * itself; call dpif_delete() first, instead, if that is desirable. */
414 void
415 dpif_close(struct dpif *dpif)
416 {
417 if (dpif) {
418 struct registered_dpif_class *rc;
419
420 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
421 dpif_uninit(dpif, true);
422 dp_class_unref(rc);
423 }
424 }
425
426 /* Performs periodic work needed by 'dpif'. */
427 bool
428 dpif_run(struct dpif *dpif)
429 {
430 if (dpif->dpif_class->run) {
431 return dpif->dpif_class->run(dpif);
432 }
433 return false;
434 }
435
436 /* Arranges for poll_block() to wake up when dp_run() needs to be called for
437 * 'dpif'. */
438 void
439 dpif_wait(struct dpif *dpif)
440 {
441 if (dpif->dpif_class->wait) {
442 dpif->dpif_class->wait(dpif);
443 }
444 }
445
446 /* Returns the name of datapath 'dpif' prefixed with the type
447 * (for use in log messages). */
448 const char *
449 dpif_name(const struct dpif *dpif)
450 {
451 return dpif->full_name;
452 }
453
454 /* Returns the name of datapath 'dpif' without the type
455 * (for use in device names). */
456 const char *
457 dpif_base_name(const struct dpif *dpif)
458 {
459 return dpif->base_name;
460 }
461
462 /* Returns the type of datapath 'dpif'. */
463 const char *
464 dpif_type(const struct dpif *dpif)
465 {
466 return dpif->dpif_class->type;
467 }
468
469 /* Returns the fully spelled out name for the given datapath 'type'.
470 *
471  * Normalized type strings can be compared with strcmp().  Unnormalized type
472  * strings might be the same even if they have different spellings. */
473 const char *
474 dpif_normalize_type(const char *type)
475 {
476 return type && type[0] ? type : "system";
477 }
478
479 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
480 * ports. After calling this function, it does not make sense to pass 'dpif'
481 * to any functions other than dpif_name() or dpif_close(). */
482 int
483 dpif_delete(struct dpif *dpif)
484 {
485 int error;
486
487 COVERAGE_INC(dpif_destroy);
488
489 error = dpif->dpif_class->destroy(dpif);
490 log_operation(dpif, "delete", error);
491 return error;
492 }
493
494 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
495 * otherwise a positive errno value. */
496 int
497 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
498 {
499 int error = dpif->dpif_class->get_stats(dpif, stats);
500 if (error) {
501 memset(stats, 0, sizeof *stats);
502 }
503 log_operation(dpif, "get_stats", error);
504 return error;
505 }
506
507 const char *
508 dpif_port_open_type(const char *datapath_type, const char *port_type)
509 {
510 struct registered_dpif_class *rc;
511
512 datapath_type = dpif_normalize_type(datapath_type);
513
514 ovs_mutex_lock(&dpif_mutex);
515 rc = shash_find_data(&dpif_classes, datapath_type);
516 if (rc && rc->dpif_class->port_open_type) {
517 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
518 }
519 ovs_mutex_unlock(&dpif_mutex);
520
521 return port_type;
522 }
523
524 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
525 * non-null and its value is not ODPP_NONE, then attempts to use the
526 * value as the port number.
527 *
528 * If successful, returns 0 and sets '*port_nop' to the new port's port
529 * number (if 'port_nop' is non-null). On failure, returns a positive
530 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
531 * non-null). */
532 int
533 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
534 {
535 const char *netdev_name = netdev_get_name(netdev);
536 odp_port_t port_no = ODPP_NONE;
537 int error;
538
539 COVERAGE_INC(dpif_port_add);
540
541 if (port_nop) {
542 port_no = *port_nop;
543 }
544
545 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
546 if (!error) {
547 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
548 dpif_name(dpif), netdev_name, port_no);
549 } else {
550 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
551 dpif_name(dpif), netdev_name, ovs_strerror(error));
552 port_no = ODPP_NONE;
553 }
554 if (port_nop) {
555 *port_nop = port_no;
556 }
557 return error;
558 }
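
/* Illustrative usage sketch (not part of the upstream file): adding a device
 * to a datapath and letting the datapath choose the port number.  The device
 * name "eth0" and the "system" netdev type are assumptions. */
static void OVS_UNUSED
example_add_port(struct dpif *dpif)
{
    struct netdev *netdev;
    odp_port_t port_no = ODPP_NONE; /* Let the datapath pick the number. */

    if (!netdev_open("eth0", "system", &netdev)) {
        if (!dpif_port_add(dpif, netdev, &port_no)) {
            VLOG_INFO("eth0 added as port %"PRIu32, port_no);
        }
        netdev_close(netdev);
    }
}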
559
560 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
561 * otherwise a positive errno value. */
562 int
563 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
564 {
565 int error;
566
567 COVERAGE_INC(dpif_port_del);
568
569 error = dpif->dpif_class->port_del(dpif, port_no);
570 if (!error) {
571 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
572 dpif_name(dpif), port_no);
573 } else {
574 log_operation(dpif, "port_del", error);
575 }
576 return error;
577 }
578
579 /* Makes a deep copy of 'src' into 'dst'. */
580 void
581 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
582 {
583 dst->name = xstrdup(src->name);
584 dst->type = xstrdup(src->type);
585 dst->port_no = src->port_no;
586 }
587
588 /* Frees memory allocated to members of 'dpif_port'.
589 *
590 * Do not call this function on a dpif_port obtained from
591 * dpif_port_dump_next(): that function retains ownership of the data in the
592 * dpif_port. */
593 void
594 dpif_port_destroy(struct dpif_port *dpif_port)
595 {
596 free(dpif_port->name);
597 free(dpif_port->type);
598 }
599
600 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
601 * true; otherwise, returns false. */
602 bool
603 dpif_port_exists(const struct dpif *dpif, const char *devname)
604 {
605 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
606 if (error != 0 && error != ENOENT && error != ENODEV) {
607 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
608 dpif_name(dpif), devname, ovs_strerror(error));
609 }
610
611 return !error;
612 }
613
614 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
615 * initializes '*port' appropriately; on failure, returns a positive errno
616 * value.
617 *
618 * The caller owns the data in 'port' and must free it with
619 * dpif_port_destroy() when it is no longer needed. */
620 int
621 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
622 struct dpif_port *port)
623 {
624 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
625 if (!error) {
626 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
627 dpif_name(dpif), port_no, port->name);
628 } else {
629 memset(port, 0, sizeof *port);
630 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
631 dpif_name(dpif), port_no, ovs_strerror(error));
632 }
633 return error;
634 }
635
636 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
637 * initializes '*port' appropriately; on failure, returns a positive errno
638 * value.
639 *
640 * The caller owns the data in 'port' and must free it with
641 * dpif_port_destroy() when it is no longer needed. */
642 int
643 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
644 struct dpif_port *port)
645 {
646 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
647 if (!error) {
648 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
649 dpif_name(dpif), devname, port->port_no);
650 } else {
651 memset(port, 0, sizeof *port);
652
653 /* For ENOENT or ENODEV we use DBG level because the caller is probably
654 * interested in whether 'dpif' actually has a port 'devname', so that
655 * it's not an issue worth logging if it doesn't. Other errors are
656 * uncommon and more likely to indicate a real problem. */
657 VLOG_RL(&error_rl,
658 error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
659 "%s: failed to query port %s: %s",
660 dpif_name(dpif), devname, ovs_strerror(error));
661 }
662 return error;
663 }
664
665 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
666 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
667 * flows whose packets arrived on port 'port_no'. In the case where the
668 * provider allocates multiple Netlink PIDs to a single port, it may use
669 * 'hash' to spread load among them. The caller need not use a particular
670 * hash function; a 5-tuple hash is suitable.
671 *
672 * (The datapath implementation might use some different hash function for
673 * distributing packets received via flow misses among PIDs. This means
674 * that packets received via flow misses might be reordered relative to
675 * packets received via userspace actions. This is not ordinarily a
676 * problem.)
677 *
678 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
679 * allocated to any port, that the client may use for special purposes.
680 *
681 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
682 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
683 * disabled and then re-enabled, so a client that does that must be prepared to
684 * update all of the flows that it installed that contain
685 * OVS_ACTION_ATTR_USERSPACE actions. */
686 uint32_t
687 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
688 {
689 return (dpif->dpif_class->port_get_pid
690 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
691 : 0);
692 }
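
/* Illustrative sketch (not part of the upstream file): embedding the PID
 * returned by dpif_port_get_pid() into an OVS_ACTION_ATTR_USERSPACE action.
 * 'actions' is a caller-provided ofpbuf of odp actions and 'hash' is
 * whatever 5-tuple hash the caller chose. */
static void OVS_UNUSED
example_put_userspace_action(const struct dpif *dpif, odp_port_t in_port,
                             uint32_t hash, struct ofpbuf *actions)
{
    size_t offset = nl_msg_start_nested(actions, OVS_ACTION_ATTR_USERSPACE);

    nl_msg_put_u32(actions, OVS_USERSPACE_ATTR_PID,
                   dpif_port_get_pid(dpif, in_port, hash));
    nl_msg_end_nested(actions, offset);
}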
693
694 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
695 * the port's name into the 'name_size' bytes in 'name', ensuring that the
696 * result is null-terminated. On failure, returns a positive errno value and
697 * makes 'name' the empty string. */
698 int
699 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
700 char *name, size_t name_size)
701 {
702 struct dpif_port port;
703 int error;
704
705 ovs_assert(name_size > 0);
706
707 error = dpif_port_query_by_number(dpif, port_no, &port);
708 if (!error) {
709 ovs_strlcpy(name, port.name, name_size);
710 dpif_port_destroy(&port);
711 } else {
712 *name = '\0';
713 }
714 return error;
715 }
716
717 /* Initializes 'dump' to begin dumping the ports in a dpif.
718 *
719 * This function provides no status indication. An error status for the entire
720 * dump operation is provided when it is completed by calling
721 * dpif_port_dump_done().
722 */
723 void
724 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
725 {
726 dump->dpif = dpif;
727 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
728 log_operation(dpif, "port_dump_start", dump->error);
729 }
730
731 /* Attempts to retrieve another port from 'dump', which must have been
732 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
733 * into 'port' and returns true. On failure, returns false.
734 *
735 * Failure might indicate an actual error or merely that the last port has been
736 * dumped. An error status for the entire dump operation is provided when it
737 * is completed by calling dpif_port_dump_done().
738 *
739 * The dpif owns the data stored in 'port'. It will remain valid until at
740 * least the next time 'dump' is passed to dpif_port_dump_next() or
741 * dpif_port_dump_done(). */
742 bool
743 dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
744 {
745 const struct dpif *dpif = dump->dpif;
746
747 if (dump->error) {
748 return false;
749 }
750
751 dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
752 if (dump->error == EOF) {
753 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
754 } else {
755 log_operation(dpif, "port_dump_next", dump->error);
756 }
757
758 if (dump->error) {
759 dpif->dpif_class->port_dump_done(dpif, dump->state);
760 return false;
761 }
762 return true;
763 }
764
765 /* Completes port table dump operation 'dump', which must have been initialized
766 * with dpif_port_dump_start(). Returns 0 if the dump operation was
767 * error-free, otherwise a positive errno value describing the problem. */
768 int
769 dpif_port_dump_done(struct dpif_port_dump *dump)
770 {
771 const struct dpif *dpif = dump->dpif;
772 if (!dump->error) {
773 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
774 log_operation(dpif, "port_dump_done", dump->error);
775 }
776 return dump->error == EOF ? 0 : dump->error;
777 }
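
/* Illustrative usage sketch (not part of the upstream file): the intended
 * start/next/done pattern for dumping every port in a datapath. */
static void OVS_UNUSED
example_dump_ports(const struct dpif *dpif)
{
    struct dpif_port_dump dump;
    struct dpif_port port;
    int error;

    dpif_port_dump_start(&dump, dpif);
    while (dpif_port_dump_next(&dump, &port)) {
        /* 'port' is owned by the dpif; do not call dpif_port_destroy(). */
        VLOG_INFO("port %"PRIu32": %s (%s)",
                  port.port_no, port.name, port.type);
    }
    error = dpif_port_dump_done(&dump);
    if (error) {
        VLOG_WARN("port dump failed (%s)", ovs_strerror(error));
    }
}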
778
779 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
780 * 'dpif' has changed, this function does one of the following:
781 *
782 * - Stores the name of the device that was added to or deleted from 'dpif' in
783 * '*devnamep' and returns 0. The caller is responsible for freeing
784 * '*devnamep' (with free()) when it no longer needs it.
785 *
786 * - Returns ENOBUFS and sets '*devnamep' to NULL.
787 *
788 * This function may also return 'false positives', where it returns 0 and
789 * '*devnamep' names a device that was not actually added or deleted or it
790 * returns ENOBUFS without any change.
791 *
792 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
793 * return other positive errno values to indicate that something has gone
794 * wrong. */
795 int
796 dpif_port_poll(const struct dpif *dpif, char **devnamep)
797 {
798 int error = dpif->dpif_class->port_poll(dpif, devnamep);
799 if (error) {
800 *devnamep = NULL;
801 }
802 return error;
803 }
804
805 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
806 * value other than EAGAIN. */
807 void
808 dpif_port_poll_wait(const struct dpif *dpif)
809 {
810 dpif->dpif_class->port_poll_wait(dpif);
811 }
812
813 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
814 * arguments must have been initialized through a call to flow_extract().
815 * 'used' is stored into stats->used. */
816 void
817 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
818 long long int used, struct dpif_flow_stats *stats)
819 {
820 stats->tcp_flags = ntohs(flow->tcp_flags);
821 stats->n_bytes = dp_packet_size(packet);
822 stats->n_packets = 1;
823 stats->used = used;
824 }
825
826 /* Appends a human-readable representation of 'stats' to 's'. */
827 void
828 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
829 {
830 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
831 stats->n_packets, stats->n_bytes);
832 if (stats->used) {
833 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
834 } else {
835 ds_put_format(s, "never");
836 }
837 if (stats->tcp_flags) {
838 ds_put_cstr(s, ", flags:");
839 packet_format_tcp_flags(s, stats->tcp_flags);
840 }
841 }
842
843 /* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'. */
844 void
845 dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
846 const void *key, size_t key_len, ovs_u128 *hash)
847 {
848 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
849 static uint32_t secret;
850
851 if (ovsthread_once_start(&once)) {
852 secret = random_uint32();
853 ovsthread_once_done(&once);
854 }
855 hash_bytes128(key, key_len, secret, hash);
856 uuid_set_bits_v4((struct uuid *)hash);
857 }
858
859 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
860 * positive errno value. */
861 int
862 dpif_flow_flush(struct dpif *dpif)
863 {
864 int error;
865
866 COVERAGE_INC(dpif_flow_flush);
867
868 error = dpif->dpif_class->flow_flush(dpif);
869 log_operation(dpif, "flow_flush", error);
870 return error;
871 }
872
873 /* Attempts to install 'key' into the datapath, fetches it, then deletes it.
874 * Returns true if the datapath supported installing 'flow', false otherwise.
875 */
876 bool
877 dpif_probe_feature(struct dpif *dpif, const char *name,
878 const struct ofpbuf *key, const ovs_u128 *ufid)
879 {
880 struct dpif_flow flow;
881 struct ofpbuf reply;
882 uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
883 bool enable_feature = false;
884 int error;
885
886 /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
887 * restarted) at just the right time such that feature probes from the
888 * previous run are still present in the datapath. */
889 error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
890 key->data, key->size, NULL, 0, NULL, 0,
891 ufid, PMD_ID_NULL, NULL);
892 if (error) {
893 if (error != EINVAL) {
894 VLOG_WARN("%s: %s flow probe failed (%s)",
895 dpif_name(dpif), name, ovs_strerror(error));
896 }
897 return false;
898 }
899
900 ofpbuf_use_stack(&reply, &stub, sizeof stub);
901 error = dpif_flow_get(dpif, key->data, key->size, ufid,
902 PMD_ID_NULL, &reply, &flow);
903 if (!error
904 && (!ufid || (flow.ufid_present
905 && ovs_u128_equals(ufid, &flow.ufid)))) {
906 enable_feature = true;
907 }
908
909 error = dpif_flow_del(dpif, key->data, key->size, ufid,
910 PMD_ID_NULL, NULL);
911 if (error) {
912 VLOG_WARN("%s: failed to delete %s feature probe flow",
913 dpif_name(dpif), name);
914 }
915
916 return enable_feature;
917 }
918
919 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
920 int
921 dpif_flow_get(struct dpif *dpif,
922 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
923 const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
924 {
925 struct dpif_op *opp;
926 struct dpif_op op;
927
928 op.type = DPIF_OP_FLOW_GET;
929 op.u.flow_get.key = key;
930 op.u.flow_get.key_len = key_len;
931 op.u.flow_get.ufid = ufid;
932 op.u.flow_get.pmd_id = pmd_id;
933 op.u.flow_get.buffer = buf;
934
935 memset(flow, 0, sizeof *flow);
936 op.u.flow_get.flow = flow;
937 op.u.flow_get.flow->key = key;
938 op.u.flow_get.flow->key_len = key_len;
939
940 opp = &op;
941 dpif_operate(dpif, &opp, 1);
942
943 return op.error;
944 }
945
946 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
947 int
948 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
949 const struct nlattr *key, size_t key_len,
950 const struct nlattr *mask, size_t mask_len,
951 const struct nlattr *actions, size_t actions_len,
952 const ovs_u128 *ufid, const unsigned pmd_id,
953 struct dpif_flow_stats *stats)
954 {
955 struct dpif_op *opp;
956 struct dpif_op op;
957
958 op.type = DPIF_OP_FLOW_PUT;
959 op.u.flow_put.flags = flags;
960 op.u.flow_put.key = key;
961 op.u.flow_put.key_len = key_len;
962 op.u.flow_put.mask = mask;
963 op.u.flow_put.mask_len = mask_len;
964 op.u.flow_put.actions = actions;
965 op.u.flow_put.actions_len = actions_len;
966 op.u.flow_put.ufid = ufid;
967 op.u.flow_put.pmd_id = pmd_id;
968 op.u.flow_put.stats = stats;
969
970 opp = &op;
971 dpif_operate(dpif, &opp, 1);
972
973 return op.error;
974 }
975
976 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
977 int
978 dpif_flow_del(struct dpif *dpif,
979 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
980 const unsigned pmd_id, struct dpif_flow_stats *stats)
981 {
982 struct dpif_op *opp;
983 struct dpif_op op;
984
985 op.type = DPIF_OP_FLOW_DEL;
986 op.u.flow_del.key = key;
987 op.u.flow_del.key_len = key_len;
988 op.u.flow_del.ufid = ufid;
989 op.u.flow_del.pmd_id = pmd_id;
990 op.u.flow_del.stats = stats;
991 op.u.flow_del.terse = false;
992
993 opp = &op;
994 dpif_operate(dpif, &opp, 1);
995
996 return op.error;
997 }
998
999 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1000 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1001 * be returned in the dump. Otherwise, all fields will be returned.
1002 *
1003 * This function always successfully returns a dpif_flow_dump. Error
1004 * reporting is deferred to dpif_flow_dump_destroy(). */
1005 struct dpif_flow_dump *
1006 dpif_flow_dump_create(const struct dpif *dpif, bool terse)
1007 {
1008 return dpif->dpif_class->flow_dump_create(dpif, terse);
1009 }
1010
1011 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1012 * All dpif_flow_dump_thread structures previously created for 'dump' must
1013 * previously have been destroyed.
1014 *
1015 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1016 * value describing the problem. */
1017 int
1018 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1019 {
1020 const struct dpif *dpif = dump->dpif;
1021 int error = dpif->dpif_class->flow_dump_destroy(dump);
1022 log_operation(dpif, "flow_dump_destroy", error);
1023 return error == EOF ? 0 : error;
1024 }
1025
1026 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1027 struct dpif_flow_dump_thread *
1028 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1029 {
1030 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1031 }
1032
1033 /* Releases 'thread'. */
1034 void
1035 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1036 {
1037 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1038 }
1039
1040 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1041 * if and only if no flows remained to be retrieved, otherwise a positive
1042 * number reflecting the number of elements in 'flows[]' that were updated.
1043 * The number of flows returned might be less than 'max_flows' because
1044 * fewer than 'max_flows' remained, because this particular datapath does not
1045 * benefit from batching, or because an error occurred partway through
1046 * retrieval. Thus, the caller should continue calling until a 0 return value,
1047 * even if intermediate return values are less than 'max_flows'.
1048 *
1049 * No error status is immediately provided. An error status for the entire
1050 * dump operation is provided when it is completed by calling
1051 * dpif_flow_dump_destroy().
1052 *
1053 * All of the data stored into 'flows' is owned by the datapath, not by the
1054 * caller, and the caller must not modify or free it. The datapath guarantees
1055 * that it remains accessible and unchanged until the first of:
1056 * - The next call to dpif_flow_dump_next() for 'thread', or
1057 * - The next rcu quiescent period. */
1058 int
1059 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1060 struct dpif_flow *flows, int max_flows)
1061 {
1062 struct dpif *dpif = thread->dpif;
1063 int n;
1064
1065 ovs_assert(max_flows > 0);
1066 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1067 if (n > 0) {
1068 struct dpif_flow *f;
1069
1070 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
1071 log_flow_message(dpif, 0, "flow_dump",
1072 f->key, f->key_len, f->mask, f->mask_len,
1073 &f->ufid, &f->stats, f->actions, f->actions_len);
1074 }
1075 } else {
1076 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1077 }
1078 return n;
1079 }
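
/* Illustrative usage sketch (not part of the upstream file): a
 * single-threaded walk over all flows.  A real client would typically create
 * one dpif_flow_dump_thread per dumping thread. */
static void OVS_UNUSED
example_dump_flows(struct dpif *dpif)
{
    struct dpif_flow_dump *dump = dpif_flow_dump_create(dpif, false);
    struct dpif_flow_dump_thread *thread = dpif_flow_dump_thread_create(dump);
    struct dpif_flow flows[50];
    int error;
    int n;

    while ((n = dpif_flow_dump_next(thread, flows, ARRAY_SIZE(flows))) > 0) {
        int i;

        for (i = 0; i < n; i++) {
            /* flows[i] is owned by the datapath; copy anything that must
             * outlive the next call or the next RCU quiescent period. */
        }
    }

    dpif_flow_dump_thread_destroy(thread);
    error = dpif_flow_dump_destroy(dump);
    if (error) {
        VLOG_WARN("flow dump failed (%s)", ovs_strerror(error));
    }
}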
1080
1081 struct dpif_execute_helper_aux {
1082 struct dpif *dpif;
1083 int error;
1084 };
1085
1086 /* This is called for actions that need the context of the datapath to be
1087 * meaningful. */
1088 static void
1089 dpif_execute_helper_cb(void *aux_, struct dp_packet **packets, int cnt,
1090 const struct nlattr *action, bool may_steal OVS_UNUSED)
1091 {
1092 struct dpif_execute_helper_aux *aux = aux_;
1093 int type = nl_attr_type(action);
1094 struct dp_packet *packet = *packets;
1095
1096 ovs_assert(cnt == 1);
1097
1098 switch ((enum ovs_action_attr)type) {
1099 case OVS_ACTION_ATTR_OUTPUT:
1100 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1101 case OVS_ACTION_ATTR_TUNNEL_POP:
1102 case OVS_ACTION_ATTR_USERSPACE:
1103 case OVS_ACTION_ATTR_RECIRC: {
1104 struct dpif_execute execute;
1105 struct ofpbuf execute_actions;
1106 uint64_t stub[256 / 8];
1107 struct pkt_metadata *md = &packet->md;
1108
1109 if (md->tunnel.ip_dst) {
1110 /* The Linux kernel datapath throws away the tunnel information
1111 * that we supply as metadata. We have to use a "set" action to
1112 * supply it. */
1113 ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
1114 odp_put_tunnel_action(&md->tunnel, &execute_actions);
1115 ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
1116
1117 execute.actions = execute_actions.data;
1118 execute.actions_len = execute_actions.size;
1119 } else {
1120 execute.actions = action;
1121 execute.actions_len = NLA_ALIGN(action->nla_len);
1122 }
1123
1124 execute.packet = packet;
1125 execute.needs_help = false;
1126 execute.probe = false;
1127 aux->error = dpif_execute(aux->dpif, &execute);
1128 log_execute_message(aux->dpif, &execute, true, aux->error);
1129
1130 if (md->tunnel.ip_dst) {
1131 ofpbuf_uninit(&execute_actions);
1132 }
1133 break;
1134 }
1135
1136 case OVS_ACTION_ATTR_HASH:
1137 case OVS_ACTION_ATTR_PUSH_VLAN:
1138 case OVS_ACTION_ATTR_POP_VLAN:
1139 case OVS_ACTION_ATTR_PUSH_MPLS:
1140 case OVS_ACTION_ATTR_POP_MPLS:
1141 case OVS_ACTION_ATTR_SET:
1142 case OVS_ACTION_ATTR_SET_MASKED:
1143 case OVS_ACTION_ATTR_SAMPLE:
1144 case OVS_ACTION_ATTR_UNSPEC:
1145 case __OVS_ACTION_ATTR_MAX:
1146 OVS_NOT_REACHED();
1147 }
1148 }
1149
1150 /* Executes 'execute' by performing most of the actions in userspace and
1151 * passing the fully constructed packets to 'dpif' for output and userspace
1152 * actions.
1153 *
1154 * This helps with actions that a given 'dpif' doesn't implement directly. */
1155 static int
1156 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1157 {
1158 struct dpif_execute_helper_aux aux = {dpif, 0};
1159 struct dp_packet *pp;
1160
1161 COVERAGE_INC(dpif_execute_with_help);
1162
1163 pp = execute->packet;
1164 odp_execute_actions(&aux, &pp, 1, false, execute->actions,
1165 execute->actions_len, dpif_execute_helper_cb);
1166 return aux.error;
1167 }
1168
1169 /* Returns true if the datapath needs help executing 'execute'. */
1170 static bool
1171 dpif_execute_needs_help(const struct dpif_execute *execute)
1172 {
1173 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1174 }
1175
1176 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1177 int
1178 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1179 {
1180 if (execute->actions_len) {
1181 struct dpif_op *opp;
1182 struct dpif_op op;
1183
1184 op.type = DPIF_OP_EXECUTE;
1185 op.u.execute = *execute;
1186
1187 opp = &op;
1188 dpif_operate(dpif, &opp, 1);
1189
1190 return op.error;
1191 } else {
1192 return 0;
1193 }
1194 }
1195
1196 /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1197 * which they are specified. Places each operation's results in the "output"
1198 * members documented in comments, and 0 in the 'error' member on success or a
1199 * positive errno on failure. */
1200 void
1201 dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
1202 {
1203 while (n_ops > 0) {
1204 size_t chunk;
1205
1206 /* Count 'chunk', the number of ops that can be executed without
1207 * needing any help. Ops that need help should be rare, so we
1208 * expect this to ordinarily be 'n_ops', that is, all the ops. */
1209 for (chunk = 0; chunk < n_ops; chunk++) {
1210 struct dpif_op *op = ops[chunk];
1211
1212 if (op->type == DPIF_OP_EXECUTE
1213 && dpif_execute_needs_help(&op->u.execute)) {
1214 break;
1215 }
1216 }
1217
1218 if (chunk) {
1219 /* Execute a chunk full of ops that the dpif provider can
1220 * handle itself, without help. */
1221 size_t i;
1222
1223 dpif->dpif_class->operate(dpif, ops, chunk);
1224
1225 for (i = 0; i < chunk; i++) {
1226 struct dpif_op *op = ops[i];
1227 int error = op->error;
1228
1229 switch (op->type) {
1230 case DPIF_OP_FLOW_PUT: {
1231 struct dpif_flow_put *put = &op->u.flow_put;
1232
1233 COVERAGE_INC(dpif_flow_put);
1234 log_flow_put_message(dpif, put, error);
1235 if (error && put->stats) {
1236 memset(put->stats, 0, sizeof *put->stats);
1237 }
1238 break;
1239 }
1240
1241 case DPIF_OP_FLOW_GET: {
1242 struct dpif_flow_get *get = &op->u.flow_get;
1243
1244 COVERAGE_INC(dpif_flow_get);
1245 if (error) {
1246 memset(get->flow, 0, sizeof *get->flow);
1247 }
1248 log_flow_get_message(dpif, get, error);
1249
1250 break;
1251 }
1252
1253 case DPIF_OP_FLOW_DEL: {
1254 struct dpif_flow_del *del = &op->u.flow_del;
1255
1256 COVERAGE_INC(dpif_flow_del);
1257 log_flow_del_message(dpif, del, error);
1258 if (error && del->stats) {
1259 memset(del->stats, 0, sizeof *del->stats);
1260 }
1261 break;
1262 }
1263
1264 case DPIF_OP_EXECUTE:
1265 COVERAGE_INC(dpif_execute);
1266 log_execute_message(dpif, &op->u.execute, false, error);
1267 break;
1268 }
1269 }
1270
1271 ops += chunk;
1272 n_ops -= chunk;
1273 } else {
1274 /* Help the dpif provider to execute one op. */
1275 struct dpif_op *op = ops[0];
1276
1277 COVERAGE_INC(dpif_execute);
1278 op->error = dpif_execute_with_help(dpif, &op->u.execute);
1279 ops++;
1280 n_ops--;
1281 }
1282 }
1283 }
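
/* Illustrative sketch (not part of the upstream file): batching several flow
 * deletions into one dpif_operate() call.  Deleting by UFID alone, with no
 * key, assumes a datapath that supports UFIDs; 'ufids' and 'n' are supplied
 * by the caller. */
static void OVS_UNUSED
example_delete_flows_by_ufid(struct dpif *dpif, const ovs_u128 *ufids,
                             size_t n)
{
    struct dpif_op ops[8];
    struct dpif_op *opsp[8];
    size_t i;

    ovs_assert(n <= ARRAY_SIZE(ops));
    for (i = 0; i < n; i++) {
        ops[i].type = DPIF_OP_FLOW_DEL;
        memset(&ops[i].u.flow_del, 0, sizeof ops[i].u.flow_del);
        ops[i].u.flow_del.ufid = &ufids[i];
        ops[i].u.flow_del.pmd_id = PMD_ID_NULL;
        opsp[i] = &ops[i];
    }

    dpif_operate(dpif, opsp, n);

    for (i = 0; i < n; i++) {
        if (ops[i].error) {
            VLOG_WARN("flow delete failed (%s)", ovs_strerror(ops[i].error));
        }
    }
}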
1284
1285 /* Returns a string that represents 'type', for use in log messages. */
1286 const char *
1287 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1288 {
1289 switch (type) {
1290 case DPIF_UC_MISS: return "miss";
1291 case DPIF_UC_ACTION: return "action";
1292 case DPIF_N_UC_TYPES: default: return "<unknown>";
1293 }
1294 }
1295
1296 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1297 * if successful, otherwise a positive errno value.
1298 *
1299 * Turning packet receive off and then back on may change the Netlink PID
1300 * assignments returned by dpif_port_get_pid(). If the client does this, it
1301 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1302 * using the new PID assignment. */
1303 int
1304 dpif_recv_set(struct dpif *dpif, bool enable)
1305 {
1306 int error = 0;
1307
1308 if (dpif->dpif_class->recv_set) {
1309 error = dpif->dpif_class->recv_set(dpif, enable);
1310 log_operation(dpif, "recv_set", error);
1311 }
1312 return error;
1313 }
1314
1315 /* Refreshes the poll loops and Netlink sockets associated with each port,
1316  * when the number of upcall handlers (upcall receiving threads) is changed
1317 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1318 * recv_set().
1319 *
1320 * Since multiple upcall handlers can read upcalls simultaneously from
1321 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1322 * handler. So, handlers_set() is responsible for the following tasks:
1323 *
1324  *     When receiving upcalls is enabled, extends or creates the
1325 * configuration to support:
1326 *
1327 * - 'n_handlers' Netlink sockets for each port.
1328 *
1329 * - 'n_handlers' poll loops, one for each upcall handler.
1330 *
1331 * - registering the Netlink sockets for the same upcall handler to
1332 * the corresponding poll loop.
1333 *
1334 * Returns 0 if successful, otherwise a positive errno value. */
1335 int
1336 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1337 {
1338 int error = 0;
1339
1340 if (dpif->dpif_class->handlers_set) {
1341 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1342 log_operation(dpif, "handlers_set", error);
1343 }
1344 return error;
1345 }
1346
1347 void
1348 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1349 {
1350 if (dpif->dpif_class->register_upcall_cb) {
1351 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1352 }
1353 }
1354
1355 void
1356 dpif_enable_upcall(struct dpif *dpif)
1357 {
1358 if (dpif->dpif_class->enable_upcall) {
1359 dpif->dpif_class->enable_upcall(dpif);
1360 }
1361 }
1362
1363 void
1364 dpif_disable_upcall(struct dpif *dpif)
1365 {
1366 if (dpif->dpif_class->disable_upcall) {
1367 dpif->dpif_class->disable_upcall(dpif);
1368 }
1369 }
1370
1371 void
1372 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1373 {
1374 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1375 struct ds flow;
1376 char *packet;
1377
1378 packet = ofp_packet_to_string(dp_packet_data(&upcall->packet),
1379 dp_packet_size(&upcall->packet));
1380
1381 ds_init(&flow);
1382 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1383
1384 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1385 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1386 ds_cstr(&flow), packet);
1387
1388 ds_destroy(&flow);
1389 free(packet);
1390 }
1391 }
1392
1393 /* If 'dpif' creates its own I/O polling threads, refreshes the poll
1394  * threads' configuration. */
1395 int
1396 dpif_poll_threads_set(struct dpif *dpif, unsigned int n_rxqs,
1397 const char *cmask)
1398 {
1399 int error = 0;
1400
1401 if (dpif->dpif_class->poll_threads_set) {
1402 error = dpif->dpif_class->poll_threads_set(dpif, n_rxqs, cmask);
1403 if (error) {
1404 log_operation(dpif, "poll_threads_set", error);
1405 }
1406 }
1407
1408 return error;
1409 }
1410
1411 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1412  * can be multiple poll loops, 'handler_id' is needed as an index to
1413 * identify the corresponding poll loop. If successful, stores the upcall
1414 * into '*upcall', using 'buf' for storage. Should only be called if
1415 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1416 *
1417 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1418 * 'buf', so their memory cannot be freed separately from 'buf'.
1419 *
1420 * The caller owns the data of 'upcall->packet' and may modify it. If
1421 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1422 * will be reallocated. This requires the data of 'upcall->packet' to be
1423 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1424 * when an error is returned, the 'upcall->packet' may be uninitialized
1425 * and should not be released.
1426 *
1427 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1428 * if no upcall is immediately available. */
1429 int
1430 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1431 struct ofpbuf *buf)
1432 {
1433 int error = EAGAIN;
1434
1435 if (dpif->dpif_class->recv) {
1436 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1437 if (!error) {
1438 dpif_print_packet(dpif, upcall);
1439 } else if (error != EAGAIN) {
1440 log_operation(dpif, "recv", error);
1441 }
1442 }
1443 return error;
1444 }
1445
1446 /* Discards all messages that would otherwise be received by dpif_recv() on
1447 * 'dpif'. */
1448 void
1449 dpif_recv_purge(struct dpif *dpif)
1450 {
1451 COVERAGE_INC(dpif_purge);
1452 if (dpif->dpif_class->recv_purge) {
1453 dpif->dpif_class->recv_purge(dpif);
1454 }
1455 }
1456
1457 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1458  * has a message queued to be received with the recv member function.
1459  * Since there can be multiple poll loops, 'handler_id' is needed as an
1460  * index to identify the corresponding poll loop. */
1461 void
1462 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1463 {
1464 if (dpif->dpif_class->recv_wait) {
1465 dpif->dpif_class->recv_wait(dpif, handler_id);
1466 }
1467 }
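
/* Illustrative sketch (not part of the upstream file): a minimal receive loop
 * for one upcall handler, assuming dpif_recv_set() and dpif_handlers_set()
 * have already been called.  Error handling and packet ownership are
 * simplified; see the comment above dpif_recv() for the full rules. */
static void OVS_UNUSED
example_upcall_loop(struct dpif *dpif, uint32_t handler_id)
{
    for (;;) {
        struct dpif_upcall upcall;
        struct ofpbuf buf;
        uint64_t stub[4096 / 8];

        ofpbuf_use_stub(&buf, stub, sizeof stub);
        if (!dpif_recv(dpif, handler_id, &upcall, &buf)) {
            /* Process 'upcall'; its key and packet data point into 'buf'. */
            ofpbuf_uninit(&buf);
        } else {
            ofpbuf_uninit(&buf);
            dpif_recv_wait(dpif, handler_id);
            poll_block();
        }
    }
}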
1468
1469 /*
1470 * Return the datapath version. Caller is responsible for freeing
1471 * the string.
1472 */
1473 char *
1474 dpif_get_dp_version(const struct dpif *dpif)
1475 {
1476 char *version = NULL;
1477
1478 if (dpif->dpif_class->get_datapath_version) {
1479 version = dpif->dpif_class->get_datapath_version();
1480 }
1481
1482 return version;
1483 }
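
/* Illustrative usage sketch (not part of the upstream file): logging the
 * datapath version string, which the caller must free. */
static void OVS_UNUSED
example_log_dp_version(const struct dpif *dpif)
{
    char *version = dpif_get_dp_version(dpif);

    if (version) {
        VLOG_INFO("%s: datapath version %s", dpif_name(dpif), version);
        free(version);
    }
}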
1484
1485 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1486 * and '*engine_id', respectively. */
1487 void
1488 dpif_get_netflow_ids(const struct dpif *dpif,
1489 uint8_t *engine_type, uint8_t *engine_id)
1490 {
1491 *engine_type = dpif->netflow_engine_type;
1492 *engine_id = dpif->netflow_engine_id;
1493 }
1494
1495 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1496 * value used for setting packet priority.
1497 * On success, returns 0 and stores the priority into '*priority'.
1498 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1499 int
1500 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1501 uint32_t *priority)
1502 {
1503 int error = (dpif->dpif_class->queue_to_priority
1504 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1505 priority)
1506 : EOPNOTSUPP);
1507 if (error) {
1508 *priority = 0;
1509 }
1510 log_operation(dpif, "queue_to_priority", error);
1511 return error;
1512 }
1513 \f
1514 void
1515 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1516 const char *name,
1517 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1518 {
1519 dpif->dpif_class = dpif_class;
1520 dpif->base_name = xstrdup(name);
1521 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1522 dpif->netflow_engine_type = netflow_engine_type;
1523 dpif->netflow_engine_id = netflow_engine_id;
1524 }
1525
1526 /* Undoes the results of initialization.
1527 *
1528 * Normally this function only needs to be called from dpif_close().
1529 * However, it may be called by providers due to an error on opening
1530  * that occurs after initialization.  In this case dpif_close() would
1531 * never be called. */
1532 void
1533 dpif_uninit(struct dpif *dpif, bool close)
1534 {
1535 char *base_name = dpif->base_name;
1536 char *full_name = dpif->full_name;
1537
1538 if (close) {
1539 dpif->dpif_class->close(dpif);
1540 }
1541
1542 free(base_name);
1543 free(full_name);
1544 }
1545 \f
1546 static void
1547 log_operation(const struct dpif *dpif, const char *operation, int error)
1548 {
1549 if (!error) {
1550 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1551 } else if (ofperr_is_valid(error)) {
1552 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1553 dpif_name(dpif), operation, ofperr_get_name(error));
1554 } else {
1555 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1556 dpif_name(dpif), operation, ovs_strerror(error));
1557 }
1558 }
1559
1560 static enum vlog_level
1561 flow_message_log_level(int error)
1562 {
1563 /* If flows arrive in a batch, userspace may push down multiple
1564 * unique flow definitions that overlap when wildcards are applied.
1565 * Kernels that support flow wildcarding will reject these flows as
1566 * duplicates (EEXIST), so lower the log level to debug for these
1567 * types of messages. */
1568 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1569 }
1570
1571 static bool
1572 should_log_flow_message(int error)
1573 {
1574 return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
1575 error ? &error_rl : &dpmsg_rl);
1576 }
1577
1578 static void
1579 log_flow_message(const struct dpif *dpif, int error, const char *operation,
1580 const struct nlattr *key, size_t key_len,
1581 const struct nlattr *mask, size_t mask_len,
1582 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
1583 const struct nlattr *actions, size_t actions_len)
1584 {
1585 struct ds ds = DS_EMPTY_INITIALIZER;
1586 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1587 if (error) {
1588 ds_put_cstr(&ds, "failed to ");
1589 }
1590 ds_put_format(&ds, "%s ", operation);
1591 if (error) {
1592 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1593 }
1594 if (ufid) {
1595 odp_format_ufid(ufid, &ds);
1596 ds_put_cstr(&ds, " ");
1597 }
1598 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1599 if (stats) {
1600 ds_put_cstr(&ds, ", ");
1601 dpif_flow_stats_format(stats, &ds);
1602 }
1603 if (actions || actions_len) {
1604 ds_put_cstr(&ds, ", actions:");
1605 format_odp_actions(&ds, actions, actions_len);
1606 }
1607 vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
1608 ds_destroy(&ds);
1609 }
1610
1611 static void
1612 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1613 int error)
1614 {
1615 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
1616 struct ds s;
1617
1618 ds_init(&s);
1619 ds_put_cstr(&s, "put");
1620 if (put->flags & DPIF_FP_CREATE) {
1621 ds_put_cstr(&s, "[create]");
1622 }
1623 if (put->flags & DPIF_FP_MODIFY) {
1624 ds_put_cstr(&s, "[modify]");
1625 }
1626 if (put->flags & DPIF_FP_ZERO_STATS) {
1627 ds_put_cstr(&s, "[zero]");
1628 }
1629 log_flow_message(dpif, error, ds_cstr(&s),
1630 put->key, put->key_len, put->mask, put->mask_len,
1631 put->ufid, put->stats, put->actions,
1632 put->actions_len);
1633 ds_destroy(&s);
1634 }
1635 }
1636
1637 static void
1638 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1639 int error)
1640 {
1641 if (should_log_flow_message(error)) {
1642 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1643 NULL, 0, del->ufid, !error ? del->stats : NULL,
1644 NULL, 0);
1645 }
1646 }
1647
1648 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1649 * (0 for success). 'subexecute' should be true if the execution is a result
1650 * of breaking down a larger execution that needed help, false otherwise.
1651 *
1652 *
1653 * XXX In theory, the log message could be deceptive because this function is
1654 * called after the dpif_provider's '->execute' function, which is allowed to
1655 * modify execute->packet and execute->md. In practice, though:
1656 *
1657 * - dpif-netlink doesn't modify execute->packet or execute->md.
1658 *
1659 * - dpif-netdev does modify them but it is less likely to have problems
1660 * because it is built into ovs-vswitchd and cannot have version skew,
1661 * etc.
1662 *
1663 * It would still be better to avoid the potential problem. I don't know of a
1664 * good way to do that, though, that isn't expensive. */
1665 static void
1666 log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
1667 bool subexecute, int error)
1668 {
1669 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1670 && !execute->probe) {
1671 struct ds ds = DS_EMPTY_INITIALIZER;
1672 char *packet;
1673
1674 packet = ofp_packet_to_string(dp_packet_data(execute->packet),
1675 dp_packet_size(execute->packet));
1676 ds_put_format(&ds, "%s: %sexecute ",
1677 dpif_name(dpif),
1678 (subexecute ? "sub-"
1679 : dpif_execute_needs_help(execute) ? "super-"
1680 : ""));
1681 format_odp_actions(&ds, execute->actions, execute->actions_len);
1682 if (error) {
1683 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1684 }
1685 ds_put_format(&ds, " on packet %s", packet);
1686 vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1687 ds_destroy(&ds);
1688 free(packet);
1689 }
1690 }
1691
1692 static void
1693 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1694 int error)
1695 {
1696 if (should_log_flow_message(error)) {
1697 log_flow_message(dpif, error, "flow_get",
1698 get->key, get->key_len,
1699 get->flow->mask, get->flow->mask_len,
1700 get->ufid, &get->flow->stats,
1701 get->flow->actions, get->flow->actions_len);
1702 }
1703 }
1704
1705 bool
1706 dpif_supports_tnl_push_pop(const struct dpif *dpif)
1707 {
1708 return !strcmp(dpif->dpif_class->type, "netdev") ||
1709 !strcmp(dpif->dpif_class->type, "dummy");
1710 }