1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dynamic-string.h"
29 #include "flow.h"
30 #include "netdev.h"
31 #include "netlink.h"
32 #include "odp-execute.h"
33 #include "odp-util.h"
34 #include "ofp-errors.h"
35 #include "ofp-print.h"
36 #include "ofp-util.h"
37 #include "ofpbuf.h"
38 #include "packet-dpif.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
42 #include "shash.h"
43 #include "sset.h"
44 #include "timeval.h"
45 #include "tnl-arp-cache.h"
46 #include "tnl-ports.h"
47 #include "util.h"
48 #include "valgrind.h"
49 #include "vlog.h"
50
51 VLOG_DEFINE_THIS_MODULE(dpif);
52
53 COVERAGE_DEFINE(dpif_destroy);
54 COVERAGE_DEFINE(dpif_port_add);
55 COVERAGE_DEFINE(dpif_port_del);
56 COVERAGE_DEFINE(dpif_flow_flush);
57 COVERAGE_DEFINE(dpif_flow_get);
58 COVERAGE_DEFINE(dpif_flow_put);
59 COVERAGE_DEFINE(dpif_flow_del);
60 COVERAGE_DEFINE(dpif_execute);
61 COVERAGE_DEFINE(dpif_purge);
62 COVERAGE_DEFINE(dpif_execute_with_help);
63
64 static const struct dpif_class *base_dpif_classes[] = {
65 #if defined(__linux__) || defined(_WIN32)
66 &dpif_netlink_class,
67 #endif
68 &dpif_netdev_class,
69 };
70
71 struct registered_dpif_class {
72 const struct dpif_class *dpif_class;
73 int refcount;
74 };
75 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
76 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
77
78 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
79 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
80
81 /* Rate limit for individual messages going to or from the datapath, output at
82 * DBG level. This is very high because, if these are enabled, it is because
83 * we really need to see them. */
84 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
85
86 /* Not really much point in logging many dpif errors. */
87 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
88
89 static void log_flow_message(const struct dpif *dpif, int error,
90 const char *operation,
91 const struct nlattr *key, size_t key_len,
92 const struct nlattr *mask, size_t mask_len,
93 const struct dpif_flow_stats *stats,
94 const struct nlattr *actions, size_t actions_len);
95 static void log_operation(const struct dpif *, const char *operation,
96 int error);
97 static bool should_log_flow_message(int error);
98 static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
99 int error);
100 static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
101 int error);
102 static void log_execute_message(struct dpif *, const struct dpif_execute *,
103 bool subexecute, int error);
104 static void log_flow_get_message(const struct dpif *,
105 const struct dpif_flow_get *, int error);
106
107 static void
108 dp_initialize(void)
109 {
110 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
111
112 if (ovsthread_once_start(&once)) {
113 int i;
114
115 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
116 dp_register_provider(base_dpif_classes[i]);
117 }
118 dpctl_unixctl_register();
119 tnl_port_map_init();
120 tnl_arp_cache_init();
121 route_table_register();
122
123 ovsthread_once_done(&once);
124 }
125 }
126
127 static int
128 dp_register_provider__(const struct dpif_class *new_class)
129 {
130 struct registered_dpif_class *registered_class;
131
132 if (sset_contains(&dpif_blacklist, new_class->type)) {
133 VLOG_DBG("attempted to register blacklisted provider: %s",
134 new_class->type);
135 return EINVAL;
136 }
137
138 if (shash_find(&dpif_classes, new_class->type)) {
139 VLOG_WARN("attempted to register duplicate datapath provider: %s",
140 new_class->type);
141 return EEXIST;
142 }
143
144 registered_class = xmalloc(sizeof *registered_class);
145 registered_class->dpif_class = new_class;
146 registered_class->refcount = 0;
147
148 shash_add(&dpif_classes, new_class->type, registered_class);
149
150 return 0;
151 }
152
153 /* Registers a new datapath provider. After successful registration, new
154 * datapaths of that type can be opened using dpif_open(). */
155 int
156 dp_register_provider(const struct dpif_class *new_class)
157 {
158 int error;
159
160 ovs_mutex_lock(&dpif_mutex);
161 error = dp_register_provider__(new_class);
162 ovs_mutex_unlock(&dpif_mutex);
163
164 return error;
165 }
166
167 /* Unregisters a datapath provider. 'type' must have been previously
168 * registered and not currently be in use by any dpifs. After unregistration
169 * new datapaths of that type cannot be opened using dpif_open(). */
170 static int
171 dp_unregister_provider__(const char *type)
172 {
173 struct shash_node *node;
174 struct registered_dpif_class *registered_class;
175
176 node = shash_find(&dpif_classes, type);
177 if (!node) {
178 VLOG_WARN("attempted to unregister a datapath provider that is not "
179 "registered: %s", type);
180 return EAFNOSUPPORT;
181 }
182
183 registered_class = node->data;
184 if (registered_class->refcount) {
185 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
186 return EBUSY;
187 }
188
189 shash_delete(&dpif_classes, node);
190 free(registered_class);
191
192 return 0;
193 }
194
195 /* Unregisters a datapath provider. 'type' must have been previously
196 * registered and not currently be in use by any dpifs. After unregistration
197 * new datapaths of that type cannot be opened using dpif_open(). */
198 int
199 dp_unregister_provider(const char *type)
200 {
201 int error;
202
203 dp_initialize();
204
205 ovs_mutex_lock(&dpif_mutex);
206 error = dp_unregister_provider__(type);
207 ovs_mutex_unlock(&dpif_mutex);
208
209 return error;
210 }
211
212 /* Blacklists a provider. Causes future calls of dp_register_provider() with
213 * a dpif_class which implements 'type' to fail. */
214 void
215 dp_blacklist_provider(const char *type)
216 {
217 ovs_mutex_lock(&dpif_mutex);
218 sset_add(&dpif_blacklist, type);
219 ovs_mutex_unlock(&dpif_mutex);
220 }
221
222 /* Clears 'types' and enumerates the types of all currently registered datapath
223 * providers into it. The caller must first initialize the sset. */
224 void
225 dp_enumerate_types(struct sset *types)
226 {
227 struct shash_node *node;
228
229 dp_initialize();
230 sset_clear(types);
231
232 ovs_mutex_lock(&dpif_mutex);
233 SHASH_FOR_EACH(node, &dpif_classes) {
234 const struct registered_dpif_class *registered_class = node->data;
235 sset_add(types, registered_class->dpif_class->type);
236 }
237 ovs_mutex_unlock(&dpif_mutex);
238 }
239
240 static void
241 dp_class_unref(struct registered_dpif_class *rc)
242 {
243 ovs_mutex_lock(&dpif_mutex);
244 ovs_assert(rc->refcount);
245 rc->refcount--;
246 ovs_mutex_unlock(&dpif_mutex);
247 }
248
249 static struct registered_dpif_class *
250 dp_class_lookup(const char *type)
251 {
252 struct registered_dpif_class *rc;
253
254 ovs_mutex_lock(&dpif_mutex);
255 rc = shash_find_data(&dpif_classes, type);
256 if (rc) {
257 rc->refcount++;
258 }
259 ovs_mutex_unlock(&dpif_mutex);
260
261 return rc;
262 }
263
264 /* Clears 'names' and enumerates the names of all known created datapaths with
265 * the given 'type'. The caller must first initialize the sset. Returns 0 if
266 * successful, otherwise a positive errno value.
267 *
268 * Some kinds of datapaths might not be practically enumerable. This is not
269 * considered an error. */
270 int
271 dp_enumerate_names(const char *type, struct sset *names)
272 {
273 struct registered_dpif_class *registered_class;
274 const struct dpif_class *dpif_class;
275 int error;
276
277 dp_initialize();
278 sset_clear(names);
279
280 registered_class = dp_class_lookup(type);
281 if (!registered_class) {
282 VLOG_WARN("could not enumerate unknown type: %s", type);
283 return EAFNOSUPPORT;
284 }
285
286 dpif_class = registered_class->dpif_class;
287 error = (dpif_class->enumerate
288 ? dpif_class->enumerate(names, dpif_class)
289 : 0);
290 if (error) {
291 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
292 ovs_strerror(error));
293 }
294 dp_class_unref(registered_class);
295
296 return error;
297 }
298
299 /* Parses 'datapath_name_', which is of the form [type@]name into its
300 * component pieces. 'name' and 'type' must be freed by the caller.
301 *
302 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
303 void
304 dp_parse_name(const char *datapath_name_, char **name, char **type)
305 {
306 char *datapath_name = xstrdup(datapath_name_);
307 char *separator;
308
309 separator = strchr(datapath_name, '@');
310 if (separator) {
311 *separator = '\0';
312 *type = datapath_name;
313 *name = xstrdup(dpif_normalize_type(separator + 1));
314 } else {
315 *name = datapath_name;
316 *type = xstrdup(dpif_normalize_type(NULL));
317 }
318 }
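
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * dp_parse_name() splits "netdev@br0" into type "netdev" and name "br0"; a
 * bare "br0" would yield the default "system" type.  The caller frees both
 * strings.  The function name below is hypothetical. */
static void
example_parse_datapath_name(void)
{
    char *name;
    char *type;

    dp_parse_name("netdev@br0", &name, &type);
    /* Now 'name' is "br0" and 'type' is "netdev". */
    free(name);
    free(type);
}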
319
320 static int
321 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
322 {
323 struct dpif *dpif = NULL;
324 int error;
325 struct registered_dpif_class *registered_class;
326
327 dp_initialize();
328
329 type = dpif_normalize_type(type);
330 registered_class = dp_class_lookup(type);
331 if (!registered_class) {
332 VLOG_WARN("could not create datapath %s of unknown type %s", name,
333 type);
334 error = EAFNOSUPPORT;
335 goto exit;
336 }
337
338 error = registered_class->dpif_class->open(registered_class->dpif_class,
339 name, create, &dpif);
340 if (!error) {
341 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
342 } else {
343 dp_class_unref(registered_class);
344 }
345
346 exit:
347 *dpifp = error ? NULL : dpif;
348 return error;
349 }
350
351 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
352 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
353 * the empty string to specify the default system type. Returns 0 if
354 * successful, otherwise a positive errno value. On success stores a pointer
355 * to the datapath in '*dpifp', otherwise a null pointer. */
356 int
357 dpif_open(const char *name, const char *type, struct dpif **dpifp)
358 {
359 return do_open(name, type, false, dpifp);
360 }
361
362 /* Tries to create and open a new datapath with the given 'name' and 'type'.
363 * 'type' may be either NULL or the empty string to specify the default system
364 * type. Will fail if a datapath with 'name' and 'type' already exists.
365 * Returns 0 if successful, otherwise a positive errno value. On success
366 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
367 int
368 dpif_create(const char *name, const char *type, struct dpif **dpifp)
369 {
370 return do_open(name, type, true, dpifp);
371 }
372
373 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
374 * does not exist. 'type' may be either NULL or the empty string to specify
375 * the default system type. Returns 0 if successful, otherwise a positive
376 * errno value. On success stores a pointer to the datapath in '*dpifp',
377 * otherwise a null pointer. */
378 int
379 dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
380 {
381 int error;
382
383 error = dpif_create(name, type, dpifp);
384 if (error == EEXIST || error == EBUSY) {
385 error = dpif_open(name, type, dpifp);
386 if (error) {
387 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
388 name, ovs_strerror(error));
389 }
390 } else if (error) {
391 VLOG_WARN("failed to create datapath %s: %s",
392 name, ovs_strerror(error));
393 }
394 return error;
395 }
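
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * open the default "system" datapath, creating it if necessary, and log its
 * flow count.  The function name is hypothetical; error handling follows the
 * conventions used elsewhere in this file. */
static int
example_open_system_datapath(void)
{
    struct dpif_dp_stats stats;
    struct dpif *dpif;
    int error;

    error = dpif_create_and_open("ovs-system", NULL, &dpif);
    if (error) {
        return error;
    }

    if (!dpif_get_dp_stats(dpif, &stats)) {
        VLOG_INFO("%s: %"PRIu64" flows installed",
                  dpif_name(dpif), stats.n_flows);
    }

    dpif_close(dpif);
    return 0;
}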
396
397 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
398 * itself; call dpif_delete() first, instead, if that is desirable. */
399 void
400 dpif_close(struct dpif *dpif)
401 {
402 if (dpif) {
403 struct registered_dpif_class *rc;
404
405 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
406 dpif_uninit(dpif, true);
407 dp_class_unref(rc);
408 }
409 }
410
411 /* Performs periodic work needed by 'dpif'. */
412 bool
413 dpif_run(struct dpif *dpif)
414 {
415 if (dpif->dpif_class->run) {
416 return dpif->dpif_class->run(dpif);
417 }
418 return false;
419 }
420
421 /* Arranges for poll_block() to wake up when dp_run() needs to be called for
422 * 'dpif'. */
423 void
424 dpif_wait(struct dpif *dpif)
425 {
426 if (dpif->dpif_class->wait) {
427 dpif->dpif_class->wait(dpif);
428 }
429 }
430
431 /* Returns the name of datapath 'dpif' prefixed with the type
432 * (for use in log messages). */
433 const char *
434 dpif_name(const struct dpif *dpif)
435 {
436 return dpif->full_name;
437 }
438
439 /* Returns the name of datapath 'dpif' without the type
440 * (for use in device names). */
441 const char *
442 dpif_base_name(const struct dpif *dpif)
443 {
444 return dpif->base_name;
445 }
446
447 /* Returns the type of datapath 'dpif'. */
448 const char *
449 dpif_type(const struct dpif *dpif)
450 {
451 return dpif->dpif_class->type;
452 }
453
454 /* Returns the fully spelled out name for the given datapath 'type'.
455 *
456 * Normalized type strings can be compared with strcmp().  Unnormalized type
457 * strings might be the same even if they have different spellings. */
458 const char *
459 dpif_normalize_type(const char *type)
460 {
461 return type && type[0] ? type : "system";
462 }
463
464 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
465 * ports. After calling this function, it does not make sense to pass 'dpif'
466 * to any functions other than dpif_name() or dpif_close(). */
467 int
468 dpif_delete(struct dpif *dpif)
469 {
470 int error;
471
472 COVERAGE_INC(dpif_destroy);
473
474 error = dpif->dpif_class->destroy(dpif);
475 log_operation(dpif, "delete", error);
476 return error;
477 }
478
479 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
480 * otherwise a positive errno value. */
481 int
482 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
483 {
484 int error = dpif->dpif_class->get_stats(dpif, stats);
485 if (error) {
486 memset(stats, 0, sizeof *stats);
487 }
488 log_operation(dpif, "get_stats", error);
489 return error;
490 }
491
492 const char *
493 dpif_port_open_type(const char *datapath_type, const char *port_type)
494 {
495 struct registered_dpif_class *rc;
496
497 datapath_type = dpif_normalize_type(datapath_type);
498
499 ovs_mutex_lock(&dpif_mutex);
500 rc = shash_find_data(&dpif_classes, datapath_type);
501 if (rc && rc->dpif_class->port_open_type) {
502 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
503 }
504 ovs_mutex_unlock(&dpif_mutex);
505
506 return port_type;
507 }
508
509 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
510 * non-null and its value is not ODPP_NONE, then attempts to use the
511 * value as the port number.
512 *
513 * If successful, returns 0 and sets '*port_nop' to the new port's port
514 * number (if 'port_nop' is non-null). On failure, returns a positive
515 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
516 * non-null). */
517 int
518 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
519 {
520 const char *netdev_name = netdev_get_name(netdev);
521 odp_port_t port_no = ODPP_NONE;
522 int error;
523
524 COVERAGE_INC(dpif_port_add);
525
526 if (port_nop) {
527 port_no = *port_nop;
528 }
529
530 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
531 if (!error) {
532 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
533 dpif_name(dpif), netdev_name, port_no);
534 } else {
535 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
536 dpif_name(dpif), netdev_name, ovs_strerror(error));
537 port_no = ODPP_NONE;
538 }
539 if (port_nop) {
540 *port_nop = port_no;
541 }
542 return error;
543 }
544
545 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
546 * otherwise a positive errno value. */
547 int
548 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
549 {
550 int error;
551
552 COVERAGE_INC(dpif_port_del);
553
554 error = dpif->dpif_class->port_del(dpif, port_no);
555 if (!error) {
556 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
557 dpif_name(dpif), port_no);
558 } else {
559 log_operation(dpif, "port_del", error);
560 }
561 return error;
562 }
563
564 /* Makes a deep copy of 'src' into 'dst'. */
565 void
566 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
567 {
568 dst->name = xstrdup(src->name);
569 dst->type = xstrdup(src->type);
570 dst->port_no = src->port_no;
571 }
572
573 /* Frees memory allocated to members of 'dpif_port'.
574 *
575 * Do not call this function on a dpif_port obtained from
576 * dpif_port_dump_next(): that function retains ownership of the data in the
577 * dpif_port. */
578 void
579 dpif_port_destroy(struct dpif_port *dpif_port)
580 {
581 free(dpif_port->name);
582 free(dpif_port->type);
583 }
584
585 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
586 * true; otherwise, returns false. */
587 bool
588 dpif_port_exists(const struct dpif *dpif, const char *devname)
589 {
590 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
591 if (error != 0 && error != ENOENT && error != ENODEV) {
592 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
593 dpif_name(dpif), devname, ovs_strerror(error));
594 }
595
596 return !error;
597 }
598
599 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
600 * initializes '*port' appropriately; on failure, returns a positive errno
601 * value.
602 *
603 * The caller owns the data in 'port' and must free it with
604 * dpif_port_destroy() when it is no longer needed. */
605 int
606 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
607 struct dpif_port *port)
608 {
609 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
610 if (!error) {
611 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
612 dpif_name(dpif), port_no, port->name);
613 } else {
614 memset(port, 0, sizeof *port);
615 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
616 dpif_name(dpif), port_no, ovs_strerror(error));
617 }
618 return error;
619 }
620
621 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
622 * initializes '*port' appropriately; on failure, returns a positive errno
623 * value.
624 *
625 * The caller owns the data in 'port' and must free it with
626 * dpif_port_destroy() when it is no longer needed. */
627 int
628 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
629 struct dpif_port *port)
630 {
631 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
632 if (!error) {
633 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
634 dpif_name(dpif), devname, port->port_no);
635 } else {
636 memset(port, 0, sizeof *port);
637
638 /* For ENOENT or ENODEV we use DBG level because the caller is probably
639 * interested in whether 'dpif' actually has a port 'devname', so that
640 * it's not an issue worth logging if it doesn't. Other errors are
641 * uncommon and more likely to indicate a real problem. */
642 VLOG_RL(&error_rl,
643 error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
644 "%s: failed to query port %s: %s",
645 dpif_name(dpif), devname, ovs_strerror(error));
646 }
647 return error;
648 }
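
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * resolve a device name to its datapath port number, releasing the dpif_port
 * data once the number has been copied out.  The function name is
 * hypothetical. */
static int
example_devname_to_port(const struct dpif *dpif, const char *devname,
                        odp_port_t *port_nop)
{
    struct dpif_port port;
    int error;

    error = dpif_port_query_by_name(dpif, devname, &port);
    if (!error) {
        *port_nop = port.port_no;
        dpif_port_destroy(&port);
    } else {
        *port_nop = ODPP_NONE;
    }
    return error;
}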
649
650 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
651 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
652 * flows whose packets arrived on port 'port_no'. In the case where the
653 * provider allocates multiple Netlink PIDs to a single port, it may use
654 * 'hash' to spread load among them. The caller need not use a particular
655 * hash function; a 5-tuple hash is suitable.
656 *
657 * (The datapath implementation might use some different hash function for
658 * distributing packets received via flow misses among PIDs. This means
659 * that packets received via flow misses might be reordered relative to
660 * packets received via userspace actions. This is not ordinarily a
661 * problem.)
662 *
663 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
664 * allocated to any port, that the client may use for special purposes.
665 *
666 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
667 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
668 * disabled and then re-enabled, so a client that does that must be prepared to
669 * update all of the flows that it installed that contain
670 * OVS_ACTION_ATTR_USERSPACE actions. */
671 uint32_t
672 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
673 {
674 return (dpif->dpif_class->port_get_pid
675 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
676 : 0);
677 }
678
679 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
680 * the port's name into the 'name_size' bytes in 'name', ensuring that the
681 * result is null-terminated. On failure, returns a positive errno value and
682 * makes 'name' the empty string. */
683 int
684 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
685 char *name, size_t name_size)
686 {
687 struct dpif_port port;
688 int error;
689
690 ovs_assert(name_size > 0);
691
692 error = dpif_port_query_by_number(dpif, port_no, &port);
693 if (!error) {
694 ovs_strlcpy(name, port.name, name_size);
695 dpif_port_destroy(&port);
696 } else {
697 *name = '\0';
698 }
699 return error;
700 }
701
702 /* Initializes 'dump' to begin dumping the ports in a dpif.
703 *
704 * This function provides no status indication. An error status for the entire
705 * dump operation is provided when it is completed by calling
706 * dpif_port_dump_done().
707 */
708 void
709 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
710 {
711 dump->dpif = dpif;
712 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
713 log_operation(dpif, "port_dump_start", dump->error);
714 }
715
716 /* Attempts to retrieve another port from 'dump', which must have been
717 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
718 * into 'port' and returns true. On failure, returns false.
719 *
720 * Failure might indicate an actual error or merely that the last port has been
721 * dumped. An error status for the entire dump operation is provided when it
722 * is completed by calling dpif_port_dump_done().
723 *
724 * The dpif owns the data stored in 'port'. It will remain valid until at
725 * least the next time 'dump' is passed to dpif_port_dump_next() or
726 * dpif_port_dump_done(). */
727 bool
728 dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
729 {
730 const struct dpif *dpif = dump->dpif;
731
732 if (dump->error) {
733 return false;
734 }
735
736 dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
737 if (dump->error == EOF) {
738 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
739 } else {
740 log_operation(dpif, "port_dump_next", dump->error);
741 }
742
743 if (dump->error) {
744 dpif->dpif_class->port_dump_done(dpif, dump->state);
745 return false;
746 }
747 return true;
748 }
749
750 /* Completes port table dump operation 'dump', which must have been initialized
751 * with dpif_port_dump_start(). Returns 0 if the dump operation was
752 * error-free, otherwise a positive errno value describing the problem. */
753 int
754 dpif_port_dump_done(struct dpif_port_dump *dump)
755 {
756 const struct dpif *dpif = dump->dpif;
757 if (!dump->error) {
758 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
759 log_operation(dpif, "port_dump_done", dump->error);
760 }
761 return dump->error == EOF ? 0 : dump->error;
762 }
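
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * enumerate every port in 'dpif' with the start/next/done functions above.
 * Any error is reported once, by dpif_port_dump_done().  The function name is
 * hypothetical. */
static int
example_log_all_ports(const struct dpif *dpif)
{
    struct dpif_port_dump dump;
    struct dpif_port port;

    dpif_port_dump_start(&dump, dpif);
    while (dpif_port_dump_next(&dump, &port)) {
        VLOG_INFO("%s: port %"PRIu32" is %s (%s)",
                  dpif_name(dpif), port.port_no, port.name, port.type);
    }
    return dpif_port_dump_done(&dump);
}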
763
764 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
765 * 'dpif' has changed, this function does one of the following:
766 *
767 * - Stores the name of the device that was added to or deleted from 'dpif' in
768 * '*devnamep' and returns 0. The caller is responsible for freeing
769 * '*devnamep' (with free()) when it no longer needs it.
770 *
771 * - Returns ENOBUFS and sets '*devnamep' to NULL.
772 *
773 * This function may also return 'false positives', where it returns 0 and
774 * '*devnamep' names a device that was not actually added or deleted or it
775 * returns ENOBUFS without any change.
776 *
777 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
778 * return other positive errno values to indicate that something has gone
779 * wrong. */
780 int
781 dpif_port_poll(const struct dpif *dpif, char **devnamep)
782 {
783 int error = dpif->dpif_class->port_poll(dpif, devnamep);
784 if (error) {
785 *devnamep = NULL;
786 }
787 return error;
788 }
789
790 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
791 * value other than EAGAIN. */
792 void
793 dpif_port_poll_wait(const struct dpif *dpif)
794 {
795 dpif->dpif_class->port_poll_wait(dpif);
796 }
797
798 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
799 * arguments must have been initialized through a call to flow_extract().
800 * 'used' is stored into stats->used. */
801 void
802 dpif_flow_stats_extract(const struct flow *flow, const struct ofpbuf *packet,
803 long long int used, struct dpif_flow_stats *stats)
804 {
805 stats->tcp_flags = ntohs(flow->tcp_flags);
806 stats->n_bytes = ofpbuf_size(packet);
807 stats->n_packets = 1;
808 stats->used = used;
809 }
810
811 /* Appends a human-readable representation of 'stats' to 's'. */
812 void
813 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
814 {
815 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
816 stats->n_packets, stats->n_bytes);
817 if (stats->used) {
818 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
819 } else {
820 ds_put_format(s, "never");
821 }
822 if (stats->tcp_flags) {
823 ds_put_cstr(s, ", flags:");
824 packet_format_tcp_flags(s, stats->tcp_flags);
825 }
826 }
827
828 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
829 * positive errno value. */
830 int
831 dpif_flow_flush(struct dpif *dpif)
832 {
833 int error;
834
835 COVERAGE_INC(dpif_flow_flush);
836
837 error = dpif->dpif_class->flow_flush(dpif);
838 log_operation(dpif, "flow_flush", error);
839 return error;
840 }
841
842 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
843 int
844 dpif_flow_get(struct dpif *dpif,
845 const struct nlattr *key, size_t key_len,
846 struct ofpbuf *buf, struct dpif_flow *flow)
847 {
848 struct dpif_op *opp;
849 struct dpif_op op;
850
851 op.type = DPIF_OP_FLOW_GET;
852 op.u.flow_get.key = key;
853 op.u.flow_get.key_len = key_len;
854 op.u.flow_get.buffer = buf;
855 op.u.flow_get.flow = flow;
856 op.u.flow_get.flow->key = key;
857 op.u.flow_get.flow->key_len = key_len;
858
859 opp = &op;
860 dpif_operate(dpif, &opp, 1);
861
862 return op.error;
863 }
864
865 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
866 int
867 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
868 const struct nlattr *key, size_t key_len,
869 const struct nlattr *mask, size_t mask_len,
870 const struct nlattr *actions, size_t actions_len,
871 struct dpif_flow_stats *stats)
872 {
873 struct dpif_op *opp;
874 struct dpif_op op;
875
876 op.type = DPIF_OP_FLOW_PUT;
877 op.u.flow_put.flags = flags;
878 op.u.flow_put.key = key;
879 op.u.flow_put.key_len = key_len;
880 op.u.flow_put.mask = mask;
881 op.u.flow_put.mask_len = mask_len;
882 op.u.flow_put.actions = actions;
883 op.u.flow_put.actions_len = actions_len;
884 op.u.flow_put.stats = stats;
885
886 opp = &op;
887 dpif_operate(dpif, &opp, 1);
888
889 return op.error;
890 }
891
892 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
893 int
894 dpif_flow_del(struct dpif *dpif,
895 const struct nlattr *key, size_t key_len,
896 struct dpif_flow_stats *stats)
897 {
898 struct dpif_op *opp;
899 struct dpif_op op;
900
901 op.type = DPIF_OP_FLOW_DEL;
902 op.u.flow_del.key = key;
903 op.u.flow_del.key_len = key_len;
904 op.u.flow_del.stats = stats;
905
906 opp = &op;
907 dpif_operate(dpif, &opp, 1);
908
909 return op.error;
910 }
911
912 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
913 * flows in 'dpif'.
914 *
915 * This function always successfully returns a dpif_flow_dump. Error
916 * reporting is deferred to dpif_flow_dump_destroy(). */
917 struct dpif_flow_dump *
918 dpif_flow_dump_create(const struct dpif *dpif)
919 {
920 return dpif->dpif_class->flow_dump_create(dpif);
921 }
922
923 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
924 * All dpif_flow_dump_thread structures previously created for 'dump' must
925 * previously have been destroyed.
926 *
927 * Returns 0 if the dump operation was error-free, otherwise a positive errno
928 * value describing the problem. */
929 int
930 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
931 {
932 const struct dpif *dpif = dump->dpif;
933 int error = dpif->dpif_class->flow_dump_destroy(dump);
934 log_operation(dpif, "flow_dump_destroy", error);
935 return error == EOF ? 0 : error;
936 }
937
938 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
939 struct dpif_flow_dump_thread *
940 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
941 {
942 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
943 }
944
945 /* Releases 'thread'. */
946 void
947 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
948 {
949 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
950 }
951
952 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
953 * if and only if no flows remained to be retrieved, otherwise a positive
954 * number reflecting the number of elements in 'flows[]' that were updated.
955 * The number of flows returned might be less than 'max_flows' because
956 * fewer than 'max_flows' remained, because this particular datapath does not
957 * benefit from batching, or because an error occurred partway through
958 * retrieval. Thus, the caller should continue calling until a 0 return value,
959 * even if intermediate return values are less than 'max_flows'.
960 *
961 * No error status is immediately provided. An error status for the entire
962 * dump operation is provided when it is completed by calling
963 * dpif_flow_dump_destroy().
964 *
965 * All of the data stored into 'flows' is owned by the datapath, not by the
966 * caller, and the caller must not modify or free it. The datapath guarantees
967 * that it remains accessible and unchanged until the first of:
968 * - The next call to dpif_flow_dump_next() for 'thread', or
969 * - The next rcu quiescent period. */
970 int
971 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
972 struct dpif_flow *flows, int max_flows)
973 {
974 struct dpif *dpif = thread->dpif;
975 int n;
976
977 ovs_assert(max_flows > 0);
978 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
979 if (n > 0) {
980 struct dpif_flow *f;
981
982 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
983 log_flow_message(dpif, 0, "flow_dump",
984 f->key, f->key_len, f->mask, f->mask_len,
985 &f->stats, f->actions, f->actions_len);
986 }
987 } else {
988 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
989 }
990 return n;
991 }
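
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * a single thread walks every flow in 'dpif' in batches of up to 50 using
 * the dump interface above, then collects the dump's overall error status.
 * The function name and the batch size are arbitrary. */
static int
example_count_flows(const struct dpif *dpif)
{
    struct dpif_flow_dump *dump;
    struct dpif_flow_dump_thread *thread;
    struct dpif_flow flows[50];
    int n_flows = 0;
    int n;

    dump = dpif_flow_dump_create(dpif);
    thread = dpif_flow_dump_thread_create(dump);
    while ((n = dpif_flow_dump_next(thread, flows, ARRAY_SIZE(flows))) > 0) {
        n_flows += n;
    }
    dpif_flow_dump_thread_destroy(thread);

    VLOG_INFO("%s: dump returned %d flows", dpif_name(dpif), n_flows);
    return dpif_flow_dump_destroy(dump);
}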
992
993 struct dpif_execute_helper_aux {
994 struct dpif *dpif;
995 int error;
996 };
997
998 /* This is called for actions that need the context of the datapath to be
999 * meaningful. */
1000 static void
1001 dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
1002 const struct nlattr *action, bool may_steal OVS_UNUSED)
1003 {
1004 struct dpif_execute_helper_aux *aux = aux_;
1005 int type = nl_attr_type(action);
1006 struct ofpbuf *packet = &packets[0]->ofpbuf;
1007 struct pkt_metadata *md = &packets[0]->md;
1008
1009 ovs_assert(cnt == 1);
1010
1011 switch ((enum ovs_action_attr)type) {
1012 case OVS_ACTION_ATTR_OUTPUT:
1013 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1014 case OVS_ACTION_ATTR_TUNNEL_POP:
1015 case OVS_ACTION_ATTR_USERSPACE:
1016 case OVS_ACTION_ATTR_RECIRC: {
1017 struct dpif_execute execute;
1018 struct ofpbuf execute_actions;
1019 uint64_t stub[256 / 8];
1020
1021 if (md->tunnel.ip_dst) {
1022 /* The Linux kernel datapath throws away the tunnel information
1023 * that we supply as metadata. We have to use a "set" action to
1024 * supply it. */
1025 ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
1026 odp_put_tunnel_action(&md->tunnel, &execute_actions);
1027 ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
1028
1029 execute.actions = ofpbuf_data(&execute_actions);
1030 execute.actions_len = ofpbuf_size(&execute_actions);
1031 } else {
1032 execute.actions = action;
1033 execute.actions_len = NLA_ALIGN(action->nla_len);
1034 }
1035
1036 execute.packet = packet;
1037 execute.md = *md;
1038 execute.needs_help = false;
1039 execute.probe = false;
1040 aux->error = dpif_execute(aux->dpif, &execute);
1041 log_execute_message(aux->dpif, &execute, true, aux->error);
1042
1043 if (md->tunnel.ip_dst) {
1044 ofpbuf_uninit(&execute_actions);
1045 }
1046 break;
1047 }
1048
1049 case OVS_ACTION_ATTR_HASH:
1050 case OVS_ACTION_ATTR_PUSH_VLAN:
1051 case OVS_ACTION_ATTR_POP_VLAN:
1052 case OVS_ACTION_ATTR_PUSH_MPLS:
1053 case OVS_ACTION_ATTR_POP_MPLS:
1054 case OVS_ACTION_ATTR_SET:
1055 case OVS_ACTION_ATTR_SET_MASKED:
1056 case OVS_ACTION_ATTR_SAMPLE:
1057 case OVS_ACTION_ATTR_UNSPEC:
1058 case __OVS_ACTION_ATTR_MAX:
1059 OVS_NOT_REACHED();
1060 }
1061 }
1062
1063 /* Executes 'execute' by performing most of the actions in userspace and
1064 * passing the fully constructed packets to 'dpif' for output and userspace
1065 * actions.
1066 *
1067 * This helps with actions that a given 'dpif' doesn't implement directly. */
1068 static int
1069 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1070 {
1071 struct dpif_execute_helper_aux aux = {dpif, 0};
1072 struct dpif_packet packet, *pp;
1073
1074 COVERAGE_INC(dpif_execute_with_help);
1075
1076 packet.ofpbuf = *execute->packet;
1077 packet.md = execute->md;
1078 pp = &packet;
1079
1080 odp_execute_actions(&aux, &pp, 1, false, execute->actions,
1081 execute->actions_len, dpif_execute_helper_cb);
1082
1083 /* Even though may_steal is set to false, some actions could modify or
1084 * reallocate the ofpbuf memory. We need to pass those changes to the
1085 * caller. */
1086 *execute->packet = packet.ofpbuf;
1087 execute->md = packet.md;
1088
1089 return aux.error;
1090 }
1091
1092 /* Returns true if the datapath needs help executing 'execute'. */
1093 static bool
1094 dpif_execute_needs_help(const struct dpif_execute *execute)
1095 {
1096 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1097 }
1098
1099 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1100 int
1101 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1102 {
1103 if (execute->actions_len) {
1104 struct dpif_op *opp;
1105 struct dpif_op op;
1106
1107 op.type = DPIF_OP_EXECUTE;
1108 op.u.execute = *execute;
1109
1110 opp = &op;
1111 dpif_operate(dpif, &opp, 1);
1112
1113 return op.error;
1114 } else {
1115 return 0;
1116 }
1117 }
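
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * send a ready-made packet out 'out_port' by executing a single
 * OVS_ACTION_ATTR_OUTPUT action.  The function name is hypothetical, and the
 * use of PKT_METADATA_INITIALIZER assumes the caller needs no metadata beyond
 * an unset input port. */
static int
example_send_packet(struct dpif *dpif, struct ofpbuf *packet,
                    odp_port_t out_port)
{
    struct pkt_metadata md = PKT_METADATA_INITIALIZER(ODPP_NONE);
    struct dpif_execute execute;
    uint64_t stub[64 / 8];
    struct ofpbuf actions;
    int error;

    ofpbuf_use_stub(&actions, stub, sizeof stub);
    nl_msg_put_odp_port(&actions, OVS_ACTION_ATTR_OUTPUT, out_port);

    execute.actions = ofpbuf_data(&actions);
    execute.actions_len = ofpbuf_size(&actions);
    execute.packet = packet;
    execute.md = md;
    execute.needs_help = false;
    execute.probe = false;

    error = dpif_execute(dpif, &execute);
    ofpbuf_uninit(&actions);
    return error;
}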
1118
1119 /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1120 * which they are specified. Places each operation's results in the "output"
1121 * members documented in comments, and 0 in the 'error' member on success or a
1122 * positive errno on failure. */
1123 void
1124 dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
1125 {
1126 while (n_ops > 0) {
1127 size_t chunk;
1128
1129 /* Count 'chunk', the number of ops that can be executed without
1130 * needing any help. Ops that need help should be rare, so we
1131 * expect this to ordinarily be 'n_ops', that is, all the ops. */
1132 for (chunk = 0; chunk < n_ops; chunk++) {
1133 struct dpif_op *op = ops[chunk];
1134
1135 if (op->type == DPIF_OP_EXECUTE
1136 && dpif_execute_needs_help(&op->u.execute)) {
1137 break;
1138 }
1139 }
1140
1141 if (chunk) {
1142 /* Execute a chunk full of ops that the dpif provider can
1143 * handle itself, without help. */
1144 size_t i;
1145
1146 dpif->dpif_class->operate(dpif, ops, chunk);
1147
1148 for (i = 0; i < chunk; i++) {
1149 struct dpif_op *op = ops[i];
1150 int error = op->error;
1151
1152 switch (op->type) {
1153 case DPIF_OP_FLOW_PUT: {
1154 struct dpif_flow_put *put = &op->u.flow_put;
1155
1156 COVERAGE_INC(dpif_flow_put);
1157 log_flow_put_message(dpif, put, error);
1158 if (error && put->stats) {
1159 memset(put->stats, 0, sizeof *put->stats);
1160 }
1161 break;
1162 }
1163
1164 case DPIF_OP_FLOW_GET: {
1165 struct dpif_flow_get *get = &op->u.flow_get;
1166
1167 COVERAGE_INC(dpif_flow_get);
1168 if (error) {
1169 memset(get->flow, 0, sizeof *get->flow);
1170 }
1171 log_flow_get_message(dpif, get, error);
1172
1173 break;
1174 }
1175
1176 case DPIF_OP_FLOW_DEL: {
1177 struct dpif_flow_del *del = &op->u.flow_del;
1178
1179 COVERAGE_INC(dpif_flow_del);
1180 log_flow_del_message(dpif, del, error);
1181 if (error && del->stats) {
1182 memset(del->stats, 0, sizeof *del->stats);
1183 }
1184 break;
1185 }
1186
1187 case DPIF_OP_EXECUTE:
1188 COVERAGE_INC(dpif_execute);
1189 log_execute_message(dpif, &op->u.execute, false, error);
1190 break;
1191 }
1192 }
1193
1194 ops += chunk;
1195 n_ops -= chunk;
1196 } else {
1197 /* Help the dpif provider to execute one op. */
1198 struct dpif_op *op = ops[0];
1199
1200 COVERAGE_INC(dpif_execute);
1201 op->error = dpif_execute_with_help(dpif, &op->u.execute);
1202 ops++;
1203 n_ops--;
1204 }
1205 }
1206 }
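
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * delete two flows in one dpif_operate() batch rather than with two calls to
 * dpif_flow_del().  'key1' and 'key2' are assumed to be Netlink-formatted
 * flow keys obtained elsewhere, e.g. from a flow dump; the function name is
 * hypothetical. */
static int
example_batch_flow_del(struct dpif *dpif,
                       const struct nlattr *key1, size_t key1_len,
                       const struct nlattr *key2, size_t key2_len)
{
    struct dpif_op op1, op2;
    struct dpif_op *ops[2];

    op1.type = DPIF_OP_FLOW_DEL;
    op1.u.flow_del.key = key1;
    op1.u.flow_del.key_len = key1_len;
    op1.u.flow_del.stats = NULL;

    op2.type = DPIF_OP_FLOW_DEL;
    op2.u.flow_del.key = key2;
    op2.u.flow_del.key_len = key2_len;
    op2.u.flow_del.stats = NULL;

    ops[0] = &op1;
    ops[1] = &op2;
    dpif_operate(dpif, ops, 2);

    return op1.error ? op1.error : op2.error;
}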
1207
1208 /* Returns a string that represents 'type', for use in log messages. */
1209 const char *
1210 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1211 {
1212 switch (type) {
1213 case DPIF_UC_MISS: return "miss";
1214 case DPIF_UC_ACTION: return "action";
1215 case DPIF_N_UC_TYPES: default: return "<unknown>";
1216 }
1217 }
1218
1219 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1220 * if successful, otherwise a positive errno value.
1221 *
1222 * Turning packet receive off and then back on may change the Netlink PID
1223 * assignments returned by dpif_port_get_pid(). If the client does this, it
1224 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1225 * using the new PID assignment. */
1226 int
1227 dpif_recv_set(struct dpif *dpif, bool enable)
1228 {
1229 int error = 0;
1230
1231 if (dpif->dpif_class->recv_set) {
1232 error = dpif->dpif_class->recv_set(dpif, enable);
1233 log_operation(dpif, "recv_set", error);
1234 }
1235 return error;
1236 }
1237
1238 /* Refreshes the poll loops and Netlink sockets associated with each port
1239 * when the number of upcall handlers (upcall receiving threads) is changed
1240 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1241 * recv_set().
1242 *
1243 * Since multiple upcall handlers can read upcalls simultaneously from
1244 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1245 * handler. So, handlers_set() is responsible for the following tasks:
1246 *
1247 * When receiving upcalls is enabled, extends or creates the
1248 * configuration to support:
1249 *
1250 * - 'n_handlers' Netlink sockets for each port.
1251 *
1252 * - 'n_handlers' poll loops, one for each upcall handler.
1253 *
1254 * - registering the Netlink sockets for the same upcall handler to
1255 * the corresponding poll loop.
1256 *
1257 * Returns 0 if successful, otherwise a positive errno value. */
1258 int
1259 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1260 {
1261 int error = 0;
1262
1263 if (dpif->dpif_class->handlers_set) {
1264 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1265 log_operation(dpif, "handlers_set", error);
1266 }
1267 return error;
1268 }
1269
1270 void
1271 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1272 {
1273 if (dpif->dpif_class->register_upcall_cb) {
1274 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1275 }
1276 }
1277
1278 void
1279 dpif_enable_upcall(struct dpif *dpif)
1280 {
1281 if (dpif->dpif_class->enable_upcall) {
1282 dpif->dpif_class->enable_upcall(dpif);
1283 }
1284 }
1285
1286 void
1287 dpif_disable_upcall(struct dpif *dpif)
1288 {
1289 if (dpif->dpif_class->disable_upcall) {
1290 dpif->dpif_class->disable_upcall(dpif);
1291 }
1292 }
1293
1294 void
1295 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1296 {
1297 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1298 struct ds flow;
1299 char *packet;
1300
1301 packet = ofp_packet_to_string(ofpbuf_data(&upcall->packet),
1302 ofpbuf_size(&upcall->packet));
1303
1304 ds_init(&flow);
1305 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1306
1307 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1308 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1309 ds_cstr(&flow), packet);
1310
1311 ds_destroy(&flow);
1312 free(packet);
1313 }
1314 }
1315
1316 /* If 'dpif' creates its own I/O polling threads, refreshes the poll
1317 * threads' configuration. */
1318 int
1319 dpif_poll_threads_set(struct dpif *dpif, unsigned int n_rxqs,
1320 const char *cmask)
1321 {
1322 int error = 0;
1323
1324 if (dpif->dpif_class->poll_threads_set) {
1325 error = dpif->dpif_class->poll_threads_set(dpif, n_rxqs, cmask);
1326 if (error) {
1327 log_operation(dpif, "poll_threads_set", error);
1328 }
1329 }
1330
1331 return error;
1332 }
1333
1334 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1335 * can be multiple poll loops, 'handler_id' is needed as an index to
1336 * identify the corresponding poll loop. If successful, stores the upcall
1337 * into '*upcall', using 'buf' for storage. Should only be called if
1338 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1339 *
1340 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1341 * 'buf', so their memory cannot be freed separately from 'buf'.
1342 *
1343 * The caller owns the data of 'upcall->packet' and may modify it. If the
1344 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1345 * will be reallocated. This requires the data of 'upcall->packet' to be
1346 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1347 * when an error is returned, the 'upcall->packet' may be uninitialized
1348 * and should not be released.
1349 *
1350 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1351 * if no upcall is immediately available. */
1352 int
1353 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1354 struct ofpbuf *buf)
1355 {
1356 int error = EAGAIN;
1357
1358 if (dpif->dpif_class->recv) {
1359 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1360 if (!error) {
1361 dpif_print_packet(dpif, upcall);
1362 } else if (error != EAGAIN) {
1363 log_operation(dpif, "recv", error);
1364 }
1365 }
1366 return error;
1367 }
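
/* A minimal usage sketch (illustrative only, not part of the upstream file):
 * a single-handler upcall loop.  Handler 0 polls for upcalls, prints them at
 * DBG level, and blocks until more arrive.  The function name and the
 * 'keep_running' flag are hypothetical. */
static void
example_upcall_loop(struct dpif *dpif, const volatile bool *keep_running)
{
    dpif_recv_set(dpif, true);
    dpif_handlers_set(dpif, 1);

    while (*keep_running) {
        struct dpif_upcall upcall;
        struct ofpbuf buf;
        uint64_t stub[4096 / 8];
        int error;

        ofpbuf_use_stub(&buf, stub, sizeof stub);
        error = dpif_recv(dpif, 0, &upcall, &buf);
        if (!error) {
            dpif_print_packet(dpif, &upcall);
        } else if (error == EAGAIN) {
            dpif_recv_wait(dpif, 0);
            poll_block();
        }
        ofpbuf_uninit(&buf);
    }

    dpif_recv_set(dpif, false);
}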
1368
1369 /* Discards all messages that would otherwise be received by dpif_recv() on
1370 * 'dpif'. */
1371 void
1372 dpif_recv_purge(struct dpif *dpif)
1373 {
1374 COVERAGE_INC(dpif_purge);
1375 if (dpif->dpif_class->recv_purge) {
1376 dpif->dpif_class->recv_purge(dpif);
1377 }
1378 }
1379
1380 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1381 * has a message queued to be received with the recv member function.
1382 * Since there can be multiple poll loops, 'handler_id' is needed as an
1383 * index to identify the corresponding poll loop. */
1384 void
1385 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1386 {
1387 if (dpif->dpif_class->recv_wait) {
1388 dpif->dpif_class->recv_wait(dpif, handler_id);
1389 }
1390 }
1391
1392 /*
1393 * Return the datapath version. Caller is responsible for freeing
1394 * the string.
1395 */
1396 char *
1397 dpif_get_dp_version(const struct dpif *dpif)
1398 {
1399 char *version = NULL;
1400
1401 if (dpif->dpif_class->get_datapath_version) {
1402 version = dpif->dpif_class->get_datapath_version();
1403 }
1404
1405 return version;
1406 }
1407
1408 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1409 * and '*engine_id', respectively. */
1410 void
1411 dpif_get_netflow_ids(const struct dpif *dpif,
1412 uint8_t *engine_type, uint8_t *engine_id)
1413 {
1414 *engine_type = dpif->netflow_engine_type;
1415 *engine_id = dpif->netflow_engine_id;
1416 }
1417
1418 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1419 * value used for setting packet priority.
1420 * On success, returns 0 and stores the priority into '*priority'.
1421 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1422 int
1423 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1424 uint32_t *priority)
1425 {
1426 int error = (dpif->dpif_class->queue_to_priority
1427 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1428 priority)
1429 : EOPNOTSUPP);
1430 if (error) {
1431 *priority = 0;
1432 }
1433 log_operation(dpif, "queue_to_priority", error);
1434 return error;
1435 }
1436 \f
1437 void
1438 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1439 const char *name,
1440 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1441 {
1442 dpif->dpif_class = dpif_class;
1443 dpif->base_name = xstrdup(name);
1444 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1445 dpif->netflow_engine_type = netflow_engine_type;
1446 dpif->netflow_engine_id = netflow_engine_id;
1447 }
1448
1449 /* Undoes the results of initialization.
1450 *
1451 * Normally this function only needs to be called from dpif_close().
1452 * However, it may be called by providers due to an error on opening
1453 * that occurs after initialization. In this case dpif_close() would
1454 * never be called. */
1455 void
1456 dpif_uninit(struct dpif *dpif, bool close)
1457 {
1458 char *base_name = dpif->base_name;
1459 char *full_name = dpif->full_name;
1460
1461 if (close) {
1462 dpif->dpif_class->close(dpif);
1463 }
1464
1465 free(base_name);
1466 free(full_name);
1467 }
1468 \f
1469 static void
1470 log_operation(const struct dpif *dpif, const char *operation, int error)
1471 {
1472 if (!error) {
1473 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1474 } else if (ofperr_is_valid(error)) {
1475 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1476 dpif_name(dpif), operation, ofperr_get_name(error));
1477 } else {
1478 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1479 dpif_name(dpif), operation, ovs_strerror(error));
1480 }
1481 }
1482
1483 static enum vlog_level
1484 flow_message_log_level(int error)
1485 {
1486 /* If flows arrive in a batch, userspace may push down multiple
1487 * unique flow definitions that overlap when wildcards are applied.
1488 * Kernels that support flow wildcarding will reject these flows as
1489 * duplicates (EEXIST), so lower the log level to debug for these
1490 * types of messages. */
1491 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1492 }
1493
1494 static bool
1495 should_log_flow_message(int error)
1496 {
1497 return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
1498 error ? &error_rl : &dpmsg_rl);
1499 }
1500
1501 static void
1502 log_flow_message(const struct dpif *dpif, int error, const char *operation,
1503 const struct nlattr *key, size_t key_len,
1504 const struct nlattr *mask, size_t mask_len,
1505 const struct dpif_flow_stats *stats,
1506 const struct nlattr *actions, size_t actions_len)
1507 {
1508 struct ds ds = DS_EMPTY_INITIALIZER;
1509 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1510 if (error) {
1511 ds_put_cstr(&ds, "failed to ");
1512 }
1513 ds_put_format(&ds, "%s ", operation);
1514 if (error) {
1515 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1516 }
1517 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1518 if (stats) {
1519 ds_put_cstr(&ds, ", ");
1520 dpif_flow_stats_format(stats, &ds);
1521 }
1522 if (actions || actions_len) {
1523 ds_put_cstr(&ds, ", actions:");
1524 format_odp_actions(&ds, actions, actions_len);
1525 }
1526 vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
1527 ds_destroy(&ds);
1528 }
1529
1530 static void
1531 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1532 int error)
1533 {
1534 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
1535 struct ds s;
1536
1537 ds_init(&s);
1538 ds_put_cstr(&s, "put");
1539 if (put->flags & DPIF_FP_CREATE) {
1540 ds_put_cstr(&s, "[create]");
1541 }
1542 if (put->flags & DPIF_FP_MODIFY) {
1543 ds_put_cstr(&s, "[modify]");
1544 }
1545 if (put->flags & DPIF_FP_ZERO_STATS) {
1546 ds_put_cstr(&s, "[zero]");
1547 }
1548 log_flow_message(dpif, error, ds_cstr(&s),
1549 put->key, put->key_len, put->mask, put->mask_len,
1550 put->stats, put->actions, put->actions_len);
1551 ds_destroy(&s);
1552 }
1553 }
1554
1555 static void
1556 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1557 int error)
1558 {
1559 if (should_log_flow_message(error)) {
1560 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1561 NULL, 0, !error ? del->stats : NULL, NULL, 0);
1562 }
1563 }
1564
1565 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1566 * (0 for success). 'subexecute' should be true if the execution is a result
1567 * of breaking down a larger execution that needed help, false otherwise.
1568 *
1569 *
1570 * XXX In theory, the log message could be deceptive because this function is
1571 * called after the dpif_provider's '->execute' function, which is allowed to
1572 * modify execute->packet and execute->md. In practice, though:
1573 *
1574 * - dpif-netlink doesn't modify execute->packet or execute->md.
1575 *
1576 * - dpif-netdev does modify them but it is less likely to have problems
1577 * because it is built into ovs-vswitchd and cannot have version skew,
1578 * etc.
1579 *
1580 * It would still be better to avoid the potential problem. I don't know of a
1581 * good way to do that, though, that isn't expensive. */
1582 static void
1583 log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
1584 bool subexecute, int error)
1585 {
1586 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1587 && !execute->probe) {
1588 struct ds ds = DS_EMPTY_INITIALIZER;
1589 char *packet;
1590
1591 packet = ofp_packet_to_string(ofpbuf_data(execute->packet),
1592 ofpbuf_size(execute->packet));
1593 ds_put_format(&ds, "%s: %sexecute ",
1594 dpif_name(dpif),
1595 (subexecute ? "sub-"
1596 : dpif_execute_needs_help(execute) ? "super-"
1597 : ""));
1598 format_odp_actions(&ds, execute->actions, execute->actions_len);
1599 if (error) {
1600 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1601 }
1602 ds_put_format(&ds, " on packet %s", packet);
1603 vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1604 ds_destroy(&ds);
1605 free(packet);
1606 }
1607 }
1608
1609 static void
1610 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1611 int error)
1612 {
1613 if (should_log_flow_message(error)) {
1614 log_flow_message(dpif, error, "flow_get",
1615 get->key, get->key_len,
1616 get->flow->mask, get->flow->mask_len,
1617 &get->flow->stats,
1618 get->flow->actions, get->flow->actions_len);
1619 }
1620 }
1621
1622 bool
1623 dpif_supports_tnl_push_pop(const struct dpif *dpif)
1624 {
1625 return !strcmp(dpif->dpif_class->type, "netdev") ||
1626 !strcmp(dpif->dpif_class->type, "dummy");
1627 }