lib/dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dynamic-string.h"
29 #include "flow.h"
30 #include "netdev.h"
31 #include "netlink.h"
32 #include "odp-execute.h"
33 #include "odp-util.h"
34 #include "ofp-errors.h"
35 #include "ofp-print.h"
36 #include "ofp-util.h"
37 #include "ofpbuf.h"
38 #include "packet-dpif.h"
39 #include "packets.h"
40 #include "poll-loop.h"
41 #include "shash.h"
42 #include "sset.h"
43 #include "timeval.h"
44 #include "util.h"
45 #include "valgrind.h"
46 #include "vlog.h"
47
48 VLOG_DEFINE_THIS_MODULE(dpif);
49
50 COVERAGE_DEFINE(dpif_destroy);
51 COVERAGE_DEFINE(dpif_port_add);
52 COVERAGE_DEFINE(dpif_port_del);
53 COVERAGE_DEFINE(dpif_flow_flush);
54 COVERAGE_DEFINE(dpif_flow_get);
55 COVERAGE_DEFINE(dpif_flow_put);
56 COVERAGE_DEFINE(dpif_flow_del);
57 COVERAGE_DEFINE(dpif_execute);
58 COVERAGE_DEFINE(dpif_purge);
59 COVERAGE_DEFINE(dpif_execute_with_help);
60
61 static const struct dpif_class *base_dpif_classes[] = {
62 #if defined(__linux__) || defined(_WIN32)
63 &dpif_netlink_class,
64 #endif
65 &dpif_netdev_class,
66 };
67
68 struct registered_dpif_class {
69 const struct dpif_class *dpif_class;
70 int refcount;
71 };
72 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
73 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
74
75 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
76 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
77
78 /* Rate limit for individual messages going to or from the datapath, output at
79 * DBG level. This is very high because, if these are enabled, it is because
80 * we really need to see them. */
81 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
82
83 /* Not really much point in logging many dpif errors. */
84 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
85
86 static void log_flow_message(const struct dpif *dpif, int error,
87 const char *operation,
88 const struct nlattr *key, size_t key_len,
89 const struct nlattr *mask, size_t mask_len,
90 const struct dpif_flow_stats *stats,
91 const struct nlattr *actions, size_t actions_len);
92 static void log_operation(const struct dpif *, const char *operation,
93 int error);
94 static bool should_log_flow_message(int error);
95 static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
96 int error);
97 static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
98 int error);
99 static void log_execute_message(struct dpif *, const struct dpif_execute *,
100 bool subexecute, int error);
101 static void log_flow_get_message(const struct dpif *,
102 const struct dpif_flow_get *, int error);
103
104 static void
105 dp_initialize(void)
106 {
107 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
108
109 if (ovsthread_once_start(&once)) {
110 int i;
111
112 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
113 dp_register_provider(base_dpif_classes[i]);
114 }
115 dpctl_unixctl_register();
116 ovsthread_once_done(&once);
117 }
118 }
119
120 static int
121 dp_register_provider__(const struct dpif_class *new_class)
122 {
123 struct registered_dpif_class *registered_class;
124
125 if (sset_contains(&dpif_blacklist, new_class->type)) {
126 VLOG_DBG("attempted to register blacklisted provider: %s",
127 new_class->type);
128 return EINVAL;
129 }
130
131 if (shash_find(&dpif_classes, new_class->type)) {
132 VLOG_WARN("attempted to register duplicate datapath provider: %s",
133 new_class->type);
134 return EEXIST;
135 }
136
137 registered_class = xmalloc(sizeof *registered_class);
138 registered_class->dpif_class = new_class;
139 registered_class->refcount = 0;
140
141 shash_add(&dpif_classes, new_class->type, registered_class);
142
143 return 0;
144 }
145
146 /* Registers a new datapath provider. After successful registration, new
147 * datapaths of that type can be opened using dpif_open(). */
148 int
149 dp_register_provider(const struct dpif_class *new_class)
150 {
151 int error;
152
153 ovs_mutex_lock(&dpif_mutex);
154 error = dp_register_provider__(new_class);
155 ovs_mutex_unlock(&dpif_mutex);
156
157 return error;
158 }
159
160 /* Unregisters a datapath provider. 'type' must have been previously
161 * registered and not currently be in use by any dpifs. After unregistration
162 * new datapaths of that type cannot be opened using dpif_open(). */
163 static int
164 dp_unregister_provider__(const char *type)
165 {
166 struct shash_node *node;
167 struct registered_dpif_class *registered_class;
168
169 node = shash_find(&dpif_classes, type);
170 if (!node) {
171 VLOG_WARN("attempted to unregister a datapath provider that is not "
172 "registered: %s", type);
173 return EAFNOSUPPORT;
174 }
175
176 registered_class = node->data;
177 if (registered_class->refcount) {
178 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
179 return EBUSY;
180 }
181
182 shash_delete(&dpif_classes, node);
183 free(registered_class);
184
185 return 0;
186 }
187
188 /* Unregisters a datapath provider. 'type' must have been previously
189 * registered and not currently be in use by any dpifs. After unregistration
190 * new datapaths of that type cannot be opened using dpif_open(). */
191 int
192 dp_unregister_provider(const char *type)
193 {
194 int error;
195
196 dp_initialize();
197
198 ovs_mutex_lock(&dpif_mutex);
199 error = dp_unregister_provider__(type);
200 ovs_mutex_unlock(&dpif_mutex);
201
202 return error;
203 }
204
205 /* Blacklists a provider. Causes future calls of dp_register_provider() with
206 * a dpif_class which implements 'type' to fail. */
207 void
208 dp_blacklist_provider(const char *type)
209 {
210 ovs_mutex_lock(&dpif_mutex);
211 sset_add(&dpif_blacklist, type);
212 ovs_mutex_unlock(&dpif_mutex);
213 }
214
215 /* Clears 'types' and enumerates the types of all currently registered datapath
216 * providers into it. The caller must first initialize the sset. */
217 void
218 dp_enumerate_types(struct sset *types)
219 {
220 struct shash_node *node;
221
222 dp_initialize();
223 sset_clear(types);
224
225 ovs_mutex_lock(&dpif_mutex);
226 SHASH_FOR_EACH(node, &dpif_classes) {
227 const struct registered_dpif_class *registered_class = node->data;
228 sset_add(types, registered_class->dpif_class->type);
229 }
230 ovs_mutex_unlock(&dpif_mutex);
231 }
232
233 static void
234 dp_class_unref(struct registered_dpif_class *rc)
235 {
236 ovs_mutex_lock(&dpif_mutex);
237 ovs_assert(rc->refcount);
238 rc->refcount--;
239 ovs_mutex_unlock(&dpif_mutex);
240 }
241
242 static struct registered_dpif_class *
243 dp_class_lookup(const char *type)
244 {
245 struct registered_dpif_class *rc;
246
247 ovs_mutex_lock(&dpif_mutex);
248 rc = shash_find_data(&dpif_classes, type);
249 if (rc) {
250 rc->refcount++;
251 }
252 ovs_mutex_unlock(&dpif_mutex);
253
254 return rc;
255 }
256
257 /* Clears 'names' and enumerates the names of all known created datapaths with
258 * the given 'type'. The caller must first initialize the sset. Returns 0 if
259 * successful, otherwise a positive errno value.
260 *
261 * Some kinds of datapaths might not be practically enumerable. This is not
262 * considered an error. */
263 int
264 dp_enumerate_names(const char *type, struct sset *names)
265 {
266 struct registered_dpif_class *registered_class;
267 const struct dpif_class *dpif_class;
268 int error;
269
270 dp_initialize();
271 sset_clear(names);
272
273 registered_class = dp_class_lookup(type);
274 if (!registered_class) {
275 VLOG_WARN("could not enumerate unknown type: %s", type);
276 return EAFNOSUPPORT;
277 }
278
279 dpif_class = registered_class->dpif_class;
280 error = (dpif_class->enumerate
281 ? dpif_class->enumerate(names, dpif_class)
282 : 0);
283 if (error) {
284 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
285 ovs_strerror(error));
286 }
287 dp_class_unref(registered_class);
288
289 return error;
290 }
291
292 /* Parses 'datapath_name_', which is of the form [type@]name into its
293 * component pieces. 'name' and 'type' must be freed by the caller.
294 *
295 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
296 void
297 dp_parse_name(const char *datapath_name_, char **name, char **type)
298 {
299 char *datapath_name = xstrdup(datapath_name_);
300 char *separator;
301
302 separator = strchr(datapath_name, '@');
303 if (separator) {
304 *separator = '\0';
305 *type = datapath_name;
306 *name = xstrdup(dpif_normalize_type(separator + 1));
307 } else {
308 *name = datapath_name;
309 *type = xstrdup(dpif_normalize_type(NULL));
310 }
311 }
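
/* Example (illustrative sketch, not part of the library): parsing a datapath
 * name with dp_parse_name().  The caller owns and must free both strings.
 *
 *     char *name, *type;
 *
 *     dp_parse_name("netdev@br0", &name, &type);
 *     // name = "br0", type = "netdev"
 *     free(name);
 *     free(type);
 *
 *     dp_parse_name("br0", &name, &type);
 *     // name = "br0", type = "system" (the normalized default)
 *     free(name);
 *     free(type);
 */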
312
313 static int
314 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
315 {
316 struct dpif *dpif = NULL;
317 int error;
318 struct registered_dpif_class *registered_class;
319
320 dp_initialize();
321
322 type = dpif_normalize_type(type);
323 registered_class = dp_class_lookup(type);
324 if (!registered_class) {
325 VLOG_WARN("could not create datapath %s of unknown type %s", name,
326 type);
327 error = EAFNOSUPPORT;
328 goto exit;
329 }
330
331 error = registered_class->dpif_class->open(registered_class->dpif_class,
332 name, create, &dpif);
333 if (!error) {
334 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
335 } else {
336 dp_class_unref(registered_class);
337 }
338
339 exit:
340 *dpifp = error ? NULL : dpif;
341 return error;
342 }
343
344 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
345 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
346 * the empty string to specify the default system type. Returns 0 if
347 * successful, otherwise a positive errno value. On success stores a pointer
348 * to the datapath in '*dpifp', otherwise a null pointer. */
349 int
350 dpif_open(const char *name, const char *type, struct dpif **dpifp)
351 {
352 return do_open(name, type, false, dpifp);
353 }
354
355 /* Tries to create and open a new datapath with the given 'name' and 'type'.
356 * 'type' may be either NULL or the empty string to specify the default system
357 * type. Will fail if a datapath with 'name' and 'type' already exists.
358 * Returns 0 if successful, otherwise a positive errno value. On success
359 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
360 int
361 dpif_create(const char *name, const char *type, struct dpif **dpifp)
362 {
363 return do_open(name, type, true, dpifp);
364 }
365
366 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
367 * does not exist. 'type' may be either NULL or the empty string to specify
368 * the default system type. Returns 0 if successful, otherwise a positive
369 * errno value. On success stores a pointer to the datapath in '*dpifp',
370 * otherwise a null pointer. */
371 int
372 dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
373 {
374 int error;
375
376 error = dpif_create(name, type, dpifp);
377 if (error == EEXIST || error == EBUSY) {
378 error = dpif_open(name, type, dpifp);
379 if (error) {
380 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
381 name, ovs_strerror(error));
382 }
383 } else if (error) {
384 VLOG_WARN("failed to create datapath %s: %s",
385 name, ovs_strerror(error));
386 }
387 return error;
388 }
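
/* Example (a minimal sketch; the datapath name and error handling are
 * illustrative): opening or creating a datapath and closing it again.
 *
 *     struct dpif *dpif;
 *     int error;
 *
 *     error = dpif_create_and_open("ovs-system", "system", &dpif);
 *     if (!error) {
 *         ... use 'dpif' ...
 *         dpif_close(dpif);
 *     }
 */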
389
390 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
391 * itself; call dpif_delete() first, instead, if that is desirable. */
392 void
393 dpif_close(struct dpif *dpif)
394 {
395 if (dpif) {
396 struct registered_dpif_class *rc;
397
398 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
399 dpif_uninit(dpif, true);
400 dp_class_unref(rc);
401 }
402 }
403
404 /* Performs periodic work needed by 'dpif'. */
405 void
406 dpif_run(struct dpif *dpif)
407 {
408 if (dpif->dpif_class->run) {
409 dpif->dpif_class->run(dpif);
410 }
411 }
412
413 /* Arranges for poll_block() to wake up when dpif_run() needs to be called for
414 * 'dpif'. */
415 void
416 dpif_wait(struct dpif *dpif)
417 {
418 if (dpif->dpif_class->wait) {
419 dpif->dpif_class->wait(dpif);
420 }
421 }
422
423 /* Returns the name of datapath 'dpif' prefixed with the type
424 * (for use in log messages). */
425 const char *
426 dpif_name(const struct dpif *dpif)
427 {
428 return dpif->full_name;
429 }
430
431 /* Returns the name of datapath 'dpif' without the type
432 * (for use in device names). */
433 const char *
434 dpif_base_name(const struct dpif *dpif)
435 {
436 return dpif->base_name;
437 }
438
439 /* Returns the type of datapath 'dpif'. */
440 const char *
441 dpif_type(const struct dpif *dpif)
442 {
443 return dpif->dpif_class->type;
444 }
445
446 /* Returns the fully spelled out name for the given datapath 'type'.
447 *
448 * Normalized type strings can be compared with strcmp(). Unnormalized type
449 * strings might refer to the same type even if they have different spellings.
450 const char *
451 dpif_normalize_type(const char *type)
452 {
453 return type && type[0] ? type : "system";
454 }
455
456 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
457 * ports. After calling this function, it does not make sense to pass 'dpif'
458 * to any functions other than dpif_name() or dpif_close(). */
459 int
460 dpif_delete(struct dpif *dpif)
461 {
462 int error;
463
464 COVERAGE_INC(dpif_destroy);
465
466 error = dpif->dpif_class->destroy(dpif);
467 log_operation(dpif, "delete", error);
468 return error;
469 }
470
471 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
472 * otherwise a positive errno value. */
473 int
474 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
475 {
476 int error = dpif->dpif_class->get_stats(dpif, stats);
477 if (error) {
478 memset(stats, 0, sizeof *stats);
479 }
480 log_operation(dpif, "get_stats", error);
481 return error;
482 }
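
/* Example (sketch): reading datapath-wide statistics.  The field names used
 * below (n_hit, n_missed, n_lost, n_flows) are assumed from struct
 * dpif_dp_stats in dpif.h.
 *
 *     struct dpif_dp_stats stats;
 *
 *     if (!dpif_get_dp_stats(dpif, &stats)) {
 *         VLOG_INFO("hit:%"PRIu64" missed:%"PRIu64" lost:%"PRIu64
 *                   " flows:%"PRIu64,
 *                   stats.n_hit, stats.n_missed, stats.n_lost, stats.n_flows);
 *     }
 */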
483
484 const char *
485 dpif_port_open_type(const char *datapath_type, const char *port_type)
486 {
487 struct registered_dpif_class *rc;
488
489 datapath_type = dpif_normalize_type(datapath_type);
490
491 ovs_mutex_lock(&dpif_mutex);
492 rc = shash_find_data(&dpif_classes, datapath_type);
493 if (rc && rc->dpif_class->port_open_type) {
494 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
495 }
496 ovs_mutex_unlock(&dpif_mutex);
497
498 return port_type;
499 }
500
501 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
502 * non-null and its value is not ODPP_NONE, then attempts to use the
503 * value as the port number.
504 *
505 * If successful, returns 0 and sets '*port_nop' to the new port's port
506 * number (if 'port_nop' is non-null). On failure, returns a positive
507 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
508 * non-null). */
509 int
510 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
511 {
512 const char *netdev_name = netdev_get_name(netdev);
513 odp_port_t port_no = ODPP_NONE;
514 int error;
515
516 COVERAGE_INC(dpif_port_add);
517
518 if (port_nop) {
519 port_no = *port_nop;
520 }
521
522 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
523 if (!error) {
524 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
525 dpif_name(dpif), netdev_name, port_no);
526 } else {
527 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
528 dpif_name(dpif), netdev_name, ovs_strerror(error));
529 port_no = ODPP_NONE;
530 }
531 if (port_nop) {
532 *port_nop = port_no;
533 }
534 return error;
535 }
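
/* Example (sketch, assuming the netdev_open()/netdev_close() API from
 * netdev.h): adding an existing network device to a datapath and letting the
 * datapath pick the port number.
 *
 *     struct netdev *netdev;
 *     odp_port_t port_no = ODPP_NONE;
 *     int error;
 *
 *     error = netdev_open("eth0", "system", &netdev);
 *     if (!error) {
 *         error = dpif_port_add(dpif, netdev, &port_no);
 *         netdev_close(netdev);
 *     }
 */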
536
537 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
538 * otherwise a positive errno value. */
539 int
540 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
541 {
542 int error;
543
544 COVERAGE_INC(dpif_port_del);
545
546 error = dpif->dpif_class->port_del(dpif, port_no);
547 if (!error) {
548 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
549 dpif_name(dpif), port_no);
550 } else {
551 log_operation(dpif, "port_del", error);
552 }
553 return error;
554 }
555
556 /* Makes a deep copy of 'src' into 'dst'. */
557 void
558 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
559 {
560 dst->name = xstrdup(src->name);
561 dst->type = xstrdup(src->type);
562 dst->port_no = src->port_no;
563 }
564
565 /* Frees memory allocated to members of 'dpif_port'.
566 *
567 * Do not call this function on a dpif_port obtained from
568 * dpif_port_dump_next(): that function retains ownership of the data in the
569 * dpif_port. */
570 void
571 dpif_port_destroy(struct dpif_port *dpif_port)
572 {
573 free(dpif_port->name);
574 free(dpif_port->type);
575 }
576
577 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
578 * true; otherwise, returns false. */
579 bool
580 dpif_port_exists(const struct dpif *dpif, const char *devname)
581 {
582 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
583 if (error != 0 && error != ENOENT && error != ENODEV) {
584 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
585 dpif_name(dpif), devname, ovs_strerror(error));
586 }
587
588 return !error;
589 }
590
591 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
592 * initializes '*port' appropriately; on failure, returns a positive errno
593 * value.
594 *
595 * The caller owns the data in 'port' and must free it with
596 * dpif_port_destroy() when it is no longer needed. */
597 int
598 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
599 struct dpif_port *port)
600 {
601 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
602 if (!error) {
603 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
604 dpif_name(dpif), port_no, port->name);
605 } else {
606 memset(port, 0, sizeof *port);
607 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
608 dpif_name(dpif), port_no, ovs_strerror(error));
609 }
610 return error;
611 }
612
613 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
614 * initializes '*port' appropriately; on failure, returns a positive errno
615 * value.
616 *
617 * The caller owns the data in 'port' and must free it with
618 * dpif_port_destroy() when it is no longer needed. */
619 int
620 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
621 struct dpif_port *port)
622 {
623 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
624 if (!error) {
625 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
626 dpif_name(dpif), devname, port->port_no);
627 } else {
628 memset(port, 0, sizeof *port);
629
630 /* For ENOENT or ENODEV we use DBG level because the caller is probably
631 * interested in whether 'dpif' actually has a port 'devname', so that
632 * it's not an issue worth logging if it doesn't. Other errors are
633 * uncommon and more likely to indicate a real problem. */
634 VLOG_RL(&error_rl,
635 error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
636 "%s: failed to query port %s: %s",
637 dpif_name(dpif), devname, ovs_strerror(error));
638 }
639 return error;
640 }
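
/* Example (sketch): looking up a port by device name.  On success the caller
 * frees the returned data with dpif_port_destroy().
 *
 *     struct dpif_port port;
 *
 *     if (!dpif_port_query_by_name(dpif, "eth0", &port)) {
 *         VLOG_INFO("eth0 is port %"PRIu32, port.port_no);
 *         dpif_port_destroy(&port);
 *     }
 */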
641
642 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
643 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
644 * flows whose packets arrived on port 'port_no'. In the case where the
645 * provider allocates multiple Netlink PIDs to a single port, it may use
646 * 'hash' to spread load among them. The caller need not use a particular
647 * hash function; a 5-tuple hash is suitable.
648 *
649 * (The datapath implementation might use some different hash function for
650 * distributing packets received via flow misses among PIDs. This means
651 * that packets received via flow misses might be reordered relative to
652 * packets received via userspace actions. This is not ordinarily a
653 * problem.)
654 *
655 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
656 * allocated to any port, that the client may use for special purposes.
657 *
658 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
659 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
660 * disabled and then re-enabled, so a client that does that must be prepared to
661 * update all of the flows that it installed that contain
662 * OVS_ACTION_ATTR_USERSPACE actions. */
663 uint32_t
664 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
665 {
666 return (dpif->dpif_class->port_get_pid
667 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
668 : 0);
669 }
670
671 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
672 * the port's name into the 'name_size' bytes in 'name', ensuring that the
673 * result is null-terminated. On failure, returns a positive errno value and
674 * makes 'name' the empty string. */
675 int
676 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
677 char *name, size_t name_size)
678 {
679 struct dpif_port port;
680 int error;
681
682 ovs_assert(name_size > 0);
683
684 error = dpif_port_query_by_number(dpif, port_no, &port);
685 if (!error) {
686 ovs_strlcpy(name, port.name, name_size);
687 dpif_port_destroy(&port);
688 } else {
689 *name = '\0';
690 }
691 return error;
692 }
693
694 /* Initializes 'dump' to begin dumping the ports in a dpif.
695 *
696 * This function provides no status indication. An error status for the entire
697 * dump operation is provided when it is completed by calling
698 * dpif_port_dump_done().
699 */
700 void
701 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
702 {
703 dump->dpif = dpif;
704 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
705 log_operation(dpif, "port_dump_start", dump->error);
706 }
707
708 /* Attempts to retrieve another port from 'dump', which must have been
709 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
710 * into 'port' and returns true. On failure, returns false.
711 *
712 * Failure might indicate an actual error or merely that the last port has been
713 * dumped. An error status for the entire dump operation is provided when it
714 * is completed by calling dpif_port_dump_done().
715 *
716 * The dpif owns the data stored in 'port'. It will remain valid until at
717 * least the next time 'dump' is passed to dpif_port_dump_next() or
718 * dpif_port_dump_done(). */
719 bool
720 dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
721 {
722 const struct dpif *dpif = dump->dpif;
723
724 if (dump->error) {
725 return false;
726 }
727
728 dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
729 if (dump->error == EOF) {
730 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
731 } else {
732 log_operation(dpif, "port_dump_next", dump->error);
733 }
734
735 if (dump->error) {
736 dpif->dpif_class->port_dump_done(dpif, dump->state);
737 return false;
738 }
739 return true;
740 }
741
742 /* Completes port table dump operation 'dump', which must have been initialized
743 * with dpif_port_dump_start(). Returns 0 if the dump operation was
744 * error-free, otherwise a positive errno value describing the problem. */
745 int
746 dpif_port_dump_done(struct dpif_port_dump *dump)
747 {
748 const struct dpif *dpif = dump->dpif;
749 if (!dump->error) {
750 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
751 log_operation(dpif, "port_dump_done", dump->error);
752 }
753 return dump->error == EOF ? 0 : dump->error;
754 }
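
/* Example (sketch): a complete port dump using the start/next/done functions
 * above.  (dpif.h also provides a DPIF_PORT_FOR_EACH convenience macro that
 * wraps this pattern.)
 *
 *     struct dpif_port_dump dump;
 *     struct dpif_port port;
 *     int error;
 *
 *     dpif_port_dump_start(&dump, dpif);
 *     while (dpif_port_dump_next(&dump, &port)) {
 *         VLOG_INFO("port %"PRIu32": %s (%s)",
 *                   port.port_no, port.name, port.type);
 *     }
 *     error = dpif_port_dump_done(&dump);
 */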
755
756 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
757 * 'dpif' has changed, this function does one of the following:
758 *
759 * - Stores the name of the device that was added to or deleted from 'dpif' in
760 * '*devnamep' and returns 0. The caller is responsible for freeing
761 * '*devnamep' (with free()) when it no longer needs it.
762 *
763 * - Returns ENOBUFS and sets '*devnamep' to NULL.
764 *
765 * This function may also return 'false positives', where it returns 0 and
766 * '*devnamep' names a device that was not actually added or deleted or it
767 * returns ENOBUFS without any change.
768 *
769 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
770 * return other positive errno values to indicate that something has gone
771 * wrong. */
772 int
773 dpif_port_poll(const struct dpif *dpif, char **devnamep)
774 {
775 int error = dpif->dpif_class->port_poll(dpif, devnamep);
776 if (error) {
777 *devnamep = NULL;
778 }
779 return error;
780 }
781
782 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
783 * value other than EAGAIN. */
784 void
785 dpif_port_poll_wait(const struct dpif *dpif)
786 {
787 dpif->dpif_class->port_poll_wait(dpif);
788 }
789
790 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
791 * arguments must have been initialized through a call to flow_extract().
792 * 'used' is stored into stats->used. */
793 void
794 dpif_flow_stats_extract(const struct flow *flow, const struct ofpbuf *packet,
795 long long int used, struct dpif_flow_stats *stats)
796 {
797 stats->tcp_flags = ntohs(flow->tcp_flags);
798 stats->n_bytes = ofpbuf_size(packet);
799 stats->n_packets = 1;
800 stats->used = used;
801 }
802
803 /* Appends a human-readable representation of 'stats' to 's'. */
804 void
805 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
806 {
807 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
808 stats->n_packets, stats->n_bytes);
809 if (stats->used) {
810 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
811 } else {
812 ds_put_format(s, "never");
813 }
814 if (stats->tcp_flags) {
815 ds_put_cstr(s, ", flags:");
816 packet_format_tcp_flags(s, stats->tcp_flags);
817 }
818 }
819
820 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
821 * positive errno value. */
822 int
823 dpif_flow_flush(struct dpif *dpif)
824 {
825 int error;
826
827 COVERAGE_INC(dpif_flow_flush);
828
829 error = dpif->dpif_class->flow_flush(dpif);
830 log_operation(dpif, "flow_flush", error);
831 return error;
832 }
833
834 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
835 int
836 dpif_flow_get(struct dpif *dpif,
837 const struct nlattr *key, size_t key_len,
838 struct ofpbuf *buf, struct dpif_flow *flow)
839 {
840 struct dpif_op *opp;
841 struct dpif_op op;
842
843 op.type = DPIF_OP_FLOW_GET;
844 op.u.flow_get.key = key;
845 op.u.flow_get.key_len = key_len;
846 op.u.flow_get.buffer = buf;
847 op.u.flow_get.flow = flow;
848 op.u.flow_get.flow->key = key;
849 op.u.flow_get.flow->key_len = key_len;
850
851 opp = &op;
852 dpif_operate(dpif, &opp, 1);
853
854 return op.error;
855 }
856
857 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
858 int
859 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
860 const struct nlattr *key, size_t key_len,
861 const struct nlattr *mask, size_t mask_len,
862 const struct nlattr *actions, size_t actions_len,
863 struct dpif_flow_stats *stats)
864 {
865 struct dpif_op *opp;
866 struct dpif_op op;
867
868 op.type = DPIF_OP_FLOW_PUT;
869 op.u.flow_put.flags = flags;
870 op.u.flow_put.key = key;
871 op.u.flow_put.key_len = key_len;
872 op.u.flow_put.mask = mask;
873 op.u.flow_put.mask_len = mask_len;
874 op.u.flow_put.actions = actions;
875 op.u.flow_put.actions_len = actions_len;
876 op.u.flow_put.stats = stats;
877
878 opp = &op;
879 dpif_operate(dpif, &opp, 1);
880
881 return op.error;
882 }
883
884 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
885 int
886 dpif_flow_del(struct dpif *dpif,
887 const struct nlattr *key, size_t key_len,
888 struct dpif_flow_stats *stats)
889 {
890 struct dpif_op *opp;
891 struct dpif_op op;
892
893 op.type = DPIF_OP_FLOW_DEL;
894 op.u.flow_del.key = key;
895 op.u.flow_del.key_len = key_len;
896 op.u.flow_del.stats = stats;
897
898 opp = &op;
899 dpif_operate(dpif, &opp, 1);
900
901 return op.error;
902 }
903
904 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
905 * flows in 'dpif'.
906 *
907 * This function always successfully returns a dpif_flow_dump. Error
908 * reporting is deferred to dpif_flow_dump_destroy(). */
909 struct dpif_flow_dump *
910 dpif_flow_dump_create(const struct dpif *dpif)
911 {
912 return dpif->dpif_class->flow_dump_create(dpif);
913 }
914
915 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
916 * All dpif_flow_dump_thread structures previously created for 'dump' must
917 * previously have been destroyed.
918 *
919 * Returns 0 if the dump operation was error-free, otherwise a positive errno
920 * value describing the problem. */
921 int
922 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
923 {
924 const struct dpif *dpif = dump->dpif;
925 int error = dpif->dpif_class->flow_dump_destroy(dump);
926 log_operation(dpif, "flow_dump_destroy", error);
927 return error == EOF ? 0 : error;
928 }
929
930 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
931 struct dpif_flow_dump_thread *
932 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
933 {
934 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
935 }
936
937 /* Releases 'thread'. */
938 void
939 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
940 {
941 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
942 }
943
944 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
945 * if and only if no flows remained to be retrieved, otherwise a positive
946 * number reflecting the number of elements in 'flows[]' that were updated.
947 * The number of flows returned might be less than 'max_flows' because
948 * fewer than 'max_flows' remained, because this particular datapath does not
949 * benefit from batching, or because an error occurred partway through
950 * retrieval. Thus, the caller should continue calling until a 0 return value,
951 * even if intermediate return values are less than 'max_flows'.
952 *
953 * No error status is immediately provided. An error status for the entire
954 * dump operation is provided when it is completed by calling
955 * dpif_flow_dump_destroy().
956 *
957 * All of the data stored into 'flows' is owned by the datapath, not by the
958 * caller, and the caller must not modify or free it. The datapath guarantees
959 * that it remains accessible and unchanged until the first of:
960 * - The next call to dpif_flow_dump_next() for 'thread', or
961 * - The next rcu quiescent period. */
962 int
963 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
964 struct dpif_flow *flows, int max_flows)
965 {
966 struct dpif *dpif = thread->dpif;
967 int n;
968
969 ovs_assert(max_flows > 0);
970 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
971 if (n > 0) {
972 struct dpif_flow *f;
973
974 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
975 log_flow_message(dpif, 0, "flow_dump",
976 f->key, f->key_len, f->mask, f->mask_len,
977 &f->stats, f->actions, f->actions_len);
978 }
979 } else {
980 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
981 }
982 return n;
983 }
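
/* Example (sketch): a single-threaded flow dump using the functions above.
 * Each flow's key, mask, actions, and stats point into datapath-owned memory
 * and remain valid only as described in the comment on dpif_flow_dump_next().
 *
 *     struct dpif_flow_dump *dump = dpif_flow_dump_create(dpif);
 *     struct dpif_flow_dump_thread *thread
 *         = dpif_flow_dump_thread_create(dump);
 *     struct dpif_flow flows[50];
 *     int n;
 *
 *     while ((n = dpif_flow_dump_next(thread, flows, ARRAY_SIZE(flows)))) {
 *         int i;
 *
 *         for (i = 0; i < n; i++) {
 *             ... examine flows[i].key, flows[i].actions, flows[i].stats ...
 *         }
 *     }
 *     dpif_flow_dump_thread_destroy(thread);
 *     if (dpif_flow_dump_destroy(dump)) {
 *         ... the dump hit an error partway through ...
 *     }
 */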
984
985 struct dpif_execute_helper_aux {
986 struct dpif *dpif;
987 int error;
988 };
989
990 /* This is called for actions that need the context of the datapath to be
991 * meaningful. */
992 static void
993 dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
994 struct pkt_metadata *md,
995 const struct nlattr *action, bool may_steal OVS_UNUSED)
996 {
997 struct dpif_execute_helper_aux *aux = aux_;
998 int type = nl_attr_type(action);
999 struct ofpbuf *packet = &packets[0]->ofpbuf;
1000
1001 ovs_assert(cnt == 1);
1002
1003 switch ((enum ovs_action_attr)type) {
1004 case OVS_ACTION_ATTR_OUTPUT:
1005 case OVS_ACTION_ATTR_USERSPACE:
1006 case OVS_ACTION_ATTR_RECIRC: {
1007 struct dpif_execute execute;
1008 struct ofpbuf execute_actions;
1009 uint64_t stub[256 / 8];
1010
1011 if (md->tunnel.ip_dst) {
1012 /* The Linux kernel datapath throws away the tunnel information
1013 * that we supply as metadata. We have to use a "set" action to
1014 * supply it. */
1015 ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
1016 odp_put_tunnel_action(&md->tunnel, &execute_actions);
1017 ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
1018
1019 execute.actions = ofpbuf_data(&execute_actions);
1020 execute.actions_len = ofpbuf_size(&execute_actions);
1021 } else {
1022 execute.actions = action;
1023 execute.actions_len = NLA_ALIGN(action->nla_len);
1024 }
1025
1026 execute.packet = packet;
1027 execute.md = *md;
1028 execute.needs_help = false;
1029 execute.probe = false;
1030 aux->error = dpif_execute(aux->dpif, &execute);
1031 log_execute_message(aux->dpif, &execute, true, aux->error);
1032
1033 if (md->tunnel.ip_dst) {
1034 ofpbuf_uninit(&execute_actions);
1035 }
1036 break;
1037 }
1038
1039 case OVS_ACTION_ATTR_HASH:
1040 case OVS_ACTION_ATTR_PUSH_VLAN:
1041 case OVS_ACTION_ATTR_POP_VLAN:
1042 case OVS_ACTION_ATTR_PUSH_MPLS:
1043 case OVS_ACTION_ATTR_POP_MPLS:
1044 case OVS_ACTION_ATTR_SET:
1045 case OVS_ACTION_ATTR_SET_MASKED:
1046 case OVS_ACTION_ATTR_SAMPLE:
1047 case OVS_ACTION_ATTR_UNSPEC:
1048 case __OVS_ACTION_ATTR_MAX:
1049 OVS_NOT_REACHED();
1050 }
1051 }
1052
1053 /* Executes 'execute' by performing most of the actions in userspace and
1054 * passing the fully constructed packets to 'dpif' for output and userspace
1055 * actions.
1056 *
1057 * This helps with actions that a given 'dpif' doesn't implement directly. */
1058 static int
1059 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1060 {
1061 struct dpif_execute_helper_aux aux = {dpif, 0};
1062 struct dpif_packet packet, *pp;
1063
1064 COVERAGE_INC(dpif_execute_with_help);
1065
1066 packet.ofpbuf = *execute->packet;
1067 pp = &packet;
1068
1069 odp_execute_actions(&aux, &pp, 1, false, &execute->md, execute->actions,
1070 execute->actions_len, dpif_execute_helper_cb);
1071
1072 /* Even though may_steal is set to false, some actions could modify or
1073 * reallocate the ofpbuf memory. We need to pass those changes to the
1074 * caller. */
1075 *execute->packet = packet.ofpbuf;
1076
1077 return aux.error;
1078 }
1079
1080 /* Returns true if the datapath needs help executing 'execute'. */
1081 static bool
1082 dpif_execute_needs_help(const struct dpif_execute *execute)
1083 {
1084 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1085 }
1086
1087 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1088 int
1089 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1090 {
1091 if (execute->actions_len) {
1092 struct dpif_op *opp;
1093 struct dpif_op op;
1094
1095 op.type = DPIF_OP_EXECUTE;
1096 op.u.execute = *execute;
1097
1098 opp = &op;
1099 dpif_operate(dpif, &opp, 1);
1100
1101 return op.error;
1102 } else {
1103 return 0;
1104 }
1105 }
1106
1107 /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1108 * which they are specified. Places each operation's results in the "output"
1109 * members documented in comments, and 0 in the 'error' member on success or a
1110 * positive errno on failure. */
1111 void
1112 dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
1113 {
1114 while (n_ops > 0) {
1115 size_t chunk;
1116
1117 /* Count 'chunk', the number of ops that can be executed without
1118 * needing any help. Ops that need help should be rare, so we
1119 * expect this to ordinarily be 'n_ops', that is, all the ops. */
1120 for (chunk = 0; chunk < n_ops; chunk++) {
1121 struct dpif_op *op = ops[chunk];
1122
1123 if (op->type == DPIF_OP_EXECUTE
1124 && dpif_execute_needs_help(&op->u.execute)) {
1125 break;
1126 }
1127 }
1128
1129 if (chunk) {
1130 /* Execute a chunk full of ops that the dpif provider can
1131 * handle itself, without help. */
1132 size_t i;
1133
1134 dpif->dpif_class->operate(dpif, ops, chunk);
1135
1136 for (i = 0; i < chunk; i++) {
1137 struct dpif_op *op = ops[i];
1138 int error = op->error;
1139
1140 switch (op->type) {
1141 case DPIF_OP_FLOW_PUT: {
1142 struct dpif_flow_put *put = &op->u.flow_put;
1143
1144 COVERAGE_INC(dpif_flow_put);
1145 log_flow_put_message(dpif, put, error);
1146 if (error && put->stats) {
1147 memset(put->stats, 0, sizeof *put->stats);
1148 }
1149 break;
1150 }
1151
1152 case DPIF_OP_FLOW_GET: {
1153 struct dpif_flow_get *get = &op->u.flow_get;
1154
1155 COVERAGE_INC(dpif_flow_get);
1156 log_flow_get_message(dpif, get, error);
1157
1158 if (error) {
1159 memset(get->flow, 0, sizeof *get->flow);
1160 }
1161 break;
1162 }
1163
1164 case DPIF_OP_FLOW_DEL: {
1165 struct dpif_flow_del *del = &op->u.flow_del;
1166
1167 COVERAGE_INC(dpif_flow_del);
1168 log_flow_del_message(dpif, del, error);
1169 if (error && del->stats) {
1170 memset(del->stats, 0, sizeof *del->stats);
1171 }
1172 break;
1173 }
1174
1175 case DPIF_OP_EXECUTE:
1176 COVERAGE_INC(dpif_execute);
1177 log_execute_message(dpif, &op->u.execute, false, error);
1178 break;
1179 }
1180 }
1181
1182 ops += chunk;
1183 n_ops -= chunk;
1184 } else {
1185 /* Help the dpif provider to execute one op. */
1186 struct dpif_op *op = ops[0];
1187
1188 COVERAGE_INC(dpif_execute);
1189 op->error = dpif_execute_with_help(dpif, &op->u.execute);
1190 ops++;
1191 n_ops--;
1192 }
1193 }
1194 }
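
/* Example (sketch): batching two independent operations in one dpif_operate()
 * call.  The keys and actions are assumed to have been built elsewhere as
 * Netlink attribute buffers (for instance with the odp-util helpers).
 *
 *     struct dpif_op put_op, del_op;
 *     struct dpif_op *ops[2] = { &put_op, &del_op };
 *
 *     put_op.type = DPIF_OP_FLOW_PUT;
 *     put_op.u.flow_put.flags = DPIF_FP_CREATE;
 *     put_op.u.flow_put.key = put_key;
 *     put_op.u.flow_put.key_len = put_key_len;
 *     put_op.u.flow_put.mask = NULL;
 *     put_op.u.flow_put.mask_len = 0;
 *     put_op.u.flow_put.actions = put_actions;
 *     put_op.u.flow_put.actions_len = put_actions_len;
 *     put_op.u.flow_put.stats = NULL;
 *
 *     del_op.type = DPIF_OP_FLOW_DEL;
 *     del_op.u.flow_del.key = del_key;
 *     del_op.u.flow_del.key_len = del_key_len;
 *     del_op.u.flow_del.stats = NULL;
 *
 *     dpif_operate(dpif, ops, 2);
 *     // put_op.error and del_op.error hold the per-operation results.
 */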
1195
1196 /* Returns a string that represents 'type', for use in log messages. */
1197 const char *
1198 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1199 {
1200 switch (type) {
1201 case DPIF_UC_MISS: return "miss";
1202 case DPIF_UC_ACTION: return "action";
1203 case DPIF_N_UC_TYPES: default: return "<unknown>";
1204 }
1205 }
1206
1207 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1208 * if successful, otherwise a positive errno value.
1209 *
1210 * Turning packet receive off and then back on may change the Netlink PID
1211 * assignments returned by dpif_port_get_pid(). If the client does this, it
1212 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1213 * using the new PID assignment. */
1214 int
1215 dpif_recv_set(struct dpif *dpif, bool enable)
1216 {
1217 int error = 0;
1218
1219 if (dpif->dpif_class->recv_set) {
1220 error = dpif->dpif_class->recv_set(dpif, enable);
1221 log_operation(dpif, "recv_set", error);
1222 }
1223 return error;
1224 }
1225
1226 /* Refreshes the poll loops and Netlink sockets associated with each port
1227 * when the number of upcall handlers (upcall receiving threads) is changed
1228 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1229 * recv_set().
1230 *
1231 * Since multiple upcall handlers can read upcalls simultaneously from
1232 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1233 * handler. So, handlers_set() is responsible for the following tasks:
1234 *
1235 * When receiving upcalls is enabled, extends or creates the
1236 * configuration to support:
1237 *
1238 * - 'n_handlers' Netlink sockets for each port.
1239 *
1240 * - 'n_handlers' poll loops, one for each upcall handler.
1241 *
1242 * - registering the Netlink sockets for the same upcall handler to
1243 * the corresponding poll loop.
1244 *
1245 * Returns 0 if successful, otherwise a positive errno value. */
1246 int
1247 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1248 {
1249 int error = 0;
1250
1251 if (dpif->dpif_class->handlers_set) {
1252 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1253 log_operation(dpif, "handlers_set", error);
1254 }
1255 return error;
1256 }
1257
1258 void
1259 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1260 {
1261 if (dpif->dpif_class->register_upcall_cb) {
1262 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1263 }
1264 }
1265
1266 void
1267 dpif_enable_upcall(struct dpif *dpif)
1268 {
1269 if (dpif->dpif_class->enable_upcall) {
1270 dpif->dpif_class->enable_upcall(dpif);
1271 }
1272 }
1273
1274 void
1275 dpif_disable_upcall(struct dpif *dpif)
1276 {
1277 if (dpif->dpif_class->disable_upcall) {
1278 dpif->dpif_class->disable_upcall(dpif);
1279 }
1280 }
1281
1282 void
1283 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1284 {
1285 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1286 struct ds flow;
1287 char *packet;
1288
1289 packet = ofp_packet_to_string(ofpbuf_data(&upcall->packet),
1290 ofpbuf_size(&upcall->packet));
1291
1292 ds_init(&flow);
1293 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1294
1295 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1296 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1297 ds_cstr(&flow), packet);
1298
1299 ds_destroy(&flow);
1300 free(packet);
1301 }
1302 }
1303
1304 /* If 'dpif' creates its own I/O polling threads, refreshes the poll threads'
1305 * configuration. */
1306 int
1307 dpif_poll_threads_set(struct dpif *dpif, unsigned int n_rxqs,
1308 const char *cmask)
1309 {
1310 int error = 0;
1311
1312 if (dpif->dpif_class->poll_threads_set) {
1313 error = dpif->dpif_class->poll_threads_set(dpif, n_rxqs, cmask);
1314 if (error) {
1315 log_operation(dpif, "poll_threads_set", error);
1316 }
1317 }
1318
1319 return error;
1320 }
1321
1322 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1323 * can be multiple poll loops, 'handler_id' is needed as an index to
1324 * identify the corresponding poll loop. If successful, stores the upcall
1325 * into '*upcall', using 'buf' for storage. Should only be called if
1326 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1327 *
1328 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1329 * 'buf', so their memory cannot be freed separately from 'buf'.
1330 *
1331 * The caller owns the data of 'upcall->packet' and may modify it. If
1332 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1333 * will be reallocated. This requires the data of 'upcall->packet' to be
1334 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1335 * when an error is returned, the 'upcall->packet' may be uninitialized
1336 * and should not be released.
1337 *
1338 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1339 * if no upcall is immediately available. */
1340 int
1341 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1342 struct ofpbuf *buf)
1343 {
1344 int error = EAGAIN;
1345
1346 if (dpif->dpif_class->recv) {
1347 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1348 if (!error) {
1349 dpif_print_packet(dpif, upcall);
1350 } else if (error != EAGAIN) {
1351 log_operation(dpif, "recv", error);
1352 }
1353 }
1354 return error;
1355 }
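
/* Example (sketch): a minimal upcall receive loop for a single handler
 * (handler_id 0), assuming upcall receipt has already been enabled with
 * dpif_recv_set() and dpif_handlers_set().  Ownership of 'upcall.packet'
 * follows the rules in the comment above (it may need a separate
 * ofpbuf_uninit()).
 *
 *     for (;;) {
 *         struct dpif_upcall upcall;
 *         struct ofpbuf buf;
 *         uint64_t stub[4096 / 8];
 *         int error;
 *
 *         ofpbuf_use_stub(&buf, stub, sizeof stub);
 *         error = dpif_recv(dpif, 0, &upcall, &buf);
 *         if (!error) {
 *             ... handle 'upcall', e.g. based on upcall.type ...
 *         } else if (error == EAGAIN) {
 *             dpif_recv_wait(dpif, 0);
 *             poll_block();
 *         }
 *         ofpbuf_uninit(&buf);
 *     }
 */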
1356
1357 /* Discards all messages that would otherwise be received by dpif_recv() on
1358 * 'dpif'. */
1359 void
1360 dpif_recv_purge(struct dpif *dpif)
1361 {
1362 COVERAGE_INC(dpif_purge);
1363 if (dpif->dpif_class->recv_purge) {
1364 dpif->dpif_class->recv_purge(dpif);
1365 }
1366 }
1367
1368 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1369 * has a message queued to be received with the recv member
1370 * function. Since there can be multiple poll loops, 'handler_id' is
1371 * needed as an index to identify the corresponding poll loop. */
1372 void
1373 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1374 {
1375 if (dpif->dpif_class->recv_wait) {
1376 dpif->dpif_class->recv_wait(dpif, handler_id);
1377 }
1378 }
1379
1380 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1381 * and '*engine_id', respectively. */
1382 void
1383 dpif_get_netflow_ids(const struct dpif *dpif,
1384 uint8_t *engine_type, uint8_t *engine_id)
1385 {
1386 *engine_type = dpif->netflow_engine_type;
1387 *engine_id = dpif->netflow_engine_id;
1388 }
1389
1390 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1391 * value used for setting packet priority.
1392 * On success, returns 0 and stores the priority into '*priority'.
1393 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1394 int
1395 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1396 uint32_t *priority)
1397 {
1398 int error = (dpif->dpif_class->queue_to_priority
1399 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1400 priority)
1401 : EOPNOTSUPP);
1402 if (error) {
1403 *priority = 0;
1404 }
1405 log_operation(dpif, "queue_to_priority", error);
1406 return error;
1407 }
1408 \f
1409 void
1410 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1411 const char *name,
1412 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1413 {
1414 dpif->dpif_class = dpif_class;
1415 dpif->base_name = xstrdup(name);
1416 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1417 dpif->netflow_engine_type = netflow_engine_type;
1418 dpif->netflow_engine_id = netflow_engine_id;
1419 }
1420
1421 /* Undoes the results of initialization.
1422 *
1423 * Normally this function only needs to be called from dpif_close().
1424 * However, it may be called by providers due to an error on opening
1425 * that occurs after initialization. In this case dpif_close() would
1426 * never be called. */
1427 void
1428 dpif_uninit(struct dpif *dpif, bool close)
1429 {
1430 char *base_name = dpif->base_name;
1431 char *full_name = dpif->full_name;
1432
1433 if (close) {
1434 dpif->dpif_class->close(dpif);
1435 }
1436
1437 free(base_name);
1438 free(full_name);
1439 }
1440 \f
1441 static void
1442 log_operation(const struct dpif *dpif, const char *operation, int error)
1443 {
1444 if (!error) {
1445 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1446 } else if (ofperr_is_valid(error)) {
1447 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1448 dpif_name(dpif), operation, ofperr_get_name(error));
1449 } else {
1450 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1451 dpif_name(dpif), operation, ovs_strerror(error));
1452 }
1453 }
1454
1455 static enum vlog_level
1456 flow_message_log_level(int error)
1457 {
1458 /* If flows arrive in a batch, userspace may push down multiple
1459 * unique flow definitions that overlap when wildcards are applied.
1460 * Kernels that support flow wildcarding will reject these flows as
1461 * duplicates (EEXIST), so lower the log level to debug for these
1462 * types of messages. */
1463 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1464 }
1465
1466 static bool
1467 should_log_flow_message(int error)
1468 {
1469 return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
1470 error ? &error_rl : &dpmsg_rl);
1471 }
1472
1473 static void
1474 log_flow_message(const struct dpif *dpif, int error, const char *operation,
1475 const struct nlattr *key, size_t key_len,
1476 const struct nlattr *mask, size_t mask_len,
1477 const struct dpif_flow_stats *stats,
1478 const struct nlattr *actions, size_t actions_len)
1479 {
1480 struct ds ds = DS_EMPTY_INITIALIZER;
1481 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1482 if (error) {
1483 ds_put_cstr(&ds, "failed to ");
1484 }
1485 ds_put_format(&ds, "%s ", operation);
1486 if (error) {
1487 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1488 }
1489 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1490 if (stats) {
1491 ds_put_cstr(&ds, ", ");
1492 dpif_flow_stats_format(stats, &ds);
1493 }
1494 if (actions || actions_len) {
1495 ds_put_cstr(&ds, ", actions:");
1496 format_odp_actions(&ds, actions, actions_len);
1497 }
1498 vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
1499 ds_destroy(&ds);
1500 }
1501
1502 static void
1503 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1504 int error)
1505 {
1506 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
1507 struct ds s;
1508
1509 ds_init(&s);
1510 ds_put_cstr(&s, "put");
1511 if (put->flags & DPIF_FP_CREATE) {
1512 ds_put_cstr(&s, "[create]");
1513 }
1514 if (put->flags & DPIF_FP_MODIFY) {
1515 ds_put_cstr(&s, "[modify]");
1516 }
1517 if (put->flags & DPIF_FP_ZERO_STATS) {
1518 ds_put_cstr(&s, "[zero]");
1519 }
1520 log_flow_message(dpif, error, ds_cstr(&s),
1521 put->key, put->key_len, put->mask, put->mask_len,
1522 put->stats, put->actions, put->actions_len);
1523 ds_destroy(&s);
1524 }
1525 }
1526
1527 static void
1528 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1529 int error)
1530 {
1531 if (should_log_flow_message(error)) {
1532 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1533 NULL, 0, !error ? del->stats : NULL, NULL, 0);
1534 }
1535 }
1536
1537 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1538 * (0 for success). 'subexecute' should be true if the execution is a result
1539 * of breaking down a larger execution that needed help, false otherwise.
1540 *
1542 * XXX In theory, the log message could be deceptive because this function is
1543 * called after the dpif_provider's '->execute' function, which is allowed to
1544 * modify execute->packet and execute->md. In practice, though:
1545 *
1546 * - dpif-netlink doesn't modify execute->packet or execute->md.
1547 *
1548 * - dpif-netdev does modify them but it is less likely to have problems
1549 * because it is built into ovs-vswitchd and cannot have version skew,
1550 * etc.
1551 *
1552 * It would still be better to avoid the potential problem. I don't know of a
1553 * good way to do that, though, that isn't expensive. */
1554 static void
1555 log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
1556 bool subexecute, int error)
1557 {
1558 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1559 && !execute->probe) {
1560 struct ds ds = DS_EMPTY_INITIALIZER;
1561 char *packet;
1562
1563 packet = ofp_packet_to_string(ofpbuf_data(execute->packet),
1564 ofpbuf_size(execute->packet));
1565 ds_put_format(&ds, "%s: %sexecute ",
1566 dpif_name(dpif),
1567 (subexecute ? "sub-"
1568 : dpif_execute_needs_help(execute) ? "super-"
1569 : ""));
1570 format_odp_actions(&ds, execute->actions, execute->actions_len);
1571 if (error) {
1572 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1573 }
1574 ds_put_format(&ds, " on packet %s", packet);
1575 vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1576 ds_destroy(&ds);
1577 free(packet);
1578 }
1579 }
1580
1581 static void
1582 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1583 int error)
1584 {
1585 if (should_log_flow_message(error)) {
1586 log_flow_message(dpif, error, "flow_get",
1587 get->key, get->key_len,
1588 get->flow->mask, get->flow->mask_len,
1589 &get->flow->stats,
1590 get->flow->actions, get->flow->actions_len);
1591 }
1592 }