1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dynamic-string.h"
28 #include "flow.h"
29 #include "netdev.h"
30 #include "netlink.h"
31 #include "odp-execute.h"
32 #include "odp-util.h"
33 #include "ofp-errors.h"
34 #include "ofp-print.h"
35 #include "ofp-util.h"
36 #include "ofpbuf.h"
37 #include "packet-dpif.h"
38 #include "packets.h"
39 #include "poll-loop.h"
40 #include "shash.h"
41 #include "sset.h"
42 #include "timeval.h"
43 #include "util.h"
44 #include "valgrind.h"
45 #include "vlog.h"
46
47 VLOG_DEFINE_THIS_MODULE(dpif);
48
49 COVERAGE_DEFINE(dpif_destroy);
50 COVERAGE_DEFINE(dpif_port_add);
51 COVERAGE_DEFINE(dpif_port_del);
52 COVERAGE_DEFINE(dpif_flow_flush);
53 COVERAGE_DEFINE(dpif_flow_get);
54 COVERAGE_DEFINE(dpif_flow_put);
55 COVERAGE_DEFINE(dpif_flow_del);
56 COVERAGE_DEFINE(dpif_execute);
57 COVERAGE_DEFINE(dpif_purge);
58 COVERAGE_DEFINE(dpif_execute_with_help);
59
60 static const struct dpif_class *base_dpif_classes[] = {
61 #ifdef __linux__
62 &dpif_linux_class,
63 #endif
64 &dpif_netdev_class,
65 };
66
67 struct registered_dpif_class {
68 const struct dpif_class *dpif_class;
69 int refcount;
70 };
71 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
72 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
73
74 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
75 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
76
77 /* Rate limit for individual messages going to or from the datapath, output at
78 * DBG level. This is very high because, if these are enabled, it is because
79 * we really need to see them. */
80 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
81
82 /* Not really much point in logging many dpif errors. */
83 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
84
85 static void log_flow_message(const struct dpif *dpif, int error,
86 const char *operation,
87 const struct nlattr *key, size_t key_len,
88 const struct nlattr *mask, size_t mask_len,
89 const struct dpif_flow_stats *stats,
90 const struct nlattr *actions, size_t actions_len);
91 static void log_operation(const struct dpif *, const char *operation,
92 int error);
93 static bool should_log_flow_message(int error);
94 static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
95 int error);
96 static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
97 int error);
98 static void log_execute_message(struct dpif *, const struct dpif_execute *,
99 int error);
100
101 static void
102 dp_initialize(void)
103 {
104 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
105
106 if (ovsthread_once_start(&once)) {
107 int i;
108
109 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
110 dp_register_provider(base_dpif_classes[i]);
111 }
112 ovsthread_once_done(&once);
113 }
114 }
115
116 static int
117 dp_register_provider__(const struct dpif_class *new_class)
118 {
119 struct registered_dpif_class *registered_class;
120
121 if (sset_contains(&dpif_blacklist, new_class->type)) {
122 VLOG_DBG("attempted to register blacklisted provider: %s",
123 new_class->type);
124 return EINVAL;
125 }
126
127 if (shash_find(&dpif_classes, new_class->type)) {
128 VLOG_WARN("attempted to register duplicate datapath provider: %s",
129 new_class->type);
130 return EEXIST;
131 }
132
133 registered_class = xmalloc(sizeof *registered_class);
134 registered_class->dpif_class = new_class;
135 registered_class->refcount = 0;
136
137 shash_add(&dpif_classes, new_class->type, registered_class);
138
139 return 0;
140 }
141
142 /* Registers a new datapath provider. After successful registration, new
143 * datapaths of that type can be opened using dpif_open(). */
144 int
145 dp_register_provider(const struct dpif_class *new_class)
146 {
147 int error;
148
149 ovs_mutex_lock(&dpif_mutex);
150 error = dp_register_provider__(new_class);
151 ovs_mutex_unlock(&dpif_mutex);
152
153 return error;
154 }
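/* A minimal sketch of how an out-of-tree datapath implementation could be
 * registered at startup.  'dpif_foo_class' is a hypothetical provider used
 * only for illustration; the in-tree providers in 'base_dpif_classes' are
 * registered automatically by dp_initialize().
 *
 *     extern const struct dpif_class dpif_foo_class;
 *
 *     static void
 *     register_foo_provider(void)
 *     {
 *         int error = dp_register_provider(&dpif_foo_class);
 *         if (error) {
 *             VLOG_WARN("failed to register foo datapath provider: %s",
 *                       ovs_strerror(error));
 *         }
 *     }
 */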
155
156 /* Unregisters a datapath provider. 'type' must have been previously
157 * registered and not currently be in use by any dpifs. After unregistration
158 * new datapaths of that type cannot be opened using dpif_open(). */
159 static int
160 dp_unregister_provider__(const char *type)
161 {
162 struct shash_node *node;
163 struct registered_dpif_class *registered_class;
164
165 node = shash_find(&dpif_classes, type);
166 if (!node) {
167 VLOG_WARN("attempted to unregister a datapath provider that is not "
168 "registered: %s", type);
169 return EAFNOSUPPORT;
170 }
171
172 registered_class = node->data;
173 if (registered_class->refcount) {
174 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
175 return EBUSY;
176 }
177
178 shash_delete(&dpif_classes, node);
179 free(registered_class);
180
181 return 0;
182 }
183
184 /* Unregisters a datapath provider. 'type' must have been previously
185 * registered and not currently be in use by any dpifs. After unregistration
186 * new datapaths of that type cannot be opened using dpif_open(). */
187 int
188 dp_unregister_provider(const char *type)
189 {
190 int error;
191
192 dp_initialize();
193
194 ovs_mutex_lock(&dpif_mutex);
195 error = dp_unregister_provider__(type);
196 ovs_mutex_unlock(&dpif_mutex);
197
198 return error;
199 }
200
201 /* Blacklists a provider. Causes future calls of dp_register_provider() with
202 * a dpif_class which implements 'type' to fail. */
203 void
204 dp_blacklist_provider(const char *type)
205 {
206 ovs_mutex_lock(&dpif_mutex);
207 sset_add(&dpif_blacklist, type);
208 ovs_mutex_unlock(&dpif_mutex);
209 }
210
211 /* Clears 'types' and enumerates the types of all currently registered datapath
212 * providers into it. The caller must first initialize the sset. */
213 void
214 dp_enumerate_types(struct sset *types)
215 {
216 struct shash_node *node;
217
218 dp_initialize();
219 sset_clear(types);
220
221 ovs_mutex_lock(&dpif_mutex);
222 SHASH_FOR_EACH(node, &dpif_classes) {
223 const struct registered_dpif_class *registered_class = node->data;
224 sset_add(types, registered_class->dpif_class->type);
225 }
226 ovs_mutex_unlock(&dpif_mutex);
227 }
228
229 static void
230 dp_class_unref(struct registered_dpif_class *rc)
231 {
232 ovs_mutex_lock(&dpif_mutex);
233 ovs_assert(rc->refcount);
234 rc->refcount--;
235 ovs_mutex_unlock(&dpif_mutex);
236 }
237
238 static struct registered_dpif_class *
239 dp_class_lookup(const char *type)
240 {
241 struct registered_dpif_class *rc;
242
243 ovs_mutex_lock(&dpif_mutex);
244 rc = shash_find_data(&dpif_classes, type);
245 if (rc) {
246 rc->refcount++;
247 }
248 ovs_mutex_unlock(&dpif_mutex);
249
250 return rc;
251 }
252
253 /* Clears 'names' and enumerates the names of all known created datapaths with
254 * the given 'type'. The caller must first initialize the sset. Returns 0 if
255 * successful, otherwise a positive errno value.
256 *
257 * Some kinds of datapaths might not be practically enumerable. This is not
258 * considered an error. */
259 int
260 dp_enumerate_names(const char *type, struct sset *names)
261 {
262 struct registered_dpif_class *registered_class;
263 const struct dpif_class *dpif_class;
264 int error;
265
266 dp_initialize();
267 sset_clear(names);
268
269 registered_class = dp_class_lookup(type);
270 if (!registered_class) {
271 VLOG_WARN("could not enumerate unknown type: %s", type);
272 return EAFNOSUPPORT;
273 }
274
275 dpif_class = registered_class->dpif_class;
276 error = (dpif_class->enumerate
277 ? dpif_class->enumerate(names, dpif_class)
278 : 0);
279 if (error) {
280 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
281 ovs_strerror(error));
282 }
283 dp_class_unref(registered_class);
284
285 return error;
286 }
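/* A minimal sketch of enumerating the registered datapath types and the
 * datapaths that currently exist for each of them, using ssets as documented
 * above.
 *
 *     struct sset types = SSET_INITIALIZER(&types);
 *     struct sset names = SSET_INITIALIZER(&names);
 *     const char *type, *name;
 *
 *     dp_enumerate_types(&types);
 *     SSET_FOR_EACH (type, &types) {
 *         if (!dp_enumerate_names(type, &names)) {
 *             SSET_FOR_EACH (name, &names) {
 *                 VLOG_INFO("%s@%s", type, name);
 *             }
 *         }
 *     }
 *     sset_destroy(&names);
 *     sset_destroy(&types);
 */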
287
288 /* Parses 'datapath_name_', which is of the form [type@]name, into its
289 * component pieces. 'name' and 'type' must be freed by the caller.
290 *
291 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
292 void
293 dp_parse_name(const char *datapath_name_, char **name, char **type)
294 {
295 char *datapath_name = xstrdup(datapath_name_);
296 char *separator;
297
298 separator = strchr(datapath_name, '@');
299 if (separator) {
300 *separator = '\0';
301 *type = datapath_name;
302 *name = xstrdup(dpif_normalize_type(separator + 1));
303 } else {
304 *name = datapath_name;
305 *type = xstrdup(dpif_normalize_type(NULL));
306 }
307 }
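/* A minimal sketch of dp_parse_name(): splitting a "type@name" string such
 * as "netdev@br0" (the datapath name "br0" is only an example) into its
 * pieces.  Afterward 'name' is "br0" and 'type' is "netdev", and both
 * strings must be freed by the caller.
 *
 *     char *name, *type;
 *
 *     dp_parse_name("netdev@br0", &name, &type);
 *     free(name);
 *     free(type);
 */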
308
309 static int
310 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
311 {
312 struct dpif *dpif = NULL;
313 int error;
314 struct registered_dpif_class *registered_class;
315
316 dp_initialize();
317
318 type = dpif_normalize_type(type);
319 registered_class = dp_class_lookup(type);
320 if (!registered_class) {
321 VLOG_WARN("could not create datapath %s of unknown type %s", name,
322 type);
323 error = EAFNOSUPPORT;
324 goto exit;
325 }
326
327 error = registered_class->dpif_class->open(registered_class->dpif_class,
328 name, create, &dpif);
329 if (!error) {
330 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
331 } else {
332 dp_class_unref(registered_class);
333 }
334
335 exit:
336 *dpifp = error ? NULL : dpif;
337 return error;
338 }
339
340 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
341 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
342 * the empty string to specify the default system type. Returns 0 if
343 * successful, otherwise a positive errno value. On success stores a pointer
344 * to the datapath in '*dpifp', otherwise a null pointer. */
345 int
346 dpif_open(const char *name, const char *type, struct dpif **dpifp)
347 {
348 return do_open(name, type, false, dpifp);
349 }
350
351 /* Tries to create and open a new datapath with the given 'name' and 'type'.
352 * 'type' may be either NULL or the empty string to specify the default system
353 * type. Will fail if a datapath with 'name' and 'type' already exists.
354 * Returns 0 if successful, otherwise a positive errno value. On success
355 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
356 int
357 dpif_create(const char *name, const char *type, struct dpif **dpifp)
358 {
359 return do_open(name, type, true, dpifp);
360 }
361
362 /* Tries to open a datapath with the given 'name' and 'type', creating it if it
363 * does not exist. 'type' may be either NULL or the empty string to specify
364 * the default system type. Returns 0 if successful, otherwise a positive
365 * errno value. On success stores a pointer to the datapath in '*dpifp',
366 * otherwise a null pointer. */
367 int
368 dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
369 {
370 int error;
371
372 error = dpif_create(name, type, dpifp);
373 if (error == EEXIST || error == EBUSY) {
374 error = dpif_open(name, type, dpifp);
375 if (error) {
376 VLOG_WARN("datapath %s already exists but cannot be opened: %s",
377 name, ovs_strerror(error));
378 }
379 } else if (error) {
380 VLOG_WARN("failed to create datapath %s: %s",
381 name, ovs_strerror(error));
382 }
383 return error;
384 }
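/* A minimal sketch of opening the default "system" datapath, creating it if
 * it does not already exist.  The datapath name "ovs-system" is only an
 * example.
 *
 *     struct dpif *dpif;
 *     int error;
 *
 *     error = dpif_create_and_open("ovs-system", NULL, &dpif);
 *     if (!error) {
 *         VLOG_INFO("opened datapath %s", dpif_name(dpif));
 *         dpif_close(dpif);
 *     }
 */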
385
386 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
387 * itself; call dpif_delete() first, instead, if that is desirable. */
388 void
389 dpif_close(struct dpif *dpif)
390 {
391 if (dpif) {
392 struct registered_dpif_class *rc;
393
394 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
395 dpif_uninit(dpif, true);
396 dp_class_unref(rc);
397 }
398 }
399
400 /* Performs periodic work needed by 'dpif'. */
401 void
402 dpif_run(struct dpif *dpif)
403 {
404 if (dpif->dpif_class->run) {
405 dpif->dpif_class->run(dpif);
406 }
407 }
408
409 /* Arranges for poll_block() to wake up when dpif_run() needs to be called
410 * for 'dpif'. */
411 void
412 dpif_wait(struct dpif *dpif)
413 {
414 if (dpif->dpif_class->wait) {
415 dpif->dpif_class->wait(dpif);
416 }
417 }
418
419 /* Returns the name of datapath 'dpif' prefixed with the type
420 * (for use in log messages). */
421 const char *
422 dpif_name(const struct dpif *dpif)
423 {
424 return dpif->full_name;
425 }
426
427 /* Returns the name of datapath 'dpif' without the type
428 * (for use in device names). */
429 const char *
430 dpif_base_name(const struct dpif *dpif)
431 {
432 return dpif->base_name;
433 }
434
435 /* Returns the type of datapath 'dpif'. */
436 const char *
437 dpif_type(const struct dpif *dpif)
438 {
439 return dpif->dpif_class->type;
440 }
441
442 /* Returns the fully spelled out name for the given datapath 'type'.
443 *
444 * Normalized type strings can be compared with strcmp(). Unnormalized type
445 * strings might refer to the same type even if they are spelled differently.
446 const char *
447 dpif_normalize_type(const char *type)
448 {
449 return type && type[0] ? type : "system";
450 }
451
452 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
453 * ports. After calling this function, it does not make sense to pass 'dpif'
454 * to any functions other than dpif_name() or dpif_close(). */
455 int
456 dpif_delete(struct dpif *dpif)
457 {
458 int error;
459
460 COVERAGE_INC(dpif_destroy);
461
462 error = dpif->dpif_class->destroy(dpif);
463 log_operation(dpif, "delete", error);
464 return error;
465 }
466
467 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
468 * otherwise a positive errno value. */
469 int
470 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
471 {
472 int error = dpif->dpif_class->get_stats(dpif, stats);
473 if (error) {
474 memset(stats, 0, sizeof *stats);
475 }
476 log_operation(dpif, "get_stats", error);
477 return error;
478 }
479
480 const char *
481 dpif_port_open_type(const char *datapath_type, const char *port_type)
482 {
483 struct registered_dpif_class *rc;
484
485 datapath_type = dpif_normalize_type(datapath_type);
486
487 ovs_mutex_lock(&dpif_mutex);
488 rc = shash_find_data(&dpif_classes, datapath_type);
489 if (rc && rc->dpif_class->port_open_type) {
490 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
491 }
492 ovs_mutex_unlock(&dpif_mutex);
493
494 return port_type;
495 }
496
497 /* Attempts to add 'netdev' as a port on 'dpif'. If 'port_nop' is
498 * non-null and its value is not ODPP_NONE, then attempts to use the
499 * value as the port number.
500 *
501 * If successful, returns 0 and sets '*port_nop' to the new port's port
502 * number (if 'port_nop' is non-null). On failure, returns a positive
503 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
504 * non-null). */
505 int
506 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
507 {
508 const char *netdev_name = netdev_get_name(netdev);
509 odp_port_t port_no = ODPP_NONE;
510 int error;
511
512 COVERAGE_INC(dpif_port_add);
513
514 if (port_nop) {
515 port_no = *port_nop;
516 }
517
518 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
519 if (!error) {
520 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
521 dpif_name(dpif), netdev_name, port_no);
522 } else {
523 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
524 dpif_name(dpif), netdev_name, ovs_strerror(error));
525 port_no = ODPP_NONE;
526 }
527 if (port_nop) {
528 *port_nop = port_no;
529 }
530 return error;
531 }
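/* A minimal sketch, assuming 'dpif' is an open datapath handle, of adding a
 * network device to the datapath and letting the datapath pick the port
 * number (ODPP_NONE requests automatic assignment, per the comment above).
 * The device name "eth0" and the netdev type "system" are assumptions for
 * illustration.
 *
 *     struct netdev *netdev;
 *     odp_port_t port_no = ODPP_NONE;
 *     int error;
 *
 *     error = netdev_open("eth0", "system", &netdev);
 *     if (!error) {
 *         error = dpif_port_add(dpif, netdev, &port_no);
 *         netdev_close(netdev);
 *     }
 */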
532
533 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
534 * otherwise a positive errno value. */
535 int
536 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
537 {
538 int error;
539
540 COVERAGE_INC(dpif_port_del);
541
542 error = dpif->dpif_class->port_del(dpif, port_no);
543 if (!error) {
544 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
545 dpif_name(dpif), port_no);
546 } else {
547 log_operation(dpif, "port_del", error);
548 }
549 return error;
550 }
551
552 /* Makes a deep copy of 'src' into 'dst'. */
553 void
554 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
555 {
556 dst->name = xstrdup(src->name);
557 dst->type = xstrdup(src->type);
558 dst->port_no = src->port_no;
559 }
560
561 /* Frees memory allocated to members of 'dpif_port'.
562 *
563 * Do not call this function on a dpif_port obtained from
564 * dpif_port_dump_next(): that function retains ownership of the data in the
565 * dpif_port. */
566 void
567 dpif_port_destroy(struct dpif_port *dpif_port)
568 {
569 free(dpif_port->name);
570 free(dpif_port->type);
571 }
572
573 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
574 * true; otherwise, returns false. */
575 bool
576 dpif_port_exists(const struct dpif *dpif, const char *devname)
577 {
578 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
579 if (error != 0 && error != ENOENT && error != ENODEV) {
580 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
581 dpif_name(dpif), devname, ovs_strerror(error));
582 }
583
584 return !error;
585 }
586
587 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
588 * initializes '*port' appropriately; on failure, returns a positive errno
589 * value.
590 *
591 * The caller owns the data in 'port' and must free it with
592 * dpif_port_destroy() when it is no longer needed. */
593 int
594 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
595 struct dpif_port *port)
596 {
597 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
598 if (!error) {
599 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
600 dpif_name(dpif), port_no, port->name);
601 } else {
602 memset(port, 0, sizeof *port);
603 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
604 dpif_name(dpif), port_no, ovs_strerror(error));
605 }
606 return error;
607 }
608
609 /* Looks up port named 'devname' in 'dpif'. On success, returns 0 and
610 * initializes '*port' appropriately; on failure, returns a positive errno
611 * value.
612 *
613 * The caller owns the data in 'port' and must free it with
614 * dpif_port_destroy() when it is no longer needed. */
615 int
616 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
617 struct dpif_port *port)
618 {
619 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
620 if (!error) {
621 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
622 dpif_name(dpif), devname, port->port_no);
623 } else {
624 memset(port, 0, sizeof *port);
625
626 /* For ENOENT or ENODEV we use DBG level because the caller is probably
627 * interested in whether 'dpif' actually has a port 'devname', so it's
628 * not an issue worth logging if it doesn't. Other errors are
629 * uncommon and more likely to indicate a real problem. */
630 VLOG_RL(&error_rl,
631 error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
632 "%s: failed to query port %s: %s",
633 dpif_name(dpif), devname, ovs_strerror(error));
634 }
635 return error;
636 }
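/* A minimal sketch, assuming 'dpif' is an open datapath handle, of looking up
 * a port by device name; the caller owns the returned dpif_port and must
 * destroy it.  The device name "eth0" is only an example.
 *
 *     struct dpif_port port;
 *
 *     if (!dpif_port_query_by_name(dpif, "eth0", &port)) {
 *         VLOG_INFO("%s is port %"PRIu32, port.name, port.port_no);
 *         dpif_port_destroy(&port);
 *     }
 */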
637
638 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
639 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
640 * flows whose packets arrived on port 'port_no'. In the case where the
641 * provider allocates multiple Netlink PIDs to a single port, it may use
642 * 'hash' to spread load among them. The caller need not use a particular
643 * hash function; a 5-tuple hash is suitable.
644 *
645 * (The datapath implementation might use some different hash function for
646 * distributing packets received via flow misses among PIDs. This means
647 * that packets received via flow misses might be reordered relative to
648 * packets received via userspace actions. This is not ordinarily a
649 * problem.)
650 *
651 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
652 * allocated to any port, that the client may use for special purposes.
653 *
654 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
655 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
656 * disabled and then re-enabled, so a client that does that must be prepared to
657 * update all of the flows that it installed that contain
658 * OVS_ACTION_ATTR_USERSPACE actions. */
659 uint32_t
660 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
661 {
662 return (dpif->dpif_class->port_get_pid
663 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
664 : 0);
665 }
666
667 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
668 * the port's name into the 'name_size' bytes in 'name', ensuring that the
669 * result is null-terminated. On failure, returns a positive errno value and
670 * makes 'name' the empty string. */
671 int
672 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
673 char *name, size_t name_size)
674 {
675 struct dpif_port port;
676 int error;
677
678 ovs_assert(name_size > 0);
679
680 error = dpif_port_query_by_number(dpif, port_no, &port);
681 if (!error) {
682 ovs_strlcpy(name, port.name, name_size);
683 dpif_port_destroy(&port);
684 } else {
685 *name = '\0';
686 }
687 return error;
688 }
689
690 /* Initializes 'dump' to begin dumping the ports in a dpif.
691 *
692 * This function provides no status indication. An error status for the entire
693 * dump operation is provided when it is completed by calling
694 * dpif_port_dump_done().
695 */
696 void
697 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
698 {
699 dump->dpif = dpif;
700 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
701 log_operation(dpif, "port_dump_start", dump->error);
702 }
703
704 /* Attempts to retrieve another port from 'dump', which must have been
705 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
706 * into 'port' and returns true. On failure, returns false.
707 *
708 * Failure might indicate an actual error or merely that the last port has been
709 * dumped. An error status for the entire dump operation is provided when it
710 * is completed by calling dpif_port_dump_done().
711 *
712 * The dpif owns the data stored in 'port'. It will remain valid until at
713 * least the next time 'dump' is passed to dpif_port_dump_next() or
714 * dpif_port_dump_done(). */
715 bool
716 dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
717 {
718 const struct dpif *dpif = dump->dpif;
719
720 if (dump->error) {
721 return false;
722 }
723
724 dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
725 if (dump->error == EOF) {
726 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
727 } else {
728 log_operation(dpif, "port_dump_next", dump->error);
729 }
730
731 if (dump->error) {
732 dpif->dpif_class->port_dump_done(dpif, dump->state);
733 return false;
734 }
735 return true;
736 }
737
738 /* Completes port table dump operation 'dump', which must have been initialized
739 * with dpif_port_dump_start(). Returns 0 if the dump operation was
740 * error-free, otherwise a positive errno value describing the problem. */
741 int
742 dpif_port_dump_done(struct dpif_port_dump *dump)
743 {
744 const struct dpif *dpif = dump->dpif;
745 if (!dump->error) {
746 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
747 log_operation(dpif, "port_dump_done", dump->error);
748 }
749 return dump->error == EOF ? 0 : dump->error;
750 }
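/* A minimal sketch, assuming 'dpif' is an open datapath handle, of a complete
 * port dump using the start, next, and done calls documented above.  (dpif.h
 * also provides a DPIF_PORT_FOR_EACH() convenience macro that wraps this
 * pattern.)
 *
 *     struct dpif_port_dump dump;
 *     struct dpif_port port;
 *     int error;
 *
 *     dpif_port_dump_start(&dump, dpif);
 *     while (dpif_port_dump_next(&dump, &port)) {
 *         VLOG_INFO("port %"PRIu32": %s (%s)",
 *                   port.port_no, port.name, port.type);
 *     }
 *     error = dpif_port_dump_done(&dump);
 *     if (error) {
 *         VLOG_WARN("port dump failed: %s", ovs_strerror(error));
 *     }
 */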
751
752 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
753 * 'dpif' has changed, this function does one of the following:
754 *
755 * - Stores the name of the device that was added to or deleted from 'dpif' in
756 * '*devnamep' and returns 0. The caller is responsible for freeing
757 * '*devnamep' (with free()) when it no longer needs it.
758 *
759 * - Returns ENOBUFS and sets '*devnamep' to NULL.
760 *
761 * This function may also return 'false positives', where it returns 0 and
762 * '*devnamep' names a device that was not actually added or deleted or it
763 * returns ENOBUFS without any change.
764 *
765 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
766 * return other positive errno values to indicate that something has gone
767 * wrong. */
768 int
769 dpif_port_poll(const struct dpif *dpif, char **devnamep)
770 {
771 int error = dpif->dpif_class->port_poll(dpif, devnamep);
772 if (error) {
773 *devnamep = NULL;
774 }
775 return error;
776 }
777
778 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
779 * value other than EAGAIN. */
780 void
781 dpif_port_poll_wait(const struct dpif *dpif)
782 {
783 dpif->dpif_class->port_poll_wait(dpif);
784 }
785
786 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
787 * arguments must have been initialized through a call to flow_extract().
788 * 'used' is stored into stats->used. */
789 void
790 dpif_flow_stats_extract(const struct flow *flow, const struct ofpbuf *packet,
791 long long int used, struct dpif_flow_stats *stats)
792 {
793 stats->tcp_flags = ntohs(flow->tcp_flags);
794 stats->n_bytes = ofpbuf_size(packet);
795 stats->n_packets = 1;
796 stats->used = used;
797 }
798
799 /* Appends a human-readable representation of 'stats' to 's'. */
800 void
801 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
802 {
803 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
804 stats->n_packets, stats->n_bytes);
805 if (stats->used) {
806 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
807 } else {
808 ds_put_format(s, "never");
809 }
810 if (stats->tcp_flags) {
811 ds_put_cstr(s, ", flags:");
812 packet_format_tcp_flags(s, stats->tcp_flags);
813 }
814 }
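/* A minimal sketch of formatting flow statistics for logging, assuming
 * 'stats' is a struct dpif_flow_stats filled in by, e.g., dpif_flow_get() or
 * dpif_flow_stats_extract().
 *
 *     struct ds s = DS_EMPTY_INITIALIZER;
 *
 *     dpif_flow_stats_format(&stats, &s);
 *     VLOG_INFO("flow stats: %s", ds_cstr(&s));
 *     ds_destroy(&s);
 */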
815
816 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
817 * positive errno value. */
818 int
819 dpif_flow_flush(struct dpif *dpif)
820 {
821 int error;
822
823 COVERAGE_INC(dpif_flow_flush);
824
825 error = dpif->dpif_class->flow_flush(dpif);
826 log_operation(dpif, "flow_flush", error);
827 return error;
828 }
829
830 /* Queries 'dpif' for a flow entry. The flow is specified by the Netlink
831 * attributes with types OVS_KEY_ATTR_* in the 'key_len' bytes starting at
832 * 'key'.
833 *
834 * Returns 0 if successful. If no flow matches, returns ENOENT. On other
835 * failure, returns a positive errno value.
836 *
837 * If 'actionsp' is nonnull, then on success '*actionsp' will be set to an
838 * ofpbuf owned by the caller that contains the Netlink attributes for the
839 * flow's actions. The caller must free the ofpbuf (with ofpbuf_delete()) when
840 * it is no longer needed.
841 *
842 * If 'stats' is nonnull, then on success it will be updated with the flow's
843 * statistics. */
844 int
845 dpif_flow_get(const struct dpif *dpif,
846 const struct nlattr *key, size_t key_len,
847 struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
848 {
849 int error;
850
851 COVERAGE_INC(dpif_flow_get);
852
853 error = dpif->dpif_class->flow_get(dpif, key, key_len, actionsp, stats);
854 if (error) {
855 if (actionsp) {
856 *actionsp = NULL;
857 }
858 if (stats) {
859 memset(stats, 0, sizeof *stats);
860 }
861 }
862 if (should_log_flow_message(error)) {
863 const struct nlattr *actions;
864 size_t actions_len;
865
866 if (!error && actionsp) {
867 actions = ofpbuf_data(*actionsp);
868 actions_len = ofpbuf_size(*actionsp);
869 } else {
870 actions = NULL;
871 actions_len = 0;
872 }
873 log_flow_message(dpif, error, "flow_get", key, key_len,
874 NULL, 0, stats, actions, actions_len);
875 }
876 return error;
877 }
878
879 static int
880 dpif_flow_put__(struct dpif *dpif, const struct dpif_flow_put *put)
881 {
882 int error;
883
884 COVERAGE_INC(dpif_flow_put);
885 ovs_assert(!(put->flags & ~(DPIF_FP_CREATE | DPIF_FP_MODIFY
886 | DPIF_FP_ZERO_STATS)));
887
888 error = dpif->dpif_class->flow_put(dpif, put);
889 if (error && put->stats) {
890 memset(put->stats, 0, sizeof *put->stats);
891 }
892 log_flow_put_message(dpif, put, error);
893 return error;
894 }
895
896 /* Adds or modifies a flow in 'dpif'. The flow is specified by the Netlink
897 * attribute OVS_FLOW_ATTR_KEY with types OVS_KEY_ATTR_* in the 'key_len' bytes
898 * starting at 'key', and OVS_FLOW_ATTR_MASK with types of OVS_KEY_ATTR_* in
899 * the 'mask_len' bytes starting at 'mask'. The associated actions are
900 * specified by the Netlink attributes with types OVS_ACTION_ATTR_* in the
901 * 'actions_len' bytes starting at 'actions'.
902 *
903 * - If the flow's key does not exist in 'dpif', then the flow will be added if
904 * 'flags' includes DPIF_FP_CREATE. Otherwise the operation will fail with
905 * ENOENT.
906 *
907 * The datapath may reject attempts to insert overlapping flows with EINVAL
908 * or EEXIST, but clients should not rely on this: avoiding overlapping flows
909 * is primarily the client's responsibility.
910 *
911 * If the operation succeeds, then 'stats', if nonnull, will be zeroed.
912 *
913 * - If the flow's key does exist in 'dpif', then the flow's actions will be
914 * updated if 'flags' includes DPIF_FP_MODIFY. Otherwise the operation will
915 * fail with EEXIST. If the flow's actions are updated, then its statistics
916 * will be zeroed if 'flags' includes DPIF_FP_ZERO_STATS, and left as-is
917 * otherwise.
918 *
919 * If the operation succeeds, then 'stats', if nonnull, will be set to the
920 * flow's statistics before the update.
921 */
922 int
923 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
924 const struct nlattr *key, size_t key_len,
925 const struct nlattr *mask, size_t mask_len,
926 const struct nlattr *actions, size_t actions_len,
927 struct dpif_flow_stats *stats)
928 {
929 struct dpif_flow_put put;
930
931 put.flags = flags;
932 put.key = key;
933 put.key_len = key_len;
934 put.mask = mask;
935 put.mask_len = mask_len;
936 put.actions = actions;
937 put.actions_len = actions_len;
938 put.stats = stats;
939 return dpif_flow_put__(dpif, &put);
940 }
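/* A minimal sketch, assuming 'dpif' is an open datapath handle, of installing
 * a new flow.  'key', 'mask', and 'actions' (with their lengths) are assumed
 * to be Netlink attribute buffers that the caller has already built, e.g.
 * with the odp-util helpers; they are only placeholders here.
 *
 *     struct dpif_flow_stats stats;
 *     int error;
 *
 *     error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_ZERO_STATS,
 *                           key, key_len, mask, mask_len,
 *                           actions, actions_len, &stats);
 *     if (error) {
 *         VLOG_WARN("flow_put failed: %s", ovs_strerror(error));
 *     }
 */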
941
942 static int
943 dpif_flow_del__(struct dpif *dpif, struct dpif_flow_del *del)
944 {
945 int error;
946
947 COVERAGE_INC(dpif_flow_del);
948
949 error = dpif->dpif_class->flow_del(dpif, del);
950 if (error && del->stats) {
951 memset(del->stats, 0, sizeof *del->stats);
952 }
953 log_flow_del_message(dpif, del, error);
954 return error;
955 }
956
957 /* Deletes a flow from 'dpif' and returns 0, or returns ENOENT if 'dpif' does
958 * not contain such a flow. The flow is specified by the Netlink attributes
959 * with types OVS_KEY_ATTR_* in the 'key_len' bytes starting at 'key'.
960 *
961 * If the operation succeeds, then 'stats', if nonnull, will be set to the
962 * flow's statistics before its deletion. */
963 int
964 dpif_flow_del(struct dpif *dpif,
965 const struct nlattr *key, size_t key_len,
966 struct dpif_flow_stats *stats)
967 {
968 struct dpif_flow_del del;
969
970 del.key = key;
971 del.key_len = key_len;
972 del.stats = stats;
973 return dpif_flow_del__(dpif, &del);
974 }
975
976 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
977 * flows in 'dpif'.
978 *
979 * This function always successfully returns a dpif_flow_dump. Error
980 * reporting is deferred to dpif_flow_dump_destroy(). */
981 struct dpif_flow_dump *
982 dpif_flow_dump_create(const struct dpif *dpif)
983 {
984 return dpif->dpif_class->flow_dump_create(dpif);
985 }
986
987 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
988 * All dpif_flow_dump_thread structures previously created for 'dump' must
989 * previously have been destroyed.
990 *
991 * Returns 0 if the dump operation was error-free, otherwise a positive errno
992 * value describing the problem. */
993 int
994 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
995 {
996 const struct dpif *dpif = dump->dpif;
997 int error = dpif->dpif_class->flow_dump_destroy(dump);
998 log_operation(dpif, "flow_dump_destroy", error);
999 return error == EOF ? 0 : error;
1000 }
1001
1002 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1003 struct dpif_flow_dump_thread *
1004 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1005 {
1006 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1007 }
1008
1009 /* Releases 'thread'. */
1010 void
1011 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1012 {
1013 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1014 }
1015
1016 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1017 * if and only if no flows remained to be retrieved, otherwise a positive
1018 * number reflecting the number of elements in 'flows[]' that were updated.
1019 * The number of flows returned might be less than 'max_flows' because
1020 * fewer than 'max_flows' remained, because this particular datapath does not
1021 * benefit from batching, or because an error occurred partway through
1022 * retrieval. Thus, the caller should continue calling until a 0 return value,
1023 * even if intermediate return values are less than 'max_flows'.
1024 *
1025 * No error status is immediately provided. An error status for the entire
1026 * dump operation is provided when it is completed by calling
1027 * dpif_flow_dump_destroy().
1028 *
1029 * All of the data stored into 'flows' is owned by the datapath, not by the
1030 * caller, and the caller must not modify or free it. The datapath guarantees
1031 * that it remains accessible and unchanged until at least the next call to
1032 * dpif_flow_dump_next() for 'thread'. */
1033 int
1034 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1035 struct dpif_flow *flows, int max_flows)
1036 {
1037 struct dpif *dpif = thread->dpif;
1038 int n;
1039
1040 ovs_assert(max_flows > 0);
1041 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1042 if (n > 0) {
1043 struct dpif_flow *f;
1044
1045 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
1046 log_flow_message(dpif, 0, "flow_dump",
1047 f->key, f->key_len, f->mask, f->mask_len,
1048 &f->stats, f->actions, f->actions_len);
1049 }
1050 } else {
1051 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1052 }
1053 return n;
1054 }
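/* A minimal sketch, assuming 'dpif' is an open datapath handle, of dumping
 * all flows from a single thread, batching up to 50 flows per call as
 * described above.
 *
 *     struct dpif_flow_dump *dump = dpif_flow_dump_create(dpif);
 *     struct dpif_flow_dump_thread *thread
 *         = dpif_flow_dump_thread_create(dump);
 *     struct dpif_flow flows[50];
 *     int n, error;
 *
 *     while ((n = dpif_flow_dump_next(thread, flows, ARRAY_SIZE(flows))) > 0) {
 *         int i;
 *
 *         for (i = 0; i < n; i++) {
 *             struct ds s = DS_EMPTY_INITIALIZER;
 *
 *             dpif_flow_stats_format(&flows[i].stats, &s);
 *             VLOG_DBG("flow %d: %s", i, ds_cstr(&s));
 *             ds_destroy(&s);
 *         }
 *     }
 *     dpif_flow_dump_thread_destroy(thread);
 *     error = dpif_flow_dump_destroy(dump);
 *     if (error) {
 *         VLOG_WARN("flow dump failed: %s", ovs_strerror(error));
 *     }
 */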
1055
1056 struct dpif_execute_helper_aux {
1057 struct dpif *dpif;
1058 int error;
1059 };
1060
1061 /* This is called for actions that need the context of the datapath to be
1062 * meaningful. */
1063 static void
1064 dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
1065 struct pkt_metadata *md,
1066 const struct nlattr *action, bool may_steal OVS_UNUSED)
1067 {
1068 struct dpif_execute_helper_aux *aux = aux_;
1069 int type = nl_attr_type(action);
1070 struct ofpbuf *packet = &packets[0]->ofpbuf;
1071
1072 ovs_assert(cnt == 1);
1073
1074 switch ((enum ovs_action_attr)type) {
1075 case OVS_ACTION_ATTR_OUTPUT:
1076 case OVS_ACTION_ATTR_USERSPACE:
1077 case OVS_ACTION_ATTR_RECIRC: {
1078 struct dpif_execute execute;
1079 struct ofpbuf execute_actions;
1080 uint64_t stub[256 / 8];
1081
1082 if (md->tunnel.ip_dst) {
1083 /* The Linux kernel datapath throws away the tunnel information
1084 * that we supply as metadata. We have to use a "set" action to
1085 * supply it. */
1086 ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
1087 odp_put_tunnel_action(&md->tunnel, &execute_actions);
1088 ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
1089
1090 execute.actions = ofpbuf_data(&execute_actions);
1091 execute.actions_len = ofpbuf_size(&execute_actions);
1092 } else {
1093 execute.actions = action;
1094 execute.actions_len = NLA_ALIGN(action->nla_len);
1095 }
1096
1097 execute.packet = packet;
1098 execute.md = *md;
1099 execute.needs_help = false;
1100 aux->error = aux->dpif->dpif_class->execute(aux->dpif, &execute);
1101
1102 if (md->tunnel.ip_dst) {
1103 ofpbuf_uninit(&execute_actions);
1104 }
1105 break;
1106 }
1107
1108 case OVS_ACTION_ATTR_HASH:
1109 case OVS_ACTION_ATTR_PUSH_VLAN:
1110 case OVS_ACTION_ATTR_POP_VLAN:
1111 case OVS_ACTION_ATTR_PUSH_MPLS:
1112 case OVS_ACTION_ATTR_POP_MPLS:
1113 case OVS_ACTION_ATTR_SET:
1114 case OVS_ACTION_ATTR_SAMPLE:
1115 case OVS_ACTION_ATTR_UNSPEC:
1116 case __OVS_ACTION_ATTR_MAX:
1117 OVS_NOT_REACHED();
1118 }
1119 }
1120
1121 /* Executes 'execute' by performing most of the actions in userspace and
1122 * passing the fully constructed packets to 'dpif' for output and userspace
1123 * actions.
1124 *
1125 * This helps with actions that a given 'dpif' doesn't implement directly. */
1126 static int
1127 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1128 {
1129 struct dpif_execute_helper_aux aux = {dpif, 0};
1130 struct dpif_packet packet, *pp;
1131
1132 COVERAGE_INC(dpif_execute_with_help);
1133
1134 packet.ofpbuf = *execute->packet;
1135 pp = &packet;
1136
1137 odp_execute_actions(&aux, &pp, 1, false, &execute->md, execute->actions,
1138 execute->actions_len, dpif_execute_helper_cb);
1139
1140 /* Even though may_steal is set to false, some actions could modify or
1141 * reallocate the ofpbuf memory. We need to pass those changes to the
1142 * caller. */
1143 *execute->packet = packet.ofpbuf;
1144
1145 return aux.error;
1146 }
1147
1148 /* Causes 'dpif' to perform the 'execute->actions_len' bytes of actions in
1149 * 'execute->actions' on the Ethernet frame in 'execute->packet' and on packet
1150 * metadata in 'execute->md'. The implementation is allowed to modify both the
1151 * '*execute->packet' and 'execute->md'.
1152 *
1153 * Some dpif providers do not implement every action. The Linux kernel
1154 * datapath, in particular, does not implement ARP field modification. If
1155 * 'needs_help' is true, the dpif layer executes in userspace all of the
1156 * actions that it can, and for OVS_ACTION_ATTR_OUTPUT and
1157 * OVS_ACTION_ATTR_USERSPACE actions it passes the packet through to the dpif
1158 * implementation.
1159 *
1160 * This works even if 'execute->actions_len' is too long for a Netlink
1161 * attribute.
1162 *
1163 * Returns 0 if successful, otherwise a positive errno value. */
1164 int
1165 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1166 {
1167 int error;
1168
1169 COVERAGE_INC(dpif_execute);
1170 if (execute->actions_len > 0) {
1171 error = (execute->needs_help || nl_attr_oversized(execute->actions_len)
1172 ? dpif_execute_with_help(dpif, execute)
1173 : dpif->dpif_class->execute(dpif, execute));
1174 } else {
1175 error = 0;
1176 }
1177
1178 log_execute_message(dpif, execute, error);
1179
1180 return error;
1181 }
1182
1183 /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1184 * which they are specified, placing each operation's results in the "output"
1185 * members documented in comments.
1186 *
1187 * This function exists because some datapaths can perform batched operations
1188 * faster than individual operations. */
1189 void
1190 dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
1191 {
1192 if (dpif->dpif_class->operate) {
1193 while (n_ops > 0) {
1194 size_t chunk;
1195
1196 /* Count 'chunk', the number of ops that can be executed without
1197 * needing any help. Ops that need help should be rare, so we
1198 * expect this to ordinarily be 'n_ops', that is, all the ops. */
1199 for (chunk = 0; chunk < n_ops; chunk++) {
1200 struct dpif_op *op = ops[chunk];
1201
1202 if (op->type == DPIF_OP_EXECUTE && op->u.execute.needs_help) {
1203 break;
1204 }
1205 }
1206
1207 if (chunk) {
1208 /* Execute a chunk full of ops that the dpif provider can
1209 * handle itself, without help. */
1210 size_t i;
1211
1212 dpif->dpif_class->operate(dpif, ops, chunk);
1213
1214 for (i = 0; i < chunk; i++) {
1215 struct dpif_op *op = ops[i];
1216
1217 switch (op->type) {
1218 case DPIF_OP_FLOW_PUT:
1219 log_flow_put_message(dpif, &op->u.flow_put, op->error);
1220 break;
1221
1222 case DPIF_OP_FLOW_DEL:
1223 log_flow_del_message(dpif, &op->u.flow_del, op->error);
1224 break;
1225
1226 case DPIF_OP_EXECUTE:
1227 log_execute_message(dpif, &op->u.execute, op->error);
1228 break;
1229 }
1230 }
1231
1232 ops += chunk;
1233 n_ops -= chunk;
1234 } else {
1235 /* Help the dpif provider to execute one op. */
1236 struct dpif_op *op = ops[0];
1237
1238 op->error = dpif_execute(dpif, &op->u.execute);
1239 ops++;
1240 n_ops--;
1241 }
1242 }
1243 } else {
1244 size_t i;
1245
1246 for (i = 0; i < n_ops; i++) {
1247 struct dpif_op *op = ops[i];
1248
1249 switch (op->type) {
1250 case DPIF_OP_FLOW_PUT:
1251 op->error = dpif_flow_put__(dpif, &op->u.flow_put);
1252 break;
1253
1254 case DPIF_OP_FLOW_DEL:
1255 op->error = dpif_flow_del__(dpif, &op->u.flow_del);
1256 break;
1257
1258 case DPIF_OP_EXECUTE:
1259 op->error = dpif_execute(dpif, &op->u.execute);
1260 break;
1261
1262 default:
1263 OVS_NOT_REACHED();
1264 }
1265 }
1266 }
1267 }
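/* A minimal sketch, assuming 'dpif' is an open datapath handle, of batching
 * two flow deletions into a single dpif_operate() call.  'key1'/'key1_len'
 * and 'key2'/'key2_len' are assumed to be Netlink flow keys the caller has
 * already built; they are only placeholders here.
 *
 *     struct dpif_op op1, op2;
 *     struct dpif_op *ops[2] = { &op1, &op2 };
 *
 *     op1.type = DPIF_OP_FLOW_DEL;
 *     op1.u.flow_del.key = key1;
 *     op1.u.flow_del.key_len = key1_len;
 *     op1.u.flow_del.stats = NULL;
 *
 *     op2.type = DPIF_OP_FLOW_DEL;
 *     op2.u.flow_del.key = key2;
 *     op2.u.flow_del.key_len = key2_len;
 *     op2.u.flow_del.stats = NULL;
 *
 *     dpif_operate(dpif, ops, 2);
 *     if (op1.error || op2.error) {
 *         VLOG_WARN("batched flow deletion failed");
 *     }
 */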
1268
1269 /* Returns a string that represents 'type', for use in log messages. */
1270 const char *
1271 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1272 {
1273 switch (type) {
1274 case DPIF_UC_MISS: return "miss";
1275 case DPIF_UC_ACTION: return "action";
1276 case DPIF_N_UC_TYPES: default: return "<unknown>";
1277 }
1278 }
1279
1280 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1281 * if successful, otherwise a positive errno value.
1282 *
1283 * Turning packet receive off and then back on may change the Netlink PID
1284 * assignments returned by dpif_port_get_pid(). If the client does this, it
1285 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1286 * using the new PID assignment. */
1287 int
1288 dpif_recv_set(struct dpif *dpif, bool enable)
1289 {
1290 int error = dpif->dpif_class->recv_set(dpif, enable);
1291 log_operation(dpif, "recv_set", error);
1292 return error;
1293 }
1294
1295 /* Refreshes the poll loops and Netlink sockets associated with each port
1296 * when the number of upcall handlers (upcall receiving threads) is changed
1297 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1298 * recv_set().
1299 *
1300 * Since multiple upcall handlers can read upcalls simultaneously from
1301 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1302 * handler. So, handlers_set() is responsible for the following tasks:
1303 *
1304 * When upcall receiving is enabled, extends or creates the
1305 * configuration to support:
1306 *
1307 * - 'n_handlers' Netlink sockets for each port.
1308 *
1309 * - 'n_handlers' poll loops, one for each upcall handler.
1310 *
1311 * - registering the Netlink sockets for the same upcall handler to
1312 * the corresponding poll loop.
1313 *
1314 * Returns 0 if successful, otherwise a positive errno value. */
1315 int
1316 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1317 {
1318 int error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1319 log_operation(dpif, "handlers_set", error);
1320 return error;
1321 }
1322
1323 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1324 * can be multiple poll loops, 'handler_id' is needed as an index to
1325 * identify the corresponding poll loop. If successful, stores the upcall
1326 * into '*upcall', using 'buf' for storage. Should only be called if
1327 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1328 *
1329 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1330 * 'buf', so their memory cannot be freed separately from 'buf'.
1331 *
1332 * The caller owns the data of 'upcall->packet' and may modify it. If
1333 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1334 * will be reallocated. This requires the data of 'upcall->packet' to be
1335 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1336 * when an error is returned, the 'upcall->packet' may be uninitialized
1337 * and should not be released.
1338 *
1339 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1340 * if no upcall is immediately available. */
1341 int
1342 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1343 struct ofpbuf *buf)
1344 {
1345 int error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1346 if (!error && !VLOG_DROP_DBG(&dpmsg_rl)) {
1347 struct ds flow;
1348 char *packet;
1349
1350 packet = ofp_packet_to_string(ofpbuf_data(&upcall->packet),
1351 ofpbuf_size(&upcall->packet));
1352
1353 ds_init(&flow);
1354 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1355
1356 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1357 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1358 ds_cstr(&flow), packet);
1359
1360 ds_destroy(&flow);
1361 free(packet);
1362 } else if (error && error != EAGAIN) {
1363 log_operation(dpif, "recv", error);
1364 }
1365 return error;
1366 }
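/* A minimal sketch, assuming 'dpif' is an open datapath handle, of a
 * single-handler upcall receive loop.  The 512-byte stub size is arbitrary
 * and deliberately small; a real handler would dispatch the upcall rather
 * than just log it, and would have a way to leave the loop.  Per the
 * contract above, 'upcall.packet' is released with ofpbuf_uninit() once it
 * has been consumed.
 *
 *     uint64_t stub[512 / 8];
 *     struct ofpbuf buf;
 *     struct dpif_upcall upcall;
 *     int error;
 *
 *     dpif_recv_set(dpif, true);
 *     dpif_handlers_set(dpif, 1);
 *     for (;;) {
 *         ofpbuf_use_stub(&buf, stub, sizeof stub);
 *         error = dpif_recv(dpif, 0, &upcall, &buf);
 *         if (!error) {
 *             VLOG_DBG("received %s upcall",
 *                      dpif_upcall_type_to_string(upcall.type));
 *             ofpbuf_uninit(&upcall.packet);
 *         } else if (error == EAGAIN) {
 *             dpif_recv_wait(dpif, 0);
 *             poll_block();
 *         }
 *         ofpbuf_uninit(&buf);
 *     }
 */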
1367
1368 /* Discards all messages that would otherwise be received by dpif_recv() on
1369 * 'dpif'. */
1370 void
1371 dpif_recv_purge(struct dpif *dpif)
1372 {
1373 COVERAGE_INC(dpif_purge);
1374 if (dpif->dpif_class->recv_purge) {
1375 dpif->dpif_class->recv_purge(dpif);
1376 }
1377 }
1378
1379 /* Arranges for the poll loop for an upcall handler to wake up when
1380 * 'dpif' has a message queued to be received with the recv member
1381 * function. Since there can be multiple poll loops, 'handler_id' is
1382 * needed as an index to identify the corresponding poll loop. */
1383 void
1384 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1385 {
1386 dpif->dpif_class->recv_wait(dpif, handler_id);
1387 }
1388
1389 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1390 * and '*engine_id', respectively. */
1391 void
1392 dpif_get_netflow_ids(const struct dpif *dpif,
1393 uint8_t *engine_type, uint8_t *engine_id)
1394 {
1395 *engine_type = dpif->netflow_engine_type;
1396 *engine_id = dpif->netflow_engine_id;
1397 }
1398
1399 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1400 * value used for setting packet priority.
1401 * On success, returns 0 and stores the priority into '*priority'.
1402 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1403 int
1404 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1405 uint32_t *priority)
1406 {
1407 int error = (dpif->dpif_class->queue_to_priority
1408 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1409 priority)
1410 : EOPNOTSUPP);
1411 if (error) {
1412 *priority = 0;
1413 }
1414 log_operation(dpif, "queue_to_priority", error);
1415 return error;
1416 }
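/* A minimal sketch, assuming 'dpif' is an open datapath handle, of
 * translating OpenFlow queue 0 into the datapath priority value used when
 * setting packet priority.
 *
 *     uint32_t priority;
 *
 *     if (!dpif_queue_to_priority(dpif, 0, &priority)) {
 *         VLOG_DBG("queue 0 maps to priority %"PRIu32, priority);
 *     }
 */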
1417 \f
1418 void
1419 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1420 const char *name,
1421 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1422 {
1423 dpif->dpif_class = dpif_class;
1424 dpif->base_name = xstrdup(name);
1425 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1426 dpif->netflow_engine_type = netflow_engine_type;
1427 dpif->netflow_engine_id = netflow_engine_id;
1428 }
1429
1430 /* Undoes the results of initialization.
1431 *
1432 * Normally this function only needs to be called from dpif_close().
1433 * However, it may be called by providers due to an error on opening
1434 * that occurs after initialization. In this case dpif_close() would
1435 * never be called. */
1436 void
1437 dpif_uninit(struct dpif *dpif, bool close)
1438 {
1439 char *base_name = dpif->base_name;
1440 char *full_name = dpif->full_name;
1441
1442 if (close) {
1443 dpif->dpif_class->close(dpif);
1444 }
1445
1446 free(base_name);
1447 free(full_name);
1448 }
1449 \f
1450 static void
1451 log_operation(const struct dpif *dpif, const char *operation, int error)
1452 {
1453 if (!error) {
1454 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1455 } else if (ofperr_is_valid(error)) {
1456 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1457 dpif_name(dpif), operation, ofperr_get_name(error));
1458 } else {
1459 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1460 dpif_name(dpif), operation, ovs_strerror(error));
1461 }
1462 }
1463
1464 static enum vlog_level
1465 flow_message_log_level(int error)
1466 {
1467 /* If flows arrive in a batch, userspace may push down multiple
1468 * unique flow definitions that overlap when wildcards are applied.
1469 * Kernels that support flow wildcarding will reject these flows as
1470 * duplicates (EEXIST), so lower the log level to debug for these
1471 * types of messages. */
1472 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1473 }
1474
1475 static bool
1476 should_log_flow_message(int error)
1477 {
1478 return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
1479 error ? &error_rl : &dpmsg_rl);
1480 }
1481
1482 static void
1483 log_flow_message(const struct dpif *dpif, int error, const char *operation,
1484 const struct nlattr *key, size_t key_len,
1485 const struct nlattr *mask, size_t mask_len,
1486 const struct dpif_flow_stats *stats,
1487 const struct nlattr *actions, size_t actions_len)
1488 {
1489 struct ds ds = DS_EMPTY_INITIALIZER;
1490 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1491 if (error) {
1492 ds_put_cstr(&ds, "failed to ");
1493 }
1494 ds_put_format(&ds, "%s ", operation);
1495 if (error) {
1496 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1497 }
1498 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1499 if (stats) {
1500 ds_put_cstr(&ds, ", ");
1501 dpif_flow_stats_format(stats, &ds);
1502 }
1503 if (actions || actions_len) {
1504 ds_put_cstr(&ds, ", actions:");
1505 format_odp_actions(&ds, actions, actions_len);
1506 }
1507 vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
1508 ds_destroy(&ds);
1509 }
1510
1511 static void
1512 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1513 int error)
1514 {
1515 if (should_log_flow_message(error)) {
1516 struct ds s;
1517
1518 ds_init(&s);
1519 ds_put_cstr(&s, "put");
1520 if (put->flags & DPIF_FP_CREATE) {
1521 ds_put_cstr(&s, "[create]");
1522 }
1523 if (put->flags & DPIF_FP_MODIFY) {
1524 ds_put_cstr(&s, "[modify]");
1525 }
1526 if (put->flags & DPIF_FP_ZERO_STATS) {
1527 ds_put_cstr(&s, "[zero]");
1528 }
1529 log_flow_message(dpif, error, ds_cstr(&s),
1530 put->key, put->key_len, put->mask, put->mask_len,
1531 put->stats, put->actions, put->actions_len);
1532 ds_destroy(&s);
1533 }
1534 }
1535
1536 static void
1537 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1538 int error)
1539 {
1540 if (should_log_flow_message(error)) {
1541 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1542 NULL, 0, !error ? del->stats : NULL, NULL, 0);
1543 }
1544 }
1545
1546 static void
1547 log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
1548 int error)
1549 {
1550 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))) {
1551 struct ds ds = DS_EMPTY_INITIALIZER;
1552 char *packet;
1553
1554 packet = ofp_packet_to_string(ofpbuf_data(execute->packet),
1555 ofpbuf_size(execute->packet));
1556 ds_put_format(&ds, "%s: execute ", dpif_name(dpif));
1557 format_odp_actions(&ds, execute->actions, execute->actions_len);
1558 if (error) {
1559 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1560 }
1561 ds_put_format(&ds, " on packet %s", packet);
1562 vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
1563 ds_destroy(&ds);
1564 free(packet);
1565 }
1566 }