]> git.proxmox.com Git - mirror_ovs.git/blob - lib/dpif.c
dpif: Restore a few lines with form feed characters
[mirror_ovs.git] / lib / dpif.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "dpif-provider.h"
19
20 #include <ctype.h>
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <stdlib.h>
24 #include <string.h>
25
26 #include "coverage.h"
27 #include "dpctl.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "openvswitch/dynamic-string.h"
31 #include "flow.h"
32 #include "netdev.h"
33 #include "netlink.h"
34 #include "odp-execute.h"
35 #include "odp-util.h"
36 #include "openvswitch/ofp-print.h"
37 #include "openvswitch/ofpbuf.h"
38 #include "packets.h"
39 #include "openvswitch/poll-loop.h"
40 #include "route-table.h"
41 #include "seq.h"
42 #include "openvswitch/shash.h"
43 #include "sset.h"
44 #include "timeval.h"
45 #include "tnl-neigh-cache.h"
46 #include "tnl-ports.h"
47 #include "util.h"
48 #include "uuid.h"
49 #include "valgrind.h"
50 #include "openvswitch/ofp-errors.h"
51 #include "openvswitch/vlog.h"
52 #include "lib/netdev-provider.h"
53
VLOG_DEFINE_THIS_MODULE(dpif);

/* Coverage counters bumped by the corresponding dpif_* operations below. */
COVERAGE_DEFINE(dpif_destroy);
COVERAGE_DEFINE(dpif_port_add);
COVERAGE_DEFINE(dpif_port_del);
COVERAGE_DEFINE(dpif_flow_flush);
COVERAGE_DEFINE(dpif_flow_get);
COVERAGE_DEFINE(dpif_flow_put);
COVERAGE_DEFINE(dpif_flow_del);
COVERAGE_DEFINE(dpif_execute);
COVERAGE_DEFINE(dpif_purge);
COVERAGE_DEFINE(dpif_execute_with_help);
COVERAGE_DEFINE(dpif_meter_set);
COVERAGE_DEFINE(dpif_meter_get);
COVERAGE_DEFINE(dpif_meter_del);

/* Datapath providers compiled into this build; registered by
 * dp_initialize(). */
static const struct dpif_class *base_dpif_classes[] = {
#if defined(__linux__) || defined(_WIN32)
    &dpif_netlink_class,
#endif
    &dpif_netdev_class,
};

/* A dpif_class together with the number of dpifs currently open on it. */
struct registered_dpif_class {
    const struct dpif_class *dpif_class;
    int refcount;               /* Number of references from open dpifs. */
};
/* Maps datapath type name to "struct registered_dpif_class *". */
static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
/* Datapath types that must never register; see dp_blacklist_provider(). */
static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);

/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;

/* Rate limit for individual messages going to or from the datapath, output at
 * DBG level.  This is very high because, if these are enabled, it is because
 * we really need to see them. */
static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);

/* Not really much point in logging many dpif errors. */
static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);

static void log_operation(const struct dpif *, const char *operation,
                          int error);
static bool should_log_flow_message(const struct vlog_module *module,
                                    int error);

/* Incremented whenever tnl route, arp, etc changes. */
struct seq *tnl_conf_seq;
102
/* Returns true if ports of the given 'type' act as "internal" devices.
 *
 * The userspace datapath has no true internal devices; tap devices play
 * that role there, so both spellings count as internal. */
static bool
dpif_is_internal_port(const char *type)
{
    if (!strcmp(type, "internal")) {
        return true;
    }
    return !strcmp(type, "tap");
}
111
/* One-time initialization of the datapath layer: creates the tunnel
 * configuration change sequence, registers dpctl unixctl commands,
 * initializes the tunnel port map, tunnel neighbor cache and route table,
 * and registers every compiled-in dpif provider.  Safe to call from any
 * number of threads; only the first caller does the work. */
static void
dp_initialize(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovsthread_once_start(&once)) {
        int i;

        /* Created before provider registration so providers can rely on it. */
        tnl_conf_seq = seq_create();
        dpctl_unixctl_register();
        tnl_port_map_init();
        tnl_neigh_cache_init();
        route_table_init();

        for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
            dp_register_provider(base_dpif_classes[i]);
        }

        ovsthread_once_done(&once);
    }
}
133
134 static int
135 dp_register_provider__(const struct dpif_class *new_class)
136 {
137 struct registered_dpif_class *registered_class;
138 int error;
139
140 if (sset_contains(&dpif_blacklist, new_class->type)) {
141 VLOG_DBG("attempted to register blacklisted provider: %s",
142 new_class->type);
143 return EINVAL;
144 }
145
146 if (shash_find(&dpif_classes, new_class->type)) {
147 VLOG_WARN("attempted to register duplicate datapath provider: %s",
148 new_class->type);
149 return EEXIST;
150 }
151
152 error = new_class->init ? new_class->init() : 0;
153 if (error) {
154 VLOG_WARN("failed to initialize %s datapath class: %s",
155 new_class->type, ovs_strerror(error));
156 return error;
157 }
158
159 registered_class = xmalloc(sizeof *registered_class);
160 registered_class->dpif_class = new_class;
161 registered_class->refcount = 0;
162
163 shash_add(&dpif_classes, new_class->type, registered_class);
164
165 return 0;
166 }
167
/* Registers a new datapath provider.  After successful registration, new
 * datapaths of that type can be opened using dpif_open().
 *
 * Thread-safe wrapper around dp_register_provider__(). */
int
dp_register_provider(const struct dpif_class *new_class)
{
    int error;

    ovs_mutex_lock(&dpif_mutex);
    error = dp_register_provider__(new_class);
    ovs_mutex_unlock(&dpif_mutex);

    return error;
}
181
182 /* Unregisters a datapath provider. 'type' must have been previously
183 * registered and not currently be in use by any dpifs. After unregistration
184 * new datapaths of that type cannot be opened using dpif_open(). */
185 static int
186 dp_unregister_provider__(const char *type)
187 {
188 struct shash_node *node;
189 struct registered_dpif_class *registered_class;
190
191 node = shash_find(&dpif_classes, type);
192 if (!node) {
193 return EAFNOSUPPORT;
194 }
195
196 registered_class = node->data;
197 if (registered_class->refcount) {
198 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
199 return EBUSY;
200 }
201
202 shash_delete(&dpif_classes, node);
203 free(registered_class);
204
205 return 0;
206 }
207
/* Unregisters a datapath provider.  'type' must have been previously
 * registered and not currently be in use by any dpifs.  After unregistration
 * new datapaths of that type cannot be opened using dpif_open().
 *
 * Thread-safe wrapper around dp_unregister_provider__(). */
int
dp_unregister_provider(const char *type)
{
    int error;

    /* Ensure the built-in providers are registered before removing one. */
    dp_initialize();

    ovs_mutex_lock(&dpif_mutex);
    error = dp_unregister_provider__(type);
    ovs_mutex_unlock(&dpif_mutex);

    return error;
}
224
/* Blacklists a provider.  Causes future calls of dp_register_provider() with
 * a dpif_class which implements 'type' to fail.
 *
 * Note: does not unregister an already-registered provider of that type. */
void
dp_blacklist_provider(const char *type)
{
    ovs_mutex_lock(&dpif_mutex);
    sset_add(&dpif_blacklist, type);
    ovs_mutex_unlock(&dpif_mutex);
}
234
235 /* Adds the types of all currently registered datapath providers to 'types'.
236 * The caller must first initialize the sset. */
237 void
238 dp_enumerate_types(struct sset *types)
239 {
240 struct shash_node *node;
241
242 dp_initialize();
243
244 ovs_mutex_lock(&dpif_mutex);
245 SHASH_FOR_EACH(node, &dpif_classes) {
246 const struct registered_dpif_class *registered_class = node->data;
247 sset_add(types, registered_class->dpif_class->type);
248 }
249 ovs_mutex_unlock(&dpif_mutex);
250 }
251
/* Releases one reference to 'rc', as taken by dp_class_lookup().  Asserts
 * that the refcount was nonzero. */
static void
dp_class_unref(struct registered_dpif_class *rc)
{
    ovs_mutex_lock(&dpif_mutex);
    ovs_assert(rc->refcount);
    rc->refcount--;
    ovs_mutex_unlock(&dpif_mutex);
}
260
261 static struct registered_dpif_class *
262 dp_class_lookup(const char *type)
263 {
264 struct registered_dpif_class *rc;
265
266 ovs_mutex_lock(&dpif_mutex);
267 rc = shash_find_data(&dpif_classes, type);
268 if (rc) {
269 rc->refcount++;
270 }
271 ovs_mutex_unlock(&dpif_mutex);
272
273 return rc;
274 }
275
276 /* Clears 'names' and enumerates the names of all known created datapaths with
277 * the given 'type'. The caller must first initialize the sset. Returns 0 if
278 * successful, otherwise a positive errno value.
279 *
280 * Some kinds of datapaths might not be practically enumerable. This is not
281 * considered an error. */
282 int
283 dp_enumerate_names(const char *type, struct sset *names)
284 {
285 struct registered_dpif_class *registered_class;
286 const struct dpif_class *dpif_class;
287 int error;
288
289 dp_initialize();
290 sset_clear(names);
291
292 registered_class = dp_class_lookup(type);
293 if (!registered_class) {
294 VLOG_WARN("could not enumerate unknown type: %s", type);
295 return EAFNOSUPPORT;
296 }
297
298 dpif_class = registered_class->dpif_class;
299 error = (dpif_class->enumerate
300 ? dpif_class->enumerate(names, dpif_class)
301 : 0);
302 if (error) {
303 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
304 ovs_strerror(error));
305 }
306 dp_class_unref(registered_class);
307
308 return error;
309 }
310
/* Parses 'datapath_name_', which is of the form [type@]name, into its
 * component pieces.  'name' and 'type' must be freed by the caller.
 *
 * The returned 'type' is normalized, as if by dpif_normalize_type(), so an
 * empty or missing type comes back as "system". */
void
dp_parse_name(const char *datapath_name_, char **name, char **type)
{
    char *datapath_name = xstrdup(datapath_name_);
    char *separator = strchr(datapath_name, '@');

    if (separator) {
        /* "type@name": the bytes before '@' are the type, the rest the name.
         * Normalize the *type* (the original code normalized the name
         * instead, so "@br0" returned type "" rather than "system",
         * violating the contract above). */
        *separator = '\0';
        *type = xstrdup(dpif_normalize_type(datapath_name));
        *name = xstrdup(separator + 1);
        free(datapath_name);
    } else {
        /* No '@': the whole string is the name; the type defaults. */
        *name = datapath_name;
        *type = xstrdup(dpif_normalize_type(NULL));
    }
}
331
/* Opens (and with 'create', creates) datapath 'name' of the given 'type'.
 * On success stores the datapath in '*dpifp' and returns 0; on failure
 * stores NULL and returns a positive errno value.  After a successful open,
 * each existing non-internal port's netdev is registered in the global
 * netdev-ports map. */
static int
do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
{
    struct dpif *dpif = NULL;
    int error;
    struct registered_dpif_class *registered_class;

    dp_initialize();

    type = dpif_normalize_type(type);
    /* Takes a reference on the class; released below on failure, otherwise
     * held until dpif_close(). */
    registered_class = dp_class_lookup(type);
    if (!registered_class) {
        VLOG_WARN("could not create datapath %s of unknown type %s", name,
                  type);
        error = EAFNOSUPPORT;
        goto exit;
    }

    error = registered_class->dpif_class->open(registered_class->dpif_class,
                                               name, create, &dpif);
    if (!error) {
        struct dpif_port_dump port_dump;
        struct dpif_port dpif_port;

        ovs_assert(dpif->dpif_class == registered_class->dpif_class);

        /* Register each pre-existing non-internal port so that port-number
         * to netdev lookups work. */
        DPIF_PORT_FOR_EACH(&dpif_port, &port_dump, dpif) {
            struct netdev *netdev;
            int err;

            if (dpif_is_internal_port(dpif_port.type)) {
                continue;
            }

            err = netdev_open(dpif_port.name, dpif_port.type, &netdev);

            if (!err) {
                netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
                netdev_close(netdev);
            } else {
                /* Best effort: log and keep registering the other ports. */
                VLOG_WARN("could not open netdev %s type %s: %s",
                          dpif_port.name, dpif_port.type, ovs_strerror(err));
            }
        }
    } else {
        /* Open failed: drop the reference taken by dp_class_lookup(). */
        dp_class_unref(registered_class);
    }

exit:
    *dpifp = error ? NULL : dpif;
    return error;
}
384
/* Tries to open an existing datapath named 'name' and type 'type'.  Will fail
 * if no datapath with 'name' and 'type' exists.  'type' may be either NULL or
 * the empty string to specify the default system type.  Returns 0 if
 * successful, otherwise a positive errno value.  On success stores a pointer
 * to the datapath in '*dpifp', otherwise a null pointer. */
int
dpif_open(const char *name, const char *type, struct dpif **dpifp)
{
    /* create == false: never create a new datapath here. */
    return do_open(name, type, false, dpifp);
}
395
/* Tries to create and open a new datapath with the given 'name' and 'type'.
 * 'type' may be either NULL or the empty string to specify the default system
 * type.  Will fail if a datapath with 'name' and 'type' already exists.
 * Returns 0 if successful, otherwise a positive errno value.  On success
 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
int
dpif_create(const char *name, const char *type, struct dpif **dpifp)
{
    /* create == true: the datapath must not already exist. */
    return do_open(name, type, true, dpifp);
}
406
/* Tries to open a datapath with the given 'name' and 'type', creating it if
 * it does not exist.  'type' may be either NULL or the empty string to
 * specify the default system type.  Returns 0 if successful, otherwise a
 * positive errno value.  On success stores a pointer to the datapath in
 * '*dpifp', otherwise a null pointer. */
int
dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
{
    int error = dpif_create(name, type, dpifp);

    if (!error) {
        return 0;
    }
    if (error != EEXIST && error != EBUSY) {
        VLOG_WARN("failed to create datapath %s: %s",
                  name, ovs_strerror(error));
        return error;
    }

    /* The datapath already exists; fall back to opening it. */
    error = dpif_open(name, type, dpifp);
    if (error) {
        VLOG_WARN("datapath %s already exists but cannot be opened: %s",
                  name, ovs_strerror(error));
    }
    return error;
}
430
431 static void
432 dpif_remove_netdev_ports(struct dpif *dpif) {
433 struct dpif_port_dump port_dump;
434 struct dpif_port dpif_port;
435
436 DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, dpif) {
437 if (!dpif_is_internal_port(dpif_port.type)) {
438 netdev_ports_remove(dpif_port.port_no, dpif->dpif_class);
439 }
440 }
441 }
442
443 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
444 * itself; call dpif_delete() first, instead, if that is desirable. */
445 void
446 dpif_close(struct dpif *dpif)
447 {
448 if (dpif) {
449 struct registered_dpif_class *rc;
450
451 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
452
453 if (rc->refcount == 1) {
454 dpif_remove_netdev_ports(dpif);
455 }
456 dpif_uninit(dpif, true);
457 dp_class_unref(rc);
458 }
459 }
460
461 /* Performs periodic work needed by 'dpif'. */
462 bool
463 dpif_run(struct dpif *dpif)
464 {
465 if (dpif->dpif_class->run) {
466 return dpif->dpif_class->run(dpif);
467 }
468 return false;
469 }
470
/* Arranges for poll_block() to wake up when dp_run() needs to be called for
 * 'dpif'.  No-op for classes without a wait() hook. */
void
dpif_wait(struct dpif *dpif)
{
    if (dpif->dpif_class->wait) {
        dpif->dpif_class->wait(dpif);
    }
}
480
/* Returns the name of datapath 'dpif' prefixed with the type
 * (for use in log messages).  The string is owned by 'dpif'. */
const char *
dpif_name(const struct dpif *dpif)
{
    return dpif->full_name;
}
488
/* Returns the name of datapath 'dpif' without the type
 * (for use in device names).  The string is owned by 'dpif'. */
const char *
dpif_base_name(const struct dpif *dpif)
{
    return dpif->base_name;
}
496
/* Returns the type of datapath 'dpif' (e.g. the string registered by its
 * provider class). */
const char *
dpif_type(const struct dpif *dpif)
{
    return dpif->dpif_class->type;
}
503
/* Returns the fully spelled out name for the given datapath 'type':
 * "system" when 'type' is NULL or empty, otherwise 'type' itself.
 *
 * Normalized type strings can be compared with strcmp().  Unnormalized type
 * strings might be the same even if they have different spellings. */
const char *
dpif_normalize_type(const char *type)
{
    if (type == NULL || type[0] == '\0') {
        return "system";
    }
    return type;
}
513
514 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
515 * ports. After calling this function, it does not make sense to pass 'dpif'
516 * to any functions other than dpif_name() or dpif_close(). */
517 int
518 dpif_delete(struct dpif *dpif)
519 {
520 int error;
521
522 COVERAGE_INC(dpif_destroy);
523
524 error = dpif->dpif_class->destroy(dpif);
525 log_operation(dpif, "delete", error);
526 return error;
527 }
528
529 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
530 * otherwise a positive errno value. */
531 int
532 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
533 {
534 int error = dpif->dpif_class->get_stats(dpif, stats);
535 if (error) {
536 memset(stats, 0, sizeof *stats);
537 }
538 log_operation(dpif, "get_stats", error);
539 return error;
540 }
541
542 const char *
543 dpif_port_open_type(const char *datapath_type, const char *port_type)
544 {
545 struct registered_dpif_class *rc;
546
547 datapath_type = dpif_normalize_type(datapath_type);
548
549 ovs_mutex_lock(&dpif_mutex);
550 rc = shash_find_data(&dpif_classes, datapath_type);
551 if (rc && rc->dpif_class->port_open_type) {
552 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
553 }
554 ovs_mutex_unlock(&dpif_mutex);
555
556 return port_type;
557 }
558
/* Attempts to add 'netdev' as a port on 'dpif'.  If 'port_nop' is
 * non-null and its value is not ODPP_NONE, then attempts to use the
 * value as the port number.
 *
 * If successful, returns 0 and sets '*port_nop' to the new port's port
 * number (if 'port_nop' is non-null).  On failure, returns a positive
 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
 * non-null). */
int
dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
{
    const char *netdev_name = netdev_get_name(netdev);
    odp_port_t port_no = ODPP_NONE;
    int error;

    COVERAGE_INC(dpif_port_add);

    if (port_nop) {
        /* Caller-requested port number, or ODPP_NONE for "any". */
        port_no = *port_nop;
    }

    error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
                    dpif_name(dpif), netdev_name, port_no);

        if (!dpif_is_internal_port(netdev_get_type(netdev))) {
            /* Record the new non-internal port in the global netdev-ports
             * map so that port numbers can be mapped back to netdevs. */
            struct dpif_port dpif_port;

            dpif_port.type = CONST_CAST(char *, netdev_get_type(netdev));
            dpif_port.name = CONST_CAST(char *, netdev_name);
            dpif_port.port_no = port_no;
            netdev_ports_insert(netdev, dpif->dpif_class, &dpif_port);
        }
    } else {
        if (error != EEXIST) {
            VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
                         dpif_name(dpif), netdev_name, ovs_strerror(error));
        } else {
            /* It's fairly common for upper layers to try to add a duplicate
             * port, and they know how to handle it properly. */
        }
        port_no = ODPP_NONE;
    }
    if (port_nop) {
        *port_nop = port_no;
    }
    return error;
}
609
610 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
611 * otherwise a positive errno value. */
612 int
613 dpif_port_del(struct dpif *dpif, odp_port_t port_no, bool local_delete)
614 {
615 int error = 0;
616
617 COVERAGE_INC(dpif_port_del);
618
619 if (!local_delete) {
620 error = dpif->dpif_class->port_del(dpif, port_no);
621 if (!error) {
622 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
623 dpif_name(dpif), port_no);
624 } else {
625 log_operation(dpif, "port_del", error);
626 }
627 }
628
629 netdev_ports_remove(port_no, dpif->dpif_class);
630 return error;
631 }
632
/* Makes a deep copy of 'src' into 'dst'.  The caller owns the copied
 * strings and must release them with dpif_port_destroy(). */
void
dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
{
    dst->name = xstrdup(src->name);
    dst->type = xstrdup(src->type);
    dst->port_no = src->port_no;
}
641
/* Frees memory allocated to members of 'dpif_port' ('port_no' needs no
 * cleanup).
 *
 * Do not call this function on a dpif_port obtained from
 * dpif_port_dump_next(): that function retains ownership of the data in the
 * dpif_port. */
void
dpif_port_destroy(struct dpif_port *dpif_port)
{
    free(dpif_port->name);
    free(dpif_port->type);
}
653
654 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
655 * true; otherwise, returns false. */
656 bool
657 dpif_port_exists(const struct dpif *dpif, const char *devname)
658 {
659 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
660 if (error != 0 && error != ENODEV) {
661 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
662 dpif_name(dpif), devname, ovs_strerror(error));
663 }
664
665 return !error;
666 }
667
668 /* Refreshes configuration of 'dpif's port. */
669 int
670 dpif_port_set_config(struct dpif *dpif, odp_port_t port_no,
671 const struct smap *cfg)
672 {
673 int error = 0;
674
675 if (dpif->dpif_class->port_set_config) {
676 error = dpif->dpif_class->port_set_config(dpif, port_no, cfg);
677 if (error) {
678 log_operation(dpif, "port_set_config", error);
679 }
680 }
681
682 return error;
683 }
684
/* Looks up port number 'port_no' in 'dpif'.  On success, returns 0 and
 * initializes '*port' appropriately; on failure, returns a positive errno
 * value and zeroes '*port'.
 *
 * Returns ENODEV if the port doesn't exist.
 *
 * The caller owns the data in 'port' and must free it with
 * dpif_port_destroy() when it is no longer needed. */
int
dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
                          struct dpif_port *port)
{
    int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
                    dpif_name(dpif), port_no, port->name);
    } else {
        /* Leave no stale pointers behind for the caller. */
        memset(port, 0, sizeof *port);
        VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
                     dpif_name(dpif), port_no, ovs_strerror(error));
    }
    return error;
}
708
/* Looks up port named 'devname' in 'dpif'.  On success, returns 0 and
 * initializes '*port' appropriately; on failure, returns a positive errno
 * value and zeroes '*port'.
 *
 * Returns ENODEV if the port doesn't exist.
 *
 * The caller owns the data in 'port' and must free it with
 * dpif_port_destroy() when it is no longer needed. */
int
dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
                        struct dpif_port *port)
{
    int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
    if (!error) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
                    dpif_name(dpif), devname, port->port_no);
    } else {
        memset(port, 0, sizeof *port);

        /* For ENODEV we use DBG level because the caller is probably
         * interested in whether 'dpif' actually has a port 'devname', so that
         * it's not an issue worth logging if it doesn't.  Other errors are
         * uncommon and more likely to indicate a real problem. */
        VLOG_RL(&error_rl, error == ENODEV ? VLL_DBG : VLL_WARN,
                "%s: failed to query port %s: %s",
                dpif_name(dpif), devname, ovs_strerror(error));
    }
    return error;
}
738
739 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
740 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
741 * flows whose packets arrived on port 'port_no'.
742 *
743 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
744 * allocated to any port, that the client may use for special purposes.
745 *
746 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
747 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
748 * disabled and then re-enabled, so a client that does that must be prepared to
749 * update all of the flows that it installed that contain
750 * OVS_ACTION_ATTR_USERSPACE actions. */
751 uint32_t
752 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no)
753 {
754 return (dpif->dpif_class->port_get_pid
755 ? (dpif->dpif_class->port_get_pid)(dpif, port_no)
756 : 0);
757 }
758
759 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
760 * the port's name into the 'name_size' bytes in 'name', ensuring that the
761 * result is null-terminated. On failure, returns a positive errno value and
762 * makes 'name' the empty string. */
763 int
764 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
765 char *name, size_t name_size)
766 {
767 struct dpif_port port;
768 int error;
769
770 ovs_assert(name_size > 0);
771
772 error = dpif_port_query_by_number(dpif, port_no, &port);
773 if (!error) {
774 ovs_strlcpy(name, port.name, name_size);
775 dpif_port_destroy(&port);
776 } else {
777 *name = '\0';
778 }
779 return error;
780 }
781
/* Initializes 'dump' to begin dumping the ports in a dpif.
 *
 * This function provides no status indication.  An error status for the entire
 * dump operation is provided when it is completed by calling
 * dpif_port_dump_done().
 */
void
dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
{
    dump->dpif = dpif;
    /* Any start error is latched in 'dump->error' and reported at the end. */
    dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
    log_operation(dpif, "port_dump_start", dump->error);
}
795
/* Attempts to retrieve another port from 'dump', which must have been
 * initialized with dpif_port_dump_start().  On success, stores a new dpif_port
 * into 'port' and returns true.  On failure, returns false.
 *
 * Failure might indicate an actual error or merely that the last port has been
 * dumped.  An error status for the entire dump operation is provided when it
 * is completed by calling dpif_port_dump_done().
 *
 * The dpif owns the data stored in 'port'.  It will remain valid until at
 * least the next time 'dump' is passed to dpif_port_dump_next() or
 * dpif_port_dump_done(). */
bool
dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
{
    const struct dpif *dpif = dump->dpif;

    /* A previous call already failed (or finished); stay terminated. */
    if (dump->error) {
        return false;
    }

    dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
    if (dump->error == EOF) {
        VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
    } else {
        log_operation(dpif, "port_dump_next", dump->error);
    }

    if (dump->error) {
        /* EOF or hard error: release the provider's dump state here, so
         * that dpif_port_dump_done() only has to report the status. */
        dpif->dpif_class->port_dump_done(dpif, dump->state);
        return false;
    }
    return true;
}
829
830 /* Completes port table dump operation 'dump', which must have been initialized
831 * with dpif_port_dump_start(). Returns 0 if the dump operation was
832 * error-free, otherwise a positive errno value describing the problem. */
833 int
834 dpif_port_dump_done(struct dpif_port_dump *dump)
835 {
836 const struct dpif *dpif = dump->dpif;
837 if (!dump->error) {
838 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
839 log_operation(dpif, "port_dump_done", dump->error);
840 }
841 return dump->error == EOF ? 0 : dump->error;
842 }
843
844 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
845 * 'dpif' has changed, this function does one of the following:
846 *
847 * - Stores the name of the device that was added to or deleted from 'dpif' in
848 * '*devnamep' and returns 0. The caller is responsible for freeing
849 * '*devnamep' (with free()) when it no longer needs it.
850 *
851 * - Returns ENOBUFS and sets '*devnamep' to NULL.
852 *
853 * This function may also return 'false positives', where it returns 0 and
854 * '*devnamep' names a device that was not actually added or deleted or it
855 * returns ENOBUFS without any change.
856 *
857 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
858 * return other positive errno values to indicate that something has gone
859 * wrong. */
860 int
861 dpif_port_poll(const struct dpif *dpif, char **devnamep)
862 {
863 int error = dpif->dpif_class->port_poll(dpif, devnamep);
864 if (error) {
865 *devnamep = NULL;
866 }
867 return error;
868 }
869
/* Arranges for the poll loop to wake up when port_poll(dpif) will return a
 * value other than EAGAIN.  The hook is called unconditionally, so every
 * class must provide port_poll_wait. */
void
dpif_port_poll_wait(const struct dpif *dpif)
{
    dpif->dpif_class->port_poll_wait(dpif);
}
877
878 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
879 * arguments must have been initialized through a call to flow_extract().
880 * 'used' is stored into stats->used. */
881 void
882 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
883 long long int used, struct dpif_flow_stats *stats)
884 {
885 stats->tcp_flags = ntohs(flow->tcp_flags);
886 stats->n_bytes = dp_packet_size(packet);
887 stats->n_packets = 1;
888 stats->used = used;
889 }
890
891 /* Appends a human-readable representation of 'stats' to 's'. */
892 void
893 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
894 {
895 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
896 stats->n_packets, stats->n_bytes);
897 if (stats->used) {
898 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
899 } else {
900 ds_put_format(s, "never");
901 }
902 if (stats->tcp_flags) {
903 ds_put_cstr(s, ", flags:");
904 packet_format_tcp_flags(s, stats->tcp_flags);
905 }
906 }
907
/* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'.
 *
 * The hash is keyed with a lazily chosen per-process random secret, so
 * results are stable within a process but differ across runs, and the
 * result is stamped as a version-4 UUID so it can serve as one. */
void
dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
               const void *key, size_t key_len, ovs_u128 *hash)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static uint32_t secret;

    if (ovsthread_once_start(&once)) {
        secret = random_uint32();
        ovsthread_once_done(&once);
    }
    hash_bytes128(key, key_len, secret, hash);
    uuid_set_bits_v4((struct uuid *)hash);
}
923
924 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
925 * positive errno value. */
926 int
927 dpif_flow_flush(struct dpif *dpif)
928 {
929 int error;
930
931 COVERAGE_INC(dpif_flow_flush);
932
933 error = dpif->dpif_class->flow_flush(dpif);
934 log_operation(dpif, "flow_flush", error);
935 return error;
936 }
937
/* Attempts to install 'key' into the datapath, fetches it, then deletes it.
 * Returns true if the datapath supported installing 'flow', false otherwise.
 */
bool
dpif_probe_feature(struct dpif *dpif, const char *name,
                   const struct ofpbuf *key, const struct ofpbuf *actions,
                   const ovs_u128 *ufid)
{
    struct dpif_flow flow;
    struct ofpbuf reply;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    bool enable_feature = false;
    int error;
    /* 'actions' may legitimately be NULL for a probe with no actions. */
    const struct nlattr *nl_actions = actions ? actions->data : NULL;
    const size_t nl_actions_size = actions ? actions->size : 0;

    /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
     * restarted) at just the right time such that feature probes from the
     * previous run are still present in the datapath. */
    error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
                          key->data, key->size, NULL, 0,
                          nl_actions, nl_actions_size,
                          ufid, NON_PMD_CORE_ID, NULL);
    if (error) {
        /* EINVAL/EOVERFLOW are the expected "unsupported" responses; only
         * other errors are surprising enough to warn about. */
        if (error != EINVAL && error != EOVERFLOW) {
            VLOG_WARN("%s: %s flow probe failed (%s)",
                      dpif_name(dpif), name, ovs_strerror(error));
        }
        return false;
    }

    /* Fetch the flow back: the feature counts as supported only if the
     * datapath kept the UFID we supplied (when one was supplied). */
    ofpbuf_use_stack(&reply, &stub, sizeof stub);
    error = dpif_flow_get(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, &reply, &flow);
    if (!error
        && (!ufid || (flow.ufid_present
                      && ovs_u128_equals(*ufid, flow.ufid)))) {
        enable_feature = true;
    }

    /* Always clean up the probe flow, whatever the outcome. */
    error = dpif_flow_del(dpif, key->data, key->size, ufid,
                          NON_PMD_CORE_ID, NULL);
    if (error) {
        VLOG_WARN("%s: failed to delete %s feature probe flow",
                  dpif_name(dpif), name);
    }

    return enable_feature;
}
987
988 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
989 int
990 dpif_flow_get(struct dpif *dpif,
991 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
992 const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
993 {
994 struct dpif_op *opp;
995 struct dpif_op op;
996
997 op.type = DPIF_OP_FLOW_GET;
998 op.flow_get.key = key;
999 op.flow_get.key_len = key_len;
1000 op.flow_get.ufid = ufid;
1001 op.flow_get.pmd_id = pmd_id;
1002 op.flow_get.buffer = buf;
1003
1004 memset(flow, 0, sizeof *flow);
1005 op.flow_get.flow = flow;
1006 op.flow_get.flow->key = key;
1007 op.flow_get.flow->key_len = key_len;
1008
1009 opp = &op;
1010 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1011
1012 return op.error;
1013 }
1014
1015 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
1016 int
1017 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
1018 const struct nlattr *key, size_t key_len,
1019 const struct nlattr *mask, size_t mask_len,
1020 const struct nlattr *actions, size_t actions_len,
1021 const ovs_u128 *ufid, const unsigned pmd_id,
1022 struct dpif_flow_stats *stats)
1023 {
1024 struct dpif_op *opp;
1025 struct dpif_op op;
1026
1027 op.type = DPIF_OP_FLOW_PUT;
1028 op.flow_put.flags = flags;
1029 op.flow_put.key = key;
1030 op.flow_put.key_len = key_len;
1031 op.flow_put.mask = mask;
1032 op.flow_put.mask_len = mask_len;
1033 op.flow_put.actions = actions;
1034 op.flow_put.actions_len = actions_len;
1035 op.flow_put.ufid = ufid;
1036 op.flow_put.pmd_id = pmd_id;
1037 op.flow_put.stats = stats;
1038
1039 opp = &op;
1040 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1041
1042 return op.error;
1043 }
1044
1045 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
1046 int
1047 dpif_flow_del(struct dpif *dpif,
1048 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
1049 const unsigned pmd_id, struct dpif_flow_stats *stats)
1050 {
1051 struct dpif_op *opp;
1052 struct dpif_op op;
1053
1054 op.type = DPIF_OP_FLOW_DEL;
1055 op.flow_del.key = key;
1056 op.flow_del.key_len = key_len;
1057 op.flow_del.ufid = ufid;
1058 op.flow_del.pmd_id = pmd_id;
1059 op.flow_del.stats = stats;
1060 op.flow_del.terse = false;
1061
1062 opp = &op;
1063 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1064
1065 return op.error;
1066 }
1067
1068 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1069 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1070 * be returned in the dump. Otherwise, all fields will be returned.
1071 *
1072 * This function always successfully returns a dpif_flow_dump. Error
1073 * reporting is deferred to dpif_flow_dump_destroy(). */
1074 struct dpif_flow_dump *
1075 dpif_flow_dump_create(const struct dpif *dpif, bool terse,
1076 struct dpif_flow_dump_types *types)
1077 {
1078 return dpif->dpif_class->flow_dump_create(dpif, terse, types);
1079 }
1080
1081 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1082 * All dpif_flow_dump_thread structures previously created for 'dump' must
1083 * previously have been destroyed.
1084 *
1085 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1086 * value describing the problem. */
1087 int
1088 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1089 {
1090 const struct dpif *dpif = dump->dpif;
1091 int error = dpif->dpif_class->flow_dump_destroy(dump);
1092 log_operation(dpif, "flow_dump_destroy", error);
1093 return error == EOF ? 0 : error;
1094 }
1095
1096 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1097 struct dpif_flow_dump_thread *
1098 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1099 {
1100 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1101 }
1102
1103 /* Releases 'thread'. */
1104 void
1105 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1106 {
1107 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1108 }
1109
1110 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1111 * if and only if no flows remained to be retrieved, otherwise a positive
1112 * number reflecting the number of elements in 'flows[]' that were updated.
1113 * The number of flows returned might be less than 'max_flows' because
1114 * fewer than 'max_flows' remained, because this particular datapath does not
1115 * benefit from batching, or because an error occurred partway through
1116 * retrieval. Thus, the caller should continue calling until a 0 return value,
1117 * even if intermediate return values are less than 'max_flows'.
1118 *
1119 * No error status is immediately provided. An error status for the entire
1120 * dump operation is provided when it is completed by calling
1121 * dpif_flow_dump_destroy().
1122 *
1123 * All of the data stored into 'flows' is owned by the datapath, not by the
1124 * caller, and the caller must not modify or free it. The datapath guarantees
1125 * that it remains accessible and unchanged until the first of:
1126 * - The next call to dpif_flow_dump_next() for 'thread', or
1127 * - The next rcu quiescent period. */
1128 int
1129 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1130 struct dpif_flow *flows, int max_flows)
1131 {
1132 struct dpif *dpif = thread->dpif;
1133 int n;
1134
1135 ovs_assert(max_flows > 0);
1136 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1137 if (n > 0) {
1138 struct dpif_flow *f;
1139
1140 for (f = flows; f < &flows[n]
1141 && should_log_flow_message(&this_module, 0); f++) {
1142 log_flow_message(dpif, 0, &this_module, "flow_dump",
1143 f->key, f->key_len, f->mask, f->mask_len,
1144 &f->ufid, &f->stats, f->actions, f->actions_len);
1145 }
1146 } else {
1147 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
1148 }
1149 return n;
1150 }
1151
/* Per-call context threaded through dpif_execute_helper_cb() while
 * dpif_execute_with_help() walks an action list. */
struct dpif_execute_helper_aux {
    struct dpif *dpif;          /* Datapath used for the sub-executions. */
    const struct flow *flow;    /* Flow passed through to dpif_execute(). */
    int error;                  /* Error from the most recent dpif_execute(). */
    const struct nlattr *meter_action; /* Non-NULL, if have a meter action. */
};
1158
/* This is called for actions that need the context of the datapath to be
 * meaningful.  'aux_' is a struct dpif_execute_helper_aux; 'packets_' must
 * hold exactly one packet.  Actions the datapath must perform (output,
 * tunnel push/pop, userspace, recirc, ct) are forwarded to dpif_execute();
 * meter actions are deferred and replayed just before the next forwarded
 * action.  Any error is recorded in aux->error. */
static void
dpif_execute_helper_cb(void *aux_, struct dp_packet_batch *packets_,
                       const struct nlattr *action, bool should_steal)
{
    struct dpif_execute_helper_aux *aux = aux_;
    int type = nl_attr_type(action);
    struct dp_packet *packet = packets_->packets[0];

    /* The caller (dpif_execute_with_help) always supplies single-packet
     * batches. */
    ovs_assert(packets_->count == 1);

    switch ((enum ovs_action_attr)type) {
    case OVS_ACTION_ATTR_METER:
        /* Maintain a pointer to the first meter action seen. */
        if (!aux->meter_action) {
            aux->meter_action = action;
        }
        break;

    case OVS_ACTION_ATTR_CT:
    case OVS_ACTION_ATTR_OUTPUT:
    case OVS_ACTION_ATTR_TUNNEL_PUSH:
    case OVS_ACTION_ATTR_TUNNEL_POP:
    case OVS_ACTION_ATTR_USERSPACE:
    case OVS_ACTION_ATTR_RECIRC: {
        struct dpif_execute execute;
        struct ofpbuf execute_actions;
        uint64_t stub[256 / 8];
        struct pkt_metadata *md = &packet->md;

        /* If tunnel metadata or pending meter actions must accompany this
         * action, build a combined action list in 'execute_actions';
         * otherwise execute 'action' alone. */
        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_use_stub(&execute_actions, stub, sizeof stub);

            if (aux->meter_action) {
                const struct nlattr *a = aux->meter_action;

                /* XXX: This code collects meter actions since the last action
                 * execution via the datapath to be executed right before the
                 * current action that needs to be executed by the datapath.
                 * This is only an approximation, but better than nothing.
                 * Fundamentally, we should have a mechanism by which the
                 * datapath could return the result of the meter action so that
                 * we could execute them at the right order. */
                do {
                    ofpbuf_put(&execute_actions, a, NLA_ALIGN(a->nla_len));
                    /* Find next meter action before 'action', if any. */
                    do {
                        a = nl_attr_next(a);
                    } while (a != action &&
                             nl_attr_type(a) != OVS_ACTION_ATTR_METER);
                } while (a != action);
            }

            /* The Linux kernel datapath throws away the tunnel information
             * that we supply as metadata.  We have to use a "set" action to
             * supply it. */
            if (md->tunnel.ip_dst) {
                odp_put_tunnel_action(&md->tunnel, &execute_actions, NULL);
            }
            ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));

            execute.actions = execute_actions.data;
            execute.actions_len = execute_actions.size;
        } else {
            execute.actions = action;
            execute.actions_len = NLA_ALIGN(action->nla_len);
        }

        /* If a truncation was requested for this packet, apply it before
         * handing the packet to the datapath.  When the packet is not ours
         * to steal, truncate a clone instead of the caller's packet. */
        struct dp_packet *clone = NULL;
        uint32_t cutlen = dp_packet_get_cutlen(packet);
        if (cutlen && (type == OVS_ACTION_ATTR_OUTPUT
                        || type == OVS_ACTION_ATTR_TUNNEL_PUSH
                        || type == OVS_ACTION_ATTR_TUNNEL_POP
                        || type == OVS_ACTION_ATTR_USERSPACE)) {
            dp_packet_reset_cutlen(packet);
            if (!should_steal) {
                packet = clone = dp_packet_clone(packet);
            }
            dp_packet_set_size(packet, dp_packet_size(packet) - cutlen);
        }

        execute.packet = packet;
        execute.flow = aux->flow;
        execute.needs_help = false;
        execute.probe = false;
        execute.mtu = 0;
        aux->error = dpif_execute(aux->dpif, &execute);
        log_execute_message(aux->dpif, &this_module, &execute,
                            true, aux->error);

        /* dp_packet_delete(NULL) is a no-op, so this is safe either way. */
        dp_packet_delete(clone);

        if (flow_tnl_dst_is_set(&md->tunnel) || aux->meter_action) {
            ofpbuf_uninit(&execute_actions);

            /* Do not re-use the same meters for later output actions. */
            aux->meter_action = NULL;
        }
        break;
    }

    /* All remaining action types are executed in userspace by
     * odp_execute_actions() itself and must never reach this callback. */
    case OVS_ACTION_ATTR_HASH:
    case OVS_ACTION_ATTR_PUSH_VLAN:
    case OVS_ACTION_ATTR_POP_VLAN:
    case OVS_ACTION_ATTR_PUSH_MPLS:
    case OVS_ACTION_ATTR_POP_MPLS:
    case OVS_ACTION_ATTR_SET:
    case OVS_ACTION_ATTR_SET_MASKED:
    case OVS_ACTION_ATTR_SAMPLE:
    case OVS_ACTION_ATTR_TRUNC:
    case OVS_ACTION_ATTR_PUSH_ETH:
    case OVS_ACTION_ATTR_POP_ETH:
    case OVS_ACTION_ATTR_CLONE:
    case OVS_ACTION_ATTR_PUSH_NSH:
    case OVS_ACTION_ATTR_POP_NSH:
    case OVS_ACTION_ATTR_CT_CLEAR:
    case OVS_ACTION_ATTR_UNSPEC:
    case __OVS_ACTION_ATTR_MAX:
        OVS_NOT_REACHED();
    }
    dp_packet_delete_batch(packets_, should_steal);
}
1282
1283 /* Executes 'execute' by performing most of the actions in userspace and
1284 * passing the fully constructed packets to 'dpif' for output and userspace
1285 * actions.
1286 *
1287 * This helps with actions that a given 'dpif' doesn't implement directly. */
1288 static int
1289 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
1290 {
1291 struct dpif_execute_helper_aux aux = {dpif, execute->flow, 0, NULL};
1292 struct dp_packet_batch pb;
1293
1294 COVERAGE_INC(dpif_execute_with_help);
1295
1296 dp_packet_batch_init_packet(&pb, execute->packet);
1297 odp_execute_actions(&aux, &pb, false, execute->actions,
1298 execute->actions_len, dpif_execute_helper_cb);
1299 return aux.error;
1300 }
1301
1302 /* Returns true if the datapath needs help executing 'execute'. */
1303 static bool
1304 dpif_execute_needs_help(const struct dpif_execute *execute)
1305 {
1306 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1307 }
1308
1309 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1310 int
1311 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
1312 {
1313 if (execute->actions_len) {
1314 struct dpif_op *opp;
1315 struct dpif_op op;
1316
1317 op.type = DPIF_OP_EXECUTE;
1318 op.execute = *execute;
1319
1320 opp = &op;
1321 dpif_operate(dpif, &opp, 1, DPIF_OFFLOAD_AUTO);
1322
1323 return op.error;
1324 } else {
1325 return 0;
1326 }
1327 }
1328
/* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
 * which they are specified.  Places each operation's results in the "output"
 * members documented in comments, and 0 in the 'error' member on success or a
 * positive errno on failure.
 */
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops,
             enum dpif_offload_type offload_type)
{
    /* If the caller demands hardware offload but the flow API is disabled,
     * no op can succeed: fail them all with EINVAL up front. */
    if (offload_type == DPIF_OFFLOAD_ALWAYS && !netdev_is_flow_api_enabled()) {
        size_t i;
        for (i = 0; i < n_ops; i++) {
            struct dpif_op *op = ops[i];
            op->error = EINVAL;
        }
        return;
    }

    while (n_ops > 0) {
        size_t chunk;

        /* Count 'chunk', the number of ops that can be executed without
         * needing any help.  Ops that need help should be rare, so we
         * expect this to ordinarily be 'n_ops', that is, all the ops. */
        for (chunk = 0; chunk < n_ops; chunk++) {
            struct dpif_op *op = ops[chunk];

            if (op->type == DPIF_OP_EXECUTE
                && dpif_execute_needs_help(&op->execute)) {
                break;
            }
        }

        if (chunk) {
            /* Execute a chunk full of ops that the dpif provider can
             * handle itself, without help. */
            size_t i;

            dpif->dpif_class->operate(dpif, ops, chunk, offload_type);

            /* Post-process each completed op: bump coverage counters, log
             * the result, and zero caller-provided output structures on
             * failure so callers never read stale data. */
            for (i = 0; i < chunk; i++) {
                struct dpif_op *op = ops[i];
                int error = op->error;

                switch (op->type) {
                case DPIF_OP_FLOW_PUT: {
                    struct dpif_flow_put *put = &op->flow_put;

                    COVERAGE_INC(dpif_flow_put);
                    log_flow_put_message(dpif, &this_module, put, error);
                    if (error && put->stats) {
                        memset(put->stats, 0, sizeof *put->stats);
                    }
                    break;
                }

                case DPIF_OP_FLOW_GET: {
                    struct dpif_flow_get *get = &op->flow_get;

                    COVERAGE_INC(dpif_flow_get);
                    if (error) {
                        memset(get->flow, 0, sizeof *get->flow);
                    }
                    log_flow_get_message(dpif, &this_module, get, error);

                    break;
                }

                case DPIF_OP_FLOW_DEL: {
                    struct dpif_flow_del *del = &op->flow_del;

                    COVERAGE_INC(dpif_flow_del);
                    log_flow_del_message(dpif, &this_module, del, error);
                    if (error && del->stats) {
                        memset(del->stats, 0, sizeof *del->stats);
                    }
                    break;
                }

                case DPIF_OP_EXECUTE:
                    COVERAGE_INC(dpif_execute);
                    log_execute_message(dpif, &this_module, &op->execute,
                                        false, error);
                    break;
                }
            }

            ops += chunk;
            n_ops -= chunk;
        } else {
            /* Help the dpif provider to execute one op. */
            struct dpif_op *op = ops[0];

            COVERAGE_INC(dpif_execute);
            op->error = dpif_execute_with_help(dpif, &op->execute);
            ops++;
            n_ops--;
        }
    }
}
1429
1430 /* Returns a string that represents 'type', for use in log messages. */
1431 const char *
1432 dpif_upcall_type_to_string(enum dpif_upcall_type type)
1433 {
1434 switch (type) {
1435 case DPIF_UC_MISS: return "miss";
1436 case DPIF_UC_ACTION: return "action";
1437 case DPIF_N_UC_TYPES: default: return "<unknown>";
1438 }
1439 }
1440
1441 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1442 * if successful, otherwise a positive errno value.
1443 *
1444 * Turning packet receive off and then back on may change the Netlink PID
1445 * assignments returned by dpif_port_get_pid(). If the client does this, it
1446 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1447 * using the new PID assignment. */
1448 int
1449 dpif_recv_set(struct dpif *dpif, bool enable)
1450 {
1451 int error = 0;
1452
1453 if (dpif->dpif_class->recv_set) {
1454 error = dpif->dpif_class->recv_set(dpif, enable);
1455 log_operation(dpif, "recv_set", error);
1456 }
1457 return error;
1458 }
1459
1460 /* Refreshes the poll loops and Netlink sockets associated to each port,
1461 * when the number of upcall handlers (upcall receiving thread) is changed
1462 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1463 * recv_set().
1464 *
1465 * Since multiple upcall handlers can read upcalls simultaneously from
1466 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1467 * handler. So, handlers_set() is responsible for the following tasks:
1468 *
1469 * When receiving upcall is enabled, extends or creates the
1470 * configuration to support:
1471 *
1472 * - 'n_handlers' Netlink sockets for each port.
1473 *
1474 * - 'n_handlers' poll loops, one for each upcall handler.
1475 *
1476 * - registering the Netlink sockets for the same upcall handler to
1477 * the corresponding poll loop.
1478 *
1479 * Returns 0 if successful, otherwise a positive errno value. */
1480 int
1481 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
1482 {
1483 int error = 0;
1484
1485 if (dpif->dpif_class->handlers_set) {
1486 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1487 log_operation(dpif, "handlers_set", error);
1488 }
1489 return error;
1490 }
1491
1492 void
1493 dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1494 {
1495 if (dpif->dpif_class->register_dp_purge_cb) {
1496 dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux);
1497 }
1498 }
1499
1500 void
1501 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1502 {
1503 if (dpif->dpif_class->register_upcall_cb) {
1504 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
1505 }
1506 }
1507
1508 void
1509 dpif_enable_upcall(struct dpif *dpif)
1510 {
1511 if (dpif->dpif_class->enable_upcall) {
1512 dpif->dpif_class->enable_upcall(dpif);
1513 }
1514 }
1515
1516 void
1517 dpif_disable_upcall(struct dpif *dpif)
1518 {
1519 if (dpif->dpif_class->disable_upcall) {
1520 dpif->dpif_class->disable_upcall(dpif);
1521 }
1522 }
1523
1524 void
1525 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
1526 {
1527 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
1528 struct ds flow;
1529 char *packet;
1530
1531 packet = ofp_dp_packet_to_string(&upcall->packet);
1532
1533 ds_init(&flow);
1534 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1535
1536 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1537 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1538 ds_cstr(&flow), packet);
1539
1540 ds_destroy(&flow);
1541 free(packet);
1542 }
1543 }
1544
1545 /* Pass custom configuration to the datapath implementation. Some of the
1546 * changes can be postponed until dpif_run() is called. */
1547 int
1548 dpif_set_config(struct dpif *dpif, const struct smap *cfg)
1549 {
1550 int error = 0;
1551
1552 if (dpif->dpif_class->set_config) {
1553 error = dpif->dpif_class->set_config(dpif, cfg);
1554 if (error) {
1555 log_operation(dpif, "set_config", error);
1556 }
1557 }
1558
1559 return error;
1560 }
1561
1562 /* Polls for an upcall from 'dpif' for an upcall handler. Since there can
1563 * be multiple poll loops, 'handler_id' is needed as index to identify the
1564 * corresponding poll loop. If successful, stores the upcall into '*upcall',
1565 * using 'buf' for storage. Should only be called if 'recv_set' has been used
1566 * to enable receiving packets from 'dpif'.
1567 *
1568 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1569 * 'buf', so their memory cannot be freed separately from 'buf'.
1570 *
1571 * The caller owns the data of 'upcall->packet' and may modify it. If
1572 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1573 * will be reallocated. This requires the data of 'upcall->packet' to be
1574 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1575 * when an error is returned, the 'upcall->packet' may be uninitialized
1576 * and should not be released.
1577 *
1578 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1579 * if no upcall is immediately available. */
1580 int
1581 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
1582 struct ofpbuf *buf)
1583 {
1584 int error = EAGAIN;
1585
1586 if (dpif->dpif_class->recv) {
1587 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
1588 if (!error) {
1589 dpif_print_packet(dpif, upcall);
1590 } else if (error != EAGAIN) {
1591 log_operation(dpif, "recv", error);
1592 }
1593 }
1594 return error;
1595 }
1596
1597 /* Discards all messages that would otherwise be received by dpif_recv() on
1598 * 'dpif'. */
1599 void
1600 dpif_recv_purge(struct dpif *dpif)
1601 {
1602 COVERAGE_INC(dpif_purge);
1603 if (dpif->dpif_class->recv_purge) {
1604 dpif->dpif_class->recv_purge(dpif);
1605 }
1606 }
1607
1608 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1609 * 'dpif' has a message queued to be received with the recv member
1610 * function. Since there can be multiple poll loops, 'handler_id' is
1611 * needed as index to identify the corresponding poll loop. */
1612 void
1613 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
1614 {
1615 if (dpif->dpif_class->recv_wait) {
1616 dpif->dpif_class->recv_wait(dpif, handler_id);
1617 }
1618 }
1619
1620 /*
1621 * Return the datapath version. Caller is responsible for freeing
1622 * the string.
1623 */
1624 char *
1625 dpif_get_dp_version(const struct dpif *dpif)
1626 {
1627 char *version = NULL;
1628
1629 if (dpif->dpif_class->get_datapath_version) {
1630 version = dpif->dpif_class->get_datapath_version();
1631 }
1632
1633 return version;
1634 }
1635
1636 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1637 * and '*engine_id', respectively. */
1638 void
1639 dpif_get_netflow_ids(const struct dpif *dpif,
1640 uint8_t *engine_type, uint8_t *engine_id)
1641 {
1642 *engine_type = dpif->netflow_engine_type;
1643 *engine_id = dpif->netflow_engine_id;
1644 }
1645
1646 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1647 * value used for setting packet priority.
1648 * On success, returns 0 and stores the priority into '*priority'.
1649 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1650 int
1651 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
1652 uint32_t *priority)
1653 {
1654 int error = (dpif->dpif_class->queue_to_priority
1655 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1656 priority)
1657 : EOPNOTSUPP);
1658 if (error) {
1659 *priority = 0;
1660 }
1661 log_operation(dpif, "queue_to_priority", error);
1662 return error;
1663 }
1664 \f
1665 void
1666 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1667 const char *name,
1668 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1669 {
1670 dpif->dpif_class = dpif_class;
1671 dpif->base_name = xstrdup(name);
1672 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1673 dpif->netflow_engine_type = netflow_engine_type;
1674 dpif->netflow_engine_id = netflow_engine_id;
1675 }
1676
1677 /* Undoes the results of initialization.
1678 *
1679 * Normally this function only needs to be called from dpif_close().
1680 * However, it may be called by providers due to an error on opening
1681 * that occurs after initialization. It this case dpif_close() would
1682 * never be called. */
1683 void
1684 dpif_uninit(struct dpif *dpif, bool close)
1685 {
1686 char *base_name = dpif->base_name;
1687 char *full_name = dpif->full_name;
1688
1689 if (close) {
1690 dpif->dpif_class->close(dpif);
1691 }
1692
1693 free(base_name);
1694 free(full_name);
1695 }
1696 \f
1697 static void
1698 log_operation(const struct dpif *dpif, const char *operation, int error)
1699 {
1700 if (!error) {
1701 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
1702 } else if (ofperr_is_valid(error)) {
1703 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1704 dpif_name(dpif), operation, ofperr_get_name(error));
1705 } else {
1706 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1707 dpif_name(dpif), operation, ovs_strerror(error));
1708 }
1709 }
1710
1711 static enum vlog_level
1712 flow_message_log_level(int error)
1713 {
1714 /* If flows arrive in a batch, userspace may push down multiple
1715 * unique flow definitions that overlap when wildcards are applied.
1716 * Kernels that support flow wildcarding will reject these flows as
1717 * duplicates (EEXIST), so lower the log level to debug for these
1718 * types of messages. */
1719 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
1720 }
1721
1722 static bool
1723 should_log_flow_message(const struct vlog_module *module, int error)
1724 {
1725 return !vlog_should_drop(module, flow_message_log_level(error),
1726 error ? &error_rl : &dpmsg_rl);
1727 }
1728
/* Logs a flow-related 'operation' on 'dpif' that completed with 'error'
 * (0 for success).  The message includes the flow key/mask, and optionally
 * the UFID, statistics, and actions when the caller supplies them.  The log
 * level is chosen by flow_message_log_level(). */
void
log_flow_message(const struct dpif *dpif, int error,
                 const struct vlog_module *module,
                 const char *operation,
                 const struct nlattr *key, size_t key_len,
                 const struct nlattr *mask, size_t mask_len,
                 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
                 const struct nlattr *actions, size_t actions_len)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    ds_put_format(&ds, "%s: ", dpif_name(dpif));
    if (error) {
        ds_put_cstr(&ds, "failed to ");
    }
    ds_put_format(&ds, "%s ", operation);
    if (error) {
        ds_put_format(&ds, "(%s) ", ovs_strerror(error));
    }
    if (ufid) {
        odp_format_ufid(ufid, &ds);
        ds_put_cstr(&ds, " ");
    }
    odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
    if (stats) {
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(stats, &ds);
    }
    /* 'actions_len' alone can trigger formatting, so that zero-length but
     * non-null action lists are still represented. */
    if (actions || actions_len) {
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, actions, actions_len, NULL);
    }
    vlog(module, flow_message_log_level(error), "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
1763
1764 void
1765 log_flow_put_message(const struct dpif *dpif,
1766 const struct vlog_module *module,
1767 const struct dpif_flow_put *put,
1768 int error)
1769 {
1770 if (should_log_flow_message(module, error)
1771 && !(put->flags & DPIF_FP_PROBE)) {
1772 struct ds s;
1773
1774 ds_init(&s);
1775 ds_put_cstr(&s, "put");
1776 if (put->flags & DPIF_FP_CREATE) {
1777 ds_put_cstr(&s, "[create]");
1778 }
1779 if (put->flags & DPIF_FP_MODIFY) {
1780 ds_put_cstr(&s, "[modify]");
1781 }
1782 if (put->flags & DPIF_FP_ZERO_STATS) {
1783 ds_put_cstr(&s, "[zero]");
1784 }
1785 log_flow_message(dpif, error, module, ds_cstr(&s),
1786 put->key, put->key_len, put->mask, put->mask_len,
1787 put->ufid, put->stats, put->actions,
1788 put->actions_len);
1789 ds_destroy(&s);
1790 }
1791 }
1792
1793 void
1794 log_flow_del_message(const struct dpif *dpif,
1795 const struct vlog_module *module,
1796 const struct dpif_flow_del *del,
1797 int error)
1798 {
1799 if (should_log_flow_message(module, error)) {
1800 log_flow_message(dpif, error, module, "flow_del",
1801 del->key, del->key_len,
1802 NULL, 0, del->ufid, !error ? del->stats : NULL,
1803 NULL, 0);
1804 }
1805 }
1806
/* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
 * (0 for success).  'subexecute' should be true if the execution is a result
 * of breaking down a larger execution that needed help, false otherwise.
 *
 *
 * XXX In theory, the log message could be deceptive because this function is
 * called after the dpif_provider's '->execute' function, which is allowed to
 * modify execute->packet and execute->md.  In practice, though:
 *
 *     - dpif-netlink doesn't modify execute->packet or execute->md.
 *
 *     - dpif-netdev does modify them but it is less likely to have problems
 *       because it is built into ovs-vswitchd and cannot have version skew,
 *       etc.
 *
 * It would still be better to avoid the potential problem.  I don't know of a
 * good way to do that, though, that isn't expensive. */
void
log_execute_message(const struct dpif *dpif,
                    const struct vlog_module *module,
                    const struct dpif_execute *execute,
                    bool subexecute, int error)
{
    /* Skip the (possibly expensive) formatting when the rate limiter drops
     * the message or when this is a feature probe. */
    if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
        && !execute->probe) {
        struct ds ds = DS_EMPTY_INITIALIZER;
        char *packet;
        uint64_t stub[1024 / 8];
        struct ofpbuf md = OFPBUF_STUB_INITIALIZER(stub);

        packet = ofp_packet_to_string(dp_packet_data(execute->packet),
                                      dp_packet_size(execute->packet),
                                      execute->packet->packet_type);
        odp_key_from_dp_packet(&md, execute->packet);
        /* "sub-" marks a piece of a broken-down execution; "super-" marks
         * one that will itself need to be broken down. */
        ds_put_format(&ds, "%s: %sexecute ",
                      dpif_name(dpif),
                      (subexecute ? "sub-"
                       : dpif_execute_needs_help(execute) ? "super-"
                       : ""));
        format_odp_actions(&ds, execute->actions, execute->actions_len, NULL);
        if (error) {
            ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
        }
        ds_put_format(&ds, " on packet %s", packet);
        ds_put_format(&ds, " with metadata ");
        odp_flow_format(md.data, md.size, NULL, 0, NULL, &ds, true);
        ds_put_format(&ds, " mtu %d", execute->mtu);
        vlog(module, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
        ds_destroy(&ds);
        free(packet);
        ofpbuf_uninit(&md);
    }
}
1860
1861 void
1862 log_flow_get_message(const struct dpif *dpif,
1863 const struct vlog_module *module,
1864 const struct dpif_flow_get *get,
1865 int error)
1866 {
1867 if (should_log_flow_message(module, error)) {
1868 log_flow_message(dpif, error, module, "flow_get",
1869 get->key, get->key_len,
1870 get->flow->mask, get->flow->mask_len,
1871 get->ufid, &get->flow->stats,
1872 get->flow->actions, get->flow->actions_len);
1873 }
1874 }
1875
1876 bool
1877 dpif_supports_tnl_push_pop(const struct dpif *dpif)
1878 {
1879 return dpif_is_netdev(dpif);
1880 }
1881
1882 /* Meters */
1883 void
1884 dpif_meter_get_features(const struct dpif *dpif,
1885 struct ofputil_meter_features *features)
1886 {
1887 memset(features, 0, sizeof *features);
1888 if (dpif->dpif_class->meter_get_features) {
1889 dpif->dpif_class->meter_get_features(dpif, features);
1890 }
1891 }
1892
1893 /* Adds or modifies the meter in 'dpif' with the given 'meter_id' and
1894 * the configuration in 'config'.
1895 *
1896 * The meter id specified through 'config->meter_id' is ignored. */
1897 int
1898 dpif_meter_set(struct dpif *dpif, ofproto_meter_id meter_id,
1899 struct ofputil_meter_config *config)
1900 {
1901 COVERAGE_INC(dpif_meter_set);
1902
1903 if (!(config->flags & (OFPMF13_KBPS | OFPMF13_PKTPS))) {
1904 return EBADF; /* Rate unit type not set. */
1905 }
1906
1907 if ((config->flags & OFPMF13_KBPS) && (config->flags & OFPMF13_PKTPS)) {
1908 return EBADF; /* Both rate units may not be set. */
1909 }
1910
1911 if (config->n_bands == 0) {
1912 return EINVAL;
1913 }
1914
1915 for (size_t i = 0; i < config->n_bands; i++) {
1916 if (config->bands[i].rate == 0) {
1917 return EDOM; /* Rate must be non-zero */
1918 }
1919 }
1920
1921 int error = dpif->dpif_class->meter_set(dpif, meter_id, config);
1922 if (!error) {
1923 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" set",
1924 dpif_name(dpif), meter_id.uint32);
1925 } else {
1926 VLOG_WARN_RL(&error_rl, "%s: failed to set DPIF meter %"PRIu32": %s",
1927 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1928 }
1929 return error;
1930 }
1931
1932 int
1933 dpif_meter_get(const struct dpif *dpif, ofproto_meter_id meter_id,
1934 struct ofputil_meter_stats *stats, uint16_t n_bands)
1935 {
1936 int error;
1937
1938 COVERAGE_INC(dpif_meter_get);
1939
1940 error = dpif->dpif_class->meter_get(dpif, meter_id, stats, n_bands);
1941 if (!error) {
1942 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" get stats",
1943 dpif_name(dpif), meter_id.uint32);
1944 } else {
1945 VLOG_WARN_RL(&error_rl,
1946 "%s: failed to get DPIF meter %"PRIu32" stats: %s",
1947 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1948 stats->packet_in_count = ~0;
1949 stats->byte_in_count = ~0;
1950 stats->n_bands = 0;
1951 }
1952 return error;
1953 }
1954
1955 int
1956 dpif_meter_del(struct dpif *dpif, ofproto_meter_id meter_id,
1957 struct ofputil_meter_stats *stats, uint16_t n_bands)
1958 {
1959 int error;
1960
1961 COVERAGE_INC(dpif_meter_del);
1962
1963 error = dpif->dpif_class->meter_del(dpif, meter_id, stats, n_bands);
1964 if (!error) {
1965 VLOG_DBG_RL(&dpmsg_rl, "%s: DPIF meter %"PRIu32" deleted",
1966 dpif_name(dpif), meter_id.uint32);
1967 } else {
1968 VLOG_WARN_RL(&error_rl,
1969 "%s: failed to delete DPIF meter %"PRIu32": %s",
1970 dpif_name(dpif), meter_id.uint32, ovs_strerror(error));
1971 if (stats) {
1972 stats->packet_in_count = ~0;
1973 stats->byte_in_count = ~0;
1974 stats->n_bands = 0;
1975 }
1976 }
1977 return error;
1978 }