/*
 * Copyright (c) 2008 - 2014, 2016, 2017 Nicira, Inc.
 * Copyright (c) 2019 Samsung Electronics Co.,Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netdev-offload.h"

#include <errno.h>
#include <inttypes.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "cmap.h"
#include "coverage.h"
#include "dpif.h"
#include "dp-packet.h"
#include "openvswitch/dynamic-string.h"
#include "fatal-signal.h"
#include "hash.h"
#include "openvswitch/list.h"
#include "netdev-offload-provider.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-netlink.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/poll-loop.h"
#include "seq.h"
#include "openvswitch/shash.h"
#include "smap.h"
#include "socket-util.h"
#include "sset.h"
#include "svec.h"
#include "openvswitch/vlog.h"
#include "flow.h"
#include "util.h"
#ifdef __linux__
#include "tc.h"
#endif

VLOG_DEFINE_THIS_MODULE(netdev_offload);


static bool netdev_flow_api_enabled = false;

/* Protects 'netdev_flow_apis'. */
static struct ovs_mutex netdev_flow_api_provider_mutex = OVS_MUTEX_INITIALIZER;

/* Contains 'struct netdev_registered_flow_api's. */
static struct cmap netdev_flow_apis = CMAP_INITIALIZER;

struct netdev_registered_flow_api {
    struct cmap_node cmap_node; /* In 'netdev_flow_apis', by flow_api->type. */
    const struct netdev_flow_api *flow_api;

    /* Number of references: one for the flow_api itself and one for every
     * instance of the netdev that uses it. */
    struct ovs_refcount refcnt;
};

static struct netdev_registered_flow_api *
netdev_lookup_flow_api(const char *type)
{
    struct netdev_registered_flow_api *rfa;
    CMAP_FOR_EACH_WITH_HASH (rfa, cmap_node, hash_string(type, 0),
                             &netdev_flow_apis) {
        if (!strcmp(type, rfa->flow_api->type)) {
            return rfa;
        }
    }
    return NULL;
}

/* Registers a new netdev flow api provider. */
int
netdev_register_flow_api_provider(const struct netdev_flow_api *new_flow_api)
    OVS_EXCLUDED(netdev_flow_api_provider_mutex)
{
    int error = 0;

    if (!new_flow_api->init_flow_api) {
        VLOG_WARN("attempted to register invalid flow api provider: %s",
                  new_flow_api->type);
        error = EINVAL;
    }

    ovs_mutex_lock(&netdev_flow_api_provider_mutex);
    if (netdev_lookup_flow_api(new_flow_api->type)) {
        VLOG_WARN("attempted to register duplicate flow api provider: %s",
                  new_flow_api->type);
        error = EEXIST;
    } else {
        struct netdev_registered_flow_api *rfa;

        rfa = xmalloc(sizeof *rfa);
        cmap_insert(&netdev_flow_apis, &rfa->cmap_node,
                    hash_string(new_flow_api->type, 0));
        rfa->flow_api = new_flow_api;
        ovs_refcount_init(&rfa->refcnt);
        VLOG_DBG("netdev: flow API '%s' registered.", new_flow_api->type);
    }
    ovs_mutex_unlock(&netdev_flow_api_provider_mutex);

    return error;
}

/* Unregisters a netdev flow api provider. 'type' must have been previously
 * registered and not currently be in use by any netdevs. After unregistration
 * netdev flow api of that type cannot be used for netdevs. (However, the
 * provider may still be accessible from other threads until the next RCU grace
 * period, so the caller must not free or re-register the same netdev_flow_api
 * until that has passed.) */
int
netdev_unregister_flow_api_provider(const char *type)
    OVS_EXCLUDED(netdev_flow_api_provider_mutex)
{
    struct netdev_registered_flow_api *rfa;
    int error;

    ovs_mutex_lock(&netdev_flow_api_provider_mutex);
    rfa = netdev_lookup_flow_api(type);
    if (!rfa) {
        VLOG_WARN("attempted to unregister a flow api provider that is not "
                  "registered: %s", type);
        error = EAFNOSUPPORT;
    } else if (ovs_refcount_unref(&rfa->refcnt) != 1) {
        ovs_refcount_ref(&rfa->refcnt);
        VLOG_WARN("attempted to unregister in use flow api provider: %s",
                  type);
        error = EBUSY;
    } else {
        cmap_remove(&netdev_flow_apis, &rfa->cmap_node,
                    hash_string(rfa->flow_api->type, 0));
        ovsrcu_postpone(free, rfa);
        error = 0;
    }
    ovs_mutex_unlock(&netdev_flow_api_provider_mutex);

    return error;
}

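/* Returns true if 'netdev1' and 'netdev2' are currently assigned the same
 * flow API provider instance (including the case where neither has one). */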
bool
netdev_flow_api_equals(const struct netdev *netdev1,
                       const struct netdev *netdev2)
{
    const struct netdev_flow_api *netdev_flow_api1 =
        ovsrcu_get(const struct netdev_flow_api *, &netdev1->flow_api);
    const struct netdev_flow_api *netdev_flow_api2 =
        ovsrcu_get(const struct netdev_flow_api *, &netdev2->flow_api);

    return netdev_flow_api1 == netdev_flow_api2;
}

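/* Tries each registered flow API provider on 'netdev' and assigns the first
 * one whose init_flow_api() succeeds, taking a reference on it.  Returns 0 on
 * success, -1 if no suitable provider was found. */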
static int
netdev_assign_flow_api(struct netdev *netdev)
{
    struct netdev_registered_flow_api *rfa;

    CMAP_FOR_EACH (rfa, cmap_node, &netdev_flow_apis) {
        if (!rfa->flow_api->init_flow_api(netdev)) {
            ovs_refcount_ref(&rfa->refcnt);
            ovsrcu_set(&netdev->flow_api, rfa->flow_api);
            VLOG_INFO("%s: Assigned flow API '%s'.",
                      netdev_get_name(netdev), rfa->flow_api->type);
            return 0;
        }
        VLOG_DBG("%s: flow API '%s' is not suitable.",
                 netdev_get_name(netdev), rfa->flow_api->type);
    }
    VLOG_INFO("%s: No suitable flow API found.", netdev_get_name(netdev));

    return -1;
}

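/* Flushes all offloaded flows from 'netdev'.  Returns 0 on success, otherwise
 * a positive errno value; EOPNOTSUPP if no flow API is assigned or it does
 * not support flushing. */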
int
netdev_flow_flush(struct netdev *netdev)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    return (flow_api && flow_api->flow_flush)
           ? flow_api->flow_flush(netdev)
           : EOPNOTSUPP;
}

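/* Creates a dump of the flows offloaded to 'netdev' and stores it in '*dump'.
 * 'terse' is passed through to the provider, which may then dump flows in
 * reduced detail.  Returns 0 on success, otherwise a positive errno value. */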
int
netdev_flow_dump_create(struct netdev *netdev, struct netdev_flow_dump **dump,
                        bool terse)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    return (flow_api && flow_api->flow_dump_create)
           ? flow_api->flow_dump_create(netdev, dump, terse)
           : EOPNOTSUPP;
}

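/* Destroys a dump previously created with netdev_flow_dump_create(). */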
int
netdev_flow_dump_destroy(struct netdev_flow_dump *dump)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &dump->netdev->flow_api);

    return (flow_api && flow_api->flow_dump_destroy)
           ? flow_api->flow_dump_destroy(dump)
           : EOPNOTSUPP;
}

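/* Retrieves the next offloaded flow from 'dump' into 'match', 'actions',
 * 'stats', 'attrs' and 'ufid', using 'rbuffer' and 'wbuffer' as storage.
 * Returns true if a flow was returned, false when the dump is exhausted or
 * dumping is not supported. */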
bool
netdev_flow_dump_next(struct netdev_flow_dump *dump, struct match *match,
                      struct nlattr **actions, struct dpif_flow_stats *stats,
                      struct dpif_flow_attrs *attrs, ovs_u128 *ufid,
                      struct ofpbuf *rbuffer, struct ofpbuf *wbuffer)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &dump->netdev->flow_api);

    return (flow_api && flow_api->flow_dump_next)
           ? flow_api->flow_dump_next(dump, match, actions, stats, attrs,
                                      ufid, rbuffer, wbuffer)
           : false;
}

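/* Offloads to 'netdev' the flow identified by 'ufid', described by 'match'
 * and by 'act_len' bytes of 'actions'.  Returns 0 on success, otherwise a
 * positive errno value. */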
int
netdev_flow_put(struct netdev *netdev, struct match *match,
                struct nlattr *actions, size_t act_len,
                const ovs_u128 *ufid, struct offload_info *info,
                struct dpif_flow_stats *stats)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    return (flow_api && flow_api->flow_put)
           ? flow_api->flow_put(netdev, match, actions, act_len, ufid,
                                info, stats)
           : EOPNOTSUPP;
}

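/* Fetches the offloaded flow identified by 'ufid' from 'netdev', filling in
 * 'match', 'actions', 'stats' and 'attrs', with 'buf' used as storage for the
 * returned data.  Returns 0 on success, otherwise a positive errno value. */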
int
netdev_flow_get(struct netdev *netdev, struct match *match,
                struct nlattr **actions, const ovs_u128 *ufid,
                struct dpif_flow_stats *stats,
                struct dpif_flow_attrs *attrs, struct ofpbuf *buf)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    return (flow_api && flow_api->flow_get)
           ? flow_api->flow_get(netdev, match, actions, ufid,
                                stats, attrs, buf)
           : EOPNOTSUPP;
}

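/* Deletes the offloaded flow identified by 'ufid' from 'netdev'.  Returns 0
 * on success, otherwise a positive errno value. */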
int
netdev_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
                struct dpif_flow_stats *stats)
{
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    return (flow_api && flow_api->flow_del)
           ? flow_api->flow_del(netdev, ufid, stats)
           : EOPNOTSUPP;
}

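/* Assigns a flow API provider to 'netdev' if hardware offload is enabled and
 * the netdev does not already have one.  Returns 0 on success, EOPNOTSUPP if
 * offload is disabled or no suitable provider is available. */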
int
netdev_init_flow_api(struct netdev *netdev)
{
    if (!netdev_is_flow_api_enabled()) {
        return EOPNOTSUPP;
    }

    if (ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api)) {
        return 0;
    }

    if (netdev_assign_flow_api(netdev)) {
        return EOPNOTSUPP;
    }

    return 0;
}

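/* Releases the flow API provider assigned to 'netdev', if any, dropping the
 * reference taken when it was assigned. */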
void
netdev_uninit_flow_api(struct netdev *netdev)
{
    struct netdev_registered_flow_api *rfa;
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    if (!flow_api) {
        return;
    }

    ovsrcu_set(&netdev->flow_api, NULL);
    rfa = netdev_lookup_flow_api(flow_api->type);
    ovs_refcount_unref(&rfa->refcnt);
}

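/* Returns the offload block ID of 'netdev', or 0 if its netdev class does not
 * implement get_block_id(). */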
uint32_t
netdev_get_block_id(struct netdev *netdev)
{
    const struct netdev_class *class = netdev->netdev_class;

    return (class->get_block_id
            ? class->get_block_id(netdev)
            : 0);
}

/*
 * Get the value of the hw info parameter specified by type.
 * Returns the value on success (>= 0). Returns -1 on failure.
 */
int
netdev_get_hw_info(struct netdev *netdev, int type)
{
    int val = -1;

    switch (type) {
    case HW_INFO_TYPE_OOR:
        val = netdev->hw_info.oor;
        break;
    case HW_INFO_TYPE_PEND_COUNT:
        val = netdev->hw_info.pending_count;
        break;
    case HW_INFO_TYPE_OFFL_COUNT:
        val = netdev->hw_info.offload_count;
        break;
    default:
        break;
    }

    return val;
}

/*
 * Set the value of the hw info parameter specified by type.
 */
void
netdev_set_hw_info(struct netdev *netdev, int type, int val)
{
    switch (type) {
    case HW_INFO_TYPE_OOR:
        if (val == 0) {
            VLOG_DBG("Offload rebalance: netdev: %s is not OOR", netdev->name);
        }
        netdev->hw_info.oor = val;
        break;
    case HW_INFO_TYPE_PEND_COUNT:
        netdev->hw_info.pending_count = val;
        break;
    case HW_INFO_TYPE_OFFL_COUNT:
        netdev->hw_info.offload_count = val;
        break;
    default:
        break;
    }
}

/* Protects below port hashmaps. */
static struct ovs_rwlock netdev_hmap_rwlock = OVS_RWLOCK_INITIALIZER;

static struct hmap port_to_netdev OVS_GUARDED_BY(netdev_hmap_rwlock)
    = HMAP_INITIALIZER(&port_to_netdev);
static struct hmap ifindex_to_port OVS_GUARDED_BY(netdev_hmap_rwlock)
    = HMAP_INITIALIZER(&ifindex_to_port);

struct port_to_netdev_data {
    struct hmap_node portno_node; /* By (dpif_class, dpif_port.port_no). */
    struct hmap_node ifindex_node; /* By (dpif_class, ifindex). */
    struct netdev *netdev;
    struct dpif_port dpif_port;
    const struct dpif_class *dpif_class;
    int ifindex;
};

/*
 * Returns true if at least one registered netdev is in the out-of-resources
 * (OOR) state; otherwise returns false.
 */
bool
netdev_any_oor(void)
    OVS_EXCLUDED(netdev_hmap_rwlock)
{
    struct port_to_netdev_data *data;
    bool oor = false;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        struct netdev *dev = data->netdev;

        if (dev->hw_info.oor) {
            oor = true;
            break;
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    return oor;
}

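/* Returns true if hardware offload ('other_config:hw-offload') has been
 * enabled. */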
bool
netdev_is_flow_api_enabled(void)
{
    return netdev_flow_api_enabled;
}

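/* Flushes offloaded flows from every port registered under 'dpif_class'. */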
void
netdev_ports_flow_flush(const struct dpif_class *dpif_class)
{
    struct port_to_netdev_data *data;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (data->dpif_class == dpif_class) {
            netdev_flow_flush(data->netdev);
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);
}

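/* Creates flow dumps for all ports registered under 'dpif_class', passing
 * 'terse' through to each per-netdev dump.  Returns a dynamically allocated
 * array of dumps, owned by the caller, and stores the number of valid entries
 * in '*ports'. */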
struct netdev_flow_dump **
netdev_ports_flow_dump_create(const struct dpif_class *dpif_class, int *ports,
                              bool terse)
{
    struct port_to_netdev_data *data;
    struct netdev_flow_dump **dumps;
    int count = 0;
    int i = 0;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (data->dpif_class == dpif_class) {
            count++;
        }
    }

    dumps = count ? xzalloc(sizeof *dumps * count) : NULL;

    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (data->dpif_class == dpif_class) {
            if (netdev_flow_dump_create(data->netdev, &dumps[i], terse)) {
                continue;
            }

            dumps[i]->port = data->dpif_port.port_no;
            i++;
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    *ports = i;
    return dumps;
}

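/* Deletes the offloaded flow identified by 'ufid' from whichever port of
 * 'dpif_class' has it offloaded.  Returns 0 if the flow was found and
 * deleted, ENOENT otherwise. */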
int
netdev_ports_flow_del(const struct dpif_class *dpif_class,
                      const ovs_u128 *ufid,
                      struct dpif_flow_stats *stats)
{
    struct port_to_netdev_data *data;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (data->dpif_class == dpif_class
            && !netdev_flow_del(data->netdev, ufid, stats)) {
            ovs_rwlock_unlock(&netdev_hmap_rwlock);
            return 0;
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    return ENOENT;
}

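/* Looks up the offloaded flow identified by 'ufid' across all ports of
 * 'dpif_class', filling in 'match', 'actions', 'stats' and 'attrs' on
 * success.  Returns 0 if the flow was found, ENOENT otherwise. */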
int
netdev_ports_flow_get(const struct dpif_class *dpif_class, struct match *match,
                      struct nlattr **actions, const ovs_u128 *ufid,
                      struct dpif_flow_stats *stats,
                      struct dpif_flow_attrs *attrs, struct ofpbuf *buf)
{
    struct port_to_netdev_data *data;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (data->dpif_class == dpif_class
            && !netdev_flow_get(data->netdev, match, actions,
                                ufid, stats, attrs, buf)) {
            ovs_rwlock_unlock(&netdev_hmap_rwlock);
            return 0;
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);
    return ENOENT;
}

static uint32_t
netdev_ports_hash(odp_port_t port, const struct dpif_class *dpif_class)
{
    return hash_int(odp_to_u32(port), hash_pointer(dpif_class, 0));
}

static struct port_to_netdev_data *
netdev_ports_lookup(odp_port_t port_no, const struct dpif_class *dpif_class)
    OVS_REQ_RDLOCK(netdev_hmap_rwlock)
{
    struct port_to_netdev_data *data;

    HMAP_FOR_EACH_WITH_HASH (data, portno_node,
                             netdev_ports_hash(port_no, dpif_class),
                             &port_to_netdev) {
        if (data->dpif_class == dpif_class
            && data->dpif_port.port_no == port_no) {
            return data;
        }
    }
    return NULL;
}

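/* Registers 'netdev' as 'dpif_port' of 'dpif_class' in the port maps and
 * initializes its flow API.  Returns 0 on success, ENODEV if the netdev has
 * no ifindex, or EEXIST if the port is already registered. */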
int
netdev_ports_insert(struct netdev *netdev, const struct dpif_class *dpif_class,
                    struct dpif_port *dpif_port)
{
    struct port_to_netdev_data *data;
    int ifindex = netdev_get_ifindex(netdev);

    if (ifindex < 0) {
        return ENODEV;
    }

    ovs_rwlock_wrlock(&netdev_hmap_rwlock);
    if (netdev_ports_lookup(dpif_port->port_no, dpif_class)) {
        ovs_rwlock_unlock(&netdev_hmap_rwlock);
        return EEXIST;
    }

    data = xzalloc(sizeof *data);
    data->netdev = netdev_ref(netdev);
    data->dpif_class = dpif_class;
    dpif_port_clone(&data->dpif_port, dpif_port);
    data->ifindex = ifindex;

    hmap_insert(&port_to_netdev, &data->portno_node,
                netdev_ports_hash(dpif_port->port_no, dpif_class));
    hmap_insert(&ifindex_to_port, &data->ifindex_node, ifindex);
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    netdev_init_flow_api(netdev);

    return 0;
}

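/* Returns a new reference to the netdev registered as 'port_no' under
 * 'dpif_class', or NULL if there is none.  The caller must eventually release
 * the reference with netdev_close(). */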
struct netdev *
netdev_ports_get(odp_port_t port_no, const struct dpif_class *dpif_class)
{
    struct port_to_netdev_data *data;
    struct netdev *ret = NULL;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    data = netdev_ports_lookup(port_no, dpif_class);
    if (data) {
        ret = netdev_ref(data->netdev);
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    return ret;
}

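/* Removes the port registered as 'port_no' under 'dpif_class' from the port
 * maps and releases its netdev reference.  Returns 0 on success, ENOENT if
 * the port is not registered. */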
int
netdev_ports_remove(odp_port_t port_no, const struct dpif_class *dpif_class)
{
    struct port_to_netdev_data *data;
    int ret = ENOENT;

    ovs_rwlock_wrlock(&netdev_hmap_rwlock);
    data = netdev_ports_lookup(port_no, dpif_class);
    if (data) {
        dpif_port_destroy(&data->dpif_port);
        netdev_close(data->netdev); /* unref and possibly close */
        hmap_remove(&port_to_netdev, &data->portno_node);
        hmap_remove(&ifindex_to_port, &data->ifindex_node);
        free(data);
        ret = 0;
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    return ret;
}

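/* Returns the datapath port number registered for 'ifindex', or 0 if no such
 * port is registered. */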
odp_port_t
netdev_ifindex_to_odp_port(int ifindex)
{
    struct port_to_netdev_data *data;
    odp_port_t ret = 0;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH_WITH_HASH (data, ifindex_node, ifindex, &ifindex_to_port) {
        if (data->ifindex == ifindex) {
            ret = data->dpif_port.port_no;
            break;
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    return ret;
}

static bool netdev_offload_rebalance_policy = false;

bool
netdev_is_offload_rebalance_policy_enabled(void)
{
    return netdev_offload_rebalance_policy;
}

static void
netdev_ports_flow_init(void)
{
    struct port_to_netdev_data *data;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        netdev_init_flow_api(data->netdev);
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);
}

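/* Enables the netdev flow API if 'other_config:hw-offload' is set in
 * 'ovs_other_config'.  This is a one-way switch: on first enablement it also
 * applies the 'tc-policy' setting (on Linux), honors 'offload-rebalance', and
 * initializes the flow API on all registered ports. */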
void
netdev_set_flow_api_enabled(const struct smap *ovs_other_config)
{
    if (smap_get_bool(ovs_other_config, "hw-offload", false)) {
        static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

        if (ovsthread_once_start(&once)) {
            netdev_flow_api_enabled = true;

            VLOG_INFO("netdev: Flow API Enabled");

#ifdef __linux__
            tc_set_policy(smap_get_def(ovs_other_config, "tc-policy",
                                       TC_POLICY_DEFAULT));
#endif

            if (smap_get_bool(ovs_other_config, "offload-rebalance", false)) {
                netdev_offload_rebalance_policy = true;
            }

            netdev_ports_flow_init();

            ovsthread_once_done(&once);
        }
    }
}