]> git.proxmox.com Git - mirror_ovs.git/blob - lib/netdev-offload.c
tunnel: Bareudp Tunnel Support.
[mirror_ovs.git] / lib / netdev-offload.c
1 /*
2 * Copyright (c) 2008 - 2014, 2016, 2017 Nicira, Inc.
3 * Copyright (c) 2019 Samsung Electronics Co.,Ltd.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18 #include <config.h>
19 #include "netdev-offload.h"
20
21 #include <errno.h>
22 #include <inttypes.h>
23 #include <sys/types.h>
24 #include <netinet/in.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <unistd.h>
28
29 #include "cmap.h"
30 #include "coverage.h"
31 #include "dpif.h"
32 #include "dp-packet.h"
33 #include "openvswitch/dynamic-string.h"
34 #include "fatal-signal.h"
35 #include "hash.h"
36 #include "openvswitch/list.h"
37 #include "netdev-offload-provider.h"
38 #include "netdev-provider.h"
39 #include "netdev-vport.h"
40 #include "odp-netlink.h"
41 #include "openflow/openflow.h"
42 #include "packets.h"
43 #include "openvswitch/ofp-print.h"
44 #include "openvswitch/poll-loop.h"
45 #include "seq.h"
46 #include "openvswitch/shash.h"
47 #include "smap.h"
48 #include "socket-util.h"
49 #include "sset.h"
50 #include "svec.h"
51 #include "openvswitch/vlog.h"
52 #include "flow.h"
53 #include "util.h"
54 #ifdef __linux__
55 #include "tc.h"
56 #endif
57
VLOG_DEFINE_THIS_MODULE(netdev_offload);


/* Global hardware-offload switch; set once (and never cleared) by
 * netdev_set_flow_api_enabled() when 'other_config:hw-offload' is true. */
static bool netdev_flow_api_enabled = false;

/* Protects 'netdev_flow_apis'. */
static struct ovs_mutex netdev_flow_api_provider_mutex = OVS_MUTEX_INITIALIZER;

/* Contains 'struct netdev_registered_flow_api's.  Readers traverse this cmap
 * without taking the mutex (RCU); only writers must hold it. */
static struct cmap netdev_flow_apis = CMAP_INITIALIZER;
68
/* A flow API provider registered in 'netdev_flow_apis', together with the
 * reference count that tracks how many netdevs currently use it. */
struct netdev_registered_flow_api {
    struct cmap_node cmap_node; /* In 'netdev_flow_apis', by flow_api->type. */
    const struct netdev_flow_api *flow_api;

    /* Number of references: one for the flow_api itself and one for every
     * instance of the netdev that uses it. */
    struct ovs_refcount refcnt;
};
77
78 static struct netdev_registered_flow_api *
79 netdev_lookup_flow_api(const char *type)
80 {
81 struct netdev_registered_flow_api *rfa;
82 CMAP_FOR_EACH_WITH_HASH (rfa, cmap_node, hash_string(type, 0),
83 &netdev_flow_apis) {
84 if (!strcmp(type, rfa->flow_api->type)) {
85 return rfa;
86 }
87 }
88 return NULL;
89 }
90
91 /* Registers a new netdev flow api provider. */
92 int
93 netdev_register_flow_api_provider(const struct netdev_flow_api *new_flow_api)
94 OVS_EXCLUDED(netdev_flow_api_provider_mutex)
95 {
96 int error = 0;
97
98 if (!new_flow_api->init_flow_api) {
99 VLOG_WARN("attempted to register invalid flow api provider: %s",
100 new_flow_api->type);
101 error = EINVAL;
102 }
103
104 ovs_mutex_lock(&netdev_flow_api_provider_mutex);
105 if (netdev_lookup_flow_api(new_flow_api->type)) {
106 VLOG_WARN("attempted to register duplicate flow api provider: %s",
107 new_flow_api->type);
108 error = EEXIST;
109 } else {
110 struct netdev_registered_flow_api *rfa;
111
112 rfa = xmalloc(sizeof *rfa);
113 cmap_insert(&netdev_flow_apis, &rfa->cmap_node,
114 hash_string(new_flow_api->type, 0));
115 rfa->flow_api = new_flow_api;
116 ovs_refcount_init(&rfa->refcnt);
117 VLOG_DBG("netdev: flow API '%s' registered.", new_flow_api->type);
118 }
119 ovs_mutex_unlock(&netdev_flow_api_provider_mutex);
120
121 return error;
122 }
123
/* Unregisters a netdev flow api provider.  'type' must have been previously
 * registered and not currently be in use by any netdevs.  After unregistration
 * netdev flow api of that type cannot be used for netdevs.  (However, the
 * provider may still be accessible from other threads until the next RCU grace
 * period, so the caller must not free or re-register the same netdev_flow_api
 * until that has passed.)
 *
 * Returns 0 on success, EAFNOSUPPORT if no such provider is registered, or
 * EBUSY if any netdev still holds a reference to it. */
int
netdev_unregister_flow_api_provider(const char *type)
    OVS_EXCLUDED(netdev_flow_api_provider_mutex)
{
    struct netdev_registered_flow_api *rfa;
    int error;

    ovs_mutex_lock(&netdev_flow_api_provider_mutex);
    rfa = netdev_lookup_flow_api(type);
    if (!rfa) {
        VLOG_WARN("attempted to unregister a flow api provider that is not "
                  "registered: %s", type);
        error = EAFNOSUPPORT;
    } else if (ovs_refcount_unref(&rfa->refcnt) != 1) {
        /* ovs_refcount_unref() returns the pre-decrement count.  A value
         * other than 1 means netdevs still reference the provider, so undo
         * the decrement and refuse to unregister. */
        ovs_refcount_ref(&rfa->refcnt);
        VLOG_WARN("attempted to unregister in use flow api provider: %s",
                  type);
        error = EBUSY;
    } else {
        cmap_remove(&netdev_flow_apis, &rfa->cmap_node,
                    hash_string(rfa->flow_api->type, 0));
        /* Defer the free until after an RCU grace period, since concurrent
         * readers may still be traversing the cmap. */
        ovsrcu_postpone(free, rfa);
        error = 0;
    }
    ovs_mutex_unlock(&netdev_flow_api_provider_mutex);

    return error;
}
158
159 bool
160 netdev_flow_api_equals(const struct netdev *netdev1,
161 const struct netdev *netdev2)
162 {
163 const struct netdev_flow_api *netdev_flow_api1 =
164 ovsrcu_get(const struct netdev_flow_api *, &netdev1->flow_api);
165 const struct netdev_flow_api *netdev_flow_api2 =
166 ovsrcu_get(const struct netdev_flow_api *, &netdev2->flow_api);
167
168 return netdev_flow_api1 == netdev_flow_api2;
169 }
170
171 static int
172 netdev_assign_flow_api(struct netdev *netdev)
173 {
174 struct netdev_registered_flow_api *rfa;
175
176 CMAP_FOR_EACH (rfa, cmap_node, &netdev_flow_apis) {
177 if (!rfa->flow_api->init_flow_api(netdev)) {
178 ovs_refcount_ref(&rfa->refcnt);
179 ovsrcu_set(&netdev->flow_api, rfa->flow_api);
180 VLOG_INFO("%s: Assigned flow API '%s'.",
181 netdev_get_name(netdev), rfa->flow_api->type);
182 return 0;
183 }
184 VLOG_DBG("%s: flow API '%s' is not suitable.",
185 netdev_get_name(netdev), rfa->flow_api->type);
186 }
187 VLOG_INFO("%s: No suitable flow API found.", netdev_get_name(netdev));
188
189 return -1;
190 }
191
192 int
193 netdev_flow_flush(struct netdev *netdev)
194 {
195 const struct netdev_flow_api *flow_api =
196 ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);
197
198 return (flow_api && flow_api->flow_flush)
199 ? flow_api->flow_flush(netdev)
200 : EOPNOTSUPP;
201 }
202
203 int
204 netdev_flow_dump_create(struct netdev *netdev, struct netdev_flow_dump **dump,
205 bool terse)
206 {
207 const struct netdev_flow_api *flow_api =
208 ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);
209
210 return (flow_api && flow_api->flow_dump_create)
211 ? flow_api->flow_dump_create(netdev, dump, terse)
212 : EOPNOTSUPP;
213 }
214
215 int
216 netdev_flow_dump_destroy(struct netdev_flow_dump *dump)
217 {
218 const struct netdev_flow_api *flow_api =
219 ovsrcu_get(const struct netdev_flow_api *, &dump->netdev->flow_api);
220
221 return (flow_api && flow_api->flow_dump_destroy)
222 ? flow_api->flow_dump_destroy(dump)
223 : EOPNOTSUPP;
224 }
225
226 bool
227 netdev_flow_dump_next(struct netdev_flow_dump *dump, struct match *match,
228 struct nlattr **actions, struct dpif_flow_stats *stats,
229 struct dpif_flow_attrs *attrs, ovs_u128 *ufid,
230 struct ofpbuf *rbuffer, struct ofpbuf *wbuffer)
231 {
232 const struct netdev_flow_api *flow_api =
233 ovsrcu_get(const struct netdev_flow_api *, &dump->netdev->flow_api);
234
235 return (flow_api && flow_api->flow_dump_next)
236 ? flow_api->flow_dump_next(dump, match, actions, stats, attrs,
237 ufid, rbuffer, wbuffer)
238 : false;
239 }
240
241 int
242 netdev_flow_put(struct netdev *netdev, struct match *match,
243 struct nlattr *actions, size_t act_len,
244 const ovs_u128 *ufid, struct offload_info *info,
245 struct dpif_flow_stats *stats)
246 {
247 const struct netdev_flow_api *flow_api =
248 ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);
249
250 return (flow_api && flow_api->flow_put)
251 ? flow_api->flow_put(netdev, match, actions, act_len, ufid,
252 info, stats)
253 : EOPNOTSUPP;
254 }
255
256 int
257 netdev_flow_get(struct netdev *netdev, struct match *match,
258 struct nlattr **actions, const ovs_u128 *ufid,
259 struct dpif_flow_stats *stats,
260 struct dpif_flow_attrs *attrs, struct ofpbuf *buf)
261 {
262 const struct netdev_flow_api *flow_api =
263 ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);
264
265 return (flow_api && flow_api->flow_get)
266 ? flow_api->flow_get(netdev, match, actions, ufid,
267 stats, attrs, buf)
268 : EOPNOTSUPP;
269 }
270
271 int
272 netdev_flow_del(struct netdev *netdev, const ovs_u128 *ufid,
273 struct dpif_flow_stats *stats)
274 {
275 const struct netdev_flow_api *flow_api =
276 ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);
277
278 return (flow_api && flow_api->flow_del)
279 ? flow_api->flow_del(netdev, ufid, stats)
280 : EOPNOTSUPP;
281 }
282
283 int
284 netdev_flow_get_n_flows(struct netdev *netdev, uint64_t *n_flows)
285 {
286 const struct netdev_flow_api *flow_api =
287 ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);
288
289 return (flow_api && flow_api->flow_get_n_flows)
290 ? flow_api->flow_get_n_flows(netdev, n_flows)
291 : EOPNOTSUPP;
292 }
293
294 int
295 netdev_init_flow_api(struct netdev *netdev)
296 {
297 if (!netdev_is_flow_api_enabled()) {
298 return EOPNOTSUPP;
299 }
300
301 if (ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api)) {
302 return 0;
303 }
304
305 if (netdev_assign_flow_api(netdev)) {
306 return EOPNOTSUPP;
307 }
308
309 return 0;
310 }
311
/* Detaches the flow API provider from 'netdev', if one is assigned, and
 * drops the provider reference taken by netdev_assign_flow_api(). */
void
netdev_uninit_flow_api(struct netdev *netdev)
{
    struct netdev_registered_flow_api *rfa;
    const struct netdev_flow_api *flow_api =
        ovsrcu_get(const struct netdev_flow_api *, &netdev->flow_api);

    if (!flow_api) {
        return;
    }

    /* Clear the RCU pointer first so new readers no longer see the
     * provider, then drop our reference.  The lookup cannot fail: the
     * provider cannot be unregistered while this netdev holds a reference
     * (netdev_unregister_flow_api_provider() returns EBUSY). */
    ovsrcu_set(&netdev->flow_api, NULL);
    rfa = netdev_lookup_flow_api(flow_api->type);
    ovs_refcount_unref(&rfa->refcnt);
}
327
328 uint32_t
329 netdev_get_block_id(struct netdev *netdev)
330 {
331 const struct netdev_class *class = netdev->netdev_class;
332
333 return (class->get_block_id
334 ? class->get_block_id(netdev)
335 : 0);
336 }
337
338 /*
339 * Get the value of the hw info parameter specified by type.
340 * Returns the value on success (>= 0). Returns -1 on failure.
341 */
342 int
343 netdev_get_hw_info(struct netdev *netdev, int type)
344 {
345 int val = -1;
346
347 switch (type) {
348 case HW_INFO_TYPE_OOR:
349 val = netdev->hw_info.oor;
350 break;
351 case HW_INFO_TYPE_PEND_COUNT:
352 val = netdev->hw_info.pending_count;
353 break;
354 case HW_INFO_TYPE_OFFL_COUNT:
355 val = netdev->hw_info.offload_count;
356 break;
357 default:
358 break;
359 }
360
361 return val;
362 }
363
364 /*
365 * Set the value of the hw info parameter specified by type.
366 */
367 void
368 netdev_set_hw_info(struct netdev *netdev, int type, int val)
369 {
370 switch (type) {
371 case HW_INFO_TYPE_OOR:
372 if (val == 0) {
373 VLOG_DBG("Offload rebalance: netdev: %s is not OOR", netdev->name);
374 }
375 netdev->hw_info.oor = val;
376 break;
377 case HW_INFO_TYPE_PEND_COUNT:
378 netdev->hw_info.pending_count = val;
379 break;
380 case HW_INFO_TYPE_OFFL_COUNT:
381 netdev->hw_info.offload_count = val;
382 break;
383 default:
384 break;
385 }
386 }
387
/* Protects below port hashmaps. */
static struct ovs_rwlock netdev_hmap_rwlock = OVS_RWLOCK_INITIALIZER;

/* Maps (dpif_type, odp port number) to 'struct port_to_netdev_data'. */
static struct hmap port_to_netdev OVS_GUARDED_BY(netdev_hmap_rwlock)
    = HMAP_INITIALIZER(&port_to_netdev);
/* Maps ifindex to the same entries; hashed by the raw ifindex value. */
static struct hmap ifindex_to_port OVS_GUARDED_BY(netdev_hmap_rwlock)
    = HMAP_INITIALIZER(&ifindex_to_port);

/* One entry per datapath port; lives in both maps above and owns a
 * reference on 'netdev' plus a clone of 'dpif_port'. */
struct port_to_netdev_data {
    struct hmap_node portno_node; /* By (dpif_type, dpif_port.port_no). */
    struct hmap_node ifindex_node; /* By (dpif_type, ifindex). */
    struct netdev *netdev;
    struct dpif_port dpif_port;
    int ifindex;
};
403
404 /*
405 * Find if any netdev is in OOR state. Return true if there's at least
406 * one netdev that's in OOR state; otherwise return false.
407 */
408 bool
409 netdev_any_oor(void)
410 OVS_EXCLUDED(netdev_hmap_rwlock)
411 {
412 struct port_to_netdev_data *data;
413 bool oor = false;
414
415 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
416 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
417 struct netdev *dev = data->netdev;
418
419 if (dev->hw_info.oor) {
420 oor = true;
421 break;
422 }
423 }
424 ovs_rwlock_unlock(&netdev_hmap_rwlock);
425
426 return oor;
427 }
428
/* Returns true if hardware offload ('other_config:hw-offload') has been
 * enabled.  Once enabled it stays enabled for the process lifetime. */
bool
netdev_is_flow_api_enabled(void)
{
    return netdev_flow_api_enabled;
}
434
/* Flushes offloaded flows from every port that belongs to the datapath of
 * type 'dpif_type'. */
void
netdev_ports_flow_flush(const char *dpif_type)
{
    struct port_to_netdev_data *data;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        /* NOTE(review): pointer comparison, not strcmp() -- this assumes
         * dpif type strings are interned constants shared with
         * netdev_set_dpif_type(); confirm against callers. */
        if (netdev_get_dpif_type(data->netdev) == dpif_type) {
            netdev_flow_flush(data->netdev);
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);
}
448
/* Creates one flow dump per port of datapath type 'dpif_type'.  Returns a
 * heap-allocated array of dump pointers (or NULL if there are no matching
 * ports) and stores the number of successfully created dumps in '*ports'.
 * Caller owns the array and the dumps. */
struct netdev_flow_dump **
netdev_ports_flow_dump_create(const char *dpif_type, int *ports, bool terse)
{
    struct port_to_netdev_data *data;
    struct netdev_flow_dump **dumps;
    int count = 0;
    int i = 0;

    ovs_rwlock_rdlock(&netdev_hmap_rwlock);
    /* First pass: count matching ports to size the array. */
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (netdev_get_dpif_type(data->netdev) == dpif_type) {
            count++;
        }
    }

    dumps = count ? xzalloc(sizeof *dumps * count) : NULL;

    /* Second pass: create a dump per port.  Ports whose dump creation
     * fails are skipped, so 'i' may end up smaller than 'count'; the
     * zero-initialized tail slots are simply unused. */
    HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
        if (netdev_get_dpif_type(data->netdev) == dpif_type) {
            if (netdev_flow_dump_create(data->netdev, &dumps[i], terse)) {
                continue;
            }

            dumps[i]->port = data->dpif_port.port_no;
            i++;
        }
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    *ports = i;
    return dumps;
}
481
482 int
483 netdev_ports_flow_del(const char *dpif_type, const ovs_u128 *ufid,
484 struct dpif_flow_stats *stats)
485 {
486 struct port_to_netdev_data *data;
487
488 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
489 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
490 if (netdev_get_dpif_type(data->netdev) == dpif_type
491 && !netdev_flow_del(data->netdev, ufid, stats)) {
492 ovs_rwlock_unlock(&netdev_hmap_rwlock);
493 return 0;
494 }
495 }
496 ovs_rwlock_unlock(&netdev_hmap_rwlock);
497
498 return ENOENT;
499 }
500
501 int
502 netdev_ports_flow_get(const char *dpif_type, struct match *match,
503 struct nlattr **actions, const ovs_u128 *ufid,
504 struct dpif_flow_stats *stats,
505 struct dpif_flow_attrs *attrs, struct ofpbuf *buf)
506 {
507 struct port_to_netdev_data *data;
508
509 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
510 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
511 if (netdev_get_dpif_type(data->netdev) == dpif_type
512 && !netdev_flow_get(data->netdev, match, actions,
513 ufid, stats, attrs, buf)) {
514 ovs_rwlock_unlock(&netdev_hmap_rwlock);
515 return 0;
516 }
517 }
518 ovs_rwlock_unlock(&netdev_hmap_rwlock);
519 return ENOENT;
520 }
521
/* Hashes the (port number, dpif type) pair used to index 'port_to_netdev'.
 * The dpif type is hashed by pointer, which matches the pointer-equality
 * comparison used in netdev_ports_lookup(). */
static uint32_t
netdev_ports_hash(odp_port_t port, const char *dpif_type)
{
    return hash_int(odp_to_u32(port), hash_pointer(dpif_type, 0));
}
527
/* Returns the port map entry for ('port_no', 'dpif_type'), or NULL if none
 * exists.  Caller must hold 'netdev_hmap_rwlock' at least for reading. */
static struct port_to_netdev_data *
netdev_ports_lookup(odp_port_t port_no, const char *dpif_type)
    OVS_REQ_RDLOCK(netdev_hmap_rwlock)
{
    struct port_to_netdev_data *data;

    HMAP_FOR_EACH_WITH_HASH (data, portno_node,
                             netdev_ports_hash(port_no, dpif_type),
                             &port_to_netdev) {
        /* dpif types compare by pointer identity (interned strings). */
        if (netdev_get_dpif_type(data->netdev) == dpif_type
            && data->dpif_port.port_no == port_no) {
            return data;
        }
    }
    return NULL;
}
544
/* Adds 'netdev' to the port maps under ('dpif_type', dpif_port->port_no).
 * Takes its own reference on 'netdev' and clones 'dpif_port'.
 *
 * Returns 0 on success, ENODEV if the netdev has no usable ifindex, or
 * EEXIST if the port is already mapped. */
int
netdev_ports_insert(struct netdev *netdev, const char *dpif_type,
                    struct dpif_port *dpif_port)
{
    struct port_to_netdev_data *data;
    int ifindex = netdev_get_ifindex(netdev);

    if (ifindex < 0) {
        return ENODEV;
    }

    ovs_rwlock_wrlock(&netdev_hmap_rwlock);
    if (netdev_ports_lookup(dpif_port->port_no, dpif_type)) {
        ovs_rwlock_unlock(&netdev_hmap_rwlock);
        return EEXIST;
    }

    data = xzalloc(sizeof *data);
    data->netdev = netdev_ref(netdev);
    dpif_port_clone(&data->dpif_port, dpif_port);
    data->ifindex = ifindex;

    netdev_set_dpif_type(netdev, dpif_type);

    hmap_insert(&port_to_netdev, &data->portno_node,
                netdev_ports_hash(dpif_port->port_no, dpif_type));
    /* The raw ifindex doubles as the hash for the ifindex map; lookups in
     * netdev_ifindex_to_odp_port() use the same convention. */
    hmap_insert(&ifindex_to_port, &data->ifindex_node, ifindex);
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    /* Assign a flow API after dropping the lock; failure here is not an
     * error for the insertion itself. */
    netdev_init_flow_api(netdev);

    return 0;
}
578
579 struct netdev *
580 netdev_ports_get(odp_port_t port_no, const char *dpif_type)
581 {
582 struct port_to_netdev_data *data;
583 struct netdev *ret = NULL;
584
585 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
586 data = netdev_ports_lookup(port_no, dpif_type);
587 if (data) {
588 ret = netdev_ref(data->netdev);
589 }
590 ovs_rwlock_unlock(&netdev_hmap_rwlock);
591
592 return ret;
593 }
594
/* Removes the port mapped at ('port_no', 'dpif_type') from both maps,
 * dropping the reference taken by netdev_ports_insert().  Returns 0 on
 * success, ENOENT if the port is not mapped. */
int
netdev_ports_remove(odp_port_t port_no, const char *dpif_type)
{
    struct port_to_netdev_data *data;
    int ret = ENOENT;

    ovs_rwlock_wrlock(&netdev_hmap_rwlock);
    data = netdev_ports_lookup(port_no, dpif_type);
    if (data) {
        dpif_port_destroy(&data->dpif_port);
        netdev_close(data->netdev); /* unref and possibly close */
        hmap_remove(&port_to_netdev, &data->portno_node);
        hmap_remove(&ifindex_to_port, &data->ifindex_node);
        free(data);
        ret = 0;
    }
    ovs_rwlock_unlock(&netdev_hmap_rwlock);

    return ret;
}
615
616 int
617 netdev_ports_get_n_flows(const char *dpif_type, odp_port_t port_no,
618 uint64_t *n_flows)
619 {
620 struct port_to_netdev_data *data;
621 int ret = EOPNOTSUPP;
622
623 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
624 data = netdev_ports_lookup(port_no, dpif_type);
625 if (data) {
626 ret = netdev_flow_get_n_flows(data->netdev, n_flows);
627 }
628 ovs_rwlock_unlock(&netdev_hmap_rwlock);
629 return ret;
630 }
631
632 odp_port_t
633 netdev_ifindex_to_odp_port(int ifindex)
634 {
635 struct port_to_netdev_data *data;
636 odp_port_t ret = 0;
637
638 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
639 HMAP_FOR_EACH_WITH_HASH (data, ifindex_node, ifindex, &ifindex_to_port) {
640 if (data->ifindex == ifindex) {
641 ret = data->dpif_port.port_no;
642 break;
643 }
644 }
645 ovs_rwlock_unlock(&netdev_hmap_rwlock);
646
647 return ret;
648 }
649
/* True when 'other_config:offload-rebalance' was enabled at the time the
 * flow API was turned on; set once in netdev_set_flow_api_enabled(). */
static bool netdev_offload_rebalance_policy = false;

/* Returns true if the offload-rebalance policy is enabled. */
bool
netdev_is_offload_rebalance_policy_enabled(void)
{
    return netdev_offload_rebalance_policy;
}
657
658 static void
659 netdev_ports_flow_init(void)
660 {
661 struct port_to_netdev_data *data;
662
663 ovs_rwlock_rdlock(&netdev_hmap_rwlock);
664 HMAP_FOR_EACH (data, portno_node, &port_to_netdev) {
665 netdev_init_flow_api(data->netdev);
666 }
667 ovs_rwlock_unlock(&netdev_hmap_rwlock);
668 }
669
/* Reads offload-related settings from 'ovs_other_config' and, if
 * 'hw-offload' is true, enables the flow API.  The ovsthread_once guard
 * makes enabling a one-way, one-time transition: later configuration
 * changes cannot disable offloading again. */
void
netdev_set_flow_api_enabled(const struct smap *ovs_other_config)
{
    if (smap_get_bool(ovs_other_config, "hw-offload", false)) {
        static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

        if (ovsthread_once_start(&once)) {
            netdev_flow_api_enabled = true;

            VLOG_INFO("netdev: Flow API Enabled");

#ifdef __linux__
            /* 'tc-policy' only applies to the Linux TC offload path. */
            tc_set_policy(smap_get_def(ovs_other_config, "tc-policy",
                                       TC_POLICY_DEFAULT));
#endif

            if (smap_get_bool(ovs_other_config, "offload-rebalance", false)) {
                netdev_offload_rebalance_policy = true;
            }

            /* Pick up netdevs that were registered before this point. */
            netdev_ports_flow_init();

            ovsthread_once_done(&once);
        }
    }
}