]> git.proxmox.com Git - mirror_ovs.git/blob - ovn/northd/ovn-northd.c
ovn: Add stateful ACL support.
[mirror_ovs.git] / ovn / northd / ovn-northd.c
1 /*
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at:
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 #include <config.h>
16
17 #include <getopt.h>
18 #include <stdlib.h>
19 #include <stdio.h>
20
21 #include "command-line.h"
22 #include "daemon.h"
23 #include "dirs.h"
24 #include "dynamic-string.h"
25 #include "fatal-signal.h"
26 #include "hash.h"
27 #include "hmap.h"
28 #include "json.h"
29 #include "ovn/lib/lex.h"
30 #include "ovn/lib/ovn-nb-idl.h"
31 #include "ovn/lib/ovn-sb-idl.h"
32 #include "poll-loop.h"
33 #include "smap.h"
34 #include "stream.h"
35 #include "stream-ssl.h"
36 #include "unixctl.h"
37 #include "util.h"
38 #include "uuid.h"
39 #include "openvswitch/vlog.h"
40
41 VLOG_DEFINE_THIS_MODULE(ovn_northd);
42
43 static unixctl_cb_func ovn_northd_exit;
44
45 struct northd_context {
46 struct ovsdb_idl *ovnnb_idl;
47 struct ovsdb_idl *ovnsb_idl;
48 struct ovsdb_idl_txn *ovnnb_txn;
49 struct ovsdb_idl_txn *ovnsb_txn;
50 };
51
52 static const char *ovnnb_db;
53 static const char *ovnsb_db;
54
55 static const char *default_db(void);
56
57
58 /* Ingress pipeline stages.
59 *
60 * These must be listed in the order that the stages will be executed. */
61 #define INGRESS_STAGES \
62 INGRESS_STAGE(PORT_SEC, port_sec) \
63 INGRESS_STAGE(PRE_ACL, pre_acl) \
64 INGRESS_STAGE(ACL, acl) \
65 INGRESS_STAGE(L2_LKUP, l2_lkup)
66
67 enum ingress_stage {
68 #define INGRESS_STAGE(NAME, STR) S_IN_##NAME,
69 INGRESS_STAGES
70 #undef INGRESS_STAGE
71 INGRESS_N_STAGES
72 };
73
74 /* Egress pipeline stages.
75 *
76 * These must be listed in the order that the stages will be executed. */
77 #define EGRESS_STAGES \
78 EGRESS_STAGE(PRE_ACL, pre_acl) \
79 EGRESS_STAGE(ACL, acl) \
80 EGRESS_STAGE(PORT_SEC, port_sec)
81
82 enum egress_stage {
83 #define EGRESS_STAGE(NAME, STR) S_OUT_##NAME,
84 EGRESS_STAGES
85 #undef EGRESS_STAGE
86 EGRESS_N_STAGES
87 };
88
89 static void
90 usage(void)
91 {
92 printf("\
93 %s: OVN northbound management daemon\n\
94 usage: %s [OPTIONS]\n\
95 \n\
96 Options:\n\
97 --ovnnb-db=DATABASE connect to ovn-nb database at DATABASE\n\
98 (default: %s)\n\
99 --ovnsb-db=DATABASE connect to ovn-sb database at DATABASE\n\
100 (default: %s)\n\
101 -h, --help display this help message\n\
102 -o, --options list available options\n\
103 -V, --version display version information\n\
104 ", program_name, program_name, default_db(), default_db());
105 daemon_usage();
106 vlog_usage();
107 stream_usage("database", true, true, false);
108 }
109 \f
110 struct tnlid_node {
111 struct hmap_node hmap_node;
112 uint32_t tnlid;
113 };
114
115 static void
116 destroy_tnlids(struct hmap *tnlids)
117 {
118 struct tnlid_node *node, *next;
119 HMAP_FOR_EACH_SAFE (node, next, hmap_node, tnlids) {
120 hmap_remove(tnlids, &node->hmap_node);
121 free(node);
122 }
123 hmap_destroy(tnlids);
124 }
125
126 static void
127 add_tnlid(struct hmap *set, uint32_t tnlid)
128 {
129 struct tnlid_node *node = xmalloc(sizeof *node);
130 hmap_insert(set, &node->hmap_node, hash_int(tnlid, 0));
131 node->tnlid = tnlid;
132 }
133
134 static bool
135 tnlid_in_use(const struct hmap *set, uint32_t tnlid)
136 {
137 const struct tnlid_node *node;
138 HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_int(tnlid, 0), set) {
139 if (node->tnlid == tnlid) {
140 return true;
141 }
142 }
143 return false;
144 }
145
146 static uint32_t
147 allocate_tnlid(struct hmap *set, const char *name, uint32_t max,
148 uint32_t *hint)
149 {
150 for (uint32_t tnlid = *hint + 1; tnlid != *hint;
151 tnlid = tnlid + 1 <= max ? tnlid + 1 : 1) {
152 if (!tnlid_in_use(set, tnlid)) {
153 add_tnlid(set, tnlid);
154 *hint = tnlid;
155 return tnlid;
156 }
157 }
158
159 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
160 VLOG_WARN_RL(&rl, "all %s tunnel ids exhausted", name);
161 return 0;
162 }
163 \f
164 /* The 'key' comes from nb->header_.uuid or sb->external_ids:logical-switch. */
165 struct ovn_datapath {
166 struct hmap_node key_node; /* Index on 'key'. */
167 struct uuid key; /* nb->header_.uuid. */
168
169 const struct nbrec_logical_switch *nb; /* May be NULL. */
170 const struct sbrec_datapath_binding *sb; /* May be NULL. */
171
172 struct ovs_list list; /* In list of similar records. */
173
174 struct hmap port_tnlids;
175 uint32_t port_key_hint;
176
177 bool has_unknown;
178 };
179
180 static struct ovn_datapath *
181 ovn_datapath_create(struct hmap *datapaths, const struct uuid *key,
182 const struct nbrec_logical_switch *nb,
183 const struct sbrec_datapath_binding *sb)
184 {
185 struct ovn_datapath *od = xzalloc(sizeof *od);
186 od->key = *key;
187 od->sb = sb;
188 od->nb = nb;
189 hmap_init(&od->port_tnlids);
190 od->port_key_hint = 0;
191 hmap_insert(datapaths, &od->key_node, uuid_hash(&od->key));
192 return od;
193 }
194
195 static void
196 ovn_datapath_destroy(struct hmap *datapaths, struct ovn_datapath *od)
197 {
198 if (od) {
199 /* Don't remove od->list. It is used within build_datapaths() as a
200 * private list and once we've exited that function it is not safe to
201 * use it. */
202 hmap_remove(datapaths, &od->key_node);
203 destroy_tnlids(&od->port_tnlids);
204 free(od);
205 }
206 }
207
208 static struct ovn_datapath *
209 ovn_datapath_find(struct hmap *datapaths, const struct uuid *uuid)
210 {
211 struct ovn_datapath *od;
212
213 HMAP_FOR_EACH_WITH_HASH (od, key_node, uuid_hash(uuid), datapaths) {
214 if (uuid_equals(uuid, &od->key)) {
215 return od;
216 }
217 }
218 return NULL;
219 }
220
221 static struct ovn_datapath *
222 ovn_datapath_from_sbrec(struct hmap *datapaths,
223 const struct sbrec_datapath_binding *sb)
224 {
225 struct uuid key;
226
227 if (!smap_get_uuid(&sb->external_ids, "logical-switch", &key)) {
228 return NULL;
229 }
230 return ovn_datapath_find(datapaths, &key);
231 }
232
233 static void
234 join_datapaths(struct northd_context *ctx, struct hmap *datapaths,
235 struct ovs_list *sb_only, struct ovs_list *nb_only,
236 struct ovs_list *both)
237 {
238 hmap_init(datapaths);
239 list_init(sb_only);
240 list_init(nb_only);
241 list_init(both);
242
243 const struct sbrec_datapath_binding *sb, *sb_next;
244 SBREC_DATAPATH_BINDING_FOR_EACH_SAFE (sb, sb_next, ctx->ovnsb_idl) {
245 struct uuid key;
246 if (!smap_get_uuid(&sb->external_ids, "logical-switch", &key)) {
247 ovsdb_idl_txn_add_comment(ctx->ovnsb_txn,
248 "deleting Datapath_Binding "UUID_FMT" that "
249 "lacks external-ids:logical-switch",
250 UUID_ARGS(&sb->header_.uuid));
251 sbrec_datapath_binding_delete(sb);
252 continue;
253 }
254
255 if (ovn_datapath_find(datapaths, &key)) {
256 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
257 VLOG_INFO_RL(&rl, "deleting Datapath_Binding "UUID_FMT" with "
258 "duplicate external-ids:logical-switch "UUID_FMT,
259 UUID_ARGS(&sb->header_.uuid), UUID_ARGS(&key));
260 sbrec_datapath_binding_delete(sb);
261 continue;
262 }
263
264 struct ovn_datapath *od = ovn_datapath_create(datapaths, &key,
265 NULL, sb);
266 list_push_back(sb_only, &od->list);
267 }
268
269 const struct nbrec_logical_switch *nb;
270 NBREC_LOGICAL_SWITCH_FOR_EACH (nb, ctx->ovnnb_idl) {
271 struct ovn_datapath *od = ovn_datapath_find(datapaths,
272 &nb->header_.uuid);
273 if (od) {
274 od->nb = nb;
275 list_remove(&od->list);
276 list_push_back(both, &od->list);
277 } else {
278 od = ovn_datapath_create(datapaths, &nb->header_.uuid, nb, NULL);
279 list_push_back(nb_only, &od->list);
280 }
281 }
282 }
283
284 static uint32_t
285 ovn_datapath_allocate_key(struct hmap *dp_tnlids)
286 {
287 static uint32_t hint;
288 return allocate_tnlid(dp_tnlids, "datapath", (1u << 24) - 1, &hint);
289 }
290
291 static void
292 build_datapaths(struct northd_context *ctx, struct hmap *datapaths)
293 {
294 struct ovs_list sb_only, nb_only, both;
295
296 join_datapaths(ctx, datapaths, &sb_only, &nb_only, &both);
297
298 if (!list_is_empty(&nb_only)) {
299 /* First index the in-use datapath tunnel IDs. */
300 struct hmap dp_tnlids = HMAP_INITIALIZER(&dp_tnlids);
301 struct ovn_datapath *od;
302 LIST_FOR_EACH (od, list, &both) {
303 add_tnlid(&dp_tnlids, od->sb->tunnel_key);
304 }
305
306 /* Add southbound record for each unmatched northbound record. */
307 LIST_FOR_EACH (od, list, &nb_only) {
308 uint16_t tunnel_key = ovn_datapath_allocate_key(&dp_tnlids);
309 if (!tunnel_key) {
310 break;
311 }
312
313 od->sb = sbrec_datapath_binding_insert(ctx->ovnsb_txn);
314
315 char uuid_s[UUID_LEN + 1];
316 sprintf(uuid_s, UUID_FMT, UUID_ARGS(&od->nb->header_.uuid));
317 const struct smap id = SMAP_CONST1(&id, "logical-switch", uuid_s);
318 sbrec_datapath_binding_set_external_ids(od->sb, &id);
319
320 sbrec_datapath_binding_set_tunnel_key(od->sb, tunnel_key);
321 }
322 destroy_tnlids(&dp_tnlids);
323 }
324
325 /* Delete southbound records without northbound matches. */
326 struct ovn_datapath *od, *next;
327 LIST_FOR_EACH_SAFE (od, next, list, &sb_only) {
328 list_remove(&od->list);
329 sbrec_datapath_binding_delete(od->sb);
330 ovn_datapath_destroy(datapaths, od);
331 }
332 }
333 \f
334 struct ovn_port {
335 struct hmap_node key_node; /* Index on 'key'. */
336 const char *key; /* nb->name and sb->logical_port */
337
338 const struct nbrec_logical_port *nb; /* May be NULL. */
339 const struct sbrec_port_binding *sb; /* May be NULL. */
340
341 struct ovn_datapath *od;
342
343 struct ovs_list list; /* In list of similar records. */
344 };
345
346 static struct ovn_port *
347 ovn_port_create(struct hmap *ports, const char *key,
348 const struct nbrec_logical_port *nb,
349 const struct sbrec_port_binding *sb)
350 {
351 struct ovn_port *op = xzalloc(sizeof *op);
352 op->key = key;
353 op->sb = sb;
354 op->nb = nb;
355 hmap_insert(ports, &op->key_node, hash_string(op->key, 0));
356 return op;
357 }
358
359 static void
360 ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
361 {
362 if (port) {
363 /* Don't remove port->list. It is used within build_ports() as a
364 * private list and once we've exited that function it is not safe to
365 * use it. */
366 hmap_remove(ports, &port->key_node);
367 free(port);
368 }
369 }
370
371 static struct ovn_port *
372 ovn_port_find(struct hmap *ports, const char *name)
373 {
374 struct ovn_port *op;
375
376 HMAP_FOR_EACH_WITH_HASH (op, key_node, hash_string(name, 0), ports) {
377 if (!strcmp(op->key, name)) {
378 return op;
379 }
380 }
381 return NULL;
382 }
383
384 static uint32_t
385 ovn_port_allocate_key(struct ovn_datapath *od)
386 {
387 return allocate_tnlid(&od->port_tnlids, "port",
388 (1u << 15) - 1, &od->port_key_hint);
389 }
390
391 static void
392 join_logical_ports(struct northd_context *ctx,
393 struct hmap *datapaths, struct hmap *ports,
394 struct ovs_list *sb_only, struct ovs_list *nb_only,
395 struct ovs_list *both)
396 {
397 hmap_init(ports);
398 list_init(sb_only);
399 list_init(nb_only);
400 list_init(both);
401
402 const struct sbrec_port_binding *sb;
403 SBREC_PORT_BINDING_FOR_EACH (sb, ctx->ovnsb_idl) {
404 struct ovn_port *op = ovn_port_create(ports, sb->logical_port,
405 NULL, sb);
406 list_push_back(sb_only, &op->list);
407 }
408
409 struct ovn_datapath *od;
410 HMAP_FOR_EACH (od, key_node, datapaths) {
411 for (size_t i = 0; i < od->nb->n_ports; i++) {
412 const struct nbrec_logical_port *nb = od->nb->ports[i];
413 struct ovn_port *op = ovn_port_find(ports, nb->name);
414 if (op) {
415 op->nb = nb;
416 list_remove(&op->list);
417 list_push_back(both, &op->list);
418 } else {
419 op = ovn_port_create(ports, nb->name, nb, NULL);
420 list_push_back(nb_only, &op->list);
421 }
422 op->od = od;
423 }
424 }
425 }
426
427 static void
428 ovn_port_update_sbrec(const struct ovn_port *op)
429 {
430 sbrec_port_binding_set_type(op->sb, op->nb->type);
431 sbrec_port_binding_set_options(op->sb, &op->nb->options);
432 sbrec_port_binding_set_datapath(op->sb, op->od->sb);
433 sbrec_port_binding_set_parent_port(op->sb, op->nb->parent_name);
434 sbrec_port_binding_set_tag(op->sb, op->nb->tag, op->nb->n_tag);
435 sbrec_port_binding_set_mac(op->sb, (const char **) op->nb->macs,
436 op->nb->n_macs);
437 }
438
439 static void
440 build_ports(struct northd_context *ctx, struct hmap *datapaths,
441 struct hmap *ports)
442 {
443 struct ovs_list sb_only, nb_only, both;
444
445 join_logical_ports(ctx, datapaths, ports, &sb_only, &nb_only, &both);
446
447 /* For logical ports that are in both databases, update the southbound
448 * record based on northbound data. Also index the in-use tunnel_keys. */
449 struct ovn_port *op, *next;
450 LIST_FOR_EACH_SAFE (op, next, list, &both) {
451 ovn_port_update_sbrec(op);
452
453 add_tnlid(&op->od->port_tnlids, op->sb->tunnel_key);
454 if (op->sb->tunnel_key > op->od->port_key_hint) {
455 op->od->port_key_hint = op->sb->tunnel_key;
456 }
457 }
458
459 /* Add southbound record for each unmatched northbound record. */
460 LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
461 uint16_t tunnel_key = ovn_port_allocate_key(op->od);
462 if (!tunnel_key) {
463 continue;
464 }
465
466 op->sb = sbrec_port_binding_insert(ctx->ovnsb_txn);
467 ovn_port_update_sbrec(op);
468
469 sbrec_port_binding_set_logical_port(op->sb, op->key);
470 sbrec_port_binding_set_tunnel_key(op->sb, tunnel_key);
471 }
472
473 /* Delete southbound records without northbound matches. */
474 LIST_FOR_EACH_SAFE(op, next, list, &sb_only) {
475 list_remove(&op->list);
476 sbrec_port_binding_delete(op->sb);
477 ovn_port_destroy(ports, op);
478 }
479 }
480 \f
481 #define OVN_MIN_MULTICAST 32768
482 #define OVN_MAX_MULTICAST 65535
483
484 struct multicast_group {
485 const char *name;
486 uint16_t key; /* OVN_MIN_MULTICAST...OVN_MAX_MULTICAST. */
487 };
488
489 #define MC_FLOOD "_MC_flood"
490 static const struct multicast_group mc_flood = { MC_FLOOD, 65535 };
491
492 #define MC_UNKNOWN "_MC_unknown"
493 static const struct multicast_group mc_unknown = { MC_UNKNOWN, 65534 };
494
495 static bool
496 multicast_group_equal(const struct multicast_group *a,
497 const struct multicast_group *b)
498 {
499 return !strcmp(a->name, b->name) && a->key == b->key;
500 }
501
502 /* Multicast group entry. */
503 struct ovn_multicast {
504 struct hmap_node hmap_node; /* Index on 'datapath' and 'key'. */
505 struct ovn_datapath *datapath;
506 const struct multicast_group *group;
507
508 struct ovn_port **ports;
509 size_t n_ports, allocated_ports;
510 };
511
512 static uint32_t
513 ovn_multicast_hash(const struct ovn_datapath *datapath,
514 const struct multicast_group *group)
515 {
516 return hash_pointer(datapath, group->key);
517 }
518
519 static struct ovn_multicast *
520 ovn_multicast_find(struct hmap *mcgroups, struct ovn_datapath *datapath,
521 const struct multicast_group *group)
522 {
523 struct ovn_multicast *mc;
524
525 HMAP_FOR_EACH_WITH_HASH (mc, hmap_node,
526 ovn_multicast_hash(datapath, group), mcgroups) {
527 if (mc->datapath == datapath
528 && multicast_group_equal(mc->group, group)) {
529 return mc;
530 }
531 }
532 return NULL;
533 }
534
535 static void
536 ovn_multicast_add(struct hmap *mcgroups, const struct multicast_group *group,
537 struct ovn_port *port)
538 {
539 struct ovn_datapath *od = port->od;
540 struct ovn_multicast *mc = ovn_multicast_find(mcgroups, od, group);
541 if (!mc) {
542 mc = xmalloc(sizeof *mc);
543 hmap_insert(mcgroups, &mc->hmap_node, ovn_multicast_hash(od, group));
544 mc->datapath = od;
545 mc->group = group;
546 mc->n_ports = 0;
547 mc->allocated_ports = 4;
548 mc->ports = xmalloc(mc->allocated_ports * sizeof *mc->ports);
549 }
550 if (mc->n_ports >= mc->allocated_ports) {
551 mc->ports = x2nrealloc(mc->ports, &mc->allocated_ports,
552 sizeof *mc->ports);
553 }
554 mc->ports[mc->n_ports++] = port;
555 }
556
557 static void
558 ovn_multicast_destroy(struct hmap *mcgroups, struct ovn_multicast *mc)
559 {
560 if (mc) {
561 hmap_remove(mcgroups, &mc->hmap_node);
562 free(mc->ports);
563 free(mc);
564 }
565 }
566
567 static void
568 ovn_multicast_update_sbrec(const struct ovn_multicast *mc,
569 const struct sbrec_multicast_group *sb)
570 {
571 struct sbrec_port_binding **ports = xmalloc(mc->n_ports * sizeof *ports);
572 for (size_t i = 0; i < mc->n_ports; i++) {
573 ports[i] = CONST_CAST(struct sbrec_port_binding *, mc->ports[i]->sb);
574 }
575 sbrec_multicast_group_set_ports(sb, ports, mc->n_ports);
576 free(ports);
577 }
578 \f
579 /* Logical flow generation.
580 *
581 * This code generates the Logical_Flow table in the southbound database, as a
582 * function of most of the northbound database.
583 */
584
585 struct ovn_lflow {
586 struct hmap_node hmap_node;
587
588 struct ovn_datapath *od;
589 enum ovn_pipeline { P_IN, P_OUT } pipeline;
590 uint8_t table_id;
591 uint16_t priority;
592 char *match;
593 char *actions;
594 };
595
596 static size_t
597 ovn_lflow_hash(const struct ovn_lflow *lflow)
598 {
599 size_t hash = uuid_hash(&lflow->od->key);
600 hash = hash_2words((lflow->table_id << 16) | lflow->priority, hash);
601 hash = hash_string(lflow->match, hash);
602 return hash_string(lflow->actions, hash);
603 }
604
605 static bool
606 ovn_lflow_equal(const struct ovn_lflow *a, const struct ovn_lflow *b)
607 {
608 return (a->od == b->od
609 && a->pipeline == b->pipeline
610 && a->table_id == b->table_id
611 && a->priority == b->priority
612 && !strcmp(a->match, b->match)
613 && !strcmp(a->actions, b->actions));
614 }
615
616 static void
617 ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
618 enum ovn_pipeline pipeline, uint8_t table_id, uint16_t priority,
619 char *match, char *actions)
620 {
621 lflow->od = od;
622 lflow->pipeline = pipeline;
623 lflow->table_id = table_id;
624 lflow->priority = priority;
625 lflow->match = match;
626 lflow->actions = actions;
627 }
628
629 static const char *
630 ingress_stage_to_str(int stage) {
631 switch (stage) {
632 #define INGRESS_STAGE(NAME, STR) case S_IN_##NAME: return #STR;
633 INGRESS_STAGES
634 #undef INGRESS_STAGE
635 default: return "<unknown>";
636 }
637 }
638
639 static const char *
640 egress_stage_to_str(int stage) {
641 switch (stage) {
642 #define EGRESS_STAGE(NAME, STR) case S_OUT_##NAME: return #STR;
643 EGRESS_STAGES
644 #undef EGRESS_STAGE
645 default: return "<unknown>";
646 }
647 }
648
649 /* Adds a row with the specified contents to the Logical_Flow table. */
650 static void
651 ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od,
652 enum ovn_pipeline pipeline, uint8_t table_id, uint16_t priority,
653 const char *match, const char *actions)
654 {
655 struct ovn_lflow *lflow = xmalloc(sizeof *lflow);
656 ovn_lflow_init(lflow, od, pipeline, table_id, priority,
657 xstrdup(match), xstrdup(actions));
658 hmap_insert(lflow_map, &lflow->hmap_node, ovn_lflow_hash(lflow));
659 }
660
661 static struct ovn_lflow *
662 ovn_lflow_find(struct hmap *lflows, struct ovn_datapath *od,
663 enum ovn_pipeline pipeline, uint8_t table_id, uint16_t priority,
664 const char *match, const char *actions)
665 {
666 struct ovn_lflow target;
667 ovn_lflow_init(&target, od, pipeline, table_id, priority,
668 CONST_CAST(char *, match), CONST_CAST(char *, actions));
669
670 struct ovn_lflow *lflow;
671 HMAP_FOR_EACH_WITH_HASH (lflow, hmap_node, ovn_lflow_hash(&target),
672 lflows) {
673 if (ovn_lflow_equal(lflow, &target)) {
674 return lflow;
675 }
676 }
677 return NULL;
678 }
679
680 static void
681 ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow)
682 {
683 if (lflow) {
684 hmap_remove(lflows, &lflow->hmap_node);
685 free(lflow->match);
686 free(lflow->actions);
687 free(lflow);
688 }
689 }
690
691 /* Appends port security constraints on L2 address field 'eth_addr_field'
692 * (e.g. "eth.src" or "eth.dst") to 'match'. 'port_security', with
693 * 'n_port_security' elements, is the collection of port_security constraints
694 * from an OVN_NB Logical_Port row. */
695 static void
696 build_port_security(const char *eth_addr_field,
697 char **port_security, size_t n_port_security,
698 struct ds *match)
699 {
700 size_t base_len = match->length;
701 ds_put_format(match, " && %s == {", eth_addr_field);
702
703 size_t n = 0;
704 for (size_t i = 0; i < n_port_security; i++) {
705 struct eth_addr ea;
706
707 if (eth_addr_from_string(port_security[i], &ea)) {
708 ds_put_format(match, ETH_ADDR_FMT, ETH_ADDR_ARGS(ea));
709 ds_put_char(match, ' ');
710 n++;
711 }
712 }
713 ds_chomp(match, ' ');
714 ds_put_cstr(match, "}");
715
716 if (!n) {
717 match->length = base_len;
718 }
719 }
720
721 static bool
722 lport_is_enabled(const struct nbrec_logical_port *lport)
723 {
724 return !lport->enabled || *lport->enabled;
725 }
726
727 static bool
728 has_stateful_acl(struct ovn_datapath *od)
729 {
730 for (size_t i = 0; i < od->nb->n_acls; i++) {
731 struct nbrec_acl *acl = od->nb->acls[i];
732 if (!strcmp(acl->action, "allow-related")) {
733 return true;
734 }
735 }
736
737 return false;
738 }
739
740 static void
741 build_acls(struct ovn_datapath *od, struct hmap *lflows)
742 {
743 bool has_stateful = has_stateful_acl(od);
744
745 /* Ingress and Egress Pre-ACL Table (Priority 0): Packets are
746 * allowed by default. */
747 ovn_lflow_add(lflows, od, P_IN, S_IN_PRE_ACL, 0, "1", "next;");
748 ovn_lflow_add(lflows, od, P_OUT, S_OUT_PRE_ACL, 0, "1", "next;");
749
750 /* Ingress and Egress ACL Table (Priority 0): Packets are allowed by
751 * default. A related rule at priority 1 is added below if there
752 * are any stateful ACLs in this datapath. */
753 ovn_lflow_add(lflows, od, P_IN, S_IN_ACL, 0, "1", "next;");
754 ovn_lflow_add(lflows, od, P_OUT, S_OUT_ACL, 0, "1", "next;");
755
756 /* If there are any stateful ACL rules in this dapapath, we must
757 * send all IP packets through the conntrack action, which handles
758 * defragmentation, in order to match L4 headers. */
759 if (has_stateful) {
760 /* Ingress and Egress Pre-ACL Table (Priority 100).
761 *
762 * Regardless of whether the ACL is "from-lport" or "to-lport",
763 * we need rules in both the ingress and egress table, because
764 * the return traffic needs to be followed. */
765 ovn_lflow_add(lflows, od, P_IN, S_IN_PRE_ACL, 100,
766 "ip", "ct_next;");
767 ovn_lflow_add(lflows, od, P_OUT, S_OUT_PRE_ACL, 100,
768 "ip", "ct_next;");
769
770 /* Ingress and Egress ACL Table (Priority 1).
771 *
772 * By default, traffic is allowed. This is partially handled by
773 * the Priority 0 ACL flows added earlier, but we also need to
774 * commit IP flows. This is because, while the initiater's
775 * direction may not have any stateful rules, the server's may
776 * and then its return traffic would not have an associated
777 * conntrack entry and would return "+invalid". */
778 ovn_lflow_add(lflows, od, P_IN, S_IN_ACL, 1, "ip",
779 "ct_commit; next;");
780 ovn_lflow_add(lflows, od, P_OUT, S_OUT_ACL, 1, "ip",
781 "ct_commit; next;");
782
783 /* Ingress and Egress ACL Table (Priority 65535).
784 *
785 * Always drop traffic that's in an invalid state. This is
786 * enforced at a higher priority than ACLs can be defined. */
787 ovn_lflow_add(lflows, od, P_IN, S_IN_ACL, UINT16_MAX,
788 "ct.inv", "drop;");
789 ovn_lflow_add(lflows, od, P_OUT, S_OUT_ACL, UINT16_MAX,
790 "ct.inv", "drop;");
791
792 /* Ingress and Egress ACL Table (Priority 65535).
793 *
794 * Always allow traffic that is established to a committed
795 * conntrack entry. This is enforced at a higher priority than
796 * ACLs can be defined. */
797 ovn_lflow_add(lflows, od, P_IN, S_IN_ACL, UINT16_MAX,
798 "ct.est && !ct.rel && !ct.new && !ct.inv",
799 "next;");
800 ovn_lflow_add(lflows, od, P_OUT, S_OUT_ACL, UINT16_MAX,
801 "ct.est && !ct.rel && !ct.new && !ct.inv",
802 "next;");
803
804 /* Ingress and Egress ACL Table (Priority 65535).
805 *
806 * Always allow traffic that is related to an existing conntrack
807 * entry. This is enforced at a higher priority than ACLs can
808 * be defined.
809 *
810 * NOTE: This does not support related data sessions (eg,
811 * a dynamically negotiated FTP data channel), but will allow
812 * related traffic such as an ICMP Port Unreachable through
813 * that's generated from a non-listening UDP port. */
814 ovn_lflow_add(lflows, od, P_IN, S_IN_ACL, UINT16_MAX,
815 "!ct.est && ct.rel && !ct.new && !ct.inv",
816 "next;");
817 ovn_lflow_add(lflows, od, P_OUT, S_OUT_ACL, UINT16_MAX,
818 "!ct.est && ct.rel && !ct.new && !ct.inv",
819 "next;");
820 }
821
822 /* Ingress or Egress ACL Table (Various priorities). */
823 for (size_t i = 0; i < od->nb->n_acls; i++) {
824 struct nbrec_acl *acl = od->nb->acls[i];
825 bool ingress = !strcmp(acl->direction, "from-lport") ? true :false;
826 enum ovn_pipeline pipeline = ingress ? P_IN : P_OUT;
827 uint8_t stage = ingress ? S_IN_ACL : S_OUT_ACL;
828
829 if (!strcmp(acl->action, "allow")) {
830 /* If there are any stateful flows, we must even commit "allow"
831 * actions. This is because, while the initiater's
832 * direction may not have any stateful rules, the server's
833 * may and then its return traffic would not have an
834 * associated conntrack entry and would return "+invalid". */
835 const char *actions = has_stateful ? "ct_commit; next;" : "next;";
836 ovn_lflow_add(lflows, od, pipeline, stage, acl->priority,
837 acl->match, actions);
838 } else if (!strcmp(acl->action, "allow-related")) {
839 struct ds match = DS_EMPTY_INITIALIZER;
840
841 /* Commit the connection tracking entry, which allows all
842 * other traffic related to this entry to flow due to the
843 * 65535 priority flow defined earlier. */
844 ds_put_format(&match, "ct.new && (%s)", acl->match);
845 ovn_lflow_add(lflows, od, pipeline, stage, acl->priority,
846 ds_cstr(&match), "ct_commit; next;");
847
848 ds_destroy(&match);
849 } else if (!strcmp(acl->action, "drop")) {
850 ovn_lflow_add(lflows, od, pipeline, stage, acl->priority,
851 acl->match, "drop;");
852 } else if (!strcmp(acl->action, "reject")) {
853 /* xxx Need to support "reject". */
854 VLOG_INFO("reject is not a supported action");
855 ovn_lflow_add(lflows, od, pipeline, stage, acl->priority,
856 acl->match, "drop;");
857 }
858 }
859 }
860
861 /* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,
862 * constructing their contents based on the OVN_NB database. */
863 static void
864 build_lflows(struct northd_context *ctx, struct hmap *datapaths,
865 struct hmap *ports)
866 {
867 /* This flow table structure is documented in ovn-northd(8), so please
868 * update ovn-northd.8.xml if you change anything. */
869
870 struct hmap lflows = HMAP_INITIALIZER(&lflows);
871 struct hmap mcgroups = HMAP_INITIALIZER(&mcgroups);
872
873 /* Ingress table 0: Admission control framework (priorities 0 and 100). */
874 struct ovn_datapath *od;
875 HMAP_FOR_EACH (od, key_node, datapaths) {
876 /* Logical VLANs not supported. */
877 ovn_lflow_add(&lflows, od, P_IN, S_IN_PORT_SEC, 100, "vlan.present",
878 "drop;");
879
880 /* Broadcast/multicast source address is invalid. */
881 ovn_lflow_add(&lflows, od, P_IN, S_IN_PORT_SEC, 100, "eth.src[40]",
882 "drop;");
883
884 /* Port security flows have priority 50 (see below) and will continue
885 * to the next table if packet source is acceptable. */
886 }
887
888 /* Ingress table 0: Ingress port security (priority 50). */
889 struct ovn_port *op;
890 HMAP_FOR_EACH (op, key_node, ports) {
891 if (!lport_is_enabled(op->nb)) {
892 /* Drop packets from disabled logical ports (since logical flow
893 * tables are default-drop). */
894 continue;
895 }
896
897 struct ds match = DS_EMPTY_INITIALIZER;
898 ds_put_cstr(&match, "inport == ");
899 json_string_escape(op->key, &match);
900 build_port_security("eth.src",
901 op->nb->port_security, op->nb->n_port_security,
902 &match);
903 ovn_lflow_add(&lflows, op->od, P_IN, S_IN_PORT_SEC, 50,
904 ds_cstr(&match), "next;");
905 ds_destroy(&match);
906 }
907
908 /* Ingress table 2: Destination lookup, broadcast and multicast handling
909 * (priority 100). */
910 HMAP_FOR_EACH (op, key_node, ports) {
911 if (lport_is_enabled(op->nb)) {
912 ovn_multicast_add(&mcgroups, &mc_flood, op);
913 }
914 }
915 HMAP_FOR_EACH (od, key_node, datapaths) {
916 ovn_lflow_add(&lflows, od, P_IN, S_IN_L2_LKUP, 100, "eth.dst[40]",
917 "outport = \""MC_FLOOD"\"; output;");
918 }
919
920 /* Ingress table 3: Destination lookup, unicast handling (priority 50), */
921 HMAP_FOR_EACH (op, key_node, ports) {
922 for (size_t i = 0; i < op->nb->n_macs; i++) {
923 struct eth_addr mac;
924
925 if (eth_addr_from_string(op->nb->macs[i], &mac)) {
926 struct ds match, actions;
927
928 ds_init(&match);
929 ds_put_format(&match, "eth.dst == %s", op->nb->macs[i]);
930
931 ds_init(&actions);
932 ds_put_cstr(&actions, "outport = ");
933 json_string_escape(op->nb->name, &actions);
934 ds_put_cstr(&actions, "; output;");
935 ovn_lflow_add(&lflows, op->od, P_IN, S_IN_L2_LKUP, 50,
936 ds_cstr(&match), ds_cstr(&actions));
937 ds_destroy(&actions);
938 ds_destroy(&match);
939 } else if (!strcmp(op->nb->macs[i], "unknown")) {
940 if (lport_is_enabled(op->nb)) {
941 ovn_multicast_add(&mcgroups, &mc_unknown, op);
942 op->od->has_unknown = true;
943 }
944 } else {
945 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
946
947 VLOG_INFO_RL(&rl, "%s: invalid syntax '%s' in macs column",
948 op->nb->name, op->nb->macs[i]);
949 }
950 }
951 }
952
953 /* Ingress table 3: Destination lookup for unknown MACs (priority 0). */
954 HMAP_FOR_EACH (od, key_node, datapaths) {
955 if (od->has_unknown) {
956 ovn_lflow_add(&lflows, od, P_IN, S_IN_L2_LKUP, 0, "1",
957 "outport = \""MC_UNKNOWN"\"; output;");
958 }
959 }
960
961 /* Egress table 2: Egress port security multicast/broadcast (priority
962 * 100). */
963 HMAP_FOR_EACH (od, key_node, datapaths) {
964 ovn_lflow_add(&lflows, od, P_OUT, S_OUT_PORT_SEC, 100, "eth.dst[40]",
965 "output;");
966 }
967
968 /* Egress table 2: Egress port security (priorities 50 and 150).
969 *
970 * Priority 50 rules implement port security for enabled logical port.
971 *
972 * Priority 150 rules drop packets to disabled logical ports, so that they
973 * don't even receive multicast or broadcast packets. */
974 HMAP_FOR_EACH (op, key_node, ports) {
975 struct ds match;
976
977 ds_init(&match);
978 ds_put_cstr(&match, "outport == ");
979 json_string_escape(op->key, &match);
980 if (lport_is_enabled(op->nb)) {
981 build_port_security("eth.dst",
982 op->nb->port_security, op->nb->n_port_security,
983 &match);
984 ovn_lflow_add(&lflows, op->od, P_OUT, S_OUT_PORT_SEC, 50,
985 ds_cstr(&match), "output;");
986 } else {
987 ovn_lflow_add(&lflows, op->od, P_OUT, S_OUT_PORT_SEC, 150,
988 ds_cstr(&match), "drop;");
989 }
990
991 ds_destroy(&match);
992 }
993
994 /* Build pre-ACL and ACL tables for both ingress and egress.
995 * Ingress tables 1 and 2. Egress tables 0 and 1. */
996 HMAP_FOR_EACH (od, key_node, datapaths) {
997 build_acls(od, &lflows);
998 }
999
1000 /* Push changes to the Logical_Flow table to database. */
1001 const struct sbrec_logical_flow *sbflow, *next_sbflow;
1002 SBREC_LOGICAL_FLOW_FOR_EACH_SAFE (sbflow, next_sbflow, ctx->ovnsb_idl) {
1003 struct ovn_datapath *od
1004 = ovn_datapath_from_sbrec(datapaths, sbflow->logical_datapath);
1005 if (!od) {
1006 sbrec_logical_flow_delete(sbflow);
1007 continue;
1008 }
1009
1010 struct ovn_lflow *lflow = ovn_lflow_find(
1011 &lflows, od, (!strcmp(sbflow->pipeline, "ingress") ? P_IN : P_OUT),
1012 sbflow->table_id, sbflow->priority,
1013 sbflow->match, sbflow->actions);
1014 if (lflow) {
1015 ovn_lflow_destroy(&lflows, lflow);
1016 } else {
1017 sbrec_logical_flow_delete(sbflow);
1018 }
1019 }
1020 struct ovn_lflow *lflow, *next_lflow;
1021 HMAP_FOR_EACH_SAFE (lflow, next_lflow, hmap_node, &lflows) {
1022 sbflow = sbrec_logical_flow_insert(ctx->ovnsb_txn);
1023 sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
1024 sbrec_logical_flow_set_pipeline(
1025 sbflow, lflow->pipeline == P_IN ? "ingress" : "egress");
1026 sbrec_logical_flow_set_table_id(sbflow, lflow->table_id);
1027 sbrec_logical_flow_set_priority(sbflow, lflow->priority);
1028 sbrec_logical_flow_set_match(sbflow, lflow->match);
1029 sbrec_logical_flow_set_actions(sbflow, lflow->actions);
1030
1031 const struct smap ids = SMAP_CONST1(
1032 &ids, "stage-name",
1033 (lflow->pipeline == P_IN
1034 ? ingress_stage_to_str(lflow->table_id)
1035 : egress_stage_to_str(lflow->table_id)));
1036 sbrec_logical_flow_set_external_ids(sbflow, &ids);
1037
1038 ovn_lflow_destroy(&lflows, lflow);
1039 }
1040 hmap_destroy(&lflows);
1041
1042 /* Push changes to the Multicast_Group table to database. */
1043 const struct sbrec_multicast_group *sbmc, *next_sbmc;
1044 SBREC_MULTICAST_GROUP_FOR_EACH_SAFE (sbmc, next_sbmc, ctx->ovnsb_idl) {
1045 struct ovn_datapath *od = ovn_datapath_from_sbrec(datapaths,
1046 sbmc->datapath);
1047 if (!od) {
1048 sbrec_multicast_group_delete(sbmc);
1049 continue;
1050 }
1051
1052 struct multicast_group group = { .name = sbmc->name,
1053 .key = sbmc->tunnel_key };
1054 struct ovn_multicast *mc = ovn_multicast_find(&mcgroups, od, &group);
1055 if (mc) {
1056 ovn_multicast_update_sbrec(mc, sbmc);
1057 ovn_multicast_destroy(&mcgroups, mc);
1058 } else {
1059 sbrec_multicast_group_delete(sbmc);
1060 }
1061 }
1062 struct ovn_multicast *mc, *next_mc;
1063 HMAP_FOR_EACH_SAFE (mc, next_mc, hmap_node, &mcgroups) {
1064 sbmc = sbrec_multicast_group_insert(ctx->ovnsb_txn);
1065 sbrec_multicast_group_set_datapath(sbmc, mc->datapath->sb);
1066 sbrec_multicast_group_set_name(sbmc, mc->group->name);
1067 sbrec_multicast_group_set_tunnel_key(sbmc, mc->group->key);
1068 ovn_multicast_update_sbrec(mc, sbmc);
1069 ovn_multicast_destroy(&mcgroups, mc);
1070 }
1071 hmap_destroy(&mcgroups);
1072 }
1073 \f
1074 static void
1075 ovnnb_db_changed(struct northd_context *ctx)
1076 {
1077 VLOG_DBG("ovn-nb db contents have changed.");
1078
1079 struct hmap datapaths, ports;
1080 build_datapaths(ctx, &datapaths);
1081 build_ports(ctx, &datapaths, &ports);
1082 build_lflows(ctx, &datapaths, &ports);
1083
1084 struct ovn_datapath *dp, *next_dp;
1085 HMAP_FOR_EACH_SAFE (dp, next_dp, key_node, &datapaths) {
1086 ovn_datapath_destroy(&datapaths, dp);
1087 }
1088 hmap_destroy(&datapaths);
1089
1090 struct ovn_port *port, *next_port;
1091 HMAP_FOR_EACH_SAFE (port, next_port, key_node, &ports) {
1092 ovn_port_destroy(&ports, port);
1093 }
1094 hmap_destroy(&ports);
1095 }
1096
1097 /*
1098 * The only change we get notified about is if the 'chassis' column of the
1099 * 'Port_Binding' table changes. When this column is not empty, it means we
1100 * need to set the corresponding logical port as 'up' in the northbound DB.
1101 */
1102 static void
1103 ovnsb_db_changed(struct northd_context *ctx)
1104 {
1105 struct hmap lports_hmap;
1106 const struct sbrec_port_binding *sb;
1107 const struct nbrec_logical_port *nb;
1108
1109 struct lport_hash_node {
1110 struct hmap_node node;
1111 const struct nbrec_logical_port *nb;
1112 } *hash_node, *hash_node_next;
1113
1114 VLOG_DBG("Recalculating port up states for ovn-nb db.");
1115
1116 hmap_init(&lports_hmap);
1117
1118 NBREC_LOGICAL_PORT_FOR_EACH(nb, ctx->ovnnb_idl) {
1119 hash_node = xzalloc(sizeof *hash_node);
1120 hash_node->nb = nb;
1121 hmap_insert(&lports_hmap, &hash_node->node, hash_string(nb->name, 0));
1122 }
1123
1124 SBREC_PORT_BINDING_FOR_EACH(sb, ctx->ovnsb_idl) {
1125 nb = NULL;
1126 HMAP_FOR_EACH_WITH_HASH(hash_node, node,
1127 hash_string(sb->logical_port, 0),
1128 &lports_hmap) {
1129 if (!strcmp(sb->logical_port, hash_node->nb->name)) {
1130 nb = hash_node->nb;
1131 break;
1132 }
1133 }
1134
1135 if (!nb) {
1136 /* The logical port doesn't exist for this port binding. This can
1137 * happen under normal circumstances when ovn-northd hasn't gotten
1138 * around to pruning the Port_Binding yet. */
1139 continue;
1140 }
1141
1142 if (sb->chassis && (!nb->up || !*nb->up)) {
1143 bool up = true;
1144 nbrec_logical_port_set_up(nb, &up, 1);
1145 } else if (!sb->chassis && (!nb->up || *nb->up)) {
1146 bool up = false;
1147 nbrec_logical_port_set_up(nb, &up, 1);
1148 }
1149 }
1150
1151 HMAP_FOR_EACH_SAFE(hash_node, hash_node_next, node, &lports_hmap) {
1152 hmap_remove(&lports_hmap, &hash_node->node);
1153 free(hash_node);
1154 }
1155 hmap_destroy(&lports_hmap);
1156 }
1157 \f
1158
1159 static char *default_db_;
1160
1161 static const char *
1162 default_db(void)
1163 {
1164 if (!default_db_) {
1165 default_db_ = xasprintf("unix:%s/db.sock", ovs_rundir());
1166 }
1167 return default_db_;
1168 }
1169
1170 static void
1171 parse_options(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
1172 {
1173 enum {
1174 DAEMON_OPTION_ENUMS,
1175 VLOG_OPTION_ENUMS,
1176 };
1177 static const struct option long_options[] = {
1178 {"ovnsb-db", required_argument, NULL, 'd'},
1179 {"ovnnb-db", required_argument, NULL, 'D'},
1180 {"help", no_argument, NULL, 'h'},
1181 {"options", no_argument, NULL, 'o'},
1182 {"version", no_argument, NULL, 'V'},
1183 DAEMON_LONG_OPTIONS,
1184 VLOG_LONG_OPTIONS,
1185 STREAM_SSL_LONG_OPTIONS,
1186 {NULL, 0, NULL, 0},
1187 };
1188 char *short_options = ovs_cmdl_long_options_to_short_options(long_options);
1189
1190 for (;;) {
1191 int c;
1192
1193 c = getopt_long(argc, argv, short_options, long_options, NULL);
1194 if (c == -1) {
1195 break;
1196 }
1197
1198 switch (c) {
1199 DAEMON_OPTION_HANDLERS;
1200 VLOG_OPTION_HANDLERS;
1201 STREAM_SSL_OPTION_HANDLERS;
1202
1203 case 'd':
1204 ovnsb_db = optarg;
1205 break;
1206
1207 case 'D':
1208 ovnnb_db = optarg;
1209 break;
1210
1211 case 'h':
1212 usage();
1213 exit(EXIT_SUCCESS);
1214
1215 case 'o':
1216 ovs_cmdl_print_options(long_options);
1217 exit(EXIT_SUCCESS);
1218
1219 case 'V':
1220 ovs_print_version(0, 0);
1221 exit(EXIT_SUCCESS);
1222
1223 default:
1224 break;
1225 }
1226 }
1227
1228 if (!ovnsb_db) {
1229 ovnsb_db = default_db();
1230 }
1231
1232 if (!ovnnb_db) {
1233 ovnnb_db = default_db();
1234 }
1235
1236 free(short_options);
1237 }
1238
1239 static void
1240 add_column_noalert(struct ovsdb_idl *idl,
1241 const struct ovsdb_idl_column *column)
1242 {
1243 ovsdb_idl_add_column(idl, column);
1244 ovsdb_idl_omit_alert(idl, column);
1245 }
1246
1247 int
1248 main(int argc, char *argv[])
1249 {
1250 extern struct vlog_module VLM_reconnect;
1251 struct ovsdb_idl *ovnnb_idl, *ovnsb_idl;
1252 unsigned int ovnnb_seqno, ovn_seqno;
1253 int res = EXIT_SUCCESS;
1254 struct northd_context ctx = {
1255 .ovnsb_txn = NULL,
1256 };
1257 bool ovnnb_changes_pending = false;
1258 bool ovn_changes_pending = false;
1259 struct unixctl_server *unixctl;
1260 int retval;
1261 bool exiting;
1262
1263 fatal_ignore_sigpipe();
1264 set_program_name(argv[0]);
1265 service_start(&argc, &argv);
1266 vlog_set_levels(NULL, VLF_CONSOLE, VLL_WARN);
1267 vlog_set_levels(&VLM_reconnect, VLF_ANY_DESTINATION, VLL_WARN);
1268 parse_options(argc, argv);
1269
1270 daemonize_start(false);
1271
1272 retval = unixctl_server_create(NULL, &unixctl);
1273 if (retval) {
1274 exit(EXIT_FAILURE);
1275 }
1276 unixctl_command_register("exit", "", 0, 0, ovn_northd_exit, &exiting);
1277
1278 daemonize_complete();
1279
1280 nbrec_init();
1281 sbrec_init();
1282
1283 /* We want to detect all changes to the ovn-nb db. */
1284 ctx.ovnnb_idl = ovnnb_idl = ovsdb_idl_create(ovnnb_db,
1285 &nbrec_idl_class, true, true);
1286
1287 ctx.ovnsb_idl = ovnsb_idl = ovsdb_idl_create(ovnsb_db,
1288 &sbrec_idl_class, false, true);
1289
1290 ovsdb_idl_add_table(ovnsb_idl, &sbrec_table_logical_flow);
1291 add_column_noalert(ovnsb_idl, &sbrec_logical_flow_col_logical_datapath);
1292 add_column_noalert(ovnsb_idl, &sbrec_logical_flow_col_pipeline);
1293 add_column_noalert(ovnsb_idl, &sbrec_logical_flow_col_table_id);
1294 add_column_noalert(ovnsb_idl, &sbrec_logical_flow_col_priority);
1295 add_column_noalert(ovnsb_idl, &sbrec_logical_flow_col_match);
1296 add_column_noalert(ovnsb_idl, &sbrec_logical_flow_col_actions);
1297
1298 ovsdb_idl_add_table(ovnsb_idl, &sbrec_table_multicast_group);
1299 add_column_noalert(ovnsb_idl, &sbrec_multicast_group_col_datapath);
1300 add_column_noalert(ovnsb_idl, &sbrec_multicast_group_col_tunnel_key);
1301 add_column_noalert(ovnsb_idl, &sbrec_multicast_group_col_name);
1302 add_column_noalert(ovnsb_idl, &sbrec_multicast_group_col_ports);
1303
1304 ovsdb_idl_add_table(ovnsb_idl, &sbrec_table_datapath_binding);
1305 add_column_noalert(ovnsb_idl, &sbrec_datapath_binding_col_tunnel_key);
1306 add_column_noalert(ovnsb_idl, &sbrec_datapath_binding_col_external_ids);
1307
1308 ovsdb_idl_add_table(ovnsb_idl, &sbrec_table_port_binding);
1309 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_datapath);
1310 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_logical_port);
1311 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_tunnel_key);
1312 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_parent_port);
1313 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_tag);
1314 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_type);
1315 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_options);
1316 add_column_noalert(ovnsb_idl, &sbrec_port_binding_col_mac);
1317 ovsdb_idl_add_column(ovnsb_idl, &sbrec_port_binding_col_chassis);
1318
1319 /*
1320 * The loop here just runs the IDL in a loop waiting for the seqno to
1321 * change, which indicates that the contents of the db have changed.
1322 *
1323 * If the contents of the ovn-nb db change, the mappings to the ovn-sb
1324 * db must be recalculated.
1325 *
1326 * If the contents of the ovn-sb db change, it means the 'up' state of
1327 * a port may have changed, as that's the only type of change ovn-northd is
1328 * watching for.
1329 */
1330
1331 ovnnb_seqno = ovsdb_idl_get_seqno(ovnnb_idl);
1332 ovn_seqno = ovsdb_idl_get_seqno(ovnsb_idl);
1333 exiting = false;
1334 while (!exiting) {
1335 ovsdb_idl_run(ovnnb_idl);
1336 ovsdb_idl_run(ovnsb_idl);
1337 unixctl_server_run(unixctl);
1338
1339 if (!ovsdb_idl_is_alive(ovnnb_idl)) {
1340 int retval = ovsdb_idl_get_last_error(ovnnb_idl);
1341 VLOG_ERR("%s: database connection failed (%s)",
1342 ovnnb_db, ovs_retval_to_string(retval));
1343 res = EXIT_FAILURE;
1344 break;
1345 }
1346
1347 if (!ovsdb_idl_is_alive(ovnsb_idl)) {
1348 int retval = ovsdb_idl_get_last_error(ovnsb_idl);
1349 VLOG_ERR("%s: database connection failed (%s)",
1350 ovnsb_db, ovs_retval_to_string(retval));
1351 res = EXIT_FAILURE;
1352 break;
1353 }
1354
1355 if (ovnnb_seqno != ovsdb_idl_get_seqno(ovnnb_idl)) {
1356 ovnnb_seqno = ovsdb_idl_get_seqno(ovnnb_idl);
1357 ovnnb_changes_pending = true;
1358 }
1359
1360 if (ovn_seqno != ovsdb_idl_get_seqno(ovnsb_idl)) {
1361 ovn_seqno = ovsdb_idl_get_seqno(ovnsb_idl);
1362 ovn_changes_pending = true;
1363 }
1364
1365 /*
1366 * If there are any pending changes, we delay recalculating the
1367 * necessary updates until after an existing transaction finishes.
1368 * This avoids the possibility of rapid updates causing ovn-northd to
1369 * never be able to successfully make the corresponding updates to the
1370 * other db. Instead, pending changes are batched up until the next
1371 * time we get a chance to calculate the new state and apply it.
1372 */
1373
1374 if (ovnnb_changes_pending && !ctx.ovnsb_txn) {
1375 /*
1376 * The OVN-nb db contents have changed, so create a transaction for
1377 * updating the OVN-sb DB.
1378 */
1379 ctx.ovnsb_txn = ovsdb_idl_txn_create(ctx.ovnsb_idl);
1380 ovsdb_idl_txn_add_comment(ctx.ovnsb_txn,
1381 "ovn-northd: northbound db changed");
1382 ovnnb_db_changed(&ctx);
1383 ovnnb_changes_pending = false;
1384 }
1385
1386 if (ovn_changes_pending && !ctx.ovnnb_txn) {
1387 /*
1388 * The OVN-sb db contents have changed, so create a transaction for
1389 * updating the northbound DB.
1390 */
1391 ctx.ovnnb_txn = ovsdb_idl_txn_create(ctx.ovnnb_idl);
1392 ovsdb_idl_txn_add_comment(ctx.ovnnb_txn,
1393 "ovn-northd: southbound db changed");
1394 ovnsb_db_changed(&ctx);
1395 ovn_changes_pending = false;
1396 }
1397
1398 if (ctx.ovnnb_txn) {
1399 enum ovsdb_idl_txn_status txn_status;
1400 txn_status = ovsdb_idl_txn_commit(ctx.ovnnb_txn);
1401 switch (txn_status) {
1402 case TXN_UNCOMMITTED:
1403 case TXN_INCOMPLETE:
1404 /* Come back around and try to commit this transaction again */
1405 break;
1406 case TXN_ABORTED:
1407 case TXN_TRY_AGAIN:
1408 case TXN_NOT_LOCKED:
1409 case TXN_ERROR:
1410 /* Something went wrong, so try creating a new transaction. */
1411 ovn_changes_pending = true;
1412 case TXN_UNCHANGED:
1413 case TXN_SUCCESS:
1414 ovsdb_idl_txn_destroy(ctx.ovnnb_txn);
1415 ctx.ovnnb_txn = NULL;
1416 }
1417 }
1418
1419 if (ctx.ovnsb_txn) {
1420 enum ovsdb_idl_txn_status txn_status;
1421 txn_status = ovsdb_idl_txn_commit(ctx.ovnsb_txn);
1422 switch (txn_status) {
1423 case TXN_UNCOMMITTED:
1424 case TXN_INCOMPLETE:
1425 /* Come back around and try to commit this transaction again */
1426 break;
1427 case TXN_ABORTED:
1428 case TXN_TRY_AGAIN:
1429 case TXN_NOT_LOCKED:
1430 case TXN_ERROR:
1431 /* Something went wrong, so try creating a new transaction. */
1432 ovnnb_changes_pending = true;
1433 case TXN_UNCHANGED:
1434 case TXN_SUCCESS:
1435 ovsdb_idl_txn_destroy(ctx.ovnsb_txn);
1436 ctx.ovnsb_txn = NULL;
1437 }
1438 }
1439
1440 if (ovnnb_seqno == ovsdb_idl_get_seqno(ovnnb_idl) &&
1441 ovn_seqno == ovsdb_idl_get_seqno(ovnsb_idl)) {
1442 ovsdb_idl_wait(ovnnb_idl);
1443 ovsdb_idl_wait(ovnsb_idl);
1444 if (ctx.ovnnb_txn) {
1445 ovsdb_idl_txn_wait(ctx.ovnnb_txn);
1446 }
1447 if (ctx.ovnsb_txn) {
1448 ovsdb_idl_txn_wait(ctx.ovnsb_txn);
1449 }
1450 unixctl_server_wait(unixctl);
1451 if (exiting) {
1452 poll_immediate_wake();
1453 }
1454 poll_block();
1455 }
1456 if (should_service_stop()) {
1457 exiting = true;
1458 }
1459 }
1460
1461 unixctl_server_destroy(unixctl);
1462 ovsdb_idl_destroy(ovnsb_idl);
1463 ovsdb_idl_destroy(ovnnb_idl);
1464 service_stop();
1465
1466 free(default_db_);
1467
1468 exit(res);
1469 }
1470
1471 static void
1472 ovn_northd_exit(struct unixctl_conn *conn, int argc OVS_UNUSED,
1473 const char *argv[] OVS_UNUSED, void *exiting_)
1474 {
1475 bool *exiting = exiting_;
1476 *exiting = true;
1477
1478 unixctl_command_reply(conn, NULL);
1479 }