]> git.proxmox.com Git - mirror_ovs.git/blob - ovn/northd/ovn-northd.c
ovn: Add column enabled to table Logical_Router
[mirror_ovs.git] / ovn / northd / ovn-northd.c
1 /*
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at:
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 #include <config.h>
16
17 #include <getopt.h>
18 #include <stdlib.h>
19 #include <stdio.h>
20
21 #include "command-line.h"
22 #include "daemon.h"
23 #include "dirs.h"
24 #include "openvswitch/dynamic-string.h"
25 #include "fatal-signal.h"
26 #include "hash.h"
27 #include "hmap.h"
28 #include "json.h"
29 #include "ovn/lib/lex.h"
30 #include "ovn/lib/ovn-nb-idl.h"
31 #include "ovn/lib/ovn-sb-idl.h"
32 #include "packets.h"
33 #include "poll-loop.h"
34 #include "smap.h"
35 #include "stream.h"
36 #include "stream-ssl.h"
37 #include "unixctl.h"
38 #include "util.h"
39 #include "uuid.h"
40 #include "openvswitch/vlog.h"
41
42 VLOG_DEFINE_THIS_MODULE(ovn_northd);
43
44 static unixctl_cb_func ovn_northd_exit;
45
/* State for one ovn-northd processing pass: connections to the northbound
 * and southbound databases plus the currently open transaction (if any)
 * against each. */
struct northd_context {
    struct ovsdb_idl *ovnnb_idl;     /* OVN_Northbound database. */
    struct ovsdb_idl *ovnsb_idl;     /* OVN_Southbound database. */
    struct ovsdb_idl_txn *ovnnb_txn; /* Open northbound transaction. */
    struct ovsdb_idl_txn *ovnsb_txn; /* Open southbound transaction. */
};
52
53 static const char *ovnnb_db;
54 static const char *ovnsb_db;
55
56 static const char *default_nb_db(void);
57 static const char *default_sb_db(void);
58 \f
59 /* Pipeline stages. */
60
61 /* The two pipelines in an OVN logical flow table. */
62 enum ovn_pipeline {
63 P_IN, /* Ingress pipeline. */
64 P_OUT /* Egress pipeline. */
65 };
66
67 /* The two purposes for which ovn-northd uses OVN logical datapaths. */
68 enum ovn_datapath_type {
69 DP_SWITCH, /* OVN logical switch. */
70 DP_ROUTER /* OVN logical router. */
71 };
72
73 /* Returns an "enum ovn_stage" built from the arguments.
74 *
75 * (It's better to use ovn_stage_build() for type-safety reasons, but inline
76 * functions can't be used in enums or switch cases.) */
77 #define OVN_STAGE_BUILD(DP_TYPE, PIPELINE, TABLE) \
78 (((DP_TYPE) << 9) | ((PIPELINE) << 8) | (TABLE))
79
80 /* A stage within an OVN logical switch or router.
81 *
82 * An "enum ovn_stage" indicates whether the stage is part of a logical switch
83 * or router, whether the stage is part of the ingress or egress pipeline, and
84 * the table within that pipeline. The first three components are combined to
85 * form the stage's full name, e.g. S_SWITCH_IN_PORT_SEC_L2,
86 * S_ROUTER_OUT_DELIVERY. */
87 enum ovn_stage {
88 #define PIPELINE_STAGES \
89 /* Logical switch ingress stages. */ \
90 PIPELINE_STAGE(SWITCH, IN, PORT_SEC_L2, 0, "ls_in_port_sec_l2") \
91 PIPELINE_STAGE(SWITCH, IN, PORT_SEC_IP, 1, "ls_in_port_sec_ip") \
92 PIPELINE_STAGE(SWITCH, IN, PORT_SEC_ND, 2, "ls_in_port_sec_nd") \
93 PIPELINE_STAGE(SWITCH, IN, PRE_ACL, 3, "ls_in_pre_acl") \
94 PIPELINE_STAGE(SWITCH, IN, ACL, 4, "ls_in_acl") \
95 PIPELINE_STAGE(SWITCH, IN, ARP_RSP, 5, "ls_in_arp_rsp") \
96 PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 6, "ls_in_l2_lkup") \
97 \
98 /* Logical switch egress stages. */ \
99 PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 0, "ls_out_pre_acl") \
100 PIPELINE_STAGE(SWITCH, OUT, ACL, 1, "ls_out_acl") \
101 PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP, 2, "ls_out_port_sec_ip") \
102 PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 3, "ls_out_port_sec_l2") \
103 \
104 /* Logical router ingress stages. */ \
105 PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \
106 PIPELINE_STAGE(ROUTER, IN, IP_INPUT, 1, "lr_in_ip_input") \
107 PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 2, "lr_in_ip_routing") \
108 PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 3, "lr_in_arp_resolve") \
109 PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 4, "lr_in_arp_request") \
110 \
111 /* Logical router egress stages. */ \
112 PIPELINE_STAGE(ROUTER, OUT, DELIVERY, 0, "lr_out_delivery")
113
114 #define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
115 S_##DP_TYPE##_##PIPELINE##_##STAGE \
116 = OVN_STAGE_BUILD(DP_##DP_TYPE, P_##PIPELINE, TABLE),
117 PIPELINE_STAGES
118 #undef PIPELINE_STAGE
119 };
120
121 /* Due to various hard-coded priorities need to implement ACLs, the
122 * northbound database supports a smaller range of ACL priorities than
123 * are available to logical flows. This value is added to an ACL
124 * priority to determine the ACL's logical flow priority. */
125 #define OVN_ACL_PRI_OFFSET 1000
126
127 /* Returns an "enum ovn_stage" built from the arguments. */
128 static enum ovn_stage
129 ovn_stage_build(enum ovn_datapath_type dp_type, enum ovn_pipeline pipeline,
130 uint8_t table)
131 {
132 return OVN_STAGE_BUILD(dp_type, pipeline, table);
133 }
134
135 /* Returns the pipeline to which 'stage' belongs. */
136 static enum ovn_pipeline
137 ovn_stage_get_pipeline(enum ovn_stage stage)
138 {
139 return (stage >> 8) & 1;
140 }
141
142 /* Returns the table to which 'stage' belongs. */
143 static uint8_t
144 ovn_stage_get_table(enum ovn_stage stage)
145 {
146 return stage & 0xff;
147 }
148
/* Returns a string name for 'stage'.
 *
 * The switch cases are generated by expanding PIPELINE_STAGES, so every
 * stage declared in that table automatically gets a name here; anything
 * else maps to "<unknown>". */
static const char *
ovn_stage_to_str(enum ovn_stage stage)
{
    switch (stage) {
#define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
        case S_##DP_TYPE##_##PIPELINE##_##STAGE: return NAME;
    PIPELINE_STAGES
#undef PIPELINE_STAGE
        default: return "<unknown>";
    }
}
161 \f
/* Prints the ovn-northd command-line help to stdout, then appends the
 * shared daemon, logging, and stream/database option help. */
static void
usage(void)
{
    printf("\
%s: OVN northbound management daemon\n\
usage: %s [OPTIONS]\n\
\n\
Options:\n\
  --ovnnb-db=DATABASE       connect to ovn-nb database at DATABASE\n\
                            (default: %s)\n\
  --ovnsb-db=DATABASE       connect to ovn-sb database at DATABASE\n\
                            (default: %s)\n\
  -h, --help                display this help message\n\
  -o, --options             list available options\n\
  -V, --version             display version information\n\
", program_name, program_name, default_nb_db(), default_sb_db());
    daemon_usage();
    vlog_usage();
    stream_usage("database", true, true, false);
}
182 \f
/* One in-use tunnel ID, kept in an hmap that acts as a set of IDs. */
struct tnlid_node {
    struct hmap_node hmap_node; /* In a set of in-use tunnel IDs. */
    uint32_t tnlid;             /* The tunnel ID itself. */
};
187
188 static void
189 destroy_tnlids(struct hmap *tnlids)
190 {
191 struct tnlid_node *node, *next;
192 HMAP_FOR_EACH_SAFE (node, next, hmap_node, tnlids) {
193 hmap_remove(tnlids, &node->hmap_node);
194 free(node);
195 }
196 hmap_destroy(tnlids);
197 }
198
199 static void
200 add_tnlid(struct hmap *set, uint32_t tnlid)
201 {
202 struct tnlid_node *node = xmalloc(sizeof *node);
203 hmap_insert(set, &node->hmap_node, hash_int(tnlid, 0));
204 node->tnlid = tnlid;
205 }
206
207 static bool
208 tnlid_in_use(const struct hmap *set, uint32_t tnlid)
209 {
210 const struct tnlid_node *node;
211 HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_int(tnlid, 0), set) {
212 if (node->tnlid == tnlid) {
213 return true;
214 }
215 }
216 return false;
217 }
218
219 static uint32_t
220 allocate_tnlid(struct hmap *set, const char *name, uint32_t max,
221 uint32_t *hint)
222 {
223 for (uint32_t tnlid = *hint + 1; tnlid != *hint;
224 tnlid = tnlid + 1 <= max ? tnlid + 1 : 1) {
225 if (!tnlid_in_use(set, tnlid)) {
226 add_tnlid(set, tnlid);
227 *hint = tnlid;
228 return tnlid;
229 }
230 }
231
232 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
233 VLOG_WARN_RL(&rl, "all %s tunnel ids exhausted", name);
234 return 0;
235 }
236 \f
/* An OVN logical datapath (logical switch or logical router), collating the
 * northbound row(s) with the southbound Datapath_Binding row.
 *
 * The 'key' comes from nbs->header_.uuid or nbr->header_.uuid or
 * sb->external_ids:logical-switch. */
struct ovn_datapath {
    struct hmap_node key_node;  /* Index on 'key'. */
    struct uuid key;            /* (nbs/nbr)->header_.uuid. */

    const struct nbrec_logical_switch *nbs;  /* May be NULL. */
    const struct nbrec_logical_router *nbr;  /* May be NULL. */
    const struct sbrec_datapath_binding *sb; /* May be NULL. */

    struct ovs_list list;       /* In list of similar records. */

    /* Logical router data (digested from nbr). */
    const struct ovn_port *gateway_port; /* Longest-prefix port toward
                                          * 'gateway'; NULL if none. */
    ovs_be32 gateway;           /* Parsed nbr->default_gw; 0 if unset/bad. */

    /* Logical switch data. */
    struct ovn_port **router_ports; /* Switch ports peered with router
                                     * ports (see join_logical_ports()). */
    size_t n_router_ports;

    struct hmap port_tnlids;    /* Set of port tunnel keys in use here. */
    uint32_t port_key_hint;     /* Hint for the next port key allocation. */

    /* NOTE(review): presumably set when a port has "unknown" addresses;
     * the code that writes it is outside this chunk -- confirm. */
    bool has_unknown;
};
262
263 static struct ovn_datapath *
264 ovn_datapath_create(struct hmap *datapaths, const struct uuid *key,
265 const struct nbrec_logical_switch *nbs,
266 const struct nbrec_logical_router *nbr,
267 const struct sbrec_datapath_binding *sb)
268 {
269 struct ovn_datapath *od = xzalloc(sizeof *od);
270 od->key = *key;
271 od->sb = sb;
272 od->nbs = nbs;
273 od->nbr = nbr;
274 hmap_init(&od->port_tnlids);
275 od->port_key_hint = 0;
276 hmap_insert(datapaths, &od->key_node, uuid_hash(&od->key));
277 return od;
278 }
279
280 static void
281 ovn_datapath_destroy(struct hmap *datapaths, struct ovn_datapath *od)
282 {
283 if (od) {
284 /* Don't remove od->list. It is used within build_datapaths() as a
285 * private list and once we've exited that function it is not safe to
286 * use it. */
287 hmap_remove(datapaths, &od->key_node);
288 destroy_tnlids(&od->port_tnlids);
289 free(od->router_ports);
290 free(od);
291 }
292 }
293
294 static struct ovn_datapath *
295 ovn_datapath_find(struct hmap *datapaths, const struct uuid *uuid)
296 {
297 struct ovn_datapath *od;
298
299 HMAP_FOR_EACH_WITH_HASH (od, key_node, uuid_hash(uuid), datapaths) {
300 if (uuid_equals(uuid, &od->key)) {
301 return od;
302 }
303 }
304 return NULL;
305 }
306
307 static struct ovn_datapath *
308 ovn_datapath_from_sbrec(struct hmap *datapaths,
309 const struct sbrec_datapath_binding *sb)
310 {
311 struct uuid key;
312
313 if (!smap_get_uuid(&sb->external_ids, "logical-switch", &key) &&
314 !smap_get_uuid(&sb->external_ids, "logical-router", &key)) {
315 return NULL;
316 }
317 return ovn_datapath_find(datapaths, &key);
318 }
319
320 static bool
321 lrouter_is_enabled(const struct nbrec_logical_router *lrouter)
322 {
323 return !lrouter->enabled || *lrouter->enabled;
324 }
325
/* Collates the northbound logical switches and routers against the
 * southbound Datapath_Binding table.
 *
 * Initializes 'datapaths' with one ovn_datapath per datapath and partitions
 * them into three lists threaded through ovn_datapath's 'list' member:
 *
 *   - 'sb_only': southbound rows with no northbound counterpart.
 *   - 'nb_only': northbound switches/routers with no southbound row yet.
 *   - 'both':    datapaths present in both databases.
 *
 * Side effects: deletes malformed or duplicate southbound rows, skips
 * disabled routers entirely, and digests each router's default gateway
 * into od->gateway. */
static void
join_datapaths(struct northd_context *ctx, struct hmap *datapaths,
               struct ovs_list *sb_only, struct ovs_list *nb_only,
               struct ovs_list *both)
{
    hmap_init(datapaths);
    ovs_list_init(sb_only);
    ovs_list_init(nb_only);
    ovs_list_init(both);

    /* Every southbound row starts out provisionally in 'sb_only'; the
     * northbound loops below move matches into 'both'. */
    const struct sbrec_datapath_binding *sb, *sb_next;
    SBREC_DATAPATH_BINDING_FOR_EACH_SAFE (sb, sb_next, ctx->ovnsb_idl) {
        struct uuid key;
        /* A valid binding names its northbound origin in external_ids. */
        if (!smap_get_uuid(&sb->external_ids, "logical-switch", &key) &&
            !smap_get_uuid(&sb->external_ids, "logical-router", &key)) {
            ovsdb_idl_txn_add_comment(
                ctx->ovnsb_txn,
                "deleting Datapath_Binding "UUID_FMT" that lacks "
                "external-ids:logical-switch and "
                "external-ids:logical-router",
                UUID_ARGS(&sb->header_.uuid));
            sbrec_datapath_binding_delete(sb);
            continue;
        }

        if (ovn_datapath_find(datapaths, &key)) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
            VLOG_INFO_RL(
                &rl, "deleting Datapath_Binding "UUID_FMT" with "
                "duplicate external-ids:logical-switch/router "UUID_FMT,
                UUID_ARGS(&sb->header_.uuid), UUID_ARGS(&key));
            sbrec_datapath_binding_delete(sb);
            continue;
        }

        struct ovn_datapath *od = ovn_datapath_create(datapaths, &key,
                                                      NULL, NULL, sb);
        ovs_list_push_back(sb_only, &od->list);
    }

    const struct nbrec_logical_switch *nbs;
    NBREC_LOGICAL_SWITCH_FOR_EACH (nbs, ctx->ovnnb_idl) {
        struct ovn_datapath *od = ovn_datapath_find(datapaths,
                                                    &nbs->header_.uuid);
        if (od) {
            /* Present in both databases: reclassify from sb_only. */
            od->nbs = nbs;
            ovs_list_remove(&od->list);
            ovs_list_push_back(both, &od->list);
        } else {
            od = ovn_datapath_create(datapaths, &nbs->header_.uuid,
                                     nbs, NULL, NULL);
            ovs_list_push_back(nb_only, &od->list);
        }
    }

    const struct nbrec_logical_router *nbr;
    NBREC_LOGICAL_ROUTER_FOR_EACH (nbr, ctx->ovnnb_idl) {
        /* A disabled router gets no datapath, hence no flows at all.  Its
         * stale southbound row, if any, stays in 'sb_only' and is deleted
         * by build_datapaths(). */
        if (!lrouter_is_enabled(nbr)) {
            continue;
        }

        struct ovn_datapath *od = ovn_datapath_find(datapaths,
                                                    &nbr->header_.uuid);
        if (od) {
            if (!od->nbs) {
                od->nbr = nbr;
                ovs_list_remove(&od->list);
                ovs_list_push_back(both, &od->list);
            } else {
                /* Can't happen! */
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
                VLOG_WARN_RL(&rl,
                             "duplicate UUID "UUID_FMT" in OVN_Northbound",
                             UUID_ARGS(&nbr->header_.uuid));
                continue;
            }
        } else {
            od = ovn_datapath_create(datapaths, &nbr->header_.uuid,
                                     NULL, nbr, NULL);
            ovs_list_push_back(nb_only, &od->list);
        }

        /* Digest the default gateway; 0 means "no valid gateway". */
        od->gateway = 0;
        if (nbr->default_gw) {
            ovs_be32 ip;
            if (!ip_parse(nbr->default_gw, &ip) || !ip) {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
                VLOG_WARN_RL(&rl, "bad 'gateway' %s", nbr->default_gw);
            } else {
                od->gateway = ip;
            }
        }

        /* Set the gateway port to NULL. If there is a gateway, it will get
         * filled in as we go through the ports later. */
        od->gateway_port = NULL;
    }
}
424
/* Allocates a datapath tunnel key from the 24-bit space 1...2**24-1.
 *
 * The static 'hint' persists across calls so successive allocations
 * continue from the most recently handed-out key. */
static uint32_t
ovn_datapath_allocate_key(struct hmap *dp_tnlids)
{
    static uint32_t hint;
    return allocate_tnlid(dp_tnlids, "datapath", (1u << 24) - 1, &hint);
}
431
432 /* Updates the southbound Datapath_Binding table so that it contains the
433 * logical switches and routers specified by the northbound database.
434 *
435 * Initializes 'datapaths' to contain a "struct ovn_datapath" for every logical
436 * switch and router. */
437 static void
438 build_datapaths(struct northd_context *ctx, struct hmap *datapaths)
439 {
440 struct ovs_list sb_only, nb_only, both;
441
442 join_datapaths(ctx, datapaths, &sb_only, &nb_only, &both);
443
444 if (!ovs_list_is_empty(&nb_only)) {
445 /* First index the in-use datapath tunnel IDs. */
446 struct hmap dp_tnlids = HMAP_INITIALIZER(&dp_tnlids);
447 struct ovn_datapath *od;
448 LIST_FOR_EACH (od, list, &both) {
449 add_tnlid(&dp_tnlids, od->sb->tunnel_key);
450 }
451
452 /* Add southbound record for each unmatched northbound record. */
453 LIST_FOR_EACH (od, list, &nb_only) {
454 uint16_t tunnel_key = ovn_datapath_allocate_key(&dp_tnlids);
455 if (!tunnel_key) {
456 break;
457 }
458
459 od->sb = sbrec_datapath_binding_insert(ctx->ovnsb_txn);
460
461 char uuid_s[UUID_LEN + 1];
462 sprintf(uuid_s, UUID_FMT, UUID_ARGS(&od->key));
463 const char *key = od->nbs ? "logical-switch" : "logical-router";
464 const struct smap id = SMAP_CONST1(&id, key, uuid_s);
465 sbrec_datapath_binding_set_external_ids(od->sb, &id);
466
467 sbrec_datapath_binding_set_tunnel_key(od->sb, tunnel_key);
468 }
469 destroy_tnlids(&dp_tnlids);
470 }
471
472 /* Delete southbound records without northbound matches. */
473 struct ovn_datapath *od, *next;
474 LIST_FOR_EACH_SAFE (od, next, list, &sb_only) {
475 ovs_list_remove(&od->list);
476 sbrec_datapath_binding_delete(od->sb);
477 ovn_datapath_destroy(datapaths, od);
478 }
479 }
480 \f
/* A logical switch port or logical router port, collating the northbound
 * and southbound views the same way ovn_datapath does for datapaths. */
struct ovn_port {
    struct hmap_node key_node;  /* Index on 'key'. */
    char *key;                  /* nbs->name, nbr->name, sb->logical_port. */
    char *json_key;             /* 'key', quoted for use in JSON. */

    const struct nbrec_logical_port *nbs;        /* May be NULL. */
    const struct nbrec_logical_router_port *nbr; /* May be NULL. */
    const struct sbrec_port_binding *sb;         /* May be NULL. */

    /* Logical router port data. */
    ovs_be32 ip, mask;          /* 192.168.10.123/24. */
    ovs_be32 network;           /* 192.168.10.0. */
    ovs_be32 bcast;             /* 192.168.10.255. */
    struct eth_addr mac;        /* Parsed nbr->mac. */
    struct ovn_port *peer;      /* Connected port, if any (see
                                 * join_logical_ports()). */

    struct ovn_datapath *od;    /* Datapath this port belongs to. */

    struct ovs_list list;       /* In list of similar records. */
};
501
502 static struct ovn_port *
503 ovn_port_create(struct hmap *ports, const char *key,
504 const struct nbrec_logical_port *nbs,
505 const struct nbrec_logical_router_port *nbr,
506 const struct sbrec_port_binding *sb)
507 {
508 struct ovn_port *op = xzalloc(sizeof *op);
509
510 struct ds json_key = DS_EMPTY_INITIALIZER;
511 json_string_escape(key, &json_key);
512 op->json_key = ds_steal_cstr(&json_key);
513
514 op->key = xstrdup(key);
515 op->sb = sb;
516 op->nbs = nbs;
517 op->nbr = nbr;
518 hmap_insert(ports, &op->key_node, hash_string(op->key, 0));
519 return op;
520 }
521
522 static void
523 ovn_port_destroy(struct hmap *ports, struct ovn_port *port)
524 {
525 if (port) {
526 /* Don't remove port->list. It is used within build_ports() as a
527 * private list and once we've exited that function it is not safe to
528 * use it. */
529 hmap_remove(ports, &port->key_node);
530 free(port->json_key);
531 free(port->key);
532 free(port);
533 }
534 }
535
536 static struct ovn_port *
537 ovn_port_find(struct hmap *ports, const char *name)
538 {
539 struct ovn_port *op;
540
541 HMAP_FOR_EACH_WITH_HASH (op, key_node, hash_string(name, 0), ports) {
542 if (!strcmp(op->key, name)) {
543 return op;
544 }
545 }
546 return NULL;
547 }
548
/* Allocates a port tunnel key unique within datapath 'od', from the 15-bit
 * space 1...2**15-1, resuming from od->port_key_hint. */
static uint32_t
ovn_port_allocate_key(struct ovn_datapath *od)
{
    return allocate_tnlid(&od->port_tnlids, "port",
                          (1u << 15) - 1, &od->port_key_hint);
}
555
/* Collates northbound logical (switch and router) ports against southbound
 * Port_Binding rows.  Fills 'ports' with one ovn_port per port and
 * partitions them into 'sb_only', 'nb_only', and 'both' lists threaded
 * through ovn_port's 'list' member, like join_datapaths() does for
 * datapaths.
 *
 * Also digests each router port's MAC and network into its ovn_port,
 * selects each router datapath's gateway port (longest prefix match that
 * covers od->gateway), and connects peered ports to each other.
 *
 * 'ctx' is not used by this function. */
static void
join_logical_ports(struct northd_context *ctx,
                   struct hmap *datapaths, struct hmap *ports,
                   struct ovs_list *sb_only, struct ovs_list *nb_only,
                   struct ovs_list *both)
{
    hmap_init(ports);
    ovs_list_init(sb_only);
    ovs_list_init(nb_only);
    ovs_list_init(both);

    /* Every southbound binding starts out provisionally in 'sb_only'; the
     * northbound loops below reclassify the ones that match. */
    const struct sbrec_port_binding *sb;
    SBREC_PORT_BINDING_FOR_EACH (sb, ctx->ovnsb_idl) {
        struct ovn_port *op = ovn_port_create(ports, sb->logical_port,
                                              NULL, NULL, sb);
        ovs_list_push_back(sb_only, &op->list);
    }

    struct ovn_datapath *od;
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (od->nbs) {
            for (size_t i = 0; i < od->nbs->n_ports; i++) {
                const struct nbrec_logical_port *nbs = od->nbs->ports[i];
                struct ovn_port *op = ovn_port_find(ports, nbs->name);
                if (op) {
                    if (op->nbs || op->nbr) {
                        /* Name already claimed by another NB port. */
                        static struct vlog_rate_limit rl
                            = VLOG_RATE_LIMIT_INIT(5, 1);
                        VLOG_WARN_RL(&rl, "duplicate logical port %s",
                                     nbs->name);
                        continue;
                    }
                    op->nbs = nbs;
                    ovs_list_remove(&op->list);
                    ovs_list_push_back(both, &op->list);
                } else {
                    op = ovn_port_create(ports, nbs->name, nbs, NULL, NULL);
                    ovs_list_push_back(nb_only, &op->list);
                }

                op->od = od;
            }
        } else {
            for (size_t i = 0; i < od->nbr->n_ports; i++) {
                const struct nbrec_logical_router_port *nbr
                    = od->nbr->ports[i];

                /* Validate MAC and network up front so that a malformed
                 * router port is skipped entirely. */
                struct eth_addr mac;
                if (!eth_addr_from_string(nbr->mac, &mac)) {
                    static struct vlog_rate_limit rl
                        = VLOG_RATE_LIMIT_INIT(5, 1);
                    VLOG_WARN_RL(&rl, "bad 'mac' %s", nbr->mac);
                    continue;
                }

                /* A /32 (OVS_BE32_MAX) or non-CIDR mask is rejected. */
                ovs_be32 ip, mask;
                char *error = ip_parse_masked(nbr->network, &ip, &mask);
                if (error || mask == OVS_BE32_MAX || !ip_is_cidr(mask)) {
                    static struct vlog_rate_limit rl
                        = VLOG_RATE_LIMIT_INIT(5, 1);
                    VLOG_WARN_RL(&rl, "bad 'network' %s", nbr->network);
                    free(error);
                    continue;
                }

                struct ovn_port *op = ovn_port_find(ports, nbr->name);
                if (op) {
                    if (op->nbs || op->nbr) {
                        static struct vlog_rate_limit rl
                            = VLOG_RATE_LIMIT_INIT(5, 1);
                        VLOG_WARN_RL(&rl, "duplicate logical router port %s",
                                     nbr->name);
                        continue;
                    }
                    op->nbr = nbr;
                    ovs_list_remove(&op->list);
                    ovs_list_push_back(both, &op->list);
                } else {
                    op = ovn_port_create(ports, nbr->name, NULL, nbr, NULL);
                    ovs_list_push_back(nb_only, &op->list);
                }

                /* Digest the address data for flow generation. */
                op->ip = ip;
                op->mask = mask;
                op->network = ip & mask;
                op->bcast = ip | ~mask;
                op->mac = mac;

                op->od = od;

                /* If 'od' has a gateway and 'op' routes to it... */
                if (od->gateway && !((op->network ^ od->gateway) & op->mask)) {
                    /* ...and if 'op' is a longer match than the current
                     * choice... */
                    const struct ovn_port *gw = od->gateway_port;
                    int len = gw ? ip_count_cidr_bits(gw->mask) : 0;
                    if (ip_count_cidr_bits(op->mask) > len) {
                        /* ...then it's the default gateway port. */
                        od->gateway_port = op;
                    }
                }
            }
        }
    }

    /* Connect logical router ports, and logical switch ports of type "router",
     * to their peers. */
    struct ovn_port *op;
    HMAP_FOR_EACH (op, key_node, ports) {
        if (op->nbs && !strcmp(op->nbs->type, "router")) {
            const char *peer_name = smap_get(&op->nbs->options, "router-port");
            if (!peer_name) {
                continue;
            }

            struct ovn_port *peer = ovn_port_find(ports, peer_name);
            if (!peer || !peer->nbr) {
                continue;
            }

            peer->peer = op;
            op->peer = peer;
            /* Record, on the switch datapath, which of its ports attach to
             * routers (used elsewhere when generating flows). */
            op->od->router_ports = xrealloc(
                op->od->router_ports,
                sizeof *op->od->router_ports * (op->od->n_router_ports + 1));
            op->od->router_ports[op->od->n_router_ports++] = op;
        } else if (op->nbr && op->nbr->peer) {
            op->peer = ovn_port_find(ports, op->nbr->peer);
        }
    }
}
687
/* Pushes the northbound data cached in 'op' into its southbound
 * Port_Binding row 'op->sb' (which must be nonnull).
 *
 * Logical router ports become southbound "patch" ports whose options:peer
 * names the connected port.  Logical switch ports of type "router"
 * likewise become "patch" ports peered via their options:router-port;
 * all other switch ports keep their northbound type and options. */
static void
ovn_port_update_sbrec(const struct ovn_port *op)
{
    sbrec_port_binding_set_datapath(op->sb, op->od->sb);
    if (op->nbr) {
        sbrec_port_binding_set_type(op->sb, "patch");

        /* "<error>" makes a missing peer visible in the database instead
         * of failing silently. */
        const char *peer = op->peer ? op->peer->key : "<error>";
        const struct smap ids = SMAP_CONST1(&ids, "peer", peer);
        sbrec_port_binding_set_options(op->sb, &ids);

        /* Router ports carry no container parent, tag, or MAC list. */
        sbrec_port_binding_set_parent_port(op->sb, NULL);
        sbrec_port_binding_set_tag(op->sb, NULL, 0);
        sbrec_port_binding_set_mac(op->sb, NULL, 0);
    } else {
        if (strcmp(op->nbs->type, "router")) {
            sbrec_port_binding_set_type(op->sb, op->nbs->type);
            sbrec_port_binding_set_options(op->sb, &op->nbs->options);
        } else {
            sbrec_port_binding_set_type(op->sb, "patch");

            const char *router_port = smap_get(&op->nbs->options,
                                               "router-port");
            if (!router_port) {
                router_port = "<error>";
            }
            const struct smap ids = SMAP_CONST1(&ids, "peer", router_port);
            sbrec_port_binding_set_options(op->sb, &ids);
        }
        sbrec_port_binding_set_parent_port(op->sb, op->nbs->parent_name);
        sbrec_port_binding_set_tag(op->sb, op->nbs->tag, op->nbs->n_tag);
        sbrec_port_binding_set_mac(op->sb, (const char **) op->nbs->addresses,
                                   op->nbs->n_addresses);
    }
}
723
/* Updates the southbound Port_Binding table so that it contains the logical
 * ports specified by the northbound database.
 *
 * Initializes 'ports' to contain a "struct ovn_port" for every logical port,
 * using the "struct ovn_datapath"s in 'datapaths' to look up logical
 * datapaths. */
static void
build_ports(struct northd_context *ctx, struct hmap *datapaths,
            struct hmap *ports)
{
    struct ovs_list sb_only, nb_only, both;

    join_logical_ports(ctx, datapaths, ports, &sb_only, &nb_only, &both);

    /* For logical ports that are in both databases, update the southbound
     * record based on northbound data.  Also index the in-use tunnel_keys. */
    struct ovn_port *op, *next;
    LIST_FOR_EACH_SAFE (op, next, list, &both) {
        ovn_port_update_sbrec(op);

        add_tnlid(&op->od->port_tnlids, op->sb->tunnel_key);
        if (op->sb->tunnel_key > op->od->port_key_hint) {
            op->od->port_key_hint = op->sb->tunnel_key;
        }
    }

    /* Add southbound record for each unmatched northbound record. */
    LIST_FOR_EACH_SAFE (op, next, list, &nb_only) {
        /* Port keys come from a 15-bit space (ovn_port_allocate_key()),
         * so a uint16_t cannot truncate here. */
        uint16_t tunnel_key = ovn_port_allocate_key(op->od);
        if (!tunnel_key) {
            /* Key space exhausted on this datapath; keep trying the other
             * ports, which may belong to other datapaths. */
            continue;
        }

        op->sb = sbrec_port_binding_insert(ctx->ovnsb_txn);
        ovn_port_update_sbrec(op);

        sbrec_port_binding_set_logical_port(op->sb, op->key);
        sbrec_port_binding_set_tunnel_key(op->sb, tunnel_key);
    }

    /* Delete southbound records without northbound matches. */
    LIST_FOR_EACH_SAFE(op, next, list, &sb_only) {
        ovs_list_remove(&op->list);
        sbrec_port_binding_delete(op->sb);
        ovn_port_destroy(ports, op);
    }
}
771 \f
772 #define OVN_MIN_MULTICAST 32768
773 #define OVN_MAX_MULTICAST 65535
774
/* A named logical multicast group, identified in the southbound database by
 * a tunnel key in the multicast range. */
struct multicast_group {
    const char *name;           /* Southbound Multicast_Group name. */
    uint16_t key;               /* OVN_MIN_MULTICAST...OVN_MAX_MULTICAST. */
};
779
780 #define MC_FLOOD "_MC_flood"
781 static const struct multicast_group mc_flood = { MC_FLOOD, 65535 };
782
783 #define MC_UNKNOWN "_MC_unknown"
784 static const struct multicast_group mc_unknown = { MC_UNKNOWN, 65534 };
785
786 static bool
787 multicast_group_equal(const struct multicast_group *a,
788 const struct multicast_group *b)
789 {
790 return !strcmp(a->name, b->name) && a->key == b->key;
791 }
792
/* Multicast group entry: the set of ports on 'datapath' that are members of
 * 'group', accumulated before being written to the southbound database. */
struct ovn_multicast {
    struct hmap_node hmap_node; /* Index on 'datapath' and 'key'. */
    struct ovn_datapath *datapath;
    const struct multicast_group *group;

    struct ovn_port **ports;    /* Malloc'd array of member ports. */
    size_t n_ports, allocated_ports;
};
802
/* Hashes the (datapath, group) pair that identifies an ovn_multicast,
 * using the group's key as the hash basis for the datapath pointer. */
static uint32_t
ovn_multicast_hash(const struct ovn_datapath *datapath,
                   const struct multicast_group *group)
{
    return hash_pointer(datapath, group->key);
}
809
810 static struct ovn_multicast *
811 ovn_multicast_find(struct hmap *mcgroups, struct ovn_datapath *datapath,
812 const struct multicast_group *group)
813 {
814 struct ovn_multicast *mc;
815
816 HMAP_FOR_EACH_WITH_HASH (mc, hmap_node,
817 ovn_multicast_hash(datapath, group), mcgroups) {
818 if (mc->datapath == datapath
819 && multicast_group_equal(mc->group, group)) {
820 return mc;
821 }
822 }
823 return NULL;
824 }
825
826 static void
827 ovn_multicast_add(struct hmap *mcgroups, const struct multicast_group *group,
828 struct ovn_port *port)
829 {
830 struct ovn_datapath *od = port->od;
831 struct ovn_multicast *mc = ovn_multicast_find(mcgroups, od, group);
832 if (!mc) {
833 mc = xmalloc(sizeof *mc);
834 hmap_insert(mcgroups, &mc->hmap_node, ovn_multicast_hash(od, group));
835 mc->datapath = od;
836 mc->group = group;
837 mc->n_ports = 0;
838 mc->allocated_ports = 4;
839 mc->ports = xmalloc(mc->allocated_ports * sizeof *mc->ports);
840 }
841 if (mc->n_ports >= mc->allocated_ports) {
842 mc->ports = x2nrealloc(mc->ports, &mc->allocated_ports,
843 sizeof *mc->ports);
844 }
845 mc->ports[mc->n_ports++] = port;
846 }
847
848 static void
849 ovn_multicast_destroy(struct hmap *mcgroups, struct ovn_multicast *mc)
850 {
851 if (mc) {
852 hmap_remove(mcgroups, &mc->hmap_node);
853 free(mc->ports);
854 free(mc);
855 }
856 }
857
858 static void
859 ovn_multicast_update_sbrec(const struct ovn_multicast *mc,
860 const struct sbrec_multicast_group *sb)
861 {
862 struct sbrec_port_binding **ports = xmalloc(mc->n_ports * sizeof *ports);
863 for (size_t i = 0; i < mc->n_ports; i++) {
864 ports[i] = CONST_CAST(struct sbrec_port_binding *, mc->ports[i]->sb);
865 }
866 sbrec_multicast_group_set_ports(sb, ports, mc->n_ports);
867 free(ports);
868 }
869 \f
870 /* Logical flow generation.
871 *
872 * This code generates the Logical_Flow table in the southbound database, as a
873 * function of most of the northbound database.
874 */
875
/* One prospective row of the southbound Logical_Flow table, hashed on all
 * five of its fields (see ovn_lflow_hash()). */
struct ovn_lflow {
    struct hmap_node hmap_node;

    struct ovn_datapath *od;    /* Datapath the flow belongs to. */
    enum ovn_stage stage;       /* Pipeline stage (encodes the table too). */
    uint16_t priority;          /* Flow priority within the stage. */
    char *match;                /* Match expression (owned; see
                                 * ovn_lflow_destroy()). */
    char *actions;              /* Action string (owned). */
};
885
886 static size_t
887 ovn_lflow_hash(const struct ovn_lflow *lflow)
888 {
889 size_t hash = uuid_hash(&lflow->od->key);
890 hash = hash_2words((lflow->stage << 16) | lflow->priority, hash);
891 hash = hash_string(lflow->match, hash);
892 return hash_string(lflow->actions, hash);
893 }
894
895 static bool
896 ovn_lflow_equal(const struct ovn_lflow *a, const struct ovn_lflow *b)
897 {
898 return (a->od == b->od
899 && a->stage == b->stage
900 && a->priority == b->priority
901 && !strcmp(a->match, b->match)
902 && !strcmp(a->actions, b->actions));
903 }
904
905 static void
906 ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,
907 enum ovn_stage stage, uint16_t priority,
908 char *match, char *actions)
909 {
910 lflow->od = od;
911 lflow->stage = stage;
912 lflow->priority = priority;
913 lflow->match = match;
914 lflow->actions = actions;
915 }
916
917 /* Adds a row with the specified contents to the Logical_Flow table. */
918 static void
919 ovn_lflow_add(struct hmap *lflow_map, struct ovn_datapath *od,
920 enum ovn_stage stage, uint16_t priority,
921 const char *match, const char *actions)
922 {
923 struct ovn_lflow *lflow = xmalloc(sizeof *lflow);
924 ovn_lflow_init(lflow, od, stage, priority,
925 xstrdup(match), xstrdup(actions));
926 hmap_insert(lflow_map, &lflow->hmap_node, ovn_lflow_hash(lflow));
927 }
928
929 static struct ovn_lflow *
930 ovn_lflow_find(struct hmap *lflows, struct ovn_datapath *od,
931 enum ovn_stage stage, uint16_t priority,
932 const char *match, const char *actions)
933 {
934 struct ovn_lflow target;
935 ovn_lflow_init(&target, od, stage, priority,
936 CONST_CAST(char *, match), CONST_CAST(char *, actions));
937
938 struct ovn_lflow *lflow;
939 HMAP_FOR_EACH_WITH_HASH (lflow, hmap_node, ovn_lflow_hash(&target),
940 lflows) {
941 if (ovn_lflow_equal(lflow, &target)) {
942 return lflow;
943 }
944 }
945 return NULL;
946 }
947
948 static void
949 ovn_lflow_destroy(struct hmap *lflows, struct ovn_lflow *lflow)
950 {
951 if (lflow) {
952 hmap_remove(lflows, &lflow->hmap_node);
953 free(lflow->match);
954 free(lflow->actions);
955 free(lflow);
956 }
957 }
958
/* An IPv4 address plus CIDR prefix length, e.g. 192.168.0.1/24. */
struct ipv4_netaddr {
    ovs_be32 addr;
    unsigned int plen;          /* CIDR prefix length. */
};

/* An IPv6 address plus CIDR prefix length. */
struct ipv6_netaddr {
    struct in6_addr addr;
    unsigned int plen;          /* CIDR prefix length. */
};

/* A MAC address and the IPv4/IPv6 addresses parsed from a logical port
 * "addresses" string; filled in by extract_lport_addresses(), which also
 * documents ownership of the two malloc'd arrays. */
struct lport_addresses {
    struct eth_addr ea;
    size_t n_ipv4_addrs;
    struct ipv4_netaddr *ipv4_addrs;  /* Malloc'd; caller frees. */
    size_t n_ipv6_addrs;
    struct ipv6_netaddr *ipv6_addrs;  /* Malloc'd; caller frees. */
};
976
/*
 * Extracts the mac, ipv4 and ipv6 addresses from the input param 'address'
 * which should be of the format 'MAC [IP1 IP2 ..]" where IPn should be
 * a valid IPv4 or IPv6 address and stores them in the 'ipv4_addrs' and
 * 'ipv6_addrs' fields of input param 'laddrs'.
 * The caller has to free the 'ipv4_addrs' and 'ipv6_addrs' fields.
 * If input param 'store_ipv6' is true only then extracted ipv6 addresses
 * are stored in 'ipv6_addrs' fields.
 * Return true if at least 'MAC' is found in 'address', false otherwise.
 * Eg 1.
 * If 'address' = '00:00:00:00:00:01 10.0.0.4 fe80::ea2a:eaff:fe28:3390/64
 * 30.0.0.3/23' and 'store_ipv6' = true
 * then returns true with laddrs->n_ipv4_addrs = 2, laddrs->n_ipv6_addrs = 1.
 *
 * Eg. 2
 * If 'address' = '00:00:00:00:00:01 10.0.0.4 fe80::ea2a:eaff:fe28:3390/64
 * 30.0.0.3/23' and 'store_ipv6' = false
 * then returns true with laddrs->n_ipv4_addrs = 2, laddrs->n_ipv6_addrs = 0.
 *
 * Eg 3. If 'address' = '00:00:00:00:00:01 10.0.0.4 addr 30.0.0.4', then
 * returns true with laddrs->n_ipv4_addrs = 1 and laddrs->n_ipv6_addrs = 0.
 */
static bool
extract_lport_addresses(char *address, struct lport_addresses *laddrs,
                        bool store_ipv6)
{
    char *buf = address;
    int buf_index = 0;
    char *buf_end = buf + strlen(address);
    /* The MAC must come first; bail out early if it is missing or bad. */
    if (!ovs_scan_len(buf, &buf_index, ETH_ADDR_SCAN_FMT,
                      ETH_ADDR_SCAN_ARGS(laddrs->ea))) {
        return false;
    }

    ovs_be32 ip4;
    struct in6_addr ip6;
    unsigned int plen;
    char *error;

    laddrs->n_ipv4_addrs = 0;
    laddrs->n_ipv6_addrs = 0;
    laddrs->ipv4_addrs = NULL;
    laddrs->ipv6_addrs = NULL;

    /* Loop through the buffer and extract the IPv4/IPv6 addresses
     * and store in the 'laddrs'. Break the loop if invalid data is found.
     */
    buf += buf_index;
    while (buf < buf_end) {
        buf_index = 0;
        /* Try IPv4 first; on success grow the array by one and append. */
        error = ip_parse_cidr_len(buf, &buf_index, &ip4, &plen);
        if (!error) {
            laddrs->n_ipv4_addrs++;
            laddrs->ipv4_addrs = xrealloc(
                laddrs->ipv4_addrs,
                sizeof (struct ipv4_netaddr) * laddrs->n_ipv4_addrs);
            laddrs->ipv4_addrs[laddrs->n_ipv4_addrs - 1].addr = ip4;
            laddrs->ipv4_addrs[laddrs->n_ipv4_addrs - 1].plen = plen;
            buf += buf_index;
            continue;
        }
        free(error);
        /* Not IPv4: try IPv6.  The address is only stored when
         * 'store_ipv6' is set, but a successful parse still advances. */
        error = ipv6_parse_cidr_len(buf, &buf_index, &ip6, &plen);
        if (!error && store_ipv6) {
            laddrs->n_ipv6_addrs++;
            laddrs->ipv6_addrs = xrealloc(
                laddrs->ipv6_addrs,
                sizeof(struct ipv6_netaddr) * laddrs->n_ipv6_addrs);
            memcpy(&laddrs->ipv6_addrs[laddrs->n_ipv6_addrs - 1].addr, &ip6,
                   sizeof(struct in6_addr));
            laddrs->ipv6_addrs[laddrs->n_ipv6_addrs - 1].plen = plen;
        }

        if (error) {
            /* Neither an IPv4 nor an IPv6 address: stop parsing here. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
            VLOG_INFO_RL(&rl, "invalid syntax '%s' in address", address);
            free(error);
            break;
        }
        buf += buf_index;
    }

    return true;
}
1061
1062 /* Appends port security constraints on L2 address field 'eth_addr_field'
1063 * (e.g. "eth.src" or "eth.dst") to 'match'. 'port_security', with
1064 * 'n_port_security' elements, is the collection of port_security constraints
1065 * from an OVN_NB Logical_Port row. */
1066 static void
1067 build_port_security_l2(const char *eth_addr_field,
1068 char **port_security, size_t n_port_security,
1069 struct ds *match)
1070 {
1071 size_t base_len = match->length;
1072 ds_put_format(match, " && %s == {", eth_addr_field);
1073
1074 size_t n = 0;
1075 for (size_t i = 0; i < n_port_security; i++) {
1076 struct eth_addr ea;
1077
1078 if (eth_addr_from_string(port_security[i], &ea)) {
1079 ds_put_format(match, ETH_ADDR_FMT, ETH_ADDR_ARGS(ea));
1080 ds_put_char(match, ' ');
1081 n++;
1082 }
1083 }
1084 ds_chomp(match, ' ');
1085 ds_put_cstr(match, "}");
1086
1087 if (!n) {
1088 match->length = base_len;
1089 }
1090 }
1091
1092 static void
1093 build_port_security_ipv6_nd_flow(
1094 struct ds *match, struct eth_addr ea, struct ipv6_netaddr *ipv6_addrs,
1095 int n_ipv6_addrs)
1096 {
1097 ds_put_format(match, " && ip6 && nd && ((nd.sll == "ETH_ADDR_FMT" || "
1098 "nd.sll == "ETH_ADDR_FMT") || ((nd.tll == "ETH_ADDR_FMT" || "
1099 "nd.tll == "ETH_ADDR_FMT")", ETH_ADDR_ARGS(eth_addr_zero),
1100 ETH_ADDR_ARGS(ea), ETH_ADDR_ARGS(eth_addr_zero),
1101 ETH_ADDR_ARGS(ea));
1102 if (!n_ipv6_addrs) {
1103 ds_put_cstr(match, "))");
1104 return;
1105 }
1106
1107 char ip6_str[INET6_ADDRSTRLEN + 1];
1108 struct in6_addr lla;
1109 in6_generate_lla(ea, &lla);
1110 memset(ip6_str, 0, sizeof(ip6_str));
1111 ipv6_string_mapped(ip6_str, &lla);
1112 ds_put_format(match, " && (nd.target == %s", ip6_str);
1113
1114 for(int i = 0; i < n_ipv6_addrs; i++) {
1115 memset(ip6_str, 0, sizeof(ip6_str));
1116 ipv6_string_mapped(ip6_str, &ipv6_addrs[i].addr);
1117 ds_put_format(match, " || nd.target == %s", ip6_str);
1118 }
1119
1120 ds_put_format(match, ")))");
1121 }
1122
1123 static void
1124 build_port_security_ipv6_flow(
1125 enum ovn_pipeline pipeline, struct ds *match, struct eth_addr ea,
1126 struct ipv6_netaddr *ipv6_addrs, int n_ipv6_addrs)
1127 {
1128 char ip6_str[INET6_ADDRSTRLEN + 1];
1129
1130 ds_put_format(match, " && %s == {",
1131 pipeline == P_IN ? "ip6.src" : "ip6.dst");
1132
1133 /* Allow link-local address. */
1134 struct in6_addr lla;
1135 in6_generate_lla(ea, &lla);
1136 ipv6_string_mapped(ip6_str, &lla);
1137 ds_put_format(match, "%s, ", ip6_str);
1138
1139 /* Allow ip6.src=:: and ip6.dst=ff00::/8 for ND packets */
1140 ds_put_cstr(match, pipeline == P_IN ? "::" : "ff00::/8");
1141 for(int i = 0; i < n_ipv6_addrs; i++) {
1142 ipv6_string_mapped(ip6_str, &ipv6_addrs[i].addr);
1143 ds_put_format(match, ", %s", ip6_str);
1144 }
1145 ds_put_cstr(match, "}");
1146 }
1147
1148 /**
1149 * Build port security constraints on ARP and IPv6 ND fields
1150 * and add logical flows to S_SWITCH_IN_PORT_SEC_ND stage.
1151 *
1152 * For each port security of the logical port, following
1153 * logical flows are added
1154 * - If the port security has no IP (both IPv4 and IPv6) or
1155 * if it has IPv4 address(es)
1156 * - Priority 90 flow to allow ARP packets for known MAC addresses
1157 * in the eth.src and arp.spa fields. If the port security
1158 * has IPv4 addresses, allow known IPv4 addresses in the arp.tpa field.
1159 *
1160 * - If the port security has no IP (both IPv4 and IPv6) or
1161 * if it has IPv6 address(es)
1162 * - Priority 90 flow to allow IPv6 ND packets for known MAC addresses
1163 * in the eth.src and nd.sll/nd.tll fields. If the port security
1164 * has IPv6 addresses, allow known IPv6 addresses in the nd.target field
1165 * for IPv6 Neighbor Advertisement packet.
1166 *
1167 * - Priority 80 flow to drop ARP and IPv6 ND packets.
1168 */
1169 static void
1170 build_port_security_nd(struct ovn_port *op, struct hmap *lflows)
1171 {
1172 for (size_t i = 0; i < op->nbs->n_port_security; i++) {
1173 struct lport_addresses ps;
1174 if (!extract_lport_addresses(op->nbs->port_security[i], &ps, true)) {
1175 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
1176 VLOG_INFO_RL(&rl, "invalid syntax '%s' in port security. No MAC"
1177 " address found", op->nbs->port_security[i]);
1178 continue;
1179 }
1180
1181 bool no_ip = !(ps.n_ipv4_addrs || ps.n_ipv6_addrs);
1182 struct ds match = DS_EMPTY_INITIALIZER;
1183
1184 if (ps.n_ipv4_addrs || no_ip) {
1185 ds_put_format(
1186 &match, "inport == %s && eth.src == "ETH_ADDR_FMT" && arp.sha == "
1187 ETH_ADDR_FMT, op->json_key, ETH_ADDR_ARGS(ps.ea),
1188 ETH_ADDR_ARGS(ps.ea));
1189
1190 if (ps.n_ipv4_addrs) {
1191 ds_put_cstr(&match, " && (");
1192 for (size_t i = 0; i < ps.n_ipv4_addrs; i++) {
1193 ds_put_cstr(&match, "arp.spa == ");
1194 ovs_be32 mask = be32_prefix_mask(ps.ipv4_addrs[i].plen);
1195 /* When the netmask is applied, if the host portion is
1196 * non-zero, the host can only use the specified
1197 * address in the arp.spa. If zero, the host is allowed
1198 * to use any address in the subnet. */
1199 if (ps.ipv4_addrs[i].addr & ~mask) {
1200 ds_put_format(&match, IP_FMT,
1201 IP_ARGS(ps.ipv4_addrs[i].addr));
1202 } else {
1203 ip_format_masked(ps.ipv4_addrs[i].addr & mask, mask,
1204 &match);
1205 }
1206 ds_put_cstr(&match, " || ");
1207 }
1208 ds_chomp(&match, ' ');
1209 ds_chomp(&match, '|');
1210 ds_chomp(&match, '|');
1211 ds_put_cstr(&match, ")");
1212 }
1213 ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_ND, 90,
1214 ds_cstr(&match), "next;");
1215 ds_destroy(&match);
1216 }
1217
1218 if (ps.n_ipv6_addrs || no_ip) {
1219 ds_init(&match);
1220 ds_put_format(&match, "inport == %s && eth.src == "ETH_ADDR_FMT,
1221 op->json_key, ETH_ADDR_ARGS(ps.ea));
1222 build_port_security_ipv6_nd_flow(&match, ps.ea, ps.ipv6_addrs,
1223 ps.n_ipv6_addrs);
1224 ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_ND, 90,
1225 ds_cstr(&match), "next;");
1226 ds_destroy(&match);
1227 }
1228 free(ps.ipv4_addrs);
1229 free(ps.ipv6_addrs);
1230 }
1231
1232 char *match = xasprintf("inport == %s && (arp || nd)", op->json_key);
1233 ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_ND, 80,
1234 match, "drop;");
1235 free(match);
1236 }
1237
1238 /**
1239 * Build port security constraints on IPv4 and IPv6 src and dst fields
1240 * and add logical flows to S_SWITCH_(IN/OUT)_PORT_SEC_IP stage.
1241 *
1242 * For each port security of the logical port, following
1243 * logical flows are added
1244 * - If the port security has IPv4 addresses,
1245 * - Priority 90 flow to allow IPv4 packets for known IPv4 addresses
1246 *
1247 * - If the port security has IPv6 addresses,
1248 * - Priority 90 flow to allow IPv6 packets for known IPv6 addresses
1249 *
1250 * - If the port security has IPv4 addresses or IPv6 addresses or both
1251 * - Priority 80 flow to drop all IPv4 and IPv6 traffic
1252 */
1253 static void
1254 build_port_security_ip(enum ovn_pipeline pipeline, struct ovn_port *op,
1255 struct hmap *lflows)
1256 {
1257 char *port_direction;
1258 enum ovn_stage stage;
1259 if (pipeline == P_IN) {
1260 port_direction = "inport";
1261 stage = S_SWITCH_IN_PORT_SEC_IP;
1262 } else {
1263 port_direction = "outport";
1264 stage = S_SWITCH_OUT_PORT_SEC_IP;
1265 }
1266
1267 for (size_t i = 0; i < op->nbs->n_port_security; i++) {
1268 struct lport_addresses ps;
1269 if (!extract_lport_addresses(op->nbs->port_security[i], &ps, true)) {
1270 continue;
1271 }
1272
1273 if (!(ps.n_ipv4_addrs || ps.n_ipv6_addrs)) {
1274 continue;
1275 }
1276
1277 if (ps.n_ipv4_addrs) {
1278 struct ds match = DS_EMPTY_INITIALIZER;
1279 if (pipeline == P_IN) {
1280 ds_put_format(&match, "inport == %s && eth.src == "ETH_ADDR_FMT
1281 " && ip4.src == {0.0.0.0, ", op->json_key,
1282 ETH_ADDR_ARGS(ps.ea));
1283 } else {
1284 ds_put_format(&match, "outport == %s && eth.dst == "ETH_ADDR_FMT
1285 " && ip4.dst == {255.255.255.255, 224.0.0.0/4, ",
1286 op->json_key, ETH_ADDR_ARGS(ps.ea));
1287 }
1288
1289 for (int i = 0; i < ps.n_ipv4_addrs; i++) {
1290 ovs_be32 mask = be32_prefix_mask(ps.ipv4_addrs[i].plen);
1291 /* When the netmask is applied, if the host portion is
1292 * non-zero, the host can only use the specified
1293 * address. If zero, the host is allowed to use any
1294 * address in the subnet.
1295 * */
1296 if (ps.ipv4_addrs[i].addr & ~mask) {
1297 ds_put_format(&match, IP_FMT,
1298 IP_ARGS(ps.ipv4_addrs[i].addr));
1299 if (pipeline == P_OUT && ps.ipv4_addrs[i].plen != 32) {
1300 /* Host is also allowed to receive packets to the
1301 * broadcast address in the specified subnet.
1302 */
1303 ds_put_format(&match, ", "IP_FMT,
1304 IP_ARGS(ps.ipv4_addrs[i].addr | ~mask));
1305 }
1306 } else {
1307 /* host portion is zero */
1308 ip_format_masked(ps.ipv4_addrs[i].addr & mask, mask,
1309 &match);
1310 }
1311 ds_put_cstr(&match, ", ");
1312 }
1313
1314 /* Replace ", " by "}". */
1315 ds_chomp(&match, ' ');
1316 ds_chomp(&match, ',');
1317 ds_put_cstr(&match, "}");
1318 ovn_lflow_add(lflows, op->od, stage, 90, ds_cstr(&match), "next;");
1319 ds_destroy(&match);
1320 free(ps.ipv4_addrs);
1321 }
1322
1323 if (ps.n_ipv6_addrs) {
1324 struct ds match = DS_EMPTY_INITIALIZER;
1325 ds_put_format(&match, "%s == %s && %s == "ETH_ADDR_FMT"",
1326 port_direction, op->json_key,
1327 pipeline == P_IN ? "eth.src" : "eth.dst",
1328 ETH_ADDR_ARGS(ps.ea));
1329 build_port_security_ipv6_flow(pipeline, &match, ps.ea,
1330 ps.ipv6_addrs, ps.n_ipv6_addrs);
1331 ovn_lflow_add(lflows, op->od, stage, 90,
1332 ds_cstr(&match), "next;");
1333 ds_destroy(&match);
1334 free(ps.ipv6_addrs);
1335 }
1336
1337 char *match = xasprintf(
1338 "%s == %s && %s == "ETH_ADDR_FMT" && ip", port_direction,
1339 op->json_key, pipeline == P_IN ? "eth.src" : "eth.dst",
1340 ETH_ADDR_ARGS(ps.ea));
1341 ovn_lflow_add(lflows, op->od, stage, 80, match, "drop;");
1342 free(match);
1343 }
1344 }
1345
1346 static bool
1347 lport_is_enabled(const struct nbrec_logical_port *lport)
1348 {
1349 return !lport->enabled || *lport->enabled;
1350 }
1351
1352 static bool
1353 lport_is_up(const struct nbrec_logical_port *lport)
1354 {
1355 return !lport->up || *lport->up;
1356 }
1357
1358 static bool
1359 has_stateful_acl(struct ovn_datapath *od)
1360 {
1361 for (size_t i = 0; i < od->nbs->n_acls; i++) {
1362 struct nbrec_acl *acl = od->nbs->acls[i];
1363 if (!strcmp(acl->action, "allow-related")) {
1364 return true;
1365 }
1366 }
1367
1368 return false;
1369 }
1370
1371 static void
1372 build_acls(struct ovn_datapath *od, struct hmap *lflows, struct hmap *ports)
1373 {
1374 bool has_stateful = has_stateful_acl(od);
1375 struct ovn_port *op;
1376
1377 /* Ingress and Egress Pre-ACL Table (Priority 0): Packets are
1378 * allowed by default. */
1379 ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 0, "1", "next;");
1380 ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 0, "1", "next;");
1381
1382 /* Ingress and Egress ACL Table (Priority 0): Packets are allowed by
1383 * default. A related rule at priority 1 is added below if there
1384 * are any stateful ACLs in this datapath. */
1385 ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 0, "1", "next;");
1386 ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 0, "1", "next;");
1387
1388 /* If there are any stateful ACL rules in this dapapath, we must
1389 * send all IP packets through the conntrack action, which handles
1390 * defragmentation, in order to match L4 headers. */
1391 if (has_stateful) {
1392 HMAP_FOR_EACH (op, key_node, ports) {
1393 if (op->od == od && !strcmp(op->nbs->type, "router")) {
1394 /* Can't use ct() for router ports. Consider the
1395 * following configuration: lp1(10.0.0.2) on
1396 * hostA--ls1--lr0--ls2--lp2(10.0.1.2) on hostB, For a
1397 * ping from lp1 to lp2, First, the response will go
1398 * through ct() with a zone for lp2 in the ls2 ingress
1399 * pipeline on hostB. That ct zone knows about this
1400 * connection. Next, it goes through ct() with the zone
1401 * for the router port in the egress pipeline of ls2 on
1402 * hostB. This zone does not know about the connection,
1403 * as the icmp request went through the logical router
1404 * on hostA, not hostB. This would only work with
1405 * distributed conntrack state across all chassis. */
1406 struct ds match_in = DS_EMPTY_INITIALIZER;
1407 struct ds match_out = DS_EMPTY_INITIALIZER;
1408
1409 ds_put_format(&match_in, "ip && inport == %s", op->json_key);
1410 ds_put_format(&match_out, "ip && outport == %s", op->json_key);
1411 ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 110,
1412 ds_cstr(&match_in), "next;");
1413 ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 110,
1414 ds_cstr(&match_out), "next;");
1415
1416 ds_destroy(&match_in);
1417 ds_destroy(&match_out);
1418 }
1419 }
1420
1421 /* Ingress and Egress Pre-ACL Table (Priority 100).
1422 *
1423 * Regardless of whether the ACL is "from-lport" or "to-lport",
1424 * we need rules in both the ingress and egress table, because
1425 * the return traffic needs to be followed. */
1426 ovn_lflow_add(lflows, od, S_SWITCH_IN_PRE_ACL, 100, "ip", "ct_next;");
1427 ovn_lflow_add(lflows, od, S_SWITCH_OUT_PRE_ACL, 100, "ip", "ct_next;");
1428
1429 /* Ingress and Egress ACL Table (Priority 1).
1430 *
1431 * By default, traffic is allowed. This is partially handled by
1432 * the Priority 0 ACL flows added earlier, but we also need to
1433 * commit IP flows. This is because, while the initiater's
1434 * direction may not have any stateful rules, the server's may
1435 * and then its return traffic would not have an associated
1436 * conntrack entry and would return "+invalid". */
1437 ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, 1, "ip",
1438 "ct_commit; next;");
1439 ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, 1, "ip",
1440 "ct_commit; next;");
1441
1442 /* Ingress and Egress ACL Table (Priority 65535).
1443 *
1444 * Always drop traffic that's in an invalid state. This is
1445 * enforced at a higher priority than ACLs can be defined. */
1446 ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX,
1447 "ct.inv", "drop;");
1448 ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX,
1449 "ct.inv", "drop;");
1450
1451 /* Ingress and Egress ACL Table (Priority 65535).
1452 *
1453 * Always allow traffic that is established to a committed
1454 * conntrack entry. This is enforced at a higher priority than
1455 * ACLs can be defined. */
1456 ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX,
1457 "ct.est && !ct.rel && !ct.new && !ct.inv",
1458 "next;");
1459 ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX,
1460 "ct.est && !ct.rel && !ct.new && !ct.inv",
1461 "next;");
1462
1463 /* Ingress and Egress ACL Table (Priority 65535).
1464 *
1465 * Always allow traffic that is related to an existing conntrack
1466 * entry. This is enforced at a higher priority than ACLs can
1467 * be defined.
1468 *
1469 * NOTE: This does not support related data sessions (eg,
1470 * a dynamically negotiated FTP data channel), but will allow
1471 * related traffic such as an ICMP Port Unreachable through
1472 * that's generated from a non-listening UDP port. */
1473 ovn_lflow_add(lflows, od, S_SWITCH_IN_ACL, UINT16_MAX,
1474 "!ct.est && ct.rel && !ct.new && !ct.inv",
1475 "next;");
1476 ovn_lflow_add(lflows, od, S_SWITCH_OUT_ACL, UINT16_MAX,
1477 "!ct.est && ct.rel && !ct.new && !ct.inv",
1478 "next;");
1479 }
1480
1481 /* Ingress or Egress ACL Table (Various priorities). */
1482 for (size_t i = 0; i < od->nbs->n_acls; i++) {
1483 struct nbrec_acl *acl = od->nbs->acls[i];
1484 bool ingress = !strcmp(acl->direction, "from-lport") ? true :false;
1485 enum ovn_stage stage = ingress ? S_SWITCH_IN_ACL : S_SWITCH_OUT_ACL;
1486
1487 if (!strcmp(acl->action, "allow")) {
1488 /* If there are any stateful flows, we must even commit "allow"
1489 * actions. This is because, while the initiater's
1490 * direction may not have any stateful rules, the server's
1491 * may and then its return traffic would not have an
1492 * associated conntrack entry and would return "+invalid". */
1493 const char *actions = has_stateful ? "ct_commit; next;" : "next;";
1494 ovn_lflow_add(lflows, od, stage,
1495 acl->priority + OVN_ACL_PRI_OFFSET,
1496 acl->match, actions);
1497 } else if (!strcmp(acl->action, "allow-related")) {
1498 struct ds match = DS_EMPTY_INITIALIZER;
1499
1500 /* Commit the connection tracking entry, which allows all
1501 * other traffic related to this entry to flow due to the
1502 * 65535 priority flow defined earlier. */
1503 ds_put_format(&match, "ct.new && (%s)", acl->match);
1504 ovn_lflow_add(lflows, od, stage,
1505 acl->priority + OVN_ACL_PRI_OFFSET,
1506 ds_cstr(&match), "ct_commit; next;");
1507
1508 ds_destroy(&match);
1509 } else if (!strcmp(acl->action, "drop")) {
1510 ovn_lflow_add(lflows, od, stage,
1511 acl->priority + OVN_ACL_PRI_OFFSET,
1512 acl->match, "drop;");
1513 } else if (!strcmp(acl->action, "reject")) {
1514 /* xxx Need to support "reject". */
1515 VLOG_INFO("reject is not a supported action");
1516 ovn_lflow_add(lflows, od, stage,
1517 acl->priority + OVN_ACL_PRI_OFFSET,
1518 acl->match, "drop;");
1519 }
1520 }
1521 }
1522
/* Builds logical flows and multicast groups for every logical switch in
 * 'datapaths'/'ports': ACLs, ingress port security (L2/IP/ND), the ARP
 * responder, L2 destination lookup, and egress port security.  Datapaths
 * and ports without a northbound switch record ('nbs' == NULL) belong to
 * logical routers and are skipped here. */
static void
build_lswitch_flows(struct hmap *datapaths, struct hmap *ports,
                    struct hmap *lflows, struct hmap *mcgroups)
{
    /* This flow table structure is documented in ovn-northd(8), so please
     * update ovn-northd.8.xml if you change anything. */

    /* Build pre-ACL and ACL tables for both ingress and egress.
     * Ingress tables 3 and 4.  Egress tables 0 and 1. */
    struct ovn_datapath *od;
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        build_acls(od, lflows, ports);
    }

    /* Logical switch ingress table 0: Admission control framework (priority
     * 100). */
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        /* Logical VLANs not supported. */
        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "vlan.present",
                      "drop;");

        /* Broadcast/multicast source address is invalid. */
        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_L2, 100, "eth.src[40]",
                      "drop;");

        /* Port security flows have priority 50 (see below) and will continue
         * to the next table if packet source is acceptable. */
    }

    /* Logical switch ingress table 0: Ingress port security - L2
     * (priority 50).
     * Ingress table 1: Ingress port security - IP (priority 90 and 80)
     * Ingress table 2: Ingress port security - ND (priority 90 and 80)
     */
    struct ovn_port *op;
    HMAP_FOR_EACH (op, key_node, ports) {
        if (!op->nbs) {
            continue;
        }

        if (!lport_is_enabled(op->nbs)) {
            /* Drop packets from disabled logical ports (since logical flow
             * tables are default-drop). */
            continue;
        }

        struct ds match = DS_EMPTY_INITIALIZER;
        ds_put_format(&match, "inport == %s", op->json_key);
        build_port_security_l2(
            "eth.src", op->nbs->port_security, op->nbs->n_port_security,
            &match);
        ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_L2, 50,
                      ds_cstr(&match), "next;");
        ds_destroy(&match);

        /* IP and ND port-security flows are only needed when the port has
         * port_security entries at all. */
        if (op->nbs->n_port_security) {
            build_port_security_ip(P_IN, op, lflows);
            build_port_security_nd(op, lflows);
        }
    }

    /* Ingress table 1 and 2: Port security - IP and ND, by default goto next.
     * (priority 0)*/
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_ND, 0, "1", "next;");
        ovn_lflow_add(lflows, od, S_SWITCH_IN_PORT_SEC_IP, 0, "1", "next;");
    }

    /* Ingress table 3: ARP responder, skip requests coming from localnet ports.
     * (priority 100).
     * NOTE(review): this comment says "table 3" while the two comments below
     * say "table 5" for flows in the same S_SWITCH_IN_ARP_RSP stage -- the
     * numbering cannot both be right; confirm against the stage definitions
     * and ovn-northd.8.xml. */
    HMAP_FOR_EACH (op, key_node, ports) {
        if (!op->nbs) {
            continue;
        }

        if (!strcmp(op->nbs->type, "localnet")) {
            char *match = xasprintf("inport == %s", op->json_key);
            ovn_lflow_add(lflows, op->od, S_SWITCH_IN_ARP_RSP, 100,
                          match, "next;");
            free(match);
        }
    }

    /* Ingress table 5: ARP responder, reply for known IPs.
     * (priority 50). */
    HMAP_FOR_EACH (op, key_node, ports) {
        if (!op->nbs) {
            continue;
        }

        /*
         * Add ARP reply flows if either the
         *  - port is up or
         *  - port type is router
         */
        if (!lport_is_up(op->nbs) && strcmp(op->nbs->type, "router")) {
            continue;
        }

        for (size_t i = 0; i < op->nbs->n_addresses; i++) {
            struct lport_addresses laddrs;
            /* 'store_ipv6' is false: only IPv4 addresses get ARP replies,
             * so laddrs.ipv6_addrs stays NULL and needs no free below. */
            if (!extract_lport_addresses(op->nbs->addresses[i], &laddrs,
                                         false)) {
                continue;
            }
            for (size_t j = 0; j < laddrs.n_ipv4_addrs; j++) {
                char *match = xasprintf(
                    "arp.tpa == "IP_FMT" && arp.op == 1",
                    IP_ARGS(laddrs.ipv4_addrs[j].addr));
                char *actions = xasprintf(
                    "eth.dst = eth.src; "
                    "eth.src = "ETH_ADDR_FMT"; "
                    "arp.op = 2; /* ARP reply */ "
                    "arp.tha = arp.sha; "
                    "arp.sha = "ETH_ADDR_FMT"; "
                    "arp.tpa = arp.spa; "
                    "arp.spa = "IP_FMT"; "
                    "outport = inport; "
                    "inport = \"\"; /* Allow sending out inport. */ "
                    "output;",
                    ETH_ADDR_ARGS(laddrs.ea),
                    ETH_ADDR_ARGS(laddrs.ea),
                    IP_ARGS(laddrs.ipv4_addrs[j].addr));
                ovn_lflow_add(lflows, op->od, S_SWITCH_IN_ARP_RSP, 50,
                              match, actions);
                free(match);
                free(actions);
            }

            free(laddrs.ipv4_addrs);
        }
    }

    /* Ingress table 5: ARP responder, by default goto next.
     * (priority 0)*/
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        ovn_lflow_add(lflows, od, S_SWITCH_IN_ARP_RSP, 0, "1", "next;");
    }

    /* Ingress table 6: Destination lookup, broadcast and multicast handling
     * (priority 100). */
    HMAP_FOR_EACH (op, key_node, ports) {
        if (!op->nbs) {
            continue;
        }

        if (lport_is_enabled(op->nbs)) {
            ovn_multicast_add(mcgroups, &mc_flood, op);
        }
    }
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 100, "eth.mcast",
                      "outport = \""MC_FLOOD"\"; output;");
    }

    /* Ingress table 6: Destination lookup, unicast handling (priority 50), */
    HMAP_FOR_EACH (op, key_node, ports) {
        if (!op->nbs) {
            continue;
        }

        for (size_t i = 0; i < op->nbs->n_addresses; i++) {
            struct eth_addr mac;

            if (eth_addr_from_string(op->nbs->addresses[i], &mac)) {
                struct ds match, actions;

                ds_init(&match);
                ds_put_format(&match, "eth.dst == "ETH_ADDR_FMT,
                              ETH_ADDR_ARGS(mac));

                ds_init(&actions);
                ds_put_format(&actions, "outport = %s; output;", op->json_key);
                ovn_lflow_add(lflows, op->od, S_SWITCH_IN_L2_LKUP, 50,
                              ds_cstr(&match), ds_cstr(&actions));
                ds_destroy(&actions);
                ds_destroy(&match);
            } else if (!strcmp(op->nbs->addresses[i], "unknown")) {
                /* "unknown" sends unmatched unicast traffic to this port via
                 * the MC_UNKNOWN multicast group (see priority-0 flow
                 * below). */
                if (lport_is_enabled(op->nbs)) {
                    ovn_multicast_add(mcgroups, &mc_unknown, op);
                    op->od->has_unknown = true;
                }
            } else {
                static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);

                VLOG_INFO_RL(&rl,
                             "%s: invalid syntax '%s' in addresses column",
                             op->nbs->name, op->nbs->addresses[i]);
            }
        }
    }

    /* Ingress table 6: Destination lookup for unknown MACs (priority 0). */
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        if (od->has_unknown) {
            ovn_lflow_add(lflows, od, S_SWITCH_IN_L2_LKUP, 0, "1",
                          "outport = \""MC_UNKNOWN"\"; output;");
        }
    }

    /* Egress table 2: Egress port security - IP (priority 0)
     * port security L2 - multicast/broadcast (priority
     * 100). */
    HMAP_FOR_EACH (od, key_node, datapaths) {
        if (!od->nbs) {
            continue;
        }

        ovn_lflow_add(lflows, od, S_SWITCH_OUT_PORT_SEC_IP, 0, "1", "next;");
        ovn_lflow_add(lflows, od, S_SWITCH_OUT_PORT_SEC_L2, 100, "eth.mcast",
                      "output;");
    }

    /* Egress table 2: Egress port security - IP (priorities 90 and 80)
     * if port security enabled.
     *
     * Egress table 3: Egress port security - L2 (priorities 50 and 150).
     *
     * Priority 50 rules implement port security for enabled logical port.
     *
     * Priority 150 rules drop packets to disabled logical ports, so that they
     * don't even receive multicast or broadcast packets. */
    HMAP_FOR_EACH (op, key_node, ports) {
        if (!op->nbs) {
            continue;
        }

        struct ds match = DS_EMPTY_INITIALIZER;
        ds_put_format(&match, "outport == %s", op->json_key);
        if (lport_is_enabled(op->nbs)) {
            build_port_security_l2("eth.dst", op->nbs->port_security,
                                   op->nbs->n_port_security, &match);
            ovn_lflow_add(lflows, op->od, S_SWITCH_OUT_PORT_SEC_L2, 50,
                          ds_cstr(&match), "output;");
        } else {
            ovn_lflow_add(lflows, op->od, S_SWITCH_OUT_PORT_SEC_L2, 150,
                          ds_cstr(&match), "drop;");
        }

        ds_destroy(&match);

        if (op->nbs->n_port_security) {
            build_port_security_ip(P_OUT, op, lflows);
        }
    }
}
1793
1794 static bool
1795 lrport_is_enabled(const struct nbrec_logical_router_port *lrport)
1796 {
1797 return !lrport->enabled || *lrport->enabled;
1798 }
1799
1800 static void
1801 add_route(struct hmap *lflows, const struct ovn_port *op,
1802 ovs_be32 network, ovs_be32 mask, ovs_be32 gateway)
1803 {
1804 char *match = xasprintf("ip4.dst == "IP_FMT"/"IP_FMT,
1805 IP_ARGS(network), IP_ARGS(mask));
1806
1807 struct ds actions = DS_EMPTY_INITIALIZER;
1808 ds_put_cstr(&actions, "ip.ttl--; reg0 = ");
1809 if (gateway) {
1810 ds_put_format(&actions, IP_FMT, IP_ARGS(gateway));
1811 } else {
1812 ds_put_cstr(&actions, "ip4.dst");
1813 }
1814 ds_put_format(&actions,
1815 "; "
1816 "reg1 = "IP_FMT"; "
1817 "eth.src = "ETH_ADDR_FMT"; "
1818 "outport = %s; "
1819 "next;",
1820 IP_ARGS(op->ip), ETH_ADDR_ARGS(op->mac), op->json_key);
1821
1822 /* The priority here is calculated to implement longest-prefix-match
1823 * routing. */
1824 ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_ROUTING,
1825 count_1bits(ntohl(mask)), match, ds_cstr(&actions));
1826 ds_destroy(&actions);
1827 free(match);
1828 }
1829
1830 static void
1831 build_lrouter_flows(struct hmap *datapaths, struct hmap *ports,
1832 struct hmap *lflows)
1833 {
1834 /* This flow table structure is documented in ovn-northd(8), so please
1835 * update ovn-northd.8.xml if you change anything. */
1836
1837 /* Logical router ingress table 0: Admission control framework. */
1838 struct ovn_datapath *od;
1839 HMAP_FOR_EACH (od, key_node, datapaths) {
1840 if (!od->nbr) {
1841 continue;
1842 }
1843
1844 /* Logical VLANs not supported.
1845 * Broadcast/multicast source address is invalid. */
1846 ovn_lflow_add(lflows, od, S_ROUTER_IN_ADMISSION, 100,
1847 "vlan.present || eth.src[40]", "drop;");
1848 }
1849
1850 /* Logical router ingress table 0: match (priority 50). */
1851 struct ovn_port *op;
1852 HMAP_FOR_EACH (op, key_node, ports) {
1853 if (!op->nbr) {
1854 continue;
1855 }
1856
1857 if (!lrport_is_enabled(op->nbr)) {
1858 /* Drop packets from disabled logical ports (since logical flow
1859 * tables are default-drop). */
1860 continue;
1861 }
1862
1863 char *match = xasprintf(
1864 "(eth.mcast || eth.dst == "ETH_ADDR_FMT") && inport == %s",
1865 ETH_ADDR_ARGS(op->mac), op->json_key);
1866 ovn_lflow_add(lflows, op->od, S_ROUTER_IN_ADMISSION, 50,
1867 match, "next;");
1868 free(match);
1869 }
1870
1871 /* Logical router ingress table 1: IP Input. */
1872 HMAP_FOR_EACH (od, key_node, datapaths) {
1873 if (!od->nbr) {
1874 continue;
1875 }
1876
1877 /* L3 admission control: drop multicast and broadcast source, localhost
1878 * source or destination, and zero network source or destination
1879 * (priority 100). */
1880 ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 100,
1881 "ip4.mcast || "
1882 "ip4.src == 255.255.255.255 || "
1883 "ip4.src == 127.0.0.0/8 || "
1884 "ip4.dst == 127.0.0.0/8 || "
1885 "ip4.src == 0.0.0.0/8 || "
1886 "ip4.dst == 0.0.0.0/8",
1887 "drop;");
1888
1889 /* ARP reply handling. Use ARP replies to populate the logical
1890 * router's ARP table. */
1891 ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 90, "arp.op == 2",
1892 "put_arp(inport, arp.spa, arp.sha);");
1893
1894 /* Drop Ethernet local broadcast. By definition this traffic should
1895 * not be forwarded.*/
1896 ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50,
1897 "eth.bcast", "drop;");
1898
1899 /* Drop IP multicast. */
1900 ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 50,
1901 "ip4.mcast", "drop;");
1902
1903 /* TTL discard.
1904 *
1905 * XXX Need to send ICMP time exceeded if !ip.later_frag. */
1906 char *match = xasprintf("ip4 && ip.ttl == {0, 1}");
1907 ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 30, match, "drop;");
1908 free(match);
1909
1910 /* Pass other traffic not already handled to the next table for
1911 * routing. */
1912 ovn_lflow_add(lflows, od, S_ROUTER_IN_IP_INPUT, 0, "1", "next;");
1913 }
1914
1915 HMAP_FOR_EACH (op, key_node, ports) {
1916 if (!op->nbr) {
1917 continue;
1918 }
1919
1920 /* L3 admission control: drop packets that originate from an IP address
1921 * owned by the router or a broadcast address known to the router
1922 * (priority 100). */
1923 char *match = xasprintf("ip4.src == {"IP_FMT", "IP_FMT"}",
1924 IP_ARGS(op->ip), IP_ARGS(op->bcast));
1925 ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 100,
1926 match, "drop;");
1927 free(match);
1928
1929 /* ICMP echo reply. These flows reply to ICMP echo requests
1930 * received for the router's IP address. */
1931 match = xasprintf(
1932 "inport == %s && (ip4.dst == "IP_FMT" || ip4.dst == "IP_FMT") && "
1933 "icmp4.type == 8 && icmp4.code == 0",
1934 op->json_key, IP_ARGS(op->ip), IP_ARGS(op->bcast));
1935 char *actions = xasprintf(
1936 "ip4.dst = ip4.src; "
1937 "ip4.src = "IP_FMT"; "
1938 "ip.ttl = 255; "
1939 "icmp4.type = 0; "
1940 "inport = \"\"; /* Allow sending out inport. */ "
1941 "next; ",
1942 IP_ARGS(op->ip));
1943 ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90,
1944 match, actions);
1945 free(match);
1946 free(actions);
1947
1948 /* ARP reply. These flows reply to ARP requests for the router's own
1949 * IP address. */
1950 match = xasprintf(
1951 "inport == %s && arp.tpa == "IP_FMT" && arp.op == 1",
1952 op->json_key, IP_ARGS(op->ip));
1953 actions = xasprintf(
1954 "eth.dst = eth.src; "
1955 "eth.src = "ETH_ADDR_FMT"; "
1956 "arp.op = 2; /* ARP reply */ "
1957 "arp.tha = arp.sha; "
1958 "arp.sha = "ETH_ADDR_FMT"; "
1959 "arp.tpa = arp.spa; "
1960 "arp.spa = "IP_FMT"; "
1961 "outport = %s; "
1962 "inport = \"\"; /* Allow sending out inport. */ "
1963 "output;",
1964 ETH_ADDR_ARGS(op->mac),
1965 ETH_ADDR_ARGS(op->mac),
1966 IP_ARGS(op->ip),
1967 op->json_key);
1968 ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 90,
1969 match, actions);
1970 free(match);
1971 free(actions);
1972
1973 /* Drop IP traffic to this router. */
1974 match = xasprintf("ip4.dst == "IP_FMT, IP_ARGS(op->ip));
1975 ovn_lflow_add(lflows, op->od, S_ROUTER_IN_IP_INPUT, 60,
1976 match, "drop;");
1977 free(match);
1978 }
1979
1980 /* Logical router ingress table 2: IP Routing.
1981 *
1982 * A packet that arrives at this table is an IP packet that should be
1983 * routed to the address in ip4.dst. This table sets outport to the correct
1984 * output port, eth.src to the output port's MAC address, and reg0 to the
1985 * next-hop IP address (leaving ip4.dst, the packet’s final destination,
1986 * unchanged), and advances to the next table for ARP resolution. */
1987 HMAP_FOR_EACH (op, key_node, ports) {
1988 if (!op->nbr) {
1989 continue;
1990 }
1991
1992 add_route(lflows, op, op->network, op->mask, 0);
1993 }
1994 HMAP_FOR_EACH (od, key_node, datapaths) {
1995 if (!od->nbr) {
1996 continue;
1997 }
1998
1999 if (od->gateway && od->gateway_port) {
2000 add_route(lflows, od->gateway_port, 0, 0, od->gateway);
2001 }
2002 }
2003 /* XXX destination unreachable */
2004
2005 /* Local router ingress table 3: ARP Resolution.
2006 *
2007 * Any packet that reaches this table is an IP packet whose next-hop IP
2008 * address is in reg0. (ip4.dst is the final destination.) This table
2009 * resolves the IP address in reg0 into an output port in outport and an
2010 * Ethernet address in eth.dst. */
2011 HMAP_FOR_EACH (op, key_node, ports) {
2012 if (op->nbr) {
2013 /* This is a logical router port. If next-hop IP address in 'reg0'
2014 * matches ip address of this router port, then the packet is
2015 * intended to eventually be sent to this logical port. Set the
2016 * destination mac address using this port's mac address.
2017 *
2018 * The packet is still in peer's logical pipeline. So the match
2019 * should be on peer's outport. */
2020 if (op->nbr->peer) {
2021 struct ovn_port *peer = ovn_port_find(ports, op->nbr->peer);
2022 if (!peer) {
2023 continue;
2024 }
2025
2026 if (!peer->ip || !op->ip) {
2027 continue;
2028 }
2029 char *match = xasprintf("outport == %s && reg0 == "IP_FMT,
2030 peer->json_key, IP_ARGS(op->ip));
2031 char *actions = xasprintf("eth.dst = "ETH_ADDR_FMT"; "
2032 "next;", ETH_ADDR_ARGS(op->mac));
2033 ovn_lflow_add(lflows, peer->od, S_ROUTER_IN_ARP_RESOLVE,
2034 100, match, actions);
2035 free(actions);
2036 free(match);
2037 }
2038 } else if (op->od->n_router_ports) {
2039 for (size_t i = 0; i < op->nbs->n_addresses; i++) {
2040 struct lport_addresses laddrs;
2041 if (!extract_lport_addresses(op->nbs->addresses[i], &laddrs,
2042 false)) {
2043 continue;
2044 }
2045
2046 for (size_t k = 0; k < laddrs.n_ipv4_addrs; k++) {
2047 ovs_be32 ip = laddrs.ipv4_addrs[k].addr;
2048 for (size_t j = 0; j < op->od->n_router_ports; j++) {
2049 /* Get the Logical_Router_Port that the Logical_Port is
2050 * connected to, as 'peer'. */
2051 const char *peer_name = smap_get(
2052 &op->od->router_ports[j]->nbs->options,
2053 "router-port");
2054 if (!peer_name) {
2055 continue;
2056 }
2057
2058 struct ovn_port *peer
2059 = ovn_port_find(ports, peer_name);
2060 if (!peer || !peer->nbr) {
2061 continue;
2062 }
2063
2064 /* Make sure that 'ip' is in 'peer''s network. */
2065 if ((ip ^ peer->network) & peer->mask) {
2066 continue;
2067 }
2068
2069 char *match = xasprintf(
2070 "outport == %s && reg0 == "IP_FMT,
2071 peer->json_key, IP_ARGS(ip));
2072 char *actions = xasprintf("eth.dst = "ETH_ADDR_FMT"; "
2073 "next;",
2074 ETH_ADDR_ARGS(laddrs.ea));
2075 ovn_lflow_add(lflows, peer->od,
2076 S_ROUTER_IN_ARP_RESOLVE,
2077 100, match, actions);
2078 free(actions);
2079 free(match);
2080 break;
2081 }
2082 }
2083
2084 free(laddrs.ipv4_addrs);
2085 }
2086 }
2087 }
2088 HMAP_FOR_EACH (od, key_node, datapaths) {
2089 if (!od->nbr) {
2090 continue;
2091 }
2092
2093 ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_RESOLVE, 0, "1",
2094 "get_arp(outport, reg0); next;");
2095 }
2096
2097 /* Local router ingress table 4: ARP request.
2098 *
2099 * In the common case where the Ethernet destination has been resolved,
2100 * this table outputs the packet (priority 100). Otherwise, it composes
2101 * and sends an ARP request (priority 0). */
2102 HMAP_FOR_EACH (od, key_node, datapaths) {
2103 if (!od->nbr) {
2104 continue;
2105 }
2106
2107 ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 100,
2108 "eth.dst == 00:00:00:00:00:00",
2109 "arp { "
2110 "eth.dst = ff:ff:ff:ff:ff:ff; "
2111 "arp.spa = reg1; "
2112 "arp.op = 1; " /* ARP request */
2113 "output; "
2114 "};");
2115 ovn_lflow_add(lflows, od, S_ROUTER_IN_ARP_REQUEST, 0, "1", "output;");
2116 }
2117
2118 /* Logical router egress table 0: Delivery (priority 100).
2119 *
2120 * Priority 100 rules deliver packets to enabled logical ports. */
2121 HMAP_FOR_EACH (op, key_node, ports) {
2122 if (!op->nbr) {
2123 continue;
2124 }
2125
2126 if (!lrport_is_enabled(op->nbr)) {
2127 /* Drop packets to disabled logical ports (since logical flow
2128 * tables are default-drop). */
2129 continue;
2130 }
2131
2132 char *match = xasprintf("outport == %s", op->json_key);
2133 ovn_lflow_add(lflows, op->od, S_ROUTER_OUT_DELIVERY, 100,
2134 match, "output;");
2135 free(match);
2136 }
2137 }
2138
/* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,
 * constructing their contents based on the OVN_NB database.
 *
 * Both tables use the same synchronization strategy: build the full desired
 * set into a local hmap, then walk the existing southbound rows, deleting
 * rows that have no counterpart in the desired set and removing matched
 * entries from the desired set; finally, insert whatever remains in the
 * desired set as new southbound rows. */
static void
build_lflows(struct northd_context *ctx, struct hmap *datapaths,
             struct hmap *ports)
{
    struct hmap lflows = HMAP_INITIALIZER(&lflows);
    struct hmap mcgroups = HMAP_INITIALIZER(&mcgroups);

    /* Compute the desired logical flows and multicast groups. */
    build_lswitch_flows(datapaths, ports, &lflows, &mcgroups);
    build_lrouter_flows(datapaths, ports, &lflows);

    /* Push changes to the Logical_Flow table to database. */
    const struct sbrec_logical_flow *sbflow, *next_sbflow;
    SBREC_LOGICAL_FLOW_FOR_EACH_SAFE (sbflow, next_sbflow, ctx->ovnsb_idl) {
        struct ovn_datapath *od
            = ovn_datapath_from_sbrec(datapaths, sbflow->logical_datapath);
        if (!od) {
            /* The row's datapath no longer exists: prune the row. */
            sbrec_logical_flow_delete(sbflow);
            continue;
        }

        enum ovn_datapath_type dp_type = od->nbs ? DP_SWITCH : DP_ROUTER;
        enum ovn_pipeline pipeline
            = !strcmp(sbflow->pipeline, "ingress") ? P_IN : P_OUT;
        struct ovn_lflow *lflow = ovn_lflow_find(
            &lflows, od, ovn_stage_build(dp_type, pipeline, sbflow->table_id),
            sbflow->priority, sbflow->match, sbflow->actions);
        if (lflow) {
            /* The existing row already matches a desired flow: keep the row
             * and drop the desired flow so it is not re-inserted below. */
            ovn_lflow_destroy(&lflows, lflow);
        } else {
            /* Stale row with no desired counterpart: prune it. */
            sbrec_logical_flow_delete(sbflow);
        }
    }
    /* Insert the desired flows that had no existing southbound row. */
    struct ovn_lflow *lflow, *next_lflow;
    HMAP_FOR_EACH_SAFE (lflow, next_lflow, hmap_node, &lflows) {
        enum ovn_pipeline pipeline = ovn_stage_get_pipeline(lflow->stage);
        uint8_t table = ovn_stage_get_table(lflow->stage);

        sbflow = sbrec_logical_flow_insert(ctx->ovnsb_txn);
        sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
        sbrec_logical_flow_set_pipeline(
            sbflow, pipeline == P_IN ? "ingress" : "egress");
        sbrec_logical_flow_set_table_id(sbflow, table);
        sbrec_logical_flow_set_priority(sbflow, lflow->priority);
        sbrec_logical_flow_set_match(sbflow, lflow->match);
        sbrec_logical_flow_set_actions(sbflow, lflow->actions);

        /* Record the human-readable stage name for debugging. */
        const struct smap ids = SMAP_CONST1(&ids, "stage-name",
                                            ovn_stage_to_str(lflow->stage));
        sbrec_logical_flow_set_external_ids(sbflow, &ids);

        ovn_lflow_destroy(&lflows, lflow);
    }
    hmap_destroy(&lflows);

    /* Push changes to the Multicast_Group table to database. */
    const struct sbrec_multicast_group *sbmc, *next_sbmc;
    SBREC_MULTICAST_GROUP_FOR_EACH_SAFE (sbmc, next_sbmc, ctx->ovnsb_idl) {
        struct ovn_datapath *od = ovn_datapath_from_sbrec(datapaths,
                                                          sbmc->datapath);
        if (!od) {
            /* The row's datapath no longer exists: prune the row. */
            sbrec_multicast_group_delete(sbmc);
            continue;
        }

        struct multicast_group group = { .name = sbmc->name,
                                         .key = sbmc->tunnel_key };
        struct ovn_multicast *mc = ovn_multicast_find(&mcgroups, od, &group);
        if (mc) {
            /* Desired group matches this row: update the row in place and
             * drop the group from the desired set. */
            ovn_multicast_update_sbrec(mc, sbmc);
            ovn_multicast_destroy(&mcgroups, mc);
        } else {
            sbrec_multicast_group_delete(sbmc);
        }
    }
    /* Insert the desired multicast groups that had no existing row. */
    struct ovn_multicast *mc, *next_mc;
    HMAP_FOR_EACH_SAFE (mc, next_mc, hmap_node, &mcgroups) {
        sbmc = sbrec_multicast_group_insert(ctx->ovnsb_txn);
        sbrec_multicast_group_set_datapath(sbmc, mc->datapath->sb);
        sbrec_multicast_group_set_name(sbmc, mc->group->name);
        sbrec_multicast_group_set_tunnel_key(sbmc, mc->group->key);
        ovn_multicast_update_sbrec(mc, sbmc);
        ovn_multicast_destroy(&mcgroups, mc);
    }
    hmap_destroy(&mcgroups);
}
2226 \f
2227 static void
2228 ovnnb_db_run(struct northd_context *ctx)
2229 {
2230 if (!ctx->ovnsb_txn) {
2231 return;
2232 }
2233 struct hmap datapaths, ports;
2234 build_datapaths(ctx, &datapaths);
2235 build_ports(ctx, &datapaths, &ports);
2236 build_lflows(ctx, &datapaths, &ports);
2237
2238 struct ovn_datapath *dp, *next_dp;
2239 HMAP_FOR_EACH_SAFE (dp, next_dp, key_node, &datapaths) {
2240 ovn_datapath_destroy(&datapaths, dp);
2241 }
2242 hmap_destroy(&datapaths);
2243
2244 struct ovn_port *port, *next_port;
2245 HMAP_FOR_EACH_SAFE (port, next_port, key_node, &ports) {
2246 ovn_port_destroy(&ports, port);
2247 }
2248 hmap_destroy(&ports);
2249 }
2250
2251 /*
2252 * The only change we get notified about is if the 'chassis' column of the
2253 * 'Port_Binding' table changes. When this column is not empty, it means we
2254 * need to set the corresponding logical port as 'up' in the northbound DB.
2255 */
2256 static void
2257 ovnsb_db_run(struct northd_context *ctx)
2258 {
2259 if (!ctx->ovnnb_txn) {
2260 return;
2261 }
2262 struct hmap lports_hmap;
2263 const struct sbrec_port_binding *sb;
2264 const struct nbrec_logical_port *nb;
2265
2266 struct lport_hash_node {
2267 struct hmap_node node;
2268 const struct nbrec_logical_port *nb;
2269 } *hash_node, *hash_node_next;
2270
2271 hmap_init(&lports_hmap);
2272
2273 NBREC_LOGICAL_PORT_FOR_EACH(nb, ctx->ovnnb_idl) {
2274 hash_node = xzalloc(sizeof *hash_node);
2275 hash_node->nb = nb;
2276 hmap_insert(&lports_hmap, &hash_node->node, hash_string(nb->name, 0));
2277 }
2278
2279 SBREC_PORT_BINDING_FOR_EACH(sb, ctx->ovnsb_idl) {
2280 nb = NULL;
2281 HMAP_FOR_EACH_WITH_HASH(hash_node, node,
2282 hash_string(sb->logical_port, 0),
2283 &lports_hmap) {
2284 if (!strcmp(sb->logical_port, hash_node->nb->name)) {
2285 nb = hash_node->nb;
2286 break;
2287 }
2288 }
2289
2290 if (!nb) {
2291 /* The logical port doesn't exist for this port binding. This can
2292 * happen under normal circumstances when ovn-northd hasn't gotten
2293 * around to pruning the Port_Binding yet. */
2294 continue;
2295 }
2296
2297 if (sb->chassis && (!nb->up || !*nb->up)) {
2298 bool up = true;
2299 nbrec_logical_port_set_up(nb, &up, 1);
2300 } else if (!sb->chassis && (!nb->up || *nb->up)) {
2301 bool up = false;
2302 nbrec_logical_port_set_up(nb, &up, 1);
2303 }
2304 }
2305
2306 HMAP_FOR_EACH_SAFE(hash_node, hash_node_next, node, &lports_hmap) {
2307 hmap_remove(&lports_hmap, &hash_node->node);
2308 free(hash_node);
2309 }
2310 hmap_destroy(&lports_hmap);
2311 }
2312 \f
2313
2314 static char *default_nb_db_;
2315
2316 static const char *
2317 default_nb_db(void)
2318 {
2319 if (!default_nb_db_) {
2320 default_nb_db_ = xasprintf("unix:%s/ovnnb_db.sock", ovs_rundir());
2321 }
2322 return default_nb_db_;
2323 }
2324
2325 static char *default_sb_db_;
2326
2327 static const char *
2328 default_sb_db(void)
2329 {
2330 if (!default_sb_db_) {
2331 default_sb_db_ = xasprintf("unix:%s/ovnsb_db.sock", ovs_rundir());
2332 }
2333 return default_sb_db_;
2334 }
2335
2336 static void
2337 parse_options(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
2338 {
2339 enum {
2340 DAEMON_OPTION_ENUMS,
2341 VLOG_OPTION_ENUMS,
2342 };
2343 static const struct option long_options[] = {
2344 {"ovnsb-db", required_argument, NULL, 'd'},
2345 {"ovnnb-db", required_argument, NULL, 'D'},
2346 {"help", no_argument, NULL, 'h'},
2347 {"options", no_argument, NULL, 'o'},
2348 {"version", no_argument, NULL, 'V'},
2349 DAEMON_LONG_OPTIONS,
2350 VLOG_LONG_OPTIONS,
2351 STREAM_SSL_LONG_OPTIONS,
2352 {NULL, 0, NULL, 0},
2353 };
2354 char *short_options = ovs_cmdl_long_options_to_short_options(long_options);
2355
2356 for (;;) {
2357 int c;
2358
2359 c = getopt_long(argc, argv, short_options, long_options, NULL);
2360 if (c == -1) {
2361 break;
2362 }
2363
2364 switch (c) {
2365 DAEMON_OPTION_HANDLERS;
2366 VLOG_OPTION_HANDLERS;
2367 STREAM_SSL_OPTION_HANDLERS;
2368
2369 case 'd':
2370 ovnsb_db = optarg;
2371 break;
2372
2373 case 'D':
2374 ovnnb_db = optarg;
2375 break;
2376
2377 case 'h':
2378 usage();
2379 exit(EXIT_SUCCESS);
2380
2381 case 'o':
2382 ovs_cmdl_print_options(long_options);
2383 exit(EXIT_SUCCESS);
2384
2385 case 'V':
2386 ovs_print_version(0, 0);
2387 exit(EXIT_SUCCESS);
2388
2389 default:
2390 break;
2391 }
2392 }
2393
2394 if (!ovnsb_db) {
2395 ovnsb_db = default_sb_db();
2396 }
2397
2398 if (!ovnnb_db) {
2399 ovnnb_db = default_nb_db();
2400 }
2401
2402 free(short_options);
2403 }
2404
/* Registers 'column' for replication in 'idl' and suppresses change alerts
 * for it (via ovsdb_idl_omit_alert()), so the main loop is not woken up by
 * changes to columns registered through this helper.  Used below for the
 * southbound columns that ovn-northd only writes. */
static void
add_column_noalert(struct ovsdb_idl *idl,
                   const struct ovsdb_idl_column *column)
{
    ovsdb_idl_add_column(idl, column);
    ovsdb_idl_omit_alert(idl, column);
}
2412
int
main(int argc, char *argv[])
{
    int res = EXIT_SUCCESS;  /* NOTE(review): never changed before exit(res). */
    struct unixctl_server *unixctl;
    int retval;
    bool exiting;

    fatal_ignore_sigpipe();
    set_program_name(argv[0]);
    service_start(&argc, &argv);
    parse_options(argc, argv);

    daemonize_start(false);

    /* Create the unixctl control server and register the "exit" command,
     * which flips 'exiting' and terminates the main loop below. */
    retval = unixctl_server_create(NULL, &unixctl);
    if (retval) {
        exit(EXIT_FAILURE);
    }
    unixctl_command_register("exit", "", 0, 0, ovn_northd_exit, &exiting);

    daemonize_complete();

    nbrec_init();
    sbrec_init();

    /* We want to detect all changes to the ovn-nb db. */
    struct ovsdb_idl_loop ovnnb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
        ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, true, true));

    /* For the ovn-sb db, register only the tables and columns used below;
     * write-only columns go through add_column_noalert() so their own
     * updates do not wake us up. */
    struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
        ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true));

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_logical_flow);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_logical_flow_col_logical_datapath);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_pipeline);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_table_id);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_priority);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_match);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_actions);

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_multicast_group);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_multicast_group_col_datapath);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_multicast_group_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_multicast_group_col_name);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_multicast_group_col_ports);

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_datapath_binding_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_datapath_binding_col_external_ids);

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_port_binding);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_datapath);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_logical_port);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_parent_port);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_tag);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_type);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_options);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_mac);
    /* 'chassis' is read by ovnsb_db_run(), so we do want alerts for it. */
    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_port_binding_col_chassis);

    /* Main loop. */
    exiting = false;
    while (!exiting) {
        struct northd_context ctx = {
            .ovnnb_idl = ovnnb_idl_loop.idl,
            .ovnnb_txn = ovsdb_idl_loop_run(&ovnnb_idl_loop),
            .ovnsb_idl = ovnsb_idl_loop.idl,
            .ovnsb_txn = ovsdb_idl_loop_run(&ovnsb_idl_loop),
        };

        /* NB -> SB recomputation, then SB -> NB 'up' status propagation. */
        ovnnb_db_run(&ctx);
        ovnsb_db_run(&ctx);

        unixctl_server_run(unixctl);
        unixctl_server_wait(unixctl);
        if (exiting) {
            poll_immediate_wake();
        }
        ovsdb_idl_loop_commit_and_wait(&ovnnb_idl_loop);
        ovsdb_idl_loop_commit_and_wait(&ovnsb_idl_loop);

        poll_block();
        if (should_service_stop()) {
            exiting = true;
        }
    }

    unixctl_server_destroy(unixctl);
    ovsdb_idl_loop_destroy(&ovnnb_idl_loop);
    ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
    service_stop();

    free(default_nb_db_);
    free(default_sb_db_);
    exit(res);
}
2519
2520 static void
2521 ovn_northd_exit(struct unixctl_conn *conn, int argc OVS_UNUSED,
2522 const char *argv[] OVS_UNUSED, void *exiting_)
2523 {
2524 bool *exiting = exiting_;
2525 *exiting = true;
2526
2527 unixctl_command_reply(conn, NULL);
2528 }