2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at:
6 * http://www.apache.org/licenses/LICENSE-2.0
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
21 #include "command-line.h"
24 #include "openvswitch/dynamic-string.h"
25 #include "fatal-signal.h"
29 #include "ovn/lib/lex.h"
30 #include "ovn/lib/ovn-nb-idl.h"
31 #include "ovn/lib/ovn-sb-idl.h"
32 #include "ovn/lib/ovn-util.h"
34 #include "poll-loop.h"
37 #include "stream-ssl.h"
41 #include "openvswitch/vlog.h"
43 VLOG_DEFINE_THIS_MODULE(ovn_northd
);
45 static unixctl_cb_func ovn_northd_exit
;
47 struct northd_context
{
48 struct ovsdb_idl
*ovnnb_idl
;
49 struct ovsdb_idl
*ovnsb_idl
;
50 struct ovsdb_idl_txn
*ovnnb_txn
;
51 struct ovsdb_idl_txn
*ovnsb_txn
;
54 static const char *ovnnb_db
;
55 static const char *ovnsb_db
;
57 static const char *default_nb_db(void);
58 static const char *default_sb_db(void);
60 /* Pipeline stages. */
62 /* The two pipelines in an OVN logical flow table. */
64 P_IN
, /* Ingress pipeline. */
65 P_OUT
/* Egress pipeline. */
68 /* The two purposes for which ovn-northd uses OVN logical datapaths. */
69 enum ovn_datapath_type
{
70 DP_SWITCH
, /* OVN logical switch. */
71 DP_ROUTER
/* OVN logical router. */
74 /* Returns an "enum ovn_stage" built from the arguments.
76 * (It's better to use ovn_stage_build() for type-safety reasons, but inline
77 * functions can't be used in enums or switch cases.) */
78 #define OVN_STAGE_BUILD(DP_TYPE, PIPELINE, TABLE) \
79 (((DP_TYPE) << 9) | ((PIPELINE) << 8) | (TABLE))
81 /* A stage within an OVN logical switch or router.
83 * An "enum ovn_stage" indicates whether the stage is part of a logical switch
84 * or router, whether the stage is part of the ingress or egress pipeline, and
85 * the table within that pipeline. The first three components are combined to
86 * form the stage's full name, e.g. S_SWITCH_IN_PORT_SEC_L2,
87 * S_ROUTER_OUT_DELIVERY. */
89 #define PIPELINE_STAGES \
90 /* Logical switch ingress stages. */ \
91 PIPELINE_STAGE(SWITCH, IN, PORT_SEC_L2, 0, "ls_in_port_sec_l2") \
92 PIPELINE_STAGE(SWITCH, IN, PORT_SEC_IP, 1, "ls_in_port_sec_ip") \
93 PIPELINE_STAGE(SWITCH, IN, PORT_SEC_ND, 2, "ls_in_port_sec_nd") \
94 PIPELINE_STAGE(SWITCH, IN, PRE_ACL, 3, "ls_in_pre_acl") \
95 PIPELINE_STAGE(SWITCH, IN, ACL, 4, "ls_in_acl") \
96 PIPELINE_STAGE(SWITCH, IN, ARP_RSP, 5, "ls_in_arp_rsp") \
97 PIPELINE_STAGE(SWITCH, IN, L2_LKUP, 6, "ls_in_l2_lkup") \
99 /* Logical switch egress stages. */ \
100 PIPELINE_STAGE(SWITCH, OUT, PRE_ACL, 0, "ls_out_pre_acl") \
101 PIPELINE_STAGE(SWITCH, OUT, ACL, 1, "ls_out_acl") \
102 PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_IP, 2, "ls_out_port_sec_ip") \
103 PIPELINE_STAGE(SWITCH, OUT, PORT_SEC_L2, 3, "ls_out_port_sec_l2") \
105 /* Logical router ingress stages. */ \
106 PIPELINE_STAGE(ROUTER, IN, ADMISSION, 0, "lr_in_admission") \
107 PIPELINE_STAGE(ROUTER, IN, IP_INPUT, 1, "lr_in_ip_input") \
108 PIPELINE_STAGE(ROUTER, IN, IP_ROUTING, 2, "lr_in_ip_routing") \
109 PIPELINE_STAGE(ROUTER, IN, ARP_RESOLVE, 3, "lr_in_arp_resolve") \
110 PIPELINE_STAGE(ROUTER, IN, ARP_REQUEST, 4, "lr_in_arp_request") \
112 /* Logical router egress stages. */ \
113 PIPELINE_STAGE(ROUTER, OUT, DELIVERY, 0, "lr_out_delivery")
115 #define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
116 S_##DP_TYPE##_##PIPELINE##_##STAGE \
117 = OVN_STAGE_BUILD(DP_##DP_TYPE, P_##PIPELINE, TABLE),
119 #undef PIPELINE_STAGE
122 /* Due to various hard-coded priorities need to implement ACLs, the
123 * northbound database supports a smaller range of ACL priorities than
124 * are available to logical flows. This value is added to an ACL
125 * priority to determine the ACL's logical flow priority. */
126 #define OVN_ACL_PRI_OFFSET 1000
128 /* Returns an "enum ovn_stage" built from the arguments. */
129 static enum ovn_stage
130 ovn_stage_build(enum ovn_datapath_type dp_type
, enum ovn_pipeline pipeline
,
133 return OVN_STAGE_BUILD(dp_type
, pipeline
, table
);
136 /* Returns the pipeline to which 'stage' belongs. */
137 static enum ovn_pipeline
138 ovn_stage_get_pipeline(enum ovn_stage stage
)
140 return (stage
>> 8) & 1;
143 /* Returns the table to which 'stage' belongs. */
145 ovn_stage_get_table(enum ovn_stage stage
)
150 /* Returns a string name for 'stage'. */
152 ovn_stage_to_str(enum ovn_stage stage
)
155 #define PIPELINE_STAGE(DP_TYPE, PIPELINE, STAGE, TABLE, NAME) \
156 case S_##DP_TYPE##_##PIPELINE##_##STAGE: return NAME;
158 #undef PIPELINE_STAGE
159 default: return "<unknown>";
167 %s: OVN northbound management daemon\n\
168 usage: %s [OPTIONS]\n\
171 --ovnnb-db=DATABASE connect to ovn-nb database at DATABASE\n\
173 --ovnsb-db=DATABASE connect to ovn-sb database at DATABASE\n\
175 -h, --help display this help message\n\
176 -o, --options list available options\n\
177 -V, --version display version information\n\
178 ", program_name
, program_name
, default_nb_db(), default_sb_db());
181 stream_usage("database", true, true, false);
185 struct hmap_node hmap_node
;
190 destroy_tnlids(struct hmap
*tnlids
)
192 struct tnlid_node
*node
;
193 HMAP_FOR_EACH_POP (node
, hmap_node
, tnlids
) {
196 hmap_destroy(tnlids
);
200 add_tnlid(struct hmap
*set
, uint32_t tnlid
)
202 struct tnlid_node
*node
= xmalloc(sizeof *node
);
203 hmap_insert(set
, &node
->hmap_node
, hash_int(tnlid
, 0));
208 tnlid_in_use(const struct hmap
*set
, uint32_t tnlid
)
210 const struct tnlid_node
*node
;
211 HMAP_FOR_EACH_IN_BUCKET (node
, hmap_node
, hash_int(tnlid
, 0), set
) {
212 if (node
->tnlid
== tnlid
) {
220 allocate_tnlid(struct hmap
*set
, const char *name
, uint32_t max
,
223 for (uint32_t tnlid
= *hint
+ 1; tnlid
!= *hint
;
224 tnlid
= tnlid
+ 1 <= max
? tnlid
+ 1 : 1) {
225 if (!tnlid_in_use(set
, tnlid
)) {
226 add_tnlid(set
, tnlid
);
232 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 1);
233 VLOG_WARN_RL(&rl
, "all %s tunnel ids exhausted", name
);
237 /* The 'key' comes from nbs->header_.uuid or nbr->header_.uuid or
238 * sb->external_ids:logical-switch. */
239 struct ovn_datapath
{
240 struct hmap_node key_node
; /* Index on 'key'. */
241 struct uuid key
; /* (nbs/nbr)->header_.uuid. */
243 const struct nbrec_logical_switch
*nbs
; /* May be NULL. */
244 const struct nbrec_logical_router
*nbr
; /* May be NULL. */
245 const struct sbrec_datapath_binding
*sb
; /* May be NULL. */
247 struct ovs_list list
; /* In list of similar records. */
249 /* Logical router data (digested from nbr). */
250 const struct ovn_port
*gateway_port
;
253 /* Logical switch data. */
254 struct ovn_port
**router_ports
;
255 size_t n_router_ports
;
257 struct hmap port_tnlids
;
258 uint32_t port_key_hint
;
263 static struct ovn_datapath
*
264 ovn_datapath_create(struct hmap
*datapaths
, const struct uuid
*key
,
265 const struct nbrec_logical_switch
*nbs
,
266 const struct nbrec_logical_router
*nbr
,
267 const struct sbrec_datapath_binding
*sb
)
269 struct ovn_datapath
*od
= xzalloc(sizeof *od
);
274 hmap_init(&od
->port_tnlids
);
275 od
->port_key_hint
= 0;
276 hmap_insert(datapaths
, &od
->key_node
, uuid_hash(&od
->key
));
281 ovn_datapath_destroy(struct hmap
*datapaths
, struct ovn_datapath
*od
)
284 /* Don't remove od->list. It is used within build_datapaths() as a
285 * private list and once we've exited that function it is not safe to
287 hmap_remove(datapaths
, &od
->key_node
);
288 destroy_tnlids(&od
->port_tnlids
);
289 free(od
->router_ports
);
294 static struct ovn_datapath
*
295 ovn_datapath_find(struct hmap
*datapaths
, const struct uuid
*uuid
)
297 struct ovn_datapath
*od
;
299 HMAP_FOR_EACH_WITH_HASH (od
, key_node
, uuid_hash(uuid
), datapaths
) {
300 if (uuid_equals(uuid
, &od
->key
)) {
307 static struct ovn_datapath
*
308 ovn_datapath_from_sbrec(struct hmap
*datapaths
,
309 const struct sbrec_datapath_binding
*sb
)
313 if (!smap_get_uuid(&sb
->external_ids
, "logical-switch", &key
) &&
314 !smap_get_uuid(&sb
->external_ids
, "logical-router", &key
)) {
317 return ovn_datapath_find(datapaths
, &key
);
321 lrouter_is_enabled(const struct nbrec_logical_router
*lrouter
)
323 return !lrouter
->enabled
|| *lrouter
->enabled
;
327 join_datapaths(struct northd_context
*ctx
, struct hmap
*datapaths
,
328 struct ovs_list
*sb_only
, struct ovs_list
*nb_only
,
329 struct ovs_list
*both
)
331 hmap_init(datapaths
);
332 ovs_list_init(sb_only
);
333 ovs_list_init(nb_only
);
336 const struct sbrec_datapath_binding
*sb
, *sb_next
;
337 SBREC_DATAPATH_BINDING_FOR_EACH_SAFE (sb
, sb_next
, ctx
->ovnsb_idl
) {
339 if (!smap_get_uuid(&sb
->external_ids
, "logical-switch", &key
) &&
340 !smap_get_uuid(&sb
->external_ids
, "logical-router", &key
)) {
341 ovsdb_idl_txn_add_comment(
343 "deleting Datapath_Binding "UUID_FMT
" that lacks "
344 "external-ids:logical-switch and "
345 "external-ids:logical-router",
346 UUID_ARGS(&sb
->header_
.uuid
));
347 sbrec_datapath_binding_delete(sb
);
351 if (ovn_datapath_find(datapaths
, &key
)) {
352 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
354 &rl
, "deleting Datapath_Binding "UUID_FMT
" with "
355 "duplicate external-ids:logical-switch/router "UUID_FMT
,
356 UUID_ARGS(&sb
->header_
.uuid
), UUID_ARGS(&key
));
357 sbrec_datapath_binding_delete(sb
);
361 struct ovn_datapath
*od
= ovn_datapath_create(datapaths
, &key
,
363 ovs_list_push_back(sb_only
, &od
->list
);
366 const struct nbrec_logical_switch
*nbs
;
367 NBREC_LOGICAL_SWITCH_FOR_EACH (nbs
, ctx
->ovnnb_idl
) {
368 struct ovn_datapath
*od
= ovn_datapath_find(datapaths
,
372 ovs_list_remove(&od
->list
);
373 ovs_list_push_back(both
, &od
->list
);
375 od
= ovn_datapath_create(datapaths
, &nbs
->header_
.uuid
,
377 ovs_list_push_back(nb_only
, &od
->list
);
381 const struct nbrec_logical_router
*nbr
;
382 NBREC_LOGICAL_ROUTER_FOR_EACH (nbr
, ctx
->ovnnb_idl
) {
383 if (!lrouter_is_enabled(nbr
)) {
387 struct ovn_datapath
*od
= ovn_datapath_find(datapaths
,
392 ovs_list_remove(&od
->list
);
393 ovs_list_push_back(both
, &od
->list
);
396 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
398 "duplicate UUID "UUID_FMT
" in OVN_Northbound",
399 UUID_ARGS(&nbr
->header_
.uuid
));
403 od
= ovn_datapath_create(datapaths
, &nbr
->header_
.uuid
,
405 ovs_list_push_back(nb_only
, &od
->list
);
409 if (nbr
->default_gw
) {
411 if (!ip_parse(nbr
->default_gw
, &ip
) || !ip
) {
412 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
413 VLOG_WARN_RL(&rl
, "bad 'gateway' %s", nbr
->default_gw
);
419 /* Set the gateway port to NULL. If there is a gateway, it will get
420 * filled in as we go through the ports later. */
421 od
->gateway_port
= NULL
;
/* Allocates a fresh datapath tunnel key (24-bit space) not present in
 * 'dp_tnlids'.  Returns 0 if the space is exhausted.  The static hint
 * persists across calls so the search resumes where it left off. */
static uint32_t
ovn_datapath_allocate_key(struct hmap *dp_tnlids)
{
    static uint32_t hint;
    return allocate_tnlid(dp_tnlids, "datapath", (1u << 24) - 1, &hint);
}
432 /* Updates the southbound Datapath_Binding table so that it contains the
433 * logical switches and routers specified by the northbound database.
435 * Initializes 'datapaths' to contain a "struct ovn_datapath" for every logical
436 * switch and router. */
438 build_datapaths(struct northd_context
*ctx
, struct hmap
*datapaths
)
440 struct ovs_list sb_only
, nb_only
, both
;
442 join_datapaths(ctx
, datapaths
, &sb_only
, &nb_only
, &both
);
444 if (!ovs_list_is_empty(&nb_only
)) {
445 /* First index the in-use datapath tunnel IDs. */
446 struct hmap dp_tnlids
= HMAP_INITIALIZER(&dp_tnlids
);
447 struct ovn_datapath
*od
;
448 LIST_FOR_EACH (od
, list
, &both
) {
449 add_tnlid(&dp_tnlids
, od
->sb
->tunnel_key
);
452 /* Add southbound record for each unmatched northbound record. */
453 LIST_FOR_EACH (od
, list
, &nb_only
) {
454 uint16_t tunnel_key
= ovn_datapath_allocate_key(&dp_tnlids
);
459 od
->sb
= sbrec_datapath_binding_insert(ctx
->ovnsb_txn
);
461 char uuid_s
[UUID_LEN
+ 1];
462 sprintf(uuid_s
, UUID_FMT
, UUID_ARGS(&od
->key
));
463 const char *key
= od
->nbs
? "logical-switch" : "logical-router";
464 const struct smap id
= SMAP_CONST1(&id
, key
, uuid_s
);
465 sbrec_datapath_binding_set_external_ids(od
->sb
, &id
);
467 sbrec_datapath_binding_set_tunnel_key(od
->sb
, tunnel_key
);
469 destroy_tnlids(&dp_tnlids
);
472 /* Delete southbound records without northbound matches. */
473 struct ovn_datapath
*od
, *next
;
474 LIST_FOR_EACH_SAFE (od
, next
, list
, &sb_only
) {
475 ovs_list_remove(&od
->list
);
476 sbrec_datapath_binding_delete(od
->sb
);
477 ovn_datapath_destroy(datapaths
, od
);
482 struct hmap_node key_node
; /* Index on 'key'. */
483 char *key
; /* nbs->name, nbr->name, sb->logical_port. */
484 char *json_key
; /* 'key', quoted for use in JSON. */
486 const struct nbrec_logical_port
*nbs
; /* May be NULL. */
487 const struct nbrec_logical_router_port
*nbr
; /* May be NULL. */
488 const struct sbrec_port_binding
*sb
; /* May be NULL. */
490 /* Logical router port data. */
491 ovs_be32 ip
, mask
; /* 192.168.10.123/24. */
492 ovs_be32 network
; /* 192.168.10.0. */
493 ovs_be32 bcast
; /* 192.168.10.255. */
495 struct ovn_port
*peer
;
497 struct ovn_datapath
*od
;
499 struct ovs_list list
; /* In list of similar records. */
502 static struct ovn_port
*
503 ovn_port_create(struct hmap
*ports
, const char *key
,
504 const struct nbrec_logical_port
*nbs
,
505 const struct nbrec_logical_router_port
*nbr
,
506 const struct sbrec_port_binding
*sb
)
508 struct ovn_port
*op
= xzalloc(sizeof *op
);
510 struct ds json_key
= DS_EMPTY_INITIALIZER
;
511 json_string_escape(key
, &json_key
);
512 op
->json_key
= ds_steal_cstr(&json_key
);
514 op
->key
= xstrdup(key
);
518 hmap_insert(ports
, &op
->key_node
, hash_string(op
->key
, 0));
523 ovn_port_destroy(struct hmap
*ports
, struct ovn_port
*port
)
526 /* Don't remove port->list. It is used within build_ports() as a
527 * private list and once we've exited that function it is not safe to
529 hmap_remove(ports
, &port
->key_node
);
530 free(port
->json_key
);
536 static struct ovn_port
*
537 ovn_port_find(struct hmap
*ports
, const char *name
)
541 HMAP_FOR_EACH_WITH_HASH (op
, key_node
, hash_string(name
, 0), ports
) {
542 if (!strcmp(op
->key
, name
)) {
550 ovn_port_allocate_key(struct ovn_datapath
*od
)
552 return allocate_tnlid(&od
->port_tnlids
, "port",
553 (1u << 15) - 1, &od
->port_key_hint
);
557 join_logical_ports(struct northd_context
*ctx
,
558 struct hmap
*datapaths
, struct hmap
*ports
,
559 struct ovs_list
*sb_only
, struct ovs_list
*nb_only
,
560 struct ovs_list
*both
)
563 ovs_list_init(sb_only
);
564 ovs_list_init(nb_only
);
567 const struct sbrec_port_binding
*sb
;
568 SBREC_PORT_BINDING_FOR_EACH (sb
, ctx
->ovnsb_idl
) {
569 struct ovn_port
*op
= ovn_port_create(ports
, sb
->logical_port
,
571 ovs_list_push_back(sb_only
, &op
->list
);
574 struct ovn_datapath
*od
;
575 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
577 for (size_t i
= 0; i
< od
->nbs
->n_ports
; i
++) {
578 const struct nbrec_logical_port
*nbs
= od
->nbs
->ports
[i
];
579 struct ovn_port
*op
= ovn_port_find(ports
, nbs
->name
);
581 if (op
->nbs
|| op
->nbr
) {
582 static struct vlog_rate_limit rl
583 = VLOG_RATE_LIMIT_INIT(5, 1);
584 VLOG_WARN_RL(&rl
, "duplicate logical port %s",
589 ovs_list_remove(&op
->list
);
590 ovs_list_push_back(both
, &op
->list
);
592 op
= ovn_port_create(ports
, nbs
->name
, nbs
, NULL
, NULL
);
593 ovs_list_push_back(nb_only
, &op
->list
);
599 for (size_t i
= 0; i
< od
->nbr
->n_ports
; i
++) {
600 const struct nbrec_logical_router_port
*nbr
604 if (!eth_addr_from_string(nbr
->mac
, &mac
)) {
605 static struct vlog_rate_limit rl
606 = VLOG_RATE_LIMIT_INIT(5, 1);
607 VLOG_WARN_RL(&rl
, "bad 'mac' %s", nbr
->mac
);
612 char *error
= ip_parse_masked(nbr
->network
, &ip
, &mask
);
613 if (error
|| mask
== OVS_BE32_MAX
|| !ip_is_cidr(mask
)) {
614 static struct vlog_rate_limit rl
615 = VLOG_RATE_LIMIT_INIT(5, 1);
616 VLOG_WARN_RL(&rl
, "bad 'network' %s", nbr
->network
);
621 struct ovn_port
*op
= ovn_port_find(ports
, nbr
->name
);
623 if (op
->nbs
|| op
->nbr
) {
624 static struct vlog_rate_limit rl
625 = VLOG_RATE_LIMIT_INIT(5, 1);
626 VLOG_WARN_RL(&rl
, "duplicate logical router port %s",
631 ovs_list_remove(&op
->list
);
632 ovs_list_push_back(both
, &op
->list
);
634 op
= ovn_port_create(ports
, nbr
->name
, NULL
, nbr
, NULL
);
635 ovs_list_push_back(nb_only
, &op
->list
);
640 op
->network
= ip
& mask
;
641 op
->bcast
= ip
| ~mask
;
646 /* If 'od' has a gateway and 'op' routes to it... */
647 if (od
->gateway
&& !((op
->network
^ od
->gateway
) & op
->mask
)) {
648 /* ...and if 'op' is a longer match than the current
650 const struct ovn_port
*gw
= od
->gateway_port
;
651 int len
= gw
? ip_count_cidr_bits(gw
->mask
) : 0;
652 if (ip_count_cidr_bits(op
->mask
) > len
) {
653 /* ...then it's the default gateway port. */
654 od
->gateway_port
= op
;
661 /* Connect logical router ports, and logical switch ports of type "router",
664 HMAP_FOR_EACH (op
, key_node
, ports
) {
665 if (op
->nbs
&& !strcmp(op
->nbs
->type
, "router")) {
666 const char *peer_name
= smap_get(&op
->nbs
->options
, "router-port");
671 struct ovn_port
*peer
= ovn_port_find(ports
, peer_name
);
672 if (!peer
|| !peer
->nbr
) {
678 op
->od
->router_ports
= xrealloc(
679 op
->od
->router_ports
,
680 sizeof *op
->od
->router_ports
* (op
->od
->n_router_ports
+ 1));
681 op
->od
->router_ports
[op
->od
->n_router_ports
++] = op
;
682 } else if (op
->nbr
&& op
->nbr
->peer
) {
683 op
->peer
= ovn_port_find(ports
, op
->nbr
->peer
);
689 ovn_port_update_sbrec(const struct ovn_port
*op
)
691 sbrec_port_binding_set_datapath(op
->sb
, op
->od
->sb
);
693 sbrec_port_binding_set_type(op
->sb
, "patch");
695 const char *peer
= op
->peer
? op
->peer
->key
: "<error>";
696 const struct smap ids
= SMAP_CONST1(&ids
, "peer", peer
);
697 sbrec_port_binding_set_options(op
->sb
, &ids
);
699 sbrec_port_binding_set_parent_port(op
->sb
, NULL
);
700 sbrec_port_binding_set_tag(op
->sb
, NULL
, 0);
701 sbrec_port_binding_set_mac(op
->sb
, NULL
, 0);
703 if (strcmp(op
->nbs
->type
, "router")) {
704 sbrec_port_binding_set_type(op
->sb
, op
->nbs
->type
);
705 sbrec_port_binding_set_options(op
->sb
, &op
->nbs
->options
);
707 sbrec_port_binding_set_type(op
->sb
, "patch");
709 const char *router_port
= smap_get(&op
->nbs
->options
,
712 router_port
= "<error>";
714 const struct smap ids
= SMAP_CONST1(&ids
, "peer", router_port
);
715 sbrec_port_binding_set_options(op
->sb
, &ids
);
717 sbrec_port_binding_set_parent_port(op
->sb
, op
->nbs
->parent_name
);
718 sbrec_port_binding_set_tag(op
->sb
, op
->nbs
->tag
, op
->nbs
->n_tag
);
719 sbrec_port_binding_set_mac(op
->sb
, (const char **) op
->nbs
->addresses
,
720 op
->nbs
->n_addresses
);
724 /* Updates the southbound Port_Binding table so that it contains the logical
725 * ports specified by the northbound database.
727 * Initializes 'ports' to contain a "struct ovn_port" for every logical port,
728 * using the "struct ovn_datapath"s in 'datapaths' to look up logical
731 build_ports(struct northd_context
*ctx
, struct hmap
*datapaths
,
734 struct ovs_list sb_only
, nb_only
, both
;
736 join_logical_ports(ctx
, datapaths
, ports
, &sb_only
, &nb_only
, &both
);
738 /* For logical ports that are in both databases, update the southbound
739 * record based on northbound data. Also index the in-use tunnel_keys. */
740 struct ovn_port
*op
, *next
;
741 LIST_FOR_EACH_SAFE (op
, next
, list
, &both
) {
742 ovn_port_update_sbrec(op
);
744 add_tnlid(&op
->od
->port_tnlids
, op
->sb
->tunnel_key
);
745 if (op
->sb
->tunnel_key
> op
->od
->port_key_hint
) {
746 op
->od
->port_key_hint
= op
->sb
->tunnel_key
;
750 /* Add southbound record for each unmatched northbound record. */
751 LIST_FOR_EACH_SAFE (op
, next
, list
, &nb_only
) {
752 uint16_t tunnel_key
= ovn_port_allocate_key(op
->od
);
757 op
->sb
= sbrec_port_binding_insert(ctx
->ovnsb_txn
);
758 ovn_port_update_sbrec(op
);
760 sbrec_port_binding_set_logical_port(op
->sb
, op
->key
);
761 sbrec_port_binding_set_tunnel_key(op
->sb
, tunnel_key
);
764 /* Delete southbound records without northbound matches. */
765 LIST_FOR_EACH_SAFE(op
, next
, list
, &sb_only
) {
766 ovs_list_remove(&op
->list
);
767 sbrec_port_binding_delete(op
->sb
);
768 ovn_port_destroy(ports
, op
);
/* Multicast group keys occupy the upper half of the 16-bit tunnel key
 * space, disjoint from unicast port keys. */
#define OVN_MIN_MULTICAST 32768
#define OVN_MAX_MULTICAST 65535

struct multicast_group {
    const char *name;           /* NOTE(review): field restored from uses in
                                 * multicast_group_equal and the initializers
                                 * below — confirm against upstream. */
    uint16_t key;               /* OVN_MIN_MULTICAST...OVN_MAX_MULTICAST. */
};

/* Flood group: all ports on a datapath. */
#define MC_FLOOD "_MC_flood"
static const struct multicast_group mc_flood = { MC_FLOOD, 65535 };

/* Unknown-MAC group: ports with "unknown" addresses. */
#define MC_UNKNOWN "_MC_unknown"
static const struct multicast_group mc_unknown = { MC_UNKNOWN, 65534 };
787 multicast_group_equal(const struct multicast_group
*a
,
788 const struct multicast_group
*b
)
790 return !strcmp(a
->name
, b
->name
) && a
->key
== b
->key
;
793 /* Multicast group entry. */
794 struct ovn_multicast
{
795 struct hmap_node hmap_node
; /* Index on 'datapath' and 'key'. */
796 struct ovn_datapath
*datapath
;
797 const struct multicast_group
*group
;
799 struct ovn_port
**ports
;
800 size_t n_ports
, allocated_ports
;
804 ovn_multicast_hash(const struct ovn_datapath
*datapath
,
805 const struct multicast_group
*group
)
807 return hash_pointer(datapath
, group
->key
);
810 static struct ovn_multicast
*
811 ovn_multicast_find(struct hmap
*mcgroups
, struct ovn_datapath
*datapath
,
812 const struct multicast_group
*group
)
814 struct ovn_multicast
*mc
;
816 HMAP_FOR_EACH_WITH_HASH (mc
, hmap_node
,
817 ovn_multicast_hash(datapath
, group
), mcgroups
) {
818 if (mc
->datapath
== datapath
819 && multicast_group_equal(mc
->group
, group
)) {
827 ovn_multicast_add(struct hmap
*mcgroups
, const struct multicast_group
*group
,
828 struct ovn_port
*port
)
830 struct ovn_datapath
*od
= port
->od
;
831 struct ovn_multicast
*mc
= ovn_multicast_find(mcgroups
, od
, group
);
833 mc
= xmalloc(sizeof *mc
);
834 hmap_insert(mcgroups
, &mc
->hmap_node
, ovn_multicast_hash(od
, group
));
838 mc
->allocated_ports
= 4;
839 mc
->ports
= xmalloc(mc
->allocated_ports
* sizeof *mc
->ports
);
841 if (mc
->n_ports
>= mc
->allocated_ports
) {
842 mc
->ports
= x2nrealloc(mc
->ports
, &mc
->allocated_ports
,
845 mc
->ports
[mc
->n_ports
++] = port
;
849 ovn_multicast_destroy(struct hmap
*mcgroups
, struct ovn_multicast
*mc
)
852 hmap_remove(mcgroups
, &mc
->hmap_node
);
859 ovn_multicast_update_sbrec(const struct ovn_multicast
*mc
,
860 const struct sbrec_multicast_group
*sb
)
862 struct sbrec_port_binding
**ports
= xmalloc(mc
->n_ports
* sizeof *ports
);
863 for (size_t i
= 0; i
< mc
->n_ports
; i
++) {
864 ports
[i
] = CONST_CAST(struct sbrec_port_binding
*, mc
->ports
[i
]->sb
);
866 sbrec_multicast_group_set_ports(sb
, ports
, mc
->n_ports
);
870 /* Logical flow generation.
872 * This code generates the Logical_Flow table in the southbound database, as a
873 * function of most of the northbound database.
877 struct hmap_node hmap_node
;
879 struct ovn_datapath
*od
;
880 enum ovn_stage stage
;
887 ovn_lflow_hash(const struct ovn_lflow
*lflow
)
889 size_t hash
= uuid_hash(&lflow
->od
->key
);
890 hash
= hash_2words((lflow
->stage
<< 16) | lflow
->priority
, hash
);
891 hash
= hash_string(lflow
->match
, hash
);
892 return hash_string(lflow
->actions
, hash
);
896 ovn_lflow_equal(const struct ovn_lflow
*a
, const struct ovn_lflow
*b
)
898 return (a
->od
== b
->od
899 && a
->stage
== b
->stage
900 && a
->priority
== b
->priority
901 && !strcmp(a
->match
, b
->match
)
902 && !strcmp(a
->actions
, b
->actions
));
906 ovn_lflow_init(struct ovn_lflow
*lflow
, struct ovn_datapath
*od
,
907 enum ovn_stage stage
, uint16_t priority
,
908 char *match
, char *actions
)
911 lflow
->stage
= stage
;
912 lflow
->priority
= priority
;
913 lflow
->match
= match
;
914 lflow
->actions
= actions
;
917 /* Adds a row with the specified contents to the Logical_Flow table. */
919 ovn_lflow_add(struct hmap
*lflow_map
, struct ovn_datapath
*od
,
920 enum ovn_stage stage
, uint16_t priority
,
921 const char *match
, const char *actions
)
923 struct ovn_lflow
*lflow
= xmalloc(sizeof *lflow
);
924 ovn_lflow_init(lflow
, od
, stage
, priority
,
925 xstrdup(match
), xstrdup(actions
));
926 hmap_insert(lflow_map
, &lflow
->hmap_node
, ovn_lflow_hash(lflow
));
929 static struct ovn_lflow
*
930 ovn_lflow_find(struct hmap
*lflows
, struct ovn_datapath
*od
,
931 enum ovn_stage stage
, uint16_t priority
,
932 const char *match
, const char *actions
)
934 struct ovn_lflow target
;
935 ovn_lflow_init(&target
, od
, stage
, priority
,
936 CONST_CAST(char *, match
), CONST_CAST(char *, actions
));
938 struct ovn_lflow
*lflow
;
939 HMAP_FOR_EACH_WITH_HASH (lflow
, hmap_node
, ovn_lflow_hash(&target
),
941 if (ovn_lflow_equal(lflow
, &target
)) {
949 ovn_lflow_destroy(struct hmap
*lflows
, struct ovn_lflow
*lflow
)
952 hmap_remove(lflows
, &lflow
->hmap_node
);
954 free(lflow
->actions
);
959 /* Appends port security constraints on L2 address field 'eth_addr_field'
960 * (e.g. "eth.src" or "eth.dst") to 'match'. 'port_security', with
961 * 'n_port_security' elements, is the collection of port_security constraints
962 * from an OVN_NB Logical_Port row. */
964 build_port_security_l2(const char *eth_addr_field
,
965 char **port_security
, size_t n_port_security
,
968 size_t base_len
= match
->length
;
969 ds_put_format(match
, " && %s == {", eth_addr_field
);
972 for (size_t i
= 0; i
< n_port_security
; i
++) {
975 if (eth_addr_from_string(port_security
[i
], &ea
)) {
976 ds_put_format(match
, ETH_ADDR_FMT
, ETH_ADDR_ARGS(ea
));
977 ds_put_char(match
, ' ');
981 ds_chomp(match
, ' ');
982 ds_put_cstr(match
, "}");
985 match
->length
= base_len
;
990 build_port_security_ipv6_nd_flow(
991 struct ds
*match
, struct eth_addr ea
, struct ipv6_netaddr
*ipv6_addrs
,
994 ds_put_format(match
, " && ip6 && nd && ((nd.sll == "ETH_ADDR_FMT
" || "
995 "nd.sll == "ETH_ADDR_FMT
") || ((nd.tll == "ETH_ADDR_FMT
" || "
996 "nd.tll == "ETH_ADDR_FMT
")", ETH_ADDR_ARGS(eth_addr_zero
),
997 ETH_ADDR_ARGS(ea
), ETH_ADDR_ARGS(eth_addr_zero
),
1000 ds_put_cstr(match
, "))");
1004 char ip6_str
[INET6_ADDRSTRLEN
+ 1];
1005 struct in6_addr lla
;
1006 in6_generate_lla(ea
, &lla
);
1007 memset(ip6_str
, 0, sizeof(ip6_str
));
1008 ipv6_string_mapped(ip6_str
, &lla
);
1009 ds_put_format(match
, " && (nd.target == %s", ip6_str
);
1011 for(int i
= 0; i
< n_ipv6_addrs
; i
++) {
1012 memset(ip6_str
, 0, sizeof(ip6_str
));
1013 ipv6_string_mapped(ip6_str
, &ipv6_addrs
[i
].addr
);
1014 ds_put_format(match
, " || nd.target == %s", ip6_str
);
1017 ds_put_format(match
, ")))");
1021 build_port_security_ipv6_flow(
1022 enum ovn_pipeline pipeline
, struct ds
*match
, struct eth_addr ea
,
1023 struct ipv6_netaddr
*ipv6_addrs
, int n_ipv6_addrs
)
1025 char ip6_str
[INET6_ADDRSTRLEN
+ 1];
1027 ds_put_format(match
, " && %s == {",
1028 pipeline
== P_IN
? "ip6.src" : "ip6.dst");
1030 /* Allow link-local address. */
1031 struct in6_addr lla
;
1032 in6_generate_lla(ea
, &lla
);
1033 ipv6_string_mapped(ip6_str
, &lla
);
1034 ds_put_format(match
, "%s, ", ip6_str
);
1036 /* Allow ip6.dst=ff00::/8 for multicast packets */
1037 if (pipeline
== P_OUT
) {
1038 ds_put_cstr(match
, "ff00::/8, ");
1040 for(int i
= 0; i
< n_ipv6_addrs
; i
++) {
1041 ipv6_string_mapped(ip6_str
, &ipv6_addrs
[i
].addr
);
1042 ds_put_format(match
, "%s, ", ip6_str
);
1044 /* Replace ", " by "}". */
1045 ds_chomp(match
, ' ');
1046 ds_chomp(match
, ',');
1047 ds_put_cstr(match
, "}");
1051 * Build port security constraints on ARP and IPv6 ND fields
1052 * and add logical flows to S_SWITCH_IN_PORT_SEC_ND stage.
1054 * For each port security of the logical port, following
1055 * logical flows are added
1056 * - If the port security has no IP (both IPv4 and IPv6) or
1057 * if it has IPv4 address(es)
1058 * - Priority 90 flow to allow ARP packets for known MAC addresses
1059 * in the eth.src and arp.spa fields. If the port security
1060 * has IPv4 addresses, allow known IPv4 addresses in the arp.tpa field.
1062 * - If the port security has no IP (both IPv4 and IPv6) or
1063 * if it has IPv6 address(es)
1064 * - Priority 90 flow to allow IPv6 ND packets for known MAC addresses
1065 * in the eth.src and nd.sll/nd.tll fields. If the port security
1066 * has IPv6 addresses, allow known IPv6 addresses in the nd.target field
1067 * for IPv6 Neighbor Advertisement packet.
1069 * - Priority 80 flow to drop ARP and IPv6 ND packets.
1072 build_port_security_nd(struct ovn_port
*op
, struct hmap
*lflows
)
1074 for (size_t i
= 0; i
< op
->nbs
->n_port_security
; i
++) {
1075 struct lport_addresses ps
;
1076 if (!extract_lport_addresses(op
->nbs
->port_security
[i
], &ps
, true)) {
1077 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 1);
1078 VLOG_INFO_RL(&rl
, "invalid syntax '%s' in port security. No MAC"
1079 " address found", op
->nbs
->port_security
[i
]);
1083 bool no_ip
= !(ps
.n_ipv4_addrs
|| ps
.n_ipv6_addrs
);
1084 struct ds match
= DS_EMPTY_INITIALIZER
;
1086 if (ps
.n_ipv4_addrs
|| no_ip
) {
1088 &match
, "inport == %s && eth.src == "ETH_ADDR_FMT
" && arp.sha == "
1089 ETH_ADDR_FMT
, op
->json_key
, ETH_ADDR_ARGS(ps
.ea
),
1090 ETH_ADDR_ARGS(ps
.ea
));
1092 if (ps
.n_ipv4_addrs
) {
1093 ds_put_cstr(&match
, " && (");
1094 for (size_t i
= 0; i
< ps
.n_ipv4_addrs
; i
++) {
1095 ds_put_cstr(&match
, "arp.spa == ");
1096 ovs_be32 mask
= be32_prefix_mask(ps
.ipv4_addrs
[i
].plen
);
1097 /* When the netmask is applied, if the host portion is
1098 * non-zero, the host can only use the specified
1099 * address in the arp.spa. If zero, the host is allowed
1100 * to use any address in the subnet. */
1101 if (ps
.ipv4_addrs
[i
].addr
& ~mask
) {
1102 ds_put_format(&match
, IP_FMT
,
1103 IP_ARGS(ps
.ipv4_addrs
[i
].addr
));
1105 ip_format_masked(ps
.ipv4_addrs
[i
].addr
& mask
, mask
,
1108 ds_put_cstr(&match
, " || ");
1110 ds_chomp(&match
, ' ');
1111 ds_chomp(&match
, '|');
1112 ds_chomp(&match
, '|');
1113 ds_put_cstr(&match
, ")");
1115 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_PORT_SEC_ND
, 90,
1116 ds_cstr(&match
), "next;");
1120 if (ps
.n_ipv6_addrs
|| no_ip
) {
1122 ds_put_format(&match
, "inport == %s && eth.src == "ETH_ADDR_FMT
,
1123 op
->json_key
, ETH_ADDR_ARGS(ps
.ea
));
1124 build_port_security_ipv6_nd_flow(&match
, ps
.ea
, ps
.ipv6_addrs
,
1126 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_PORT_SEC_ND
, 90,
1127 ds_cstr(&match
), "next;");
1130 free(ps
.ipv4_addrs
);
1131 free(ps
.ipv6_addrs
);
1134 char *match
= xasprintf("inport == %s && (arp || nd)", op
->json_key
);
1135 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_PORT_SEC_ND
, 80,
1141 * Build port security constraints on IPv4 and IPv6 src and dst fields
1142 * and add logical flows to S_SWITCH_(IN/OUT)_PORT_SEC_IP stage.
1144 * For each port security of the logical port, following
1145 * logical flows are added
1146 * - If the port security has IPv4 addresses,
1147 * - Priority 90 flow to allow IPv4 packets for known IPv4 addresses
1149 * - If the port security has IPv6 addresses,
1150 * - Priority 90 flow to allow IPv6 packets for known IPv6 addresses
1152 * - If the port security has IPv4 addresses or IPv6 addresses or both
1153 * - Priority 80 flow to drop all IPv4 and IPv6 traffic
1156 build_port_security_ip(enum ovn_pipeline pipeline
, struct ovn_port
*op
,
1157 struct hmap
*lflows
)
1159 char *port_direction
;
1160 enum ovn_stage stage
;
1161 if (pipeline
== P_IN
) {
1162 port_direction
= "inport";
1163 stage
= S_SWITCH_IN_PORT_SEC_IP
;
1165 port_direction
= "outport";
1166 stage
= S_SWITCH_OUT_PORT_SEC_IP
;
1169 for (size_t i
= 0; i
< op
->nbs
->n_port_security
; i
++) {
1170 struct lport_addresses ps
;
1171 if (!extract_lport_addresses(op
->nbs
->port_security
[i
], &ps
, true)) {
1175 if (!(ps
.n_ipv4_addrs
|| ps
.n_ipv6_addrs
)) {
1179 if (ps
.n_ipv4_addrs
) {
1180 struct ds match
= DS_EMPTY_INITIALIZER
;
1181 if (pipeline
== P_IN
) {
1182 /* Permit use of the unspecified address for DHCP discovery */
1183 struct ds dhcp_match
= DS_EMPTY_INITIALIZER
;
1184 ds_put_format(&dhcp_match
, "inport == %s"
1185 " && eth.src == "ETH_ADDR_FMT
1186 " && ip4.src == 0.0.0.0"
1187 " && ip4.dst == 255.255.255.255"
1188 " && udp.src == 68 && udp.dst == 67", op
->json_key
,
1189 ETH_ADDR_ARGS(ps
.ea
));
1190 ovn_lflow_add(lflows
, op
->od
, stage
, 90,
1191 ds_cstr(&dhcp_match
), "next;");
1192 ds_destroy(&dhcp_match
);
1193 ds_put_format(&match
, "inport == %s && eth.src == "ETH_ADDR_FMT
1194 " && ip4.src == {", op
->json_key
,
1195 ETH_ADDR_ARGS(ps
.ea
));
1197 ds_put_format(&match
, "outport == %s && eth.dst == "ETH_ADDR_FMT
1198 " && ip4.dst == {255.255.255.255, 224.0.0.0/4, ",
1199 op
->json_key
, ETH_ADDR_ARGS(ps
.ea
));
1202 for (int i
= 0; i
< ps
.n_ipv4_addrs
; i
++) {
1203 ovs_be32 mask
= be32_prefix_mask(ps
.ipv4_addrs
[i
].plen
);
1204 /* When the netmask is applied, if the host portion is
1205 * non-zero, the host can only use the specified
1206 * address. If zero, the host is allowed to use any
1207 * address in the subnet.
1209 if (ps
.ipv4_addrs
[i
].addr
& ~mask
) {
1210 ds_put_format(&match
, IP_FMT
,
1211 IP_ARGS(ps
.ipv4_addrs
[i
].addr
));
1212 if (pipeline
== P_OUT
&& ps
.ipv4_addrs
[i
].plen
!= 32) {
1213 /* Host is also allowed to receive packets to the
1214 * broadcast address in the specified subnet.
1216 ds_put_format(&match
, ", "IP_FMT
,
1217 IP_ARGS(ps
.ipv4_addrs
[i
].addr
| ~mask
));
1220 /* host portion is zero */
1221 ip_format_masked(ps
.ipv4_addrs
[i
].addr
& mask
, mask
,
1224 ds_put_cstr(&match
, ", ");
1227 /* Replace ", " by "}". */
1228 ds_chomp(&match
, ' ');
1229 ds_chomp(&match
, ',');
1230 ds_put_cstr(&match
, "}");
1231 ovn_lflow_add(lflows
, op
->od
, stage
, 90, ds_cstr(&match
), "next;");
1233 free(ps
.ipv4_addrs
);
1236 if (ps
.n_ipv6_addrs
) {
1237 struct ds match
= DS_EMPTY_INITIALIZER
;
1238 if (pipeline
== P_IN
) {
1239 /* Permit use of unspecified address for duplicate address
1241 struct ds dad_match
= DS_EMPTY_INITIALIZER
;
1242 ds_put_format(&dad_match
, "inport == %s"
1243 " && eth.src == "ETH_ADDR_FMT
1245 " && ip6.dst == ff02::/16"
1246 " && icmp6.type == {131, 135, 143}", op
->json_key
,
1247 ETH_ADDR_ARGS(ps
.ea
));
1248 ovn_lflow_add(lflows
, op
->od
, stage
, 90,
1249 ds_cstr(&dad_match
), "next;");
1250 ds_destroy(&dad_match
);
1252 ds_put_format(&match
, "%s == %s && %s == "ETH_ADDR_FMT
"",
1253 port_direction
, op
->json_key
,
1254 pipeline
== P_IN
? "eth.src" : "eth.dst",
1255 ETH_ADDR_ARGS(ps
.ea
));
1256 build_port_security_ipv6_flow(pipeline
, &match
, ps
.ea
,
1257 ps
.ipv6_addrs
, ps
.n_ipv6_addrs
);
1258 ovn_lflow_add(lflows
, op
->od
, stage
, 90,
1259 ds_cstr(&match
), "next;");
1261 free(ps
.ipv6_addrs
);
1264 char *match
= xasprintf(
1265 "%s == %s && %s == "ETH_ADDR_FMT
" && ip", port_direction
,
1266 op
->json_key
, pipeline
== P_IN
? "eth.src" : "eth.dst",
1267 ETH_ADDR_ARGS(ps
.ea
));
1268 ovn_lflow_add(lflows
, op
->od
, stage
, 80, match
, "drop;");
1274 lport_is_enabled(const struct nbrec_logical_port
*lport
)
1276 return !lport
->enabled
|| *lport
->enabled
;
1280 lport_is_up(const struct nbrec_logical_port
*lport
)
1282 return !lport
->up
|| *lport
->up
;
1286 has_stateful_acl(struct ovn_datapath
*od
)
1288 for (size_t i
= 0; i
< od
->nbs
->n_acls
; i
++) {
1289 struct nbrec_acl
*acl
= od
->nbs
->acls
[i
];
1290 if (!strcmp(acl
->action
, "allow-related")) {
1299 build_acls(struct ovn_datapath
*od
, struct hmap
*lflows
, struct hmap
*ports
)
1301 bool has_stateful
= has_stateful_acl(od
);
1302 struct ovn_port
*op
;
1304 /* Ingress and Egress Pre-ACL Table (Priority 0): Packets are
1305 * allowed by default. */
1306 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PRE_ACL
, 0, "1", "next;");
1307 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_PRE_ACL
, 0, "1", "next;");
1309 /* Ingress and Egress ACL Table (Priority 0): Packets are allowed by
1310 * default. A related rule at priority 1 is added below if there
1311 * are any stateful ACLs in this datapath. */
1312 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_ACL
, 0, "1", "next;");
1313 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_ACL
, 0, "1", "next;");
1315 /* If there are any stateful ACL rules in this dapapath, we must
1316 * send all IP packets through the conntrack action, which handles
1317 * defragmentation, in order to match L4 headers. */
1319 HMAP_FOR_EACH (op
, key_node
, ports
) {
1320 if (op
->od
== od
&& !strcmp(op
->nbs
->type
, "router")) {
1321 /* Can't use ct() for router ports. Consider the
1322 * following configuration: lp1(10.0.0.2) on
1323 * hostA--ls1--lr0--ls2--lp2(10.0.1.2) on hostB, For a
1324 * ping from lp1 to lp2, First, the response will go
1325 * through ct() with a zone for lp2 in the ls2 ingress
1326 * pipeline on hostB. That ct zone knows about this
1327 * connection. Next, it goes through ct() with the zone
1328 * for the router port in the egress pipeline of ls2 on
1329 * hostB. This zone does not know about the connection,
1330 * as the icmp request went through the logical router
1331 * on hostA, not hostB. This would only work with
1332 * distributed conntrack state across all chassis. */
1333 struct ds match_in
= DS_EMPTY_INITIALIZER
;
1334 struct ds match_out
= DS_EMPTY_INITIALIZER
;
1336 ds_put_format(&match_in
, "ip && inport == %s", op
->json_key
);
1337 ds_put_format(&match_out
, "ip && outport == %s", op
->json_key
);
1338 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PRE_ACL
, 110,
1339 ds_cstr(&match_in
), "next;");
1340 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_PRE_ACL
, 110,
1341 ds_cstr(&match_out
), "next;");
1343 ds_destroy(&match_in
);
1344 ds_destroy(&match_out
);
1348 /* Ingress and Egress Pre-ACL Table (Priority 100).
1350 * Regardless of whether the ACL is "from-lport" or "to-lport",
1351 * we need rules in both the ingress and egress table, because
1352 * the return traffic needs to be followed. */
1353 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PRE_ACL
, 100, "ip", "ct_next;");
1354 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_PRE_ACL
, 100, "ip", "ct_next;");
1356 /* Ingress and Egress ACL Table (Priority 1).
1358 * By default, traffic is allowed. This is partially handled by
1359 * the Priority 0 ACL flows added earlier, but we also need to
1360 * commit IP flows. This is because, while the initiater's
1361 * direction may not have any stateful rules, the server's may
1362 * and then its return traffic would not have an associated
1363 * conntrack entry and would return "+invalid". */
1364 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_ACL
, 1, "ip",
1365 "ct_commit; next;");
1366 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_ACL
, 1, "ip",
1367 "ct_commit; next;");
1369 /* Ingress and Egress ACL Table (Priority 65535).
1371 * Always drop traffic that's in an invalid state. This is
1372 * enforced at a higher priority than ACLs can be defined. */
1373 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_ACL
, UINT16_MAX
,
1375 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_ACL
, UINT16_MAX
,
1378 /* Ingress and Egress ACL Table (Priority 65535).
1380 * Always allow traffic that is established to a committed
1381 * conntrack entry. This is enforced at a higher priority than
1382 * ACLs can be defined. */
1383 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_ACL
, UINT16_MAX
,
1384 "ct.est && !ct.rel && !ct.new && !ct.inv",
1386 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_ACL
, UINT16_MAX
,
1387 "ct.est && !ct.rel && !ct.new && !ct.inv",
1390 /* Ingress and Egress ACL Table (Priority 65535).
1392 * Always allow traffic that is related to an existing conntrack
1393 * entry. This is enforced at a higher priority than ACLs can
1396 * NOTE: This does not support related data sessions (eg,
1397 * a dynamically negotiated FTP data channel), but will allow
1398 * related traffic such as an ICMP Port Unreachable through
1399 * that's generated from a non-listening UDP port. */
1400 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_ACL
, UINT16_MAX
,
1401 "!ct.est && ct.rel && !ct.new && !ct.inv",
1403 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_ACL
, UINT16_MAX
,
1404 "!ct.est && ct.rel && !ct.new && !ct.inv",
1408 /* Ingress or Egress ACL Table (Various priorities). */
1409 for (size_t i
= 0; i
< od
->nbs
->n_acls
; i
++) {
1410 struct nbrec_acl
*acl
= od
->nbs
->acls
[i
];
1411 bool ingress
= !strcmp(acl
->direction
, "from-lport") ? true :false;
1412 enum ovn_stage stage
= ingress
? S_SWITCH_IN_ACL
: S_SWITCH_OUT_ACL
;
1414 if (!strcmp(acl
->action
, "allow")) {
1415 /* If there are any stateful flows, we must even commit "allow"
1416 * actions. This is because, while the initiater's
1417 * direction may not have any stateful rules, the server's
1418 * may and then its return traffic would not have an
1419 * associated conntrack entry and would return "+invalid". */
1420 const char *actions
= has_stateful
? "ct_commit; next;" : "next;";
1421 ovn_lflow_add(lflows
, od
, stage
,
1422 acl
->priority
+ OVN_ACL_PRI_OFFSET
,
1423 acl
->match
, actions
);
1424 } else if (!strcmp(acl
->action
, "allow-related")) {
1425 struct ds match
= DS_EMPTY_INITIALIZER
;
1427 /* Commit the connection tracking entry, which allows all
1428 * other traffic related to this entry to flow due to the
1429 * 65535 priority flow defined earlier. */
1430 ds_put_format(&match
, "ct.new && (%s)", acl
->match
);
1431 ovn_lflow_add(lflows
, od
, stage
,
1432 acl
->priority
+ OVN_ACL_PRI_OFFSET
,
1433 ds_cstr(&match
), "ct_commit; next;");
1436 } else if (!strcmp(acl
->action
, "drop")) {
1437 ovn_lflow_add(lflows
, od
, stage
,
1438 acl
->priority
+ OVN_ACL_PRI_OFFSET
,
1439 acl
->match
, "drop;");
1440 } else if (!strcmp(acl
->action
, "reject")) {
1441 /* xxx Need to support "reject". */
1442 VLOG_INFO("reject is not a supported action");
1443 ovn_lflow_add(lflows
, od
, stage
,
1444 acl
->priority
+ OVN_ACL_PRI_OFFSET
,
1445 acl
->match
, "drop;");
1451 build_lswitch_flows(struct hmap
*datapaths
, struct hmap
*ports
,
1452 struct hmap
*lflows
, struct hmap
*mcgroups
)
1454 /* This flow table structure is documented in ovn-northd(8), so please
1455 * update ovn-northd.8.xml if you change anything. */
1457 /* Build pre-ACL and ACL tables for both ingress and egress.
1458 * Ingress tables 3 and 4. Egress tables 0 and 1. */
1459 struct ovn_datapath
*od
;
1460 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1465 build_acls(od
, lflows
, ports
);
1468 /* Logical switch ingress table 0: Admission control framework (priority
1470 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1475 /* Logical VLANs not supported. */
1476 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PORT_SEC_L2
, 100, "vlan.present",
1479 /* Broadcast/multicast source address is invalid. */
1480 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PORT_SEC_L2
, 100, "eth.src[40]",
1483 /* Port security flows have priority 50 (see below) and will continue
1484 * to the next table if packet source is acceptable. */
1487 /* Logical switch ingress table 0: Ingress port security - L2
1489 * Ingress table 1: Ingress port security - IP (priority 90 and 80)
1490 * Ingress table 2: Ingress port security - ND (priority 90 and 80)
1492 struct ovn_port
*op
;
1493 HMAP_FOR_EACH (op
, key_node
, ports
) {
1498 if (!lport_is_enabled(op
->nbs
)) {
1499 /* Drop packets from disabled logical ports (since logical flow
1500 * tables are default-drop). */
1504 struct ds match
= DS_EMPTY_INITIALIZER
;
1505 ds_put_format(&match
, "inport == %s", op
->json_key
);
1506 build_port_security_l2(
1507 "eth.src", op
->nbs
->port_security
, op
->nbs
->n_port_security
,
1509 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_PORT_SEC_L2
, 50,
1510 ds_cstr(&match
), "next;");
1513 if (op
->nbs
->n_port_security
) {
1514 build_port_security_ip(P_IN
, op
, lflows
);
1515 build_port_security_nd(op
, lflows
);
1519 /* Ingress table 1 and 2: Port security - IP and ND, by default goto next.
1521 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1526 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PORT_SEC_ND
, 0, "1", "next;");
1527 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_PORT_SEC_IP
, 0, "1", "next;");
1530 /* Ingress table 3: ARP responder, skip requests coming from localnet ports.
1531 * (priority 100). */
1532 HMAP_FOR_EACH (op
, key_node
, ports
) {
1537 if (!strcmp(op
->nbs
->type
, "localnet")) {
1538 char *match
= xasprintf("inport == %s", op
->json_key
);
1539 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_ARP_RSP
, 100,
1545 /* Ingress table 5: ARP responder, reply for known IPs.
1547 HMAP_FOR_EACH (op
, key_node
, ports
) {
1553 * Add ARP reply flows if either the
1555 * - port type is router
1557 if (!lport_is_up(op
->nbs
) && strcmp(op
->nbs
->type
, "router")) {
1561 for (size_t i
= 0; i
< op
->nbs
->n_addresses
; i
++) {
1562 struct lport_addresses laddrs
;
1563 if (!extract_lport_addresses(op
->nbs
->addresses
[i
], &laddrs
,
1567 for (size_t j
= 0; j
< laddrs
.n_ipv4_addrs
; j
++) {
1568 char *match
= xasprintf(
1569 "arp.tpa == "IP_FMT
" && arp.op == 1",
1570 IP_ARGS(laddrs
.ipv4_addrs
[j
].addr
));
1571 char *actions
= xasprintf(
1572 "eth.dst = eth.src; "
1573 "eth.src = "ETH_ADDR_FMT
"; "
1574 "arp.op = 2; /* ARP reply */ "
1575 "arp.tha = arp.sha; "
1576 "arp.sha = "ETH_ADDR_FMT
"; "
1577 "arp.tpa = arp.spa; "
1578 "arp.spa = "IP_FMT
"; "
1579 "outport = inport; "
1580 "inport = \"\"; /* Allow sending out inport. */ "
1582 ETH_ADDR_ARGS(laddrs
.ea
),
1583 ETH_ADDR_ARGS(laddrs
.ea
),
1584 IP_ARGS(laddrs
.ipv4_addrs
[j
].addr
));
1585 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_ARP_RSP
, 50,
1591 free(laddrs
.ipv4_addrs
);
1595 /* Ingress table 5: ARP responder, by default goto next.
1597 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1602 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_ARP_RSP
, 0, "1", "next;");
1605 /* Ingress table 6: Destination lookup, broadcast and multicast handling
1606 * (priority 100). */
1607 HMAP_FOR_EACH (op
, key_node
, ports
) {
1612 if (lport_is_enabled(op
->nbs
)) {
1613 ovn_multicast_add(mcgroups
, &mc_flood
, op
);
1616 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1621 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_L2_LKUP
, 100, "eth.mcast",
1622 "outport = \""MC_FLOOD
"\"; output;");
1625 /* Ingress table 6: Destination lookup, unicast handling (priority 50), */
1626 HMAP_FOR_EACH (op
, key_node
, ports
) {
1631 for (size_t i
= 0; i
< op
->nbs
->n_addresses
; i
++) {
1632 struct eth_addr mac
;
1634 if (eth_addr_from_string(op
->nbs
->addresses
[i
], &mac
)) {
1635 struct ds match
, actions
;
1638 ds_put_format(&match
, "eth.dst == "ETH_ADDR_FMT
,
1639 ETH_ADDR_ARGS(mac
));
1642 ds_put_format(&actions
, "outport = %s; output;", op
->json_key
);
1643 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_IN_L2_LKUP
, 50,
1644 ds_cstr(&match
), ds_cstr(&actions
));
1645 ds_destroy(&actions
);
1647 } else if (!strcmp(op
->nbs
->addresses
[i
], "unknown")) {
1648 if (lport_is_enabled(op
->nbs
)) {
1649 ovn_multicast_add(mcgroups
, &mc_unknown
, op
);
1650 op
->od
->has_unknown
= true;
1653 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 1);
1656 "%s: invalid syntax '%s' in addresses column",
1657 op
->nbs
->name
, op
->nbs
->addresses
[i
]);
1662 /* Ingress table 6: Destination lookup for unknown MACs (priority 0). */
1663 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1668 if (od
->has_unknown
) {
1669 ovn_lflow_add(lflows
, od
, S_SWITCH_IN_L2_LKUP
, 0, "1",
1670 "outport = \""MC_UNKNOWN
"\"; output;");
1674 /* Egress table 2: Egress port security - IP (priority 0)
1675 * port security L2 - multicast/broadcast (priority
1677 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1682 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_PORT_SEC_IP
, 0, "1", "next;");
1683 ovn_lflow_add(lflows
, od
, S_SWITCH_OUT_PORT_SEC_L2
, 100, "eth.mcast",
1687 /* Egress table 2: Egress port security - IP (priorities 90 and 80)
1688 * if port security enabled.
1690 * Egress table 3: Egress port security - L2 (priorities 50 and 150).
1692 * Priority 50 rules implement port security for enabled logical port.
1694 * Priority 150 rules drop packets to disabled logical ports, so that they
1695 * don't even receive multicast or broadcast packets. */
1696 HMAP_FOR_EACH (op
, key_node
, ports
) {
1701 struct ds match
= DS_EMPTY_INITIALIZER
;
1702 ds_put_format(&match
, "outport == %s", op
->json_key
);
1703 if (lport_is_enabled(op
->nbs
)) {
1704 build_port_security_l2("eth.dst", op
->nbs
->port_security
,
1705 op
->nbs
->n_port_security
, &match
);
1706 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_OUT_PORT_SEC_L2
, 50,
1707 ds_cstr(&match
), "output;");
1709 ovn_lflow_add(lflows
, op
->od
, S_SWITCH_OUT_PORT_SEC_L2
, 150,
1710 ds_cstr(&match
), "drop;");
1715 if (op
->nbs
->n_port_security
) {
1716 build_port_security_ip(P_OUT
, op
, lflows
);
1722 lrport_is_enabled(const struct nbrec_logical_router_port
*lrport
)
1724 return !lrport
->enabled
|| *lrport
->enabled
;
1728 add_route(struct hmap
*lflows
, const struct ovn_port
*op
,
1729 ovs_be32 network
, ovs_be32 mask
, ovs_be32 gateway
)
1731 char *match
= xasprintf("ip4.dst == "IP_FMT
"/"IP_FMT
,
1732 IP_ARGS(network
), IP_ARGS(mask
));
1734 struct ds actions
= DS_EMPTY_INITIALIZER
;
1735 ds_put_cstr(&actions
, "ip.ttl--; reg0 = ");
1737 ds_put_format(&actions
, IP_FMT
, IP_ARGS(gateway
));
1739 ds_put_cstr(&actions
, "ip4.dst");
1741 ds_put_format(&actions
,
1744 "eth.src = "ETH_ADDR_FMT
"; "
1747 IP_ARGS(op
->ip
), ETH_ADDR_ARGS(op
->mac
), op
->json_key
);
1749 /* The priority here is calculated to implement longest-prefix-match
1751 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_IN_IP_ROUTING
,
1752 count_1bits(ntohl(mask
)), match
, ds_cstr(&actions
));
1753 ds_destroy(&actions
);
1758 build_static_route_flow(struct hmap
*lflows
, struct ovn_datapath
*od
,
1760 const struct nbrec_logical_router_static_route
*route
)
1762 ovs_be32 prefix
, next_hop
, mask
;
1764 /* Verify that next hop is an IP address with 32 bits mask. */
1765 char *error
= ip_parse_masked(route
->nexthop
, &next_hop
, &mask
);
1766 if (error
|| mask
!= OVS_BE32_MAX
) {
1767 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
1768 VLOG_WARN_RL(&rl
, "bad next hop ip address %s", route
->nexthop
);
1773 /* Verify that ip prefix is a valid CIDR address. */
1774 error
= ip_parse_masked(route
->ip_prefix
, &prefix
, &mask
);
1775 if (error
|| !ip_is_cidr(mask
)) {
1776 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
1777 VLOG_WARN_RL(&rl
, "bad 'network' in static routes %s",
1783 /* Find the outgoing port. */
1784 struct ovn_port
*out_port
= NULL
;
1785 if (route
->output_port
) {
1786 out_port
= ovn_port_find(ports
, route
->output_port
);
1788 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
1789 VLOG_WARN_RL(&rl
, "Bad out port %s for static route %s",
1790 route
->output_port
, route
->ip_prefix
);
1794 /* output_port is not specified, find the
1795 * router port matching the next hop. */
1797 for (i
= 0; i
< od
->nbr
->n_ports
; i
++) {
1798 struct nbrec_logical_router_port
*lrp
= od
->nbr
->ports
[i
];
1799 out_port
= ovn_port_find(ports
, lrp
->name
);
1801 /* This should not happen. */
1805 if (out_port
->network
1806 && !((out_port
->network
^ next_hop
) & out_port
->mask
)) {
1807 /* There should be only 1 interface that matches the next hop.
1808 * Otherwise, it's a configuration error, because subnets of
1809 * router's interfaces should NOT overlap. */
1813 if (i
== od
->nbr
->n_ports
) {
1814 /* There is no matched out port. */
1815 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 1);
1816 VLOG_WARN_RL(&rl
, "No path for static route %s; next hop %s",
1817 route
->ip_prefix
, route
->nexthop
);
1822 add_route(lflows
, out_port
, prefix
, mask
, next_hop
);
1826 build_lrouter_flows(struct hmap
*datapaths
, struct hmap
*ports
,
1827 struct hmap
*lflows
)
1829 /* This flow table structure is documented in ovn-northd(8), so please
1830 * update ovn-northd.8.xml if you change anything. */
1832 /* Logical router ingress table 0: Admission control framework. */
1833 struct ovn_datapath
*od
;
1834 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1839 /* Logical VLANs not supported.
1840 * Broadcast/multicast source address is invalid. */
1841 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_ADMISSION
, 100,
1842 "vlan.present || eth.src[40]", "drop;");
1845 /* Logical router ingress table 0: match (priority 50). */
1846 struct ovn_port
*op
;
1847 HMAP_FOR_EACH (op
, key_node
, ports
) {
1852 if (!lrport_is_enabled(op
->nbr
)) {
1853 /* Drop packets from disabled logical ports (since logical flow
1854 * tables are default-drop). */
1858 char *match
= xasprintf(
1859 "(eth.mcast || eth.dst == "ETH_ADDR_FMT
") && inport == %s",
1860 ETH_ADDR_ARGS(op
->mac
), op
->json_key
);
1861 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_IN_ADMISSION
, 50,
1866 /* Logical router ingress table 1: IP Input. */
1867 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1872 /* L3 admission control: drop multicast and broadcast source, localhost
1873 * source or destination, and zero network source or destination
1874 * (priority 100). */
1875 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_IP_INPUT
, 100,
1877 "ip4.src == 255.255.255.255 || "
1878 "ip4.src == 127.0.0.0/8 || "
1879 "ip4.dst == 127.0.0.0/8 || "
1880 "ip4.src == 0.0.0.0/8 || "
1881 "ip4.dst == 0.0.0.0/8",
1884 /* ARP reply handling. Use ARP replies to populate the logical
1885 * router's ARP table. */
1886 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_IP_INPUT
, 90, "arp.op == 2",
1887 "put_arp(inport, arp.spa, arp.sha);");
1889 /* Drop Ethernet local broadcast. By definition this traffic should
1890 * not be forwarded.*/
1891 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_IP_INPUT
, 50,
1892 "eth.bcast", "drop;");
1894 /* Drop IP multicast. */
1895 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_IP_INPUT
, 50,
1896 "ip4.mcast", "drop;");
1900 * XXX Need to send ICMP time exceeded if !ip.later_frag. */
1901 char *match
= xasprintf("ip4 && ip.ttl == {0, 1}");
1902 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_IP_INPUT
, 30, match
, "drop;");
1905 /* Pass other traffic not already handled to the next table for
1907 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_IP_INPUT
, 0, "1", "next;");
1910 HMAP_FOR_EACH (op
, key_node
, ports
) {
1915 /* L3 admission control: drop packets that originate from an IP address
1916 * owned by the router or a broadcast address known to the router
1917 * (priority 100). */
1918 char *match
= xasprintf("ip4.src == {"IP_FMT
", "IP_FMT
"}",
1919 IP_ARGS(op
->ip
), IP_ARGS(op
->bcast
));
1920 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_IN_IP_INPUT
, 100,
1924 /* ICMP echo reply. These flows reply to ICMP echo requests
1925 * received for the router's IP address. */
1927 "inport == %s && (ip4.dst == "IP_FMT
" || ip4.dst == "IP_FMT
") && "
1928 "icmp4.type == 8 && icmp4.code == 0",
1929 op
->json_key
, IP_ARGS(op
->ip
), IP_ARGS(op
->bcast
));
1930 char *actions
= xasprintf(
1931 "ip4.dst = ip4.src; "
1932 "ip4.src = "IP_FMT
"; "
1935 "inport = \"\"; /* Allow sending out inport. */ "
1938 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_IN_IP_INPUT
, 90,
1943 /* ARP reply. These flows reply to ARP requests for the router's own
1946 "inport == %s && arp.tpa == "IP_FMT
" && arp.op == 1",
1947 op
->json_key
, IP_ARGS(op
->ip
));
1948 actions
= xasprintf(
1949 "eth.dst = eth.src; "
1950 "eth.src = "ETH_ADDR_FMT
"; "
1951 "arp.op = 2; /* ARP reply */ "
1952 "arp.tha = arp.sha; "
1953 "arp.sha = "ETH_ADDR_FMT
"; "
1954 "arp.tpa = arp.spa; "
1955 "arp.spa = "IP_FMT
"; "
1957 "inport = \"\"; /* Allow sending out inport. */ "
1959 ETH_ADDR_ARGS(op
->mac
),
1960 ETH_ADDR_ARGS(op
->mac
),
1963 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_IN_IP_INPUT
, 90,
1968 /* Drop IP traffic to this router. */
1969 match
= xasprintf("ip4.dst == "IP_FMT
, IP_ARGS(op
->ip
));
1970 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_IN_IP_INPUT
, 60,
1975 /* Logical router ingress table 2: IP Routing.
1977 * A packet that arrives at this table is an IP packet that should be
1978 * routed to the address in ip4.dst. This table sets outport to the correct
1979 * output port, eth.src to the output port's MAC address, and reg0 to the
1980 * next-hop IP address (leaving ip4.dst, the packet’s final destination,
1981 * unchanged), and advances to the next table for ARP resolution. */
1982 HMAP_FOR_EACH (op
, key_node
, ports
) {
1987 add_route(lflows
, op
, op
->network
, op
->mask
, 0);
1989 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
1994 /* Convert the static routes to flows. */
1995 for (int i
= 0; i
< od
->nbr
->n_static_routes
; i
++) {
1996 const struct nbrec_logical_router_static_route
*route
;
1998 route
= od
->nbr
->static_routes
[i
];
1999 build_static_route_flow(lflows
, od
, ports
, route
);
2002 if (od
->gateway
&& od
->gateway_port
) {
2003 add_route(lflows
, od
->gateway_port
, 0, 0, od
->gateway
);
2006 /* XXX destination unreachable */
2008 /* Local router ingress table 3: ARP Resolution.
2010 * Any packet that reaches this table is an IP packet whose next-hop IP
2011 * address is in reg0. (ip4.dst is the final destination.) This table
2012 * resolves the IP address in reg0 into an output port in outport and an
2013 * Ethernet address in eth.dst. */
2014 HMAP_FOR_EACH (op
, key_node
, ports
) {
2016 /* This is a logical router port. If next-hop IP address in 'reg0'
2017 * matches ip address of this router port, then the packet is
2018 * intended to eventually be sent to this logical port. Set the
2019 * destination mac address using this port's mac address.
2021 * The packet is still in peer's logical pipeline. So the match
2022 * should be on peer's outport. */
2023 if (op
->nbr
->peer
) {
2024 struct ovn_port
*peer
= ovn_port_find(ports
, op
->nbr
->peer
);
2029 if (!peer
->ip
|| !op
->ip
) {
2032 char *match
= xasprintf("outport == %s && reg0 == "IP_FMT
,
2033 peer
->json_key
, IP_ARGS(op
->ip
));
2034 char *actions
= xasprintf("eth.dst = "ETH_ADDR_FMT
"; "
2035 "next;", ETH_ADDR_ARGS(op
->mac
));
2036 ovn_lflow_add(lflows
, peer
->od
, S_ROUTER_IN_ARP_RESOLVE
,
2037 100, match
, actions
);
2041 } else if (op
->od
->n_router_ports
&& strcmp(op
->nbs
->type
, "router")) {
2042 /* This is a logical switch port that backs a VM or a container.
2043 * Extract its addresses. For each of the address, go through all
2044 * the router ports attached to the switch (to which this port
2045 * connects) and if the address in question is reachable from the
2046 * router port, add an ARP entry in that router's pipeline. */
2048 for (size_t i
= 0; i
< op
->nbs
->n_addresses
; i
++) {
2049 struct lport_addresses laddrs
;
2050 if (!extract_lport_addresses(op
->nbs
->addresses
[i
], &laddrs
,
2055 for (size_t k
= 0; k
< laddrs
.n_ipv4_addrs
; k
++) {
2056 ovs_be32 ip
= laddrs
.ipv4_addrs
[k
].addr
;
2057 for (size_t j
= 0; j
< op
->od
->n_router_ports
; j
++) {
2058 /* Get the Logical_Router_Port that the Logical_Port is
2059 * connected to, as 'peer'. */
2060 const char *peer_name
= smap_get(
2061 &op
->od
->router_ports
[j
]->nbs
->options
,
2067 struct ovn_port
*peer
2068 = ovn_port_find(ports
, peer_name
);
2069 if (!peer
|| !peer
->nbr
) {
2073 /* Make sure that 'ip' is in 'peer''s network. */
2074 if ((ip
^ peer
->network
) & peer
->mask
) {
2078 char *match
= xasprintf(
2079 "outport == %s && reg0 == "IP_FMT
,
2080 peer
->json_key
, IP_ARGS(ip
));
2081 char *actions
= xasprintf("eth.dst = "ETH_ADDR_FMT
"; "
2083 ETH_ADDR_ARGS(laddrs
.ea
));
2084 ovn_lflow_add(lflows
, peer
->od
,
2085 S_ROUTER_IN_ARP_RESOLVE
,
2086 100, match
, actions
);
2093 free(laddrs
.ipv4_addrs
);
2095 } else if (!strcmp(op
->nbs
->type
, "router")) {
2096 /* This is a logical switch port that connects to a router. */
2098 /* The peer of this switch port is the router port for which
2099 * we need to add logical flows such that it can resolve
2100 * ARP entries for all the other router ports connected to
2101 * the switch in question. */
2103 const char *peer_name
= smap_get(&op
->nbs
->options
,
2109 struct ovn_port
*peer
= ovn_port_find(ports
, peer_name
);
2110 if (!peer
|| !peer
->nbr
|| !peer
->ip
) {
2114 for (size_t j
= 0; j
< op
->od
->n_router_ports
; j
++) {
2115 const char *router_port_name
= smap_get(
2116 &op
->od
->router_ports
[j
]->nbs
->options
,
2118 struct ovn_port
*router_port
= ovn_port_find(ports
,
2120 if (!router_port
|| !router_port
->nbr
|| !router_port
->ip
) {
2124 /* Skip the router port under consideration. */
2125 if (router_port
== peer
) {
2129 if (!router_port
->ip
) {
2132 char *match
= xasprintf("outport == %s && reg0 == "IP_FMT
,
2134 IP_ARGS(router_port
->ip
));
2135 char *actions
= xasprintf("eth.dst = "ETH_ADDR_FMT
"; next;",
2136 ETH_ADDR_ARGS(router_port
->mac
));
2137 ovn_lflow_add(lflows
, peer
->od
, S_ROUTER_IN_ARP_RESOLVE
,
2138 100, match
, actions
);
2145 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
2150 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_ARP_RESOLVE
, 0, "1",
2151 "get_arp(outport, reg0); next;");
2154 /* Local router ingress table 4: ARP request.
2156 * In the common case where the Ethernet destination has been resolved,
2157 * this table outputs the packet (priority 100). Otherwise, it composes
2158 * and sends an ARP request (priority 0). */
2159 HMAP_FOR_EACH (od
, key_node
, datapaths
) {
2164 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_ARP_REQUEST
, 100,
2165 "eth.dst == 00:00:00:00:00:00",
2167 "eth.dst = ff:ff:ff:ff:ff:ff; "
2169 "arp.op = 1; " /* ARP request */
2172 ovn_lflow_add(lflows
, od
, S_ROUTER_IN_ARP_REQUEST
, 0, "1", "output;");
2175 /* Logical router egress table 0: Delivery (priority 100).
2177 * Priority 100 rules deliver packets to enabled logical ports. */
2178 HMAP_FOR_EACH (op
, key_node
, ports
) {
2183 if (!lrport_is_enabled(op
->nbr
)) {
2184 /* Drop packets to disabled logical ports (since logical flow
2185 * tables are default-drop). */
2189 char *match
= xasprintf("outport == %s", op
->json_key
);
2190 ovn_lflow_add(lflows
, op
->od
, S_ROUTER_OUT_DELIVERY
, 100,
2196 /* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,
2197 * constructing their contents based on the OVN_NB database. */
/* Updates the Logical_Flow and Multicast_Group tables in the OVN southbound
 * database, pushing the flows and multicast groups computed from 'datapaths'
 * and 'ports' and pruning rows that no longer correspond to anything in the
 * northbound configuration. */
static void
build_lflows(struct northd_context *ctx, struct hmap *datapaths,
             struct hmap *ports)
{
    struct hmap lflows = HMAP_INITIALIZER(&lflows);
    struct hmap mcgroups = HMAP_INITIALIZER(&mcgroups);

    /* Compute the desired logical flows and multicast groups in memory. */
    build_lswitch_flows(datapaths, ports, &lflows, &mcgroups);
    build_lrouter_flows(datapaths, ports, &lflows);

    /* Push changes to the Logical_Flow table to database.
     *
     * First pass: walk the existing southbound rows.  A row whose datapath is
     * gone, or that no longer matches a computed flow, is deleted; a row that
     * matches a computed flow is kept, and the in-memory copy is destroyed so
     * that only genuinely new flows remain in 'lflows' afterward. */
    const struct sbrec_logical_flow *sbflow, *next_sbflow;
    SBREC_LOGICAL_FLOW_FOR_EACH_SAFE (sbflow, next_sbflow, ctx->ovnsb_idl) {
        struct ovn_datapath *od
            = ovn_datapath_from_sbrec(datapaths, sbflow->logical_datapath);
        if (!od) {
            /* Datapath no longer exists; prune the stale flow. */
            sbrec_logical_flow_delete(sbflow);
            continue;
        }

        enum ovn_datapath_type dp_type = od->nbs ? DP_SWITCH : DP_ROUTER;
        enum ovn_pipeline pipeline
            = !strcmp(sbflow->pipeline, "ingress") ? P_IN : P_OUT;
        struct ovn_lflow *lflow = ovn_lflow_find(
            &lflows, od, ovn_stage_build(dp_type, pipeline, sbflow->table_id),
            sbflow->priority, sbflow->match, sbflow->actions);
        if (lflow) {
            /* Existing row already matches; drop the in-memory duplicate. */
            ovn_lflow_destroy(&lflows, lflow);
        } else {
            sbrec_logical_flow_delete(sbflow);
        }
    }
    /* Second pass: whatever is left in 'lflows' is new; insert it. */
    struct ovn_lflow *lflow, *next_lflow;
    HMAP_FOR_EACH_SAFE (lflow, next_lflow, hmap_node, &lflows) {
        enum ovn_pipeline pipeline = ovn_stage_get_pipeline(lflow->stage);
        uint8_t table = ovn_stage_get_table(lflow->stage);

        sbflow = sbrec_logical_flow_insert(ctx->ovnsb_txn);
        sbrec_logical_flow_set_logical_datapath(sbflow, lflow->od->sb);
        sbrec_logical_flow_set_pipeline(
            sbflow, pipeline == P_IN ? "ingress" : "egress");
        sbrec_logical_flow_set_table_id(sbflow, table);
        sbrec_logical_flow_set_priority(sbflow, lflow->priority);
        sbrec_logical_flow_set_match(sbflow, lflow->match);
        sbrec_logical_flow_set_actions(sbflow, lflow->actions);

        /* Record a human-readable stage name to aid debugging. */
        const struct smap ids = SMAP_CONST1(&ids, "stage-name",
                                            ovn_stage_to_str(lflow->stage));
        sbrec_logical_flow_set_external_ids(sbflow, &ids);

        ovn_lflow_destroy(&lflows, lflow);
    }
    hmap_destroy(&lflows);

    /* Push changes to the Multicast_Group table to database.
     * Same two-pass prune-then-insert scheme as for logical flows above. */
    const struct sbrec_multicast_group *sbmc, *next_sbmc;
    SBREC_MULTICAST_GROUP_FOR_EACH_SAFE (sbmc, next_sbmc, ctx->ovnsb_idl) {
        struct ovn_datapath *od = ovn_datapath_from_sbrec(datapaths,
                                                          sbmc->datapath);
        if (!od) {
            sbrec_multicast_group_delete(sbmc);
            continue;
        }

        struct multicast_group group = { .name = sbmc->name,
                                         .key = sbmc->tunnel_key };
        struct ovn_multicast *mc = ovn_multicast_find(&mcgroups, od, &group);
        if (mc) {
            /* Row survives; refresh its port set and drop the in-memory
             * copy so only new groups remain in 'mcgroups'. */
            ovn_multicast_update_sbrec(mc, sbmc);
            ovn_multicast_destroy(&mcgroups, mc);
        } else {
            sbrec_multicast_group_delete(sbmc);
        }
    }
    struct ovn_multicast *mc, *next_mc;
    HMAP_FOR_EACH_SAFE (mc, next_mc, hmap_node, &mcgroups) {
        sbmc = sbrec_multicast_group_insert(ctx->ovnsb_txn);
        sbrec_multicast_group_set_datapath(sbmc, mc->datapath->sb);
        sbrec_multicast_group_set_name(sbmc, mc->group->name);
        sbrec_multicast_group_set_tunnel_key(sbmc, mc->group->key);
        ovn_multicast_update_sbrec(mc, sbmc);
        ovn_multicast_destroy(&mcgroups, mc);
    }
    hmap_destroy(&mcgroups);
}
/* Handles a change to the OVN northbound database: recomputes the datapath,
 * port, and logical-flow contents of the southbound database from the current
 * northbound contents.  A no-op unless a southbound transaction is open,
 * since everything below writes through 'ctx->ovnsb_txn'. */
static void
ovnnb_db_run(struct northd_context *ctx)
{
    if (!ctx->ovnsb_txn) {
        return;
    }
    struct hmap datapaths, ports;
    build_datapaths(ctx, &datapaths);
    build_ports(ctx, &datapaths, &ports);
    build_lflows(ctx, &datapaths, &ports);

    /* All changes are now staged in the southbound transaction; free the
     * intermediate in-memory representations. */
    struct ovn_datapath *dp, *next_dp;
    HMAP_FOR_EACH_SAFE (dp, next_dp, key_node, &datapaths) {
        ovn_datapath_destroy(&datapaths, dp);
    }
    hmap_destroy(&datapaths);

    struct ovn_port *port, *next_port;
    HMAP_FOR_EACH_SAFE (port, next_port, key_node, &ports) {
        ovn_port_destroy(&ports, port);
    }
    hmap_destroy(&ports);
}
/*
 * The only change we get notified about is if the 'chassis' column of the
 * 'Port_Binding' table changes.  When this column is not empty, it means we
 * need to set the corresponding logical port as 'up' in the northbound DB.
 */
static void
ovnsb_db_run(struct northd_context *ctx)
{
    /* All updates here go to the northbound database, so bail out unless a
     * northbound transaction is open. */
    if (!ctx->ovnnb_txn) {
        return;
    }
    struct hmap lports_hmap;
    const struct sbrec_port_binding *sb;
    const struct nbrec_logical_port *nb;

    /* Temporary hash-map entry indexing northbound logical ports by name,
     * so each Port_Binding lookup below is O(1) instead of a linear scan. */
    struct lport_hash_node {
        struct hmap_node node;
        const struct nbrec_logical_port *nb;
    } *hash_node;

    hmap_init(&lports_hmap);

    NBREC_LOGICAL_PORT_FOR_EACH(nb, ctx->ovnnb_idl) {
        hash_node = xzalloc(sizeof *hash_node);
        hash_node->nb = nb;
        hmap_insert(&lports_hmap, &hash_node->node, hash_string(nb->name, 0));
    }

    SBREC_PORT_BINDING_FOR_EACH(sb, ctx->ovnsb_idl) {
        nb = NULL;
        HMAP_FOR_EACH_WITH_HASH(hash_node, node,
                                hash_string(sb->logical_port, 0),
                                &lports_hmap) {
            if (!strcmp(sb->logical_port, hash_node->nb->name)) {
                nb = hash_node->nb;
                break;
            }
        }

        if (!nb) {
            /* The logical port doesn't exist for this port binding.  This can
             * happen under normal circumstances when ovn-northd hasn't gotten
             * around to pruning the Port_Binding yet. */
            continue;
        }

        /* Mirror the binding's 'chassis' state into the northbound 'up'
         * column, writing only when the stored value would actually change
         * (an unset 'up' column counts as needing an update either way). */
        if (sb->chassis && (!nb->up || !*nb->up)) {
            bool up = true;
            nbrec_logical_port_set_up(nb, &up, 1);
        } else if (!sb->chassis && (!nb->up || *nb->up)) {
            bool up = false;
            nbrec_logical_port_set_up(nb, &up, 1);
        }
    }

    HMAP_FOR_EACH_POP(hash_node, node, &lports_hmap) {
        free(hash_node);
    }
    hmap_destroy(&lports_hmap);
}
/* Lazily computed default northbound DB remote; freed at exit in main(). */
static char *default_nb_db_;

/* Returns the default connection target for the OVN northbound database:
 * a unix socket under the OVS run directory.  The string is built once and
 * cached for the life of the process. */
static const char *
default_nb_db(void)
{
    if (!default_nb_db_) {
        default_nb_db_ = xasprintf("unix:%s/ovnnb_db.sock", ovs_rundir());
    }
    return default_nb_db_;
}
/* Lazily computed default southbound DB remote; freed at exit in main(). */
static char *default_sb_db_;

/* Returns the default connection target for the OVN southbound database:
 * a unix socket under the OVS run directory.  The string is built once and
 * cached for the life of the process. */
static const char *
default_sb_db(void)
{
    if (!default_sb_db_) {
        default_sb_db_ = xasprintf("unix:%s/ovnsb_db.sock", ovs_rundir());
    }
    return default_sb_db_;
}
/* Parses ovn-northd's command-line options, setting the global 'ovnnb_db'
 * and 'ovnsb_db' remotes (falling back to the default unix sockets) and
 * dispatching daemon/vlog/SSL options to their shared handlers.  Exits the
 * process directly for --help, --options, and --version. */
static void
parse_options(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
{
    enum {
        /* Long-only options from the shared daemon and vlog modules get
         * values above the printable-character range via these enums. */
        DAEMON_OPTION_ENUMS,
        VLOG_OPTION_ENUMS,
    };
    static const struct option long_options[] = {
        {"ovnsb-db", required_argument, NULL, 'd'},
        {"ovnnb-db", required_argument, NULL, 'D'},
        {"help", no_argument, NULL, 'h'},
        {"options", no_argument, NULL, 'o'},
        {"version", no_argument, NULL, 'V'},
        DAEMON_LONG_OPTIONS,
        VLOG_LONG_OPTIONS,
        STREAM_SSL_LONG_OPTIONS,
        {NULL, 0, NULL, 0},
    };
    char *short_options = ovs_cmdl_long_options_to_short_options(long_options);

    for (;;) {
        int c;

        c = getopt_long(argc, argv, short_options, long_options, NULL);
        if (c == -1) {
            break;
        }

        switch (c) {
        DAEMON_OPTION_HANDLERS;
        VLOG_OPTION_HANDLERS;
        STREAM_SSL_OPTION_HANDLERS;

        case 'd':
            ovnsb_db = optarg;
            break;

        case 'D':
            ovnnb_db = optarg;
            break;

        case 'h':
            usage();
            exit(EXIT_SUCCESS);

        case 'o':
            ovs_cmdl_print_options(long_options);
            exit(EXIT_SUCCESS);

        case 'V':
            ovs_print_version(0, 0);
            exit(EXIT_SUCCESS);

        default:
            break;
        }
    }

    /* Fall back to the default unix-socket remotes when not given. */
    if (!ovnsb_db) {
        ovnsb_db = default_sb_db();
    }

    if (!ovnnb_db) {
        ovnnb_db = default_nb_db();
    }

    free(short_options);
}
/* Registers 'column' with 'idl' for replication but suppresses change
 * alerts for it: ovn-northd writes these columns itself and does not need
 * to be woken up when its own updates echo back from the database. */
static void
add_column_noalert(struct ovsdb_idl *idl,
                   const struct ovsdb_idl_column *column)
{
    ovsdb_idl_add_column(idl, column);
    ovsdb_idl_omit_alert(idl, column);
}
int
main(int argc, char *argv[])
{
    int res = EXIT_SUCCESS;
    struct unixctl_server *unixctl;
    int retval;
    bool exiting;

    fatal_ignore_sigpipe();
    set_program_name(argv[0]);
    service_start(&argc, &argv);
    parse_options(argc, argv);

    daemonize_start(false);

    retval = unixctl_server_create(NULL, &unixctl);
    if (retval) {
        exit(EXIT_FAILURE);
    }
    unixctl_command_register("exit", "", 0, 0, ovn_northd_exit, &exiting);

    daemonize_complete();

    nbrec_init();
    sbrec_init();

    /* We want to detect all changes to the ovn-nb db. */
    struct ovsdb_idl_loop ovnnb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
        ovsdb_idl_create(ovnnb_db, &nbrec_idl_class, true, true));

    /* For the southbound DB we monitor only what we need: the tables that
     * ovn-northd writes (with alerts suppressed, since we write them
     * ourselves) plus the Port_Binding 'chassis' column, whose changes we
     * do want to be woken up for (see ovnsb_db_run()). */
    struct ovsdb_idl_loop ovnsb_idl_loop = OVSDB_IDL_LOOP_INITIALIZER(
        ovsdb_idl_create(ovnsb_db, &sbrec_idl_class, false, true));

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_logical_flow);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_logical_flow_col_logical_datapath);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_pipeline);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_table_id);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_priority);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_match);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_logical_flow_col_actions);

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_multicast_group);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_multicast_group_col_datapath);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_multicast_group_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_multicast_group_col_name);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_multicast_group_col_ports);

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_datapath_binding);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_datapath_binding_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_datapath_binding_col_external_ids);

    ovsdb_idl_add_table(ovnsb_idl_loop.idl, &sbrec_table_port_binding);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_datapath);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_logical_port);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_tunnel_key);
    add_column_noalert(ovnsb_idl_loop.idl,
                       &sbrec_port_binding_col_parent_port);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_tag);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_type);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_options);
    add_column_noalert(ovnsb_idl_loop.idl, &sbrec_port_binding_col_mac);
    /* 'chassis' is the one southbound column we do want alerts for. */
    ovsdb_idl_add_column(ovnsb_idl_loop.idl, &sbrec_port_binding_col_chassis);

    /* Main loop. */
    exiting = false;
    while (!exiting) {
        struct northd_context ctx = {
            .ovnnb_idl = ovnnb_idl_loop.idl,
            .ovnnb_txn = ovsdb_idl_loop_run(&ovnnb_idl_loop),
            .ovnsb_idl = ovnsb_idl_loop.idl,
            .ovnsb_txn = ovsdb_idl_loop_run(&ovnsb_idl_loop),
        };

        ovnnb_db_run(&ctx);
        ovnsb_db_run(&ctx);

        unixctl_server_run(unixctl);
        unixctl_server_wait(unixctl);
        if (exiting) {
            poll_immediate_wake();
        }
        ovsdb_idl_loop_commit_and_wait(&ovnnb_idl_loop);
        ovsdb_idl_loop_commit_and_wait(&ovnsb_idl_loop);

        poll_block();
        if (should_service_stop()) {
            exiting = true;
        }
    }

    unixctl_server_destroy(unixctl);
    ovsdb_idl_loop_destroy(&ovnnb_idl_loop);
    ovsdb_idl_loop_destroy(&ovnsb_idl_loop);
    service_stop();

    free(default_nb_db_);
    free(default_sb_db_);
    exit(res);
}
/* unixctl "exit" command handler: sets the main loop's exit flag (passed in
 * through 'exiting_') and acknowledges the command. */
static void
ovn_northd_exit(struct unixctl_conn *conn, int argc OVS_UNUSED,
                const char *argv[] OVS_UNUSED, void *exiting_)
{
    bool *exiting = exiting_;
    *exiting = true;

    unixctl_command_reply(conn, NULL);
}