ospfd/Makefile \
pbrd/Makefile \
pimd/Makefile \
- ports/Makefile \
qpb/Makefile \
ripd/Makefile \
ripngd/Makefile \
* expires
*/
BFD_SESS_FLAG_SHUTDOWN = 1 << 7, /* disable BGP peer function */
+ BFD_SESS_FLAG_CONFIG = 1 << 8, /* Session configured with bfd NB API */
};
#define BFD_SET_FLAG(field, flag) (field |= flag)
#define BFD_PKT_INFO_VAL 1
#define BFD_IPV6_PKT_INFO_VAL 1
#define BFD_IPV6_ONLY_VAL 1
-#define BFD_SRCPORTINIT 49142
-#define BFD_SRCPORTMAX 65536
+#define BFD_SRCPORTINIT 49152
+#define BFD_SRCPORTMAX 65535
#define BFD_DEFDESTPORT 3784
#define BFD_DEF_ECHO_PORT 3785
#define BFD_DEF_MHOP_DEST_PORT 4784
}
}
+ if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG)) {
+ if (bs->refcount)
+ vty_out(vty, "%% session peer is now configurable via bfd daemon.\n");
+ BFD_SET_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG);
+ }
+
VTY_PUSH_CONTEXT(BFD_PEER_NODE, bs);
return CMD_SUCCESS;
struct vty *vty = arg;
struct bfd_session *bs = hb->data;
+ if (!BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG))
+ return;
+
_bfdd_peer_write_config(vty, bs);
}
/* Unregister client peer notification. */
pcn = pcn_lookup(pc, bs);
pcn_free(pcn);
+ if (bs->refcount ||
+ BFD_CHECK_FLAG(bs->flags, BFD_SESS_FLAG_CONFIG))
+ return;
+ ptm_bfd_ses_del(&bpc);
}
/*
peer->last_reset = PEER_DOWN_BFD_DOWN;
BGP_EVENT_ADD(peer, BGP_Stop);
}
+ if ((status == BFD_STATUS_UP) && (old_status == BFD_STATUS_DOWN)
+ && peer->status != Established) {
+ if (!BGP_PEER_START_SUPPRESSED(peer)) {
+ bgp_fsm_event_update(peer, 1);
+ BGP_EVENT_ADD(peer, BGP_Start);
+ }
+ }
}
/*
* Add (update) or delete remote VTEP from zebra.
*/
static int bgp_zebra_send_remote_vtep(struct bgp *bgp, struct bgpevpn *vpn,
- struct prefix_evpn *p, int add)
+ struct prefix_evpn *p,
+ int flood_control, int add)
{
struct stream *s;
add ? "ADD" : "DEL", vpn->vni);
return -1;
}
+ stream_putl(s, flood_control);
stream_putw_at(s, 0, stream_get_endp(s));
{
int ret;
uint8_t flags;
+ int flood_control;
if (p->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE) {
flags = 0;
bgp, vpn, p, pi->attr->nexthop, 1, flags,
mac_mobility_seqnum(pi->attr));
} else {
- ret = bgp_zebra_send_remote_vtep(bgp, vpn, p, 1);
+ switch (pi->attr->pmsi_tnl_type) {
+ case PMSI_TNLTYPE_INGR_REPL:
+ flood_control = VXLAN_FLOOD_HEAD_END_REPL;
+ break;
+
+ case PMSI_TNLTYPE_PIM_SM:
+ flood_control = VXLAN_FLOOD_PIM_SM;
+ break;
+
+ default:
+ flood_control = VXLAN_FLOOD_DISABLED;
+ break;
+ }
+ ret = bgp_zebra_send_remote_vtep(bgp, vpn, p, flood_control, 1);
}
return ret;
ret = bgp_zebra_send_remote_macip(bgp, vpn, p, remote_vtep_ip,
0, 0, 0);
else
- ret = bgp_zebra_send_remote_vtep(bgp, vpn, p, 0);
+ ret = bgp_zebra_send_remote_vtep(bgp, vpn, p,
+ VXLAN_FLOOD_DISABLED, 0);
return ret;
}
return 0;
}
+/* BUM traffic flood mode per-l2-vni */
+static int bgp_evpn_vni_flood_mode_get(struct bgp *bgp,
+ struct bgpevpn *vpn)
+{
+ /* if flooding has been globally disabled, per-vni mode is
+ * not relevant
+ */
+ if (bgp->vxlan_flood_ctrl == VXLAN_FLOOD_DISABLED)
+ return VXLAN_FLOOD_DISABLED;
+
+ /* if mcast group ip has been specified we use a PIM-SM MDT */
+ if (vpn->mcast_grp.s_addr != INADDR_ANY)
+ return VXLAN_FLOOD_PIM_SM;
+
+ /* default is ingress replication */
+ return VXLAN_FLOOD_HEAD_END_REPL;
+}
+
/*
* Update (and advertise) local routes for a VNI. Invoked upon the VNI
* export RT getting modified or change to tunnel IP. Note that these
*
* RT-3 only if doing head-end replication
*/
- if (bgp->vxlan_flood_ctrl == VXLAN_FLOOD_HEAD_END_REPL) {
+ if (bgp_evpn_vni_flood_mode_get(bgp, vpn)
+ == VXLAN_FLOOD_HEAD_END_REPL) {
build_evpn_type3_prefix(&p, vpn->originator_ip);
ret = update_evpn_route(bgp, vpn, &p, 0, 0);
if (ret)
return delete_all_vni_routes(bgp, vpn);
}
+/*
+ * There is a flood mcast IP address change. Update the mcast-grp and
+ * remove the type-3 route if any. A new type-3 route will be generated
+ * post tunnel_ip update if the new flood mode is head-end-replication.
+ */
+static int bgp_evpn_mcast_grp_change(struct bgp *bgp, struct bgpevpn *vpn,
+ struct in_addr mcast_grp)
+{
+ struct prefix_evpn p;
+
+ vpn->mcast_grp = mcast_grp;
+
+ if (is_vni_live(vpn)) {
+ build_evpn_type3_prefix(&p, vpn->originator_ip);
+ delete_evpn_route(bgp, vpn, &p);
+ }
+
+ return 0;
+}
+
/*
* There is a tunnel endpoint IP address change for this VNI, delete
* prior type-3 route (if needed) and update.
*
* RT-3 only if doing head-end replication
*/
- if (bgp->vxlan_flood_ctrl == VXLAN_FLOOD_HEAD_END_REPL) {
+ if (bgp_evpn_vni_flood_mode_get(bgp, vpn)
+ == VXLAN_FLOOD_HEAD_END_REPL) {
build_evpn_type3_prefix(&p, vpn->originator_ip);
rn = bgp_node_lookup(vpn->route_table, (struct prefix *)&p);
if (!rn) /* unexpected */
struct bgp *bgp = data;
struct prefix_evpn p;
- if (!vpn || !is_vni_live(vpn))
+ if (!vpn || !is_vni_live(vpn) ||
+ bgp_evpn_vni_flood_mode_get(bgp, vpn)
+ != VXLAN_FLOOD_HEAD_END_REPL)
return;
build_evpn_type3_prefix(&p, vpn->originator_ip);
*/
if (attr &&
(attr->flag & ATTR_FLAG_BIT(BGP_ATTR_PMSI_TUNNEL))) {
- if (attr->pmsi_tnl_type != PMSI_TNLTYPE_INGR_REPL) {
- flog_warn(
- EC_BGP_EVPN_PMSI_PRESENT,
- "%u:%s - Rx EVPN Type-3 NLRI with unsupported PTA %d",
- peer->bgp->vrf_id, peer->host,
- attr->pmsi_tnl_type);
+ if (attr->pmsi_tnl_type != PMSI_TNLTYPE_INGR_REPL &&
+ attr->pmsi_tnl_type != PMSI_TNLTYPE_PIM_SM) {
+ flog_warn(EC_BGP_EVPN_PMSI_PRESENT,
+ "%u:%s - Rx EVPN Type-3 NLRI with unsupported PTA %d",
+ peer->bgp->vrf_id, peer->host,
+ attr->pmsi_tnl_type);
}
}
if (addpath_encoded) {
/* When packet overflow occurs return immediately. */
if (pnt + BGP_ADDPATH_ID_LEN > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
addpath_id = ntohl(*((uint32_t *)pnt));
pnt += BGP_ADDPATH_ID_LEN;
/* All EVPN NLRI types start with type and length. */
if (pnt + 2 > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_EVPN_MISSING_TYPE;
rtype = *pnt++;
psize = *pnt++;
/* When packet overflow occur return immediately. */
if (pnt + psize > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
switch (rtype) {
case BGP_EVPN_MAC_IP_ROUTE:
EC_BGP_EVPN_FAIL,
"%u:%s - Error in processing EVPN type-2 NLRI size %d",
peer->bgp->vrf_id, peer->host, psize);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_EVPN_TYPE2_SIZE;
}
break;
EC_BGP_PKT_PROCESS,
"%u:%s - Error in processing EVPN type-3 NLRI size %d",
peer->bgp->vrf_id, peer->host, psize);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_EVPN_TYPE3_SIZE;
}
break;
EC_BGP_PKT_PROCESS,
"%u:%s - Error in processing EVPN type-4 NLRI size %d",
peer->bgp->vrf_id, peer->host, psize);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_EVPN_TYPE4_SIZE;
}
break;
EC_BGP_PKT_PROCESS,
"%u:%s - Error in processing EVPN type-5 NLRI size %d",
peer->bgp->vrf_id, peer->host, psize);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_EVPN_TYPE5_SIZE;
}
break;
/* Packet length consistency check. */
if (pnt != lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
- return 0;
+ return BGP_NLRI_PARSE_OK;
}
/*
* Create a new vpn - invoked upon configuration or zebra notification.
*/
struct bgpevpn *bgp_evpn_new(struct bgp *bgp, vni_t vni,
- struct in_addr originator_ip,
- vrf_id_t tenant_vrf_id)
+ struct in_addr originator_ip,
+ vrf_id_t tenant_vrf_id,
+ struct in_addr mcast_grp)
{
struct bgpevpn *vpn;
vpn->vni = vni;
vpn->originator_ip = originator_ip;
vpn->tenant_vrf_id = tenant_vrf_id;
+ vpn->mcast_grp = mcast_grp;
/* Initialize route-target import and export lists */
vpn->import_rtl = list_new();
* about are for the local-tunnel-ip and the (tenant) VRF.
*/
int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
- struct in_addr originator_ip, vrf_id_t tenant_vrf_id)
+ struct in_addr originator_ip,
+ vrf_id_t tenant_vrf_id,
+ struct in_addr mcast_grp)
+
{
struct bgpevpn *vpn;
struct prefix_evpn p;
if (is_vni_live(vpn)
&& IPV4_ADDR_SAME(&vpn->originator_ip, &originator_ip)
+ && IPV4_ADDR_SAME(&vpn->mcast_grp, &mcast_grp)
&& vpn->tenant_vrf_id == tenant_vrf_id)
/* Probably some other param has changed that we don't
* care about. */
return 0;
+ bgp_evpn_mcast_grp_change(bgp, vpn, mcast_grp);
+
/* Update tenant_vrf_id if it has changed. */
if (vpn->tenant_vrf_id != tenant_vrf_id) {
bgpevpn_unlink_from_l3vni(vpn);
/* Create or update as appropriate. */
if (!vpn) {
- vpn = bgp_evpn_new(bgp, vni, originator_ip, tenant_vrf_id);
+ vpn = bgp_evpn_new(bgp, vni, originator_ip, tenant_vrf_id,
+ mcast_grp);
if (!vpn) {
flog_err(
EC_BGP_VNI,
*
* RT-3 only if doing head-end replication
*/
- if (bgp->vxlan_flood_ctrl == VXLAN_FLOOD_HEAD_END_REPL) {
+ if (bgp_evpn_vni_flood_mode_get(bgp, vpn)
+ == VXLAN_FLOOD_HEAD_END_REPL) {
build_evpn_type3_prefix(&p, vpn->originator_ip);
if (update_evpn_route(bgp, vpn, &p, 0, 0)) {
flog_err(EC_BGP_EVPN_ROUTE_CREATE,
extern int bgp_evpn_local_vni_del(struct bgp *bgp, vni_t vni);
extern int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
struct in_addr originator_ip,
- vrf_id_t tenant_vrf_id);
+ vrf_id_t tenant_vrf_id,
+ struct in_addr mcast_grp);
extern int bgp_evpn_local_es_add(struct bgp *bgp, esi_t *esi,
struct ipaddr *originator_ip);
extern int bgp_evpn_local_es_del(struct bgp *bgp, esi_t *esi,
/* Route type 3 field */
struct in_addr originator_ip;
+ /* PIM-SM MDT group for BUM flooding */
+ struct in_addr mcast_grp;
+
/* Import and Export RTs. */
struct list *import_rtl;
struct list *export_rtl;
listnode_delete(vpn->bgp_vrf->l2vnis, vpn);
/* remove the backpointer to the vrf instance */
+ bgp_unlock(vpn->bgp_vrf);
vpn->bgp_vrf = NULL;
}
return;
/* associate the vpn to the bgp_vrf instance */
- vpn->bgp_vrf = bgp_vrf;
+ vpn->bgp_vrf = bgp_lock(bgp_vrf);
listnode_add_sort(bgp_vrf->l2vnis, vpn);
/* check if we are advertising two labels for this vpn */
extern void bgp_evpn_derive_auto_rd_for_vrf(struct bgp *bgp);
extern struct bgpevpn *bgp_evpn_lookup_vni(struct bgp *bgp, vni_t vni);
extern struct bgpevpn *bgp_evpn_new(struct bgp *bgp, vni_t vni,
- struct in_addr originator_ip,
- vrf_id_t tenant_vrf_id);
+ struct in_addr originator_ip,
+ vrf_id_t tenant_vrf_id,
+ struct in_addr mcast_grp);
extern void bgp_evpn_free(struct bgp *bgp, struct bgpevpn *vpn);
extern struct evpnes *bgp_evpn_lookup_es(struct bgp *bgp, esi_t *esi);
extern struct evpnes *bgp_evpn_es_new(struct bgp *bgp, esi_t *esi,
prefix_rd2str(&vpn->prd, buf1, sizeof(buf1)));
json_object_string_add(json, "originatorIp",
inet_ntoa(vpn->originator_ip));
+ json_object_string_add(json, "mcastGroup",
+ inet_ntoa(vpn->mcast_grp));
json_object_string_add(json, "advertiseGatewayMacip",
vpn->advertise_gw_macip ? "Yes" : "No");
} else {
prefix_rd2str(&vpn->prd, buf1, sizeof(buf1)));
vty_out(vty, " Originator IP: %s\n",
inet_ntoa(vpn->originator_ip));
+ vty_out(vty, " Mcast group: %s\n",
+ inet_ntoa(vpn->mcast_grp));
vty_out(vty, " Advertise-gw-macip : %s\n",
vpn->advertise_gw_macip ? "Yes" : "No");
vty_out(vty, " Advertise-svi-macip : %s\n",
static struct bgpevpn *evpn_create_update_vni(struct bgp *bgp, vni_t vni)
{
struct bgpevpn *vpn;
+ struct in_addr mcast_grp = {INADDR_ANY};
if (!bgp->vnihash)
return NULL;
/* tenant vrf will be updated when we get local_vni_add from
* zebra
*/
- vpn = bgp_evpn_new(bgp, vni, bgp->router_id, 0);
+ vpn = bgp_evpn_new(bgp, vni, bgp->router_id, 0, mcast_grp);
if (!vpn) {
flog_err(
EC_BGP_VNI,
* If 'type' is non-zero, only routes matching that type are shown.
*/
static void evpn_show_all_routes(struct vty *vty, struct bgp *bgp, int type,
- json_object *json)
+ json_object *json, int detail)
{
struct bgp_node *rd_rn;
struct bgp_table *table;
struct bgp_node *rn;
struct bgp_path_info *pi;
- int header = 1;
+ int header = detail ? 0 : 1;
int rd_header;
afi_t afi;
safi_t safi;
rn->p.prefixlen);
}
+ /* Prefix and num paths displayed once per prefix. */
+ if (detail)
+ route_vty_out_detail_header(
+ vty, bgp, rn,
+ (struct prefix_rd *)&rd_rn->p,
+ AFI_L2VPN, SAFI_EVPN, json_prefix);
+
/* For EVPN, the prefix is displayed for each path (to
* fit in
* with code that already exists).
if (json)
json_path = json_object_new_array();
- route_vty_out(vty, &rn->p, pi, 0, SAFI_EVPN,
- json_path);
+ if (detail) {
+ route_vty_out_detail(
+ vty, bgp, &rn->p, pi, AFI_L2VPN,
+ SAFI_EVPN, json_path);
+ } else
+ route_vty_out(vty, &rn->p, pi, 0,
+ SAFI_EVPN, json_path);
if (json)
json_object_array_add(json_paths,
*/
DEFUN(show_bgp_l2vpn_evpn_route,
show_bgp_l2vpn_evpn_route_cmd,
- "show bgp l2vpn evpn route [type <macip|multicast|es|prefix>] [json]",
+ "show bgp l2vpn evpn route [detail] [type <macip|multicast|es|prefix>] [json]",
SHOW_STR
BGP_STR
L2VPN_HELP_STR
EVPN_HELP_STR
"EVPN route information\n"
+ "Display Detailed Information\n"
"Specify Route type\n"
"MAC-IP (Type-2) route\n"
"Multicast (Type-3) route\n"
{
struct bgp *bgp;
int type_idx = 0;
+ int detail = 0;
int type = 0;
bool uj = false;
json_object *json = NULL;
return CMD_WARNING;
}
- evpn_show_all_routes(vty, bgp, type, json);
+ if (argv_find(argv, argc, "detail", &detail))
+ detail = 1;
+
+ evpn_show_all_routes(vty, bgp, type, json, detail);
if (uj) {
vty_out(vty, "%s\n", json_object_to_json_string_ext(
"Summary of BGP neighbor status\n" JSON_STR)
ALIAS_HIDDEN(show_bgp_l2vpn_evpn_route, show_bgp_evpn_route_cmd,
- "show bgp evpn route [type <macip|multicast>]",
+ "show bgp evpn route [detail] [type <macip|multicast>]",
SHOW_STR BGP_STR EVPN_HELP_STR
"EVPN route information\n"
+ "Display Detailed Information\n"
"Specify Route type\n"
"MAC-IP (Type-2) route\n"
"Multicast (Type-3) route\n")
return 0;
}
-static int config_bgp_aspath_validate(const char *regstr)
+int config_bgp_aspath_validate(const char *regstr)
{
- char valid_chars[] = "1234567890_^|[,{}() ]$*+.?-";
+ char valid_chars[] = "1234567890_^|[,{}() ]$*+.?-\\";
if (strspn(regstr, valid_chars) == strlen(regstr))
return 1;
"Regular expression access list name\n"
"Specify packets to reject\n"
"Specify packets to forward\n"
- "A regular-expression (1234567890_(^|[,{}() ]|$)) to match the BGP AS paths\n")
+ "A regular-expression (1234567890_^|[,{}() ]$*+.?-\\) to match the BGP AS paths\n")
{
int idx = 0;
enum as_filter_type type;
"Regular expression access list name\n"
"Specify packets to reject\n"
"Specify packets to forward\n"
- "A regular-expression (1234567890_(^|[,{}() ]|$)) to match the BGP AS paths\n")
+ "A regular-expression (1234567890_^|[,{}() ]$*+.?-\\) to match the BGP AS paths\n")
DEFUN(no_as_path, no_bgp_as_path_cmd,
"no bgp as-path access-list WORD <deny|permit> LINE...",
"Regular expression access list name\n"
"Specify packets to reject\n"
"Specify packets to forward\n"
- "A regular-expression (1234567890_(^|[,{}() ]|$)) to match the BGP AS paths\n")
+ "A regular-expression (1234567890_^|[,{}() ]$*+.?-\\) to match the BGP AS paths\n")
{
int idx = 0;
enum as_filter_type type;
"Regular expression access list name\n"
"Specify packets to reject\n"
"Specify packets to forward\n"
- "A regular-expression (1234567890_(^|[,{}() ]|$)) to match the BGP AS paths\n")
+ "A regular-expression (1234567890_^|[,{}() ]$*+.?-\\) to match the BGP AS paths\n")
DEFUN (no_as_path_all,
no_bgp_as_path_all_cmd,
extern struct as_list *as_list_lookup(const char *);
extern void as_list_add_hook(void (*func)(char *));
extern void as_list_delete_hook(void (*func)(const char *));
+extern int config_bgp_aspath_validate(const char *regstr);
#endif /* _QUAGGA_BGP_FILTER_H */
if (afi == AFI_IP6) {
flog_err(EC_LIB_DEVELOPMENT, "BGP flowspec IPv6 not supported");
- return -1;
+ return BGP_NLRI_PARSE_ERROR_FLOWSPEC_IPV6_NOT_SUPPORTED;
}
if (packet->length >= FLOWSPEC_NLRI_SIZELIMIT) {
flog_err(EC_BGP_FLOWSPEC_PACKET,
"BGP flowspec nlri length maximum reached (%u)",
packet->length);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_FLOWSPEC_NLRI_SIZELIMIT;
}
for (; pnt < lim; pnt += psize) {
/* All FlowSpec NLRI begin with length. */
if (pnt + 1 > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
psize = *pnt++;
EC_BGP_FLOWSPEC_PACKET,
"Flowspec NLRI length inconsistent ( size %u seen)",
psize);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
}
if (bgp_fs_nlri_validate(pnt, psize) < 0) {
flog_err(
EC_BGP_FLOWSPEC_PACKET,
"Bad flowspec format or NLRI options not supported");
- return -1;
+ return BGP_NLRI_PARSE_ERROR_FLOWSPEC_BAD_FORMAT;
}
p.family = AF_FLOWSPEC;
p.prefixlen = 0;
flog_err(EC_BGP_FLOWSPEC_INSTALLATION,
"Flowspec NLRI failed to be %s.",
attr ? "added" : "withdrawn");
- return -1;
+ return BGP_NLRI_PARSE_ERROR;
}
}
- return 0;
+ return BGP_NLRI_PARSE_OK;
}
return (bgp_stop(peer));
}
-void bgp_fsm_nht_update(struct peer *peer, int valid)
+void bgp_fsm_event_update(struct peer *peer, int valid)
{
if (!peer)
return;
}
}
-
/* Finite State Machine structure */
static const struct {
int (*func)(struct peer *);
#define FSM_PEER_TRANSITIONED 3
/* Prototypes. */
-extern void bgp_fsm_nht_update(struct peer *, int valid);
+extern void bgp_fsm_event_update(struct peer *peer, int valid);
extern int bgp_event(struct thread *);
extern int bgp_event_update(struct peer *, int event);
extern int bgp_stop(struct peer *peer);
/* When packet overflow occurs return immediately. */
if (pnt + BGP_ADDPATH_ID_LEN > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
addpath_id = ntohl(*((uint32_t *)pnt));
pnt += BGP_ADDPATH_ID_LEN;
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error / L-U (prefix length %d exceeds packet size %u)",
peer->host, prefixlen, (uint)(lim - pnt));
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
}
/* Fill in the labels */
peer->host, prefixlen);
bgp_notify_send(peer, BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_INVAL_NETWORK);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_LABEL_LENGTH;
}
if ((afi == AFI_IP && p.prefixlen > 32)
|| (afi == AFI_IP6 && p.prefixlen > 128))
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH;
/* Fetch prefix from NLRI packet */
memcpy(&p.u.prefix, pnt + llen, psize - llen);
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error / L-U (%zu data remaining after parsing)",
peer->host, lim - pnt);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
- return 0;
+ return BGP_NLRI_PARSE_OK;
}
{"skip_runas", no_argument, NULL, 'S'},
{"ecmp", required_argument, NULL, 'e'},
{"int_num", required_argument, NULL, 'I'},
+ {"no_zebra", no_argument, NULL, 'Z'},
{0}};
/* signal definitions */
frr_preinit(&bgpd_di, argc, argv);
frr_opt_add(
- "p:l:Sne:I:" DEPRECATED_OPTIONS, longopts,
+ "p:l:SnZe:I:" DEPRECATED_OPTIONS, longopts,
" -p, --bgp_port Set BGP listen port number (0 means do not listen).\n"
" -l, --listenon Listen on specified address (implies -n)\n"
" -n, --no_kernel Do not install route to kernel.\n"
/* When packet overflow occurs return immediately. */
if (pnt + BGP_ADDPATH_ID_LEN > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
addpath_id = ntohl(*((uint32_t *)pnt));
pnt += BGP_ADDPATH_ID_LEN;
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error / VPN (prefix length %d less than VPN min length)",
peer->host, prefixlen);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH;
}
/* sanity check against packet data */
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error / VPN (prefix length %d exceeds packet size %u)",
peer->host, prefixlen, (uint)(lim - pnt));
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
}
/* sanity check against storage for the IP address portion */
peer->host,
prefixlen - VPN_PREFIXLEN_MIN_BYTES * 8,
sizeof(p.u));
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
/* Sanity check against max bitlen of the address family */
peer->host,
prefixlen - VPN_PREFIXLEN_MIN_BYTES * 8,
p.family, prefix_blen(&p));
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
/* Copy label to prefix. */
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error / VPN (%zu data remaining after parsing)",
peer->host, lim - pnt);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
return 0;
if (BGP_DEBUG(nht, NHT))
zlog_debug("%s: Updating peer (%s) status with NHT",
__FUNCTION__, peer->host);
- bgp_fsm_nht_update(peer, bgp_isvalid_nexthop(bnc));
+ bgp_fsm_event_update(peer, bgp_isvalid_nexthop(bnc));
SET_FLAG(bnc->flags, BGP_NEXTHOP_PEER_NOTIFIED);
}
case SAFI_FLOWSPEC:
return bgp_nlri_parse_flowspec(peer, attr, packet, mp_withdraw);
}
- return -1;
+ return BGP_NLRI_PARSE_ERROR;
}
/*
nlri_ret = bgp_nlri_parse(peer, &attr, &nlris[i], 1);
break;
default:
- nlri_ret = -1;
+ nlri_ret = BGP_NLRI_PARSE_ERROR;
}
- if (nlri_ret < 0) {
+ if (nlri_ret < BGP_NLRI_PARSE_OK
+ && nlri_ret != BGP_NLRI_PARSE_ERROR_PREFIX_OVERFLOW) {
flog_err(EC_BGP_UPDATE_RCV,
"%s [Error] Error parsing NLRI", peer->host);
if (peer->status == Established)
for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn))
for (pi = bgp_node_get_bgp_path_info(rn); pi; pi = next) {
next = pi->next;
+
+ /* Unimport EVPN routes from VRFs */
+ if (safi == SAFI_EVPN)
+ bgp_evpn_unimport_route(bgp, AFI_L2VPN,
+ SAFI_EVPN,
+ &rn->p, pi);
+
if (CHECK_FLAG(pi->flags, BGP_PATH_SELECTED)
&& pi->type == ZEBRA_ROUTE_BGP
&& (pi->sub_type == BGP_ROUTE_NORMAL
/* When packet overflow occurs return immediately. */
if (pnt + BGP_ADDPATH_ID_LEN > lim)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
addpath_id = ntohl(*((uint32_t *)pnt));
pnt += BGP_ADDPATH_ID_LEN;
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error (wrong prefix length %d for afi %u)",
peer->host, p.prefixlen, packet->afi);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH;
}
/* Packet size overflow check. */
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error (prefix length %d overflows packet)",
peer->host, p.prefixlen);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW;
}
/* Defensive coding, double-check the psize fits in a struct
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error (prefix length %d too large for prefix storage %zu)",
peer->host, p.prefixlen, sizeof(p.u));
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
/* Fetch prefix from NLRI packet. */
BGP_ROUTE_NORMAL, NULL, NULL, 0,
NULL);
- /* Address family configuration mismatch or maximum-prefix count
- overflow. */
+ /* Do not send BGP notification twice when maximum-prefix count
+ * overflow. */
+ if (CHECK_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW))
+ return BGP_NLRI_PARSE_ERROR_PREFIX_OVERFLOW;
+
+ /* Address family configuration mismatch. */
if (ret < 0)
- return -1;
+ return BGP_NLRI_PARSE_ERROR_ADDRESS_FAMILY;
}
/* Packet length consistency check. */
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error (prefix length mismatch with total length)",
peer->host);
- return -1;
+ return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
- return 0;
+ return BGP_NLRI_PARSE_OK;
}
static struct bgp_static *bgp_static_new(void)
unsigned long i;
for (i = 0; i < *json_header_depth; ++i)
vty_out(vty, " } ");
+ vty_out(vty, "\n");
}
} else {
if (is_last) {
BGP_AFI_HELP_STR
BGP_SAFI_WITH_LABEL_HELP_STR
"Display routes matching the AS path regular expression\n"
- "A regular-expression to match the BGP AS paths\n")
+ "A regular-expression (1234567890_^|[,{}() ]$*+.?-\\) to match the BGP AS paths\n")
{
afi_t afi = AFI_IP6;
safi_t safi = SAFI_UNICAST;
regex_t *regex;
int rc;
+ if (!config_bgp_aspath_validate(regstr)) {
+ vty_out(vty, "Invalid character in as-path access-list %s\n",
+ regstr);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
regex = bgp_regcomp(regstr);
if (!regex) {
vty_out(vty, "Can't compile regexp %s\n", regstr);
*/
#define BGP_MAX_LABELS 2
+/* Error codes for handling NLRI */
+#define BGP_NLRI_PARSE_OK 0
+#define BGP_NLRI_PARSE_ERROR_PREFIX_OVERFLOW -1
+#define BGP_NLRI_PARSE_ERROR_PACKET_OVERFLOW -2
+#define BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH -3
+#define BGP_NLRI_PARSE_ERROR_PACKET_LENGTH -4
+#define BGP_NLRI_PARSE_ERROR_LABEL_LENGTH -5
+#define BGP_NLRI_PARSE_ERROR_EVPN_MISSING_TYPE -6
+#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE2_SIZE -7
+#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE3_SIZE -8
+#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE4_SIZE -9
+#define BGP_NLRI_PARSE_ERROR_EVPN_TYPE5_SIZE -10
+#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_IPV6_NOT_SUPPORTED -11
+#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_NLRI_SIZELIMIT -12
+#define BGP_NLRI_PARSE_ERROR_FLOWSPEC_BAD_FORMAT -13
+#define BGP_NLRI_PARSE_ERROR_ADDRESS_FAMILY -14
+#define BGP_NLRI_PARSE_ERROR -32
+
/* Ancillary information to struct bgp_path_info,
* used for uncommonly used data (aggregation, MPLS, etc.)
* and lazily allocated to save memory.
"Disable the next hop calculation for this neighbor\n"
"Set the next hop to self for reflected routes\n")
+ALIAS_HIDDEN(neighbor_nexthop_self_force,
+ neighbor_nexthop_self_all_hidden_cmd,
+ "neighbor <A.B.C.D|X:X::X:X|WORD> next-hop-self all",
+ NEIGHBOR_STR NEIGHBOR_ADDR_STR2
+ "Disable the next hop calculation for this neighbor\n"
+ "Set the next hop to self for reflected routes\n")
+
DEFUN (no_neighbor_nexthop_self,
no_neighbor_nexthop_self_cmd,
"no neighbor <A.B.C.D|X:X::X:X|WORD> next-hop-self",
"Disable the next hop calculation for this neighbor\n"
"Set the next hop to self for reflected routes\n")
+ALIAS_HIDDEN(no_neighbor_nexthop_self_force,
+ no_neighbor_nexthop_self_all_hidden_cmd,
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> next-hop-self all",
+ NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
+ "Disable the next hop calculation for this neighbor\n"
+ "Set the next hop to self for reflected routes\n")
+
/* neighbor as-override */
DEFUN (neighbor_as_override,
neighbor_as_override_cmd,
/* "neighbor next-hop-self force" commands. */
install_element(BGP_NODE, &neighbor_nexthop_self_force_hidden_cmd);
install_element(BGP_NODE, &no_neighbor_nexthop_self_force_hidden_cmd);
+ install_element(BGP_NODE, &neighbor_nexthop_self_all_hidden_cmd);
+ install_element(BGP_NODE, &no_neighbor_nexthop_self_all_hidden_cmd);
install_element(BGP_IPV4_NODE, &neighbor_nexthop_self_force_cmd);
install_element(BGP_IPV4_NODE, &no_neighbor_nexthop_self_force_cmd);
install_element(BGP_IPV4M_NODE, &neighbor_nexthop_self_force_cmd);
struct bgp *bgp;
struct in_addr vtep_ip = {INADDR_ANY};
vrf_id_t tenant_vrf_id = VRF_DEFAULT;
+ struct in_addr mcast_grp = {INADDR_ANY};
s = zclient->ibuf;
vni = stream_getl(s);
if (command == ZEBRA_VNI_ADD) {
vtep_ip.s_addr = stream_get_ipv4(s);
stream_get(&tenant_vrf_id, s, sizeof(vrf_id_t));
+ mcast_grp.s_addr = stream_get_ipv4(s);
}
bgp = bgp_lookup_by_vrf_id(vrf_id);
if (command == ZEBRA_VNI_ADD)
return bgp_evpn_local_vni_add(
bgp, vni, vtep_ip.s_addr ? vtep_ip : bgp->router_id,
- tenant_vrf_id);
+ tenant_vrf_id, mcast_grp);
else
return bgp_evpn_local_vni_del(bgp, vni);
}
Disclaimer
----------
-The OSPF daemon contains an API for application access to the LSA
-database. This API was created by Ralph Keller, originally as patch for
-Zebra. Unfortunately, the page containing documentation of the API is no
-longer online. This page is an attempt to recreate documentation for the
-API (with lots of help of the WayBackMachine)
+The OSPF daemon contains an API for application access to the LSA database.
+This API and documentation was created by Ralph Keller, originally as patch for
+Zebra. Unfortunately, the page containing documentation for the API is no
+longer online. This page is an attempt to recreate documentation for the API
+(with lots of help from the WayBackMachine).
+
+Ralph has kindly licensed this documentation under GPLv2+. Please preserve the
+acknowledgements at the bottom of this document.
Introduction
------------
-This page describes an API that allows external applications to access
-the link-state database (LSDB) of the OSPF daemon. The implementation is
-based on the OSPF code from FRRouting (forked from Quagga and formerly
-Zebra) routing protocol suite and is subject to the GNU General Public
-License. The OSPF API provides you with the following functionality:
-
-- Retrieval of the full or partial link-state database of the OSPF
- daemon. This allows applications to obtain an exact copy of the LSDB
- including router LSAs, network LSAs and so on. Whenever a new LSA
- arrives at the OSPF daemon, the API module immediately informs the
- application by sending a message. This way, the application is always
- synchronized with the LSDB of the OSPF daemon.
-- Origination of own opaque LSAs (of type 9, 10, or 11) which are then
- distributed transparently to other routers within the flooding scope
- and received by other applications through the OSPF API.
-
-Opaque LSAs, which are described in RFC 2370 , allow you to distribute
-application-specific information within a network using the OSPF
-protocol. The information contained in opaque LSAs is transparent for
-the routing process but it can be processed by other modules such as
-traffic engineering (e.g., MPLS-TE).
+This page describes an API that allows external applications to access the
+link-state database (LSDB) of the OSPF daemon. The implementation is based on
+the OSPF code from FRRouting (forked from Quagga and formerly Zebra) routing
+protocol suite and is subject to the GNU General Public License. The OSPF API
+provides you with the following functionality:
+
+- Retrieval of the full or partial link-state database of the OSPF daemon.
+ This allows applications to obtain an exact copy of the LSDB including router
+ LSAs, network LSAs and so on. Whenever a new LSA arrives at the OSPF daemon,
+ the API module immediately informs the application by sending a message. This
+ way, the application is always synchronized with the LSDB of the OSPF daemon.
+- Origination of own opaque LSAs (of type 9, 10, or 11) which are then
+ distributed transparently to other routers within the flooding scope and
+ received by other applications through the OSPF API.
+
+Opaque LSAs, which are described in :rfc:`2370`, allow you to distribute
+application-specific information within a network using the OSPF protocol. The
+information contained in opaque LSAs is transparent for the routing process but
+it can be processed by other modules such as traffic engineering (e.g.,
+MPLS-TE).
Architecture
------------
-The following picture depicts the architecture of the Quagga/Zebra
-protocol suite. The OSPF daemon is extended with opaque LSA capabilities
-and an API for external applications. The OSPF core module executes the
-OSPF protocol by discovering neighbors and exchanging neighbor state.
-The opaque module, implemented by Masahiko Endo, provides functions to
-exchange opaque LSAs between routers. Opaque LSAs can be generated by
-several modules such as the MPLS-TE module or the API server module.
-These modules then invoke the opaque module to flood their data to
-neighbors within the flooding scope.
-
-The client, which is an application potentially running on a different
-node than the OSPF daemon, links against the OSPF API client library.
-This client library establishes a socket connection with the API server
-module of the OSPF daemon and uses this connection to retrieve LSAs and
-originate opaque LSAs.
+The following picture depicts the architecture of the Quagga/Zebra protocol
+suite. The OSPF daemon is extended with opaque LSA capabilities and an API for
+external applications. The OSPF core module executes the OSPF protocol by
+discovering neighbors and exchanging neighbor state. The opaque module,
+implemented by Masahiko Endo, provides functions to exchange opaque LSAs
+between routers. Opaque LSAs can be generated by several modules such as the
+MPLS-TE module or the API server module. These modules then invoke the opaque
+module to flood their data to neighbors within the flooding scope.
+
+The client, which is an application potentially running on a different node
+than the OSPF daemon, links against the OSPF API client library. This client
+library establishes a socket connection with the API server module of the OSPF
+daemon and uses this connection to retrieve LSAs and originate opaque LSAs.
.. figure:: ../figures/ospf_api_architecture.png
:alt: image
image
-The OSPF API server module works like any other internal opaque module
-(such as the MPLS-TE module), but listens to connections from external
-applications that want to communicate with the OSPF daemon. The API
-server module can handle multiple clients concurrently.
+The OSPF API server module works like any other internal opaque module (such as
+the MPLS-TE module), but listens to connections from external applications that
+want to communicate with the OSPF daemon. The API server module can handle
+multiple clients concurrently.
-One of the main objectives of the implementation is to make as little
-changes to the existing Zebra code as possible.
+One of the main objectives of the implementation is to make as little changes
+to the existing Zebra code as possible.
Installation & Configuration
----------------------------
-Download FRRouting and unpack
+Download FRRouting and unpack it.
-Configure your frr version (note that --enable-opaque-lsa also enables
-the ospfapi server and ospfclient).
+Configure and build FRR (note that ``--enable-opaque-lsa`` also enables the
+ospfapi server and ospfclient).
::
This should also compile the client library and sample application in
ospfclient.
-Make sure that you have enabled opaque LSAs in your configuration. Add
-the ospf opaque-lsa statement to your ospfd.conf:
+Make sure that you have enabled opaque LSAs in your configuration. Add the
+``ospf opaque-lsa`` statement to your :file:`ospfd.conf`:
::
-----
In the following we describe how you can use the sample application to
-originate opaque LSAs. The sample application first registers with the
-OSPF daemon the opaque type it wants to inject and then waits until the
-OSPF daemon is ready to accept opaque LSAs of that type. Then the client
-application originates an opaque LSA, waits 10 seconds and then updates
-the opaque LSA with new opaque data. After another 20 seconds, the
-client application deletes the opaque LSA from the LSDB. If the clients
-terminates unexpectedly, the OSPF API module will remove all the opaque
-LSAs that the application registered. Since the opaque LSAs are flooded
-to other routers, we will see the opaque LSAs in all routers according
-to the flooding scope of the opaque LSA.
+originate opaque LSAs. The sample application first registers with the OSPF
+daemon the opaque type it wants to inject and then waits until the OSPF daemon
+is ready to accept opaque LSAs of that type. Then the client application
+originates an opaque LSA, waits 10 seconds and then updates the opaque LSA with
+new opaque data. After another 20 seconds, the client application deletes the
+opaque LSA from the LSDB. If the client terminates unexpectedly, the OSPF API
+module will remove all the opaque LSAs that the application registered. Since
+the opaque LSAs are flooded to other routers, we will see the opaque LSAs in
+all routers according to the flooding scope of the opaque LSA.
We have a very simple demo setup, just two routers connected with an ATM
-point-to-point link. Start the modified OSPF daemons on two adjacent
-routers. First run on msr2:
+point-to-point link. Start the modified OSPF daemons on two adjacent routers.
+First run on msr2:
-::
+.. code-block:: console
- > msr2:/home/keller/ospfapi/zebra/ospfd# ./ospfd -f /usr/local/etc/ospfd.conf
+ # ./ospfd --apiserver -f /usr/local/etc/ospfd.conf
And on the neighboring router msr3:
-::
+.. code-block:: console
- > msr3:/home/keller/ospfapi/zebra/ospfd# ./ospfd -f /usr/local/etc/ospfd.conf
+ # ./ospfd --apiserver -f /usr/local/etc/ospfd.conf
Now the two routers form adjacency and start exchanging their databases.
Looking at the OSPF daemon of msr2 (or msr3), you see this:
-::
+.. code-block:: console
- ospfd> show ip ospf database
+ ospfd> show ip ospf database
- OSPF Router with ID (10.0.0.1)
+ OSPF Router with ID (10.0.0.1)
- Router Link States (Area 0.0.0.1)
+ Router Link States (Area 0.0.0.1)
- Link ID ADV Router Age Seq# CkSum Link count
- 10.0.0.1 10.0.0.1 55 0x80000003 0xc62f 2
- 10.0.0.2 10.0.0.2 55 0x80000003 0xe3e4 3
+ Link ID ADV Router Age Seq# CkSum Link count
+ 10.0.0.1 10.0.0.1 55 0x80000003 0xc62f 2
+ 10.0.0.2 10.0.0.2 55 0x80000003 0xe3e4 3
- Net Link States (Area 0.0.0.1)
+ Net Link States (Area 0.0.0.1)
- Link ID ADV Router Age Seq# CkSum
- 10.0.0.2 10.0.0.2 60 0x80000001 0x5fcb
+ Link ID ADV Router Age Seq# CkSum
+ 10.0.0.2 10.0.0.2 60 0x80000001 0x5fcb
Now we start the sample main application that originates an opaque LSA.
-::
+.. code-block:: console
- > cd ospfapi/apiclient
- > ./main msr2 10 250 20 0.0.0.0 0.0.0.1
+ # cd ospfapi/apiclient
+ # ./main msr2 10 250 20 0.0.0.0 0.0.0.1
-This originates an opaque LSA of type 10 (area local), with opaque type
-250 (experimental), opaque id of 20 (chosen arbitrarily), interface
-address 0.0.0.0 (which is used only for opaque LSAs type 9), and area
-0.0.0.1
+This originates an opaque LSA of type 10 (area local), with opaque type 250
+(experimental), opaque id of 20 (chosen arbitrarily), interface address 0.0.0.0
+(which is used only for type 9 opaque LSAs), and area 0.0.0.1.
Again looking at the OSPF database you see:
-::
+.. code-block:: console
- ospfd> show ip ospf database
+ ospfd> show ip ospf database
- OSPF Router with ID (10.0.0.1)
+ OSPF Router with ID (10.0.0.1)
- Router Link States (Area 0.0.0.1)
+ Router Link States (Area 0.0.0.1)
- Link ID ADV Router Age Seq# CkSum Link count
- 10.0.0.1 10.0.0.1 437 0x80000003 0xc62f 2
- 10.0.0.2 10.0.0.2 437 0x80000003 0xe3e4 3
+ Link ID ADV Router Age Seq# CkSum Link count
+ 10.0.0.1 10.0.0.1 437 0x80000003 0xc62f 2
+ 10.0.0.2 10.0.0.2 437 0x80000003 0xe3e4 3
- Net Link States (Area 0.0.0.1)
+ Net Link States (Area 0.0.0.1)
- Link ID ADV Router Age Seq# CkSum
- 10.0.0.2 10.0.0.2 442 0x80000001 0x5fcb
+ Link ID ADV Router Age Seq# CkSum
+ 10.0.0.2 10.0.0.2 442 0x80000001 0x5fcb
- Area-Local Opaque-LSA (Area 0.0.0.1)
+ Area-Local Opaque-LSA (Area 0.0.0.1)
- Opaque-Type/Id ADV Router Age Seq# CkSum
- 250.0.0.20 10.0.0.1 0 0x80000001 0x58a6 <=== opaque LSA
+ Opaque-Type/Id ADV Router Age Seq# CkSum
+ 250.0.0.20 10.0.0.1 0 0x80000001 0x58a6 <=== opaque LSA
You can take a closer look at this opaque LSA:
-::
+.. code-block:: console
- ospfd> show ip ospf database opaque-area
+ ospfd> show ip ospf database opaque-area
- OSPF Router with ID (10.0.0.1)
+ OSPF Router with ID (10.0.0.1)
- Area-Local Opaque-LSA (Area 0.0.0.1)
+ Area-Local Opaque-LSA (Area 0.0.0.1)
- LS age: 4
- Options: 66
- LS Type: Area-Local Opaque-LSA
- Link State ID: 250.0.0.20 (Area-Local Opaque-Type/ID)
- Advertising Router: 10.0.0.1
- LS Seq Number: 80000001
- Checksum: 0x58a6
- Length: 24
- Opaque-Type 250 (Private/Experimental)
- Opaque-ID 0x14
- Opaque-Info: 4 octets of data
- Added using OSPF API: 4 octets of opaque data
- Opaque data: 1 0 0 0 <==== counter is 1
+ LS age: 4
+ Options: 66
+ LS Type: Area-Local Opaque-LSA
+ Link State ID: 250.0.0.20 (Area-Local Opaque-Type/ID)
+ Advertising Router: 10.0.0.1
+ LS Seq Number: 80000001
+ Checksum: 0x58a6
+ Length: 24
+ Opaque-Type 250 (Private/Experimental)
+ Opaque-ID 0x14
+ Opaque-Info: 4 octets of data
+ Added using OSPF API: 4 octets of opaque data
+ Opaque data: 1 0 0 0 <==== counter is 1
-Note that the main application updates the opaque LSA after 10 seconds,
-then it looks as follows:
+Note that the main application updates the opaque LSA after 10 seconds, then it
+looks as follows:
-::
+.. code-block:: console
- ospfd> show ip ospf database opaque-area
+ ospfd> show ip ospf database opaque-area
- OSPF Router with ID (10.0.0.1)
+ OSPF Router with ID (10.0.0.1)
- Area-Local Opaque-LSA (Area 0.0.0.1)
+ Area-Local Opaque-LSA (Area 0.0.0.1)
- LS age: 1
- Options: 66
- LS Type: Area-Local Opaque-LSA
- Link State ID: 250.0.0.20 (Area-Local Opaque-Type/ID)
- Advertising Router: 10.0.0.1
- LS Seq Number: 80000002
- Checksum: 0x59a3
- Length: 24
- Opaque-Type 250 (Private/Experimental)
- Opaque-ID 0x14
- Opaque-Info: 4 octets of data
- Added using OSPF API: 4 octets of opaque data
- Opaque data: 2 0 0 0 <==== counter is now 2
+ LS age: 1
+ Options: 66
+ LS Type: Area-Local Opaque-LSA
+ Link State ID: 250.0.0.20 (Area-Local Opaque-Type/ID)
+ Advertising Router: 10.0.0.1
+ LS Seq Number: 80000002
+ Checksum: 0x59a3
+ Length: 24
+ Opaque-Type 250 (Private/Experimental)
+ Opaque-ID 0x14
+ Opaque-Info: 4 octets of data
+ Added using OSPF API: 4 octets of opaque data
+ Opaque data: 2 0 0 0 <==== counter is now 2
-Note that the payload of the opaque LSA has changed as you can see
-above.
+Note that the payload of the opaque LSA has changed as you can see above.
-Then, again after another 20 seconds, the opaque LSA is flushed from the
-LSDB.
+Then, again after another 20 seconds, the opaque LSA is flushed from the LSDB.
Important note:
^^^^^^^^^^^^^^^
In order to originate an opaque LSA, there must be at least one active
-opaque-capable neighbor. Thus, you cannot originate opaque LSAs of no
-neighbors are present. If you try to originate even so no neighbor is
-ready, you will receive a not ready error message. The reason for this
-restriction is that it might be possible that some routers have an
-identical opaque LSA from a previous origination in their LSDB that
-unfortunately could not be flushed due to a crash, and now if the router
-comes up again and starts originating a new opaque LSA, the new opaque
-LSA is considered older since it has a lower sequence number and is
-ignored by other routers (that consider the stalled opaque LSA as more
-recent). However, if the originating router first synchronizes the
-database before originating opaque LSAs, it will detect the older opaque
-LSA and can flush it first.
+opaque-capable neighbor. Thus, you cannot originate opaque LSAs if no neighbors
+are present. If you try to originate when no neighbors are ready, you will
+receive a not ready error message. The reason for this restriction is that it
+might be possible that some routers have an identical opaque LSA from a
+previous origination in their LSDB that unfortunately could not be flushed due
+to a crash, and now if the router comes up again and starts originating a new
+opaque LSA, the new opaque LSA is considered older since it has a lower
+sequence number and is ignored by other routers (that consider the stalled
+opaque LSA as more recent). However, if the originating router first
+synchronizes the database before originating opaque LSAs, it will detect the
+older opaque LSA and can flush it first.
Protocol and Message Formats
----------------------------
-If you are developing your own client application and you don't want to
-make use of the client library (due to the GNU license restriction or
-whatever reason), you can implement your own client-side message
-handling. The OSPF API uses two connections between the client and the
-OSPF API server: One connection is used for a synchronous request /reply
-protocol and another connection is used for asynchronous notifications
-(e.g., LSA update, neighbor status change).
+If you are developing your own client application and you don't want to make
+use of the client library (due to the GNU license restriction or whatever
+reason), you can implement your own client-side message handling. The OSPF API
+uses two connections between the client and the OSPF API server: One connection
+is used for a synchronous request/reply protocol and another connection is
+used for asynchronous notifications (e.g., LSA update, neighbor status change).
Each message begins with the following header:
image
-The origin field allows to select according to the following types of
-origins:
+The origin field allows origin-based filtering using the following origin
+types:
+-------------------------+---------+
| Origin | Value |
| ANY\_ORIGIN | 2 |
+-------------------------+---------+
-The reply message has on of the following error codes:
+The reply message has one of the following error codes:
+--------------------------+---------+
| Error code | Value |
image
+
+.. Do not delete these acknowledgements!
+
Original Acknowledgments from Ralph Keller
------------------------------------------
-I would like to thank Masahiko Endo, the author of the opaque LSA
-extension module, for his great support. His wonderful ASCII graphs
-explaining the internal workings of this code, and his invaluable input
-proved to be crucial in designing a useful API for accessing the link
-state database of the OSPF daemon. Once, he even decided to take the
-plane from Tokyo to Zurich so that we could actually meet and have
-face-to-face discussions, which was a lot of fun. Clearly, without
-Masahiko no API would ever be completed. I also would like to thank
-Daniel Bauer who wrote an opaque LSA implementation too and was willing
+I would like to thank Masahiko Endo, the author of the opaque LSA extension
+module, for his great support. His wonderful ASCII graphs explaining the
+internal workings of this code, and his invaluable input proved to be crucial
+in designing a useful API for accessing the link state database of the OSPF
+daemon. Once, he even decided to take the plane from Tokyo to Zurich so that we
+could actually meet and have face-to-face discussions, which was a lot of fun.
+Clearly, without Masahiko no API would ever be completed. I also would like to
+thank Daniel Bauer who wrote an opaque LSA implementation too and was willing
to test the OSPF API code in one of his projects.
.. include:: common-options.rst
+.. option:: -a, --apiserver
+
+ Enable the OSPF API server.
+
FILES
=====
.. option:: -a, --apiserver
- Enable the OSPF API server
+ Enable the OSPF API server. This is required to use ``ospfclient``.
*ospfd* must acquire interface information from *zebra* in order to function.
Therefore *zebra* must be running before invoking *ospfd*. Also, if *zebra* is
+-----------------------------------+----------------+--------------+------------+------------+------------+
| ASM (Any Source) | :mark:`Y` | :mark:`N` | :mark:`N` | :mark:`N` | :mark:`N` |
+-----------------------------------+----------------+--------------+------------+------------+------------+
+| EVPN BUM Forwarding | :mark:`≥5.0` | :mark:`N` | :mark:`N` | :mark:`N` | :mark:`N` |
++-----------------------------------+----------------+--------------+------------+------------+------------+
The indicators have the following semantics:
*pimd* supports pim-sm as well as igmp v2 and v3. pim is
vrf aware and can work within the context of vrf's in order to
-do S,G mrouting.
+do S,G mrouting. Additionally PIM can be used in the EVPN underlay
+network for optimizing forwarding of overlay BUM traffic.
.. _starting-and-stopping-pimd:
Display various information about the interfaces used in this pim instance.
-.. index:: show ip mroute
-.. clicmd:: show ip mroute
+.. index:: show ip mroute [vrf NAME] [A.B.C.D [A.B.C.D]] [fill] [json]
+.. clicmd:: show ip mroute [vrf NAME] [A.B.C.D [A.B.C.D]] [fill] [json]
- Display information about installed into the kernel S,G mroutes.
+ Display information about the S,G mroutes installed in the kernel. If
+ one address is specified we assume it is the Group we are interested
+ in displaying data on. If the second address is specified then it is
+ the Source,Group pair. The keyword `fill` says to fill in all assumed
+ data for test/data gathering purposes.
.. index:: show ip mroute count
.. clicmd:: show ip mroute count
Display information about interfaces PIM is using.
-.. index:: show ip pim join
+.. index:: show ip pim [vrf NAME] join [A.B.C.D [A.B.C.D]] [json]
.. clicmd:: show ip pim join
- Display information about PIM joins received.
+ Display information about PIM joins received. If one address is specified
+ then we assume it is the Group we are interested in displaying data on.
+ If the second address is specified then it is the Source,Group pair.
.. index:: show ip pim local-membership
.. clicmd:: show ip pim local-membership
Display information about known S,G's and incoming interface as well as the
OIL and how they were chosen.
-.. index:: show ip pim upstream
+.. index:: show ip pim [vrf NAME] upstream [A.B.C.D [A.B.C.D]] [json]
.. clicmd:: show ip pim upstream
- Display upstream information about a S,G mroute.
+ Display upstream information about an S,G mroute. Allows the user to
+ specify a Source and/or Group to limit the output to entries of interest.
.. index:: show ip pim upstream-join-desired
.. clicmd:: show ip pim upstream-join-desired
.. clicmd:: clear ip pim oil
Rescan PIM OIL (output interface list).
+
+PIM EVPN configuration
+======================
+To use PIM in the underlay for overlay BUM forwarding, associate a multicast
+group with the L2 VNI. The actual configuration is based on your distribution.
+Here is an ifupdown2 example::
+
+ auto vx-10100
+ iface vx-10100
+ vxlan-id 10100
+ bridge-access 100
+ vxlan-local-tunnelip 27.0.0.11
+ vxlan-mcastgrp 239.1.1.100
+
+.. note::
+
+ PIM will see the ``vxlan-mcastgrp`` configuration and auto configure state
+ to properly forward BUM traffic.
+
+PIM also needs to be configured in the underlay to allow the BUM MDT to be
+setup. This is existing PIM configuration:
+
+- Enable PIM on the underlay L3 interfaces via the ``ip pim`` command.
+- Configure RPs for the BUM multicast group range.
+- Ensure PIM is enabled on the loopback interface of the VTEPs and the RP.
uint16_t psnp_interval[2]; /* psnp-interval in seconds */
uint8_t metric[2];
uint32_t te_metric[2];
- struct mpls_te_circuit
- *mtc; /* Support for MPLS-TE parameters - see isis_te.[c,h] */
+ struct mpls_te_circuit *mtc; /* MPLS-TE parameters */
int ip_router; /* Route IP ? */
int is_passive; /* Is Passive ? */
struct list *mt_settings; /* IS-IS MT Settings */
const char *circ_type;
struct isis_area *area;
struct interface *ifp;
- const struct lyd_node *dnode =
- yang_dnode_get(running_config->dnode, VTY_CURR_XPATH);
/* area will be created if it is not present. make sure the yang model
* is synced with FRR and call the appropriate NB cb.
}
/* check if the interface is a loopback and if so set it as passive */
- ifp = yang_dnode_get_entry(dnode, false);
+ ifp = nb_running_get_entry(NULL, VTY_CURR_XPATH, false);
if (ifp && if_is_loopback(ifp))
nb_cli_enqueue_change(vty, "./frr-isisd:isis/passive",
NB_OP_MODIFY, "true");
const char *circ_type;
struct isis_area *area;
struct interface *ifp;
- const struct lyd_node *dnode =
- yang_dnode_get(running_config->dnode, VTY_CURR_XPATH);
/* area will be created if it is not present. make sure the yang model
* is synced with FRR and call the appropriate NB cb.
}
/* check if the interface is a loopback and if so set it as passive */
- ifp = yang_dnode_get_entry(dnode, false);
+ ifp = nb_running_get_entry(NULL, VTY_CURR_XPATH, false);
if (ifp && if_is_loopback(ifp))
nb_cli_enqueue_change(vty, "./frr-isisd:isis/passive",
NB_OP_MODIFY, "true");
"Act as an area router only\n")
{
const char *value = NULL;
- const struct lyd_node *dnode =
- yang_dnode_get(running_config->dnode, VTY_CURR_XPATH);
- struct isis_area *area = yang_dnode_get_entry(dnode, false);
+ struct isis_area *area;
+
+ area = nb_running_get_entry(NULL, VTY_CURR_XPATH, false);
/*
* Put the is-type back to defaults:
}
/*
- * XPath: /frr-isisd:isis/mpls-te
+ * XPath: /frr-isisd:isis/instance/mpls-te
*/
DEFPY(isis_mpls_te_on, isis_mpls_te_on_cmd, "mpls-te on",
MPLS_TE_STR "Enable the MPLS-TE functionality\n")
{
- nb_cli_enqueue_change(vty, "/frr-isisd:isis/mpls-te", NB_OP_CREATE,
+ nb_cli_enqueue_change(vty, "./mpls-te", NB_OP_CREATE,
NULL);
return nb_cli_apply_changes(vty, NULL);
DEFPY(no_isis_mpls_te_on, no_isis_mpls_te_on_cmd, "no mpls-te [on]",
NO_STR
"Disable the MPLS-TE functionality\n"
- "Enable the MPLS-TE functionality\n")
+ "Disable the MPLS-TE functionality\n")
{
- nb_cli_enqueue_change(vty, "/frr-isisd:isis/mpls-te", NB_OP_DESTROY,
+ nb_cli_enqueue_change(vty, "./mpls-te", NB_OP_DESTROY,
NULL);
return nb_cli_apply_changes(vty, NULL);
}
/*
- * XPath: /frr-isisd:isis/mpls-te/router-address
+ * XPath: /frr-isisd:isis/instance/mpls-te/router-address
*/
DEFPY(isis_mpls_te_router_addr, isis_mpls_te_router_addr_cmd,
"mpls-te router-address A.B.C.D",
"Stable IP address of the advertising router\n"
"MPLS-TE router address in IPv4 address format\n")
{
- nb_cli_enqueue_change(vty, "/frr-isisd:isis/mpls-te/router-address",
+ nb_cli_enqueue_change(vty, "./mpls-te/router-address",
NB_OP_MODIFY, router_address_str);
return nb_cli_apply_changes(vty, NULL);
}
+DEFPY(no_isis_mpls_te_router_addr, no_isis_mpls_te_router_addr_cmd,
+ "no mpls-te router-address [A.B.C.D]",
+ NO_STR MPLS_TE_STR
+ "Delete IP address of the advertising router\n"
+ "MPLS-TE router address in IPv4 address format\n")
+{
+ nb_cli_enqueue_change(vty, "./mpls-te/router-address",
+ NB_OP_DESTROY, NULL);
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
void cli_show_isis_mpls_te_router_addr(struct vty *vty, struct lyd_node *dnode,
bool show_defaults)
{
"AREA native mode self originate INTER-AS LSP with L1 and L2 flooding scope\n"
"AS native mode self originate INTER-AS LSP with L2 only flooding scope\n")
{
- vty_out(vty, "MPLS-TE Inter-AS is not yet supported.");
+ vty_out(vty, "MPLS-TE Inter-AS is not yet supported\n");
return CMD_SUCCESS;
}
"Level-1-2 adjacencies are formed\n"
"Level-2 only adjacencies are formed\n")
{
- const struct lyd_node *dnode;
struct interface *ifp;
struct isis_circuit *circuit;
int is_type;
* and the is-type of the area if there is one. So we need to do this
* here.
*/
- dnode = yang_dnode_get(running_config->dnode, VTY_CURR_XPATH);
- ifp = yang_dnode_get_entry(dnode, false);
+ ifp = nb_running_get_entry(NULL, VTY_CURR_XPATH, false);
if (!ifp)
goto def_val;
install_element(ISIS_NODE, &isis_mpls_te_on_cmd);
install_element(ISIS_NODE, &no_isis_mpls_te_on_cmd);
install_element(ISIS_NODE, &isis_mpls_te_router_addr_cmd);
+ install_element(ISIS_NODE, &no_isis_mpls_te_router_addr_cmd);
install_element(ISIS_NODE, &isis_mpls_te_inter_as_cmd);
install_element(ISIS_NODE, &isis_default_originate_cmd);
uint8_t subtlvs[256];
uint8_t subtlv_len;
- if (IS_MPLS_TE(isisMplsTE)
+ if (IS_MPLS_TE(area->mta)
&& circuit->interface
&& HAS_LINK_PARAMS(
circuit->interface))
uint8_t subtlvs[256];
uint8_t subtlv_len;
- if (IS_MPLS_TE(isisMplsTE)
+ if (IS_MPLS_TE(area->mta)
&& circuit->interface != NULL
&& HAS_LINK_PARAMS(
circuit->interface))
#include "hash.h"
#include "if.h"
#include "command.h"
-#include "log_int.h"
#include "isisd/isis_constants.h"
#include "isisd/isis_common.h"
area = isis_area_create(area_tag);
/* save area in dnode to avoid looking it up all the time */
- yang_dnode_set_entry(dnode, area);
+ nb_running_set_entry(dnode, area);
return NB_OK;
}
static int isis_instance_destroy(enum nb_event event,
const struct lyd_node *dnode)
{
- const char *area_tag;
+ struct isis_area *area;
if (event != NB_EV_APPLY)
return NB_OK;
- area_tag = yang_dnode_get_string(dnode, "./area-tag");
- isis_area_destroy(area_tag);
+ area = nb_running_unset_entry(dnode);
+ isis_area_destroy(area->area_tag);
return NB_OK;
}
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, NULL);
isis_area_is_type_set(area, type);
XFREE(MTYPE_ISIS_AREA_ADDR, resource->ptr);
break;
case NB_EV_APPLY:
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
addrr = resource->ptr;
if (isis->sysid_set == 0) {
net_title = yang_dnode_get_string(dnode, NULL);
addr.addr_len = dotformat2buff(buff, net_title);
memcpy(addr.area_addr, buff, (int)addr.addr_len);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
for (ALL_LIST_ELEMENTS_RO(area->area_addrs, node, addrp)) {
if ((addrp->addr_len + ISIS_SYS_ID_LEN + 1) == addr.addr_len
&& !memcmp(addrp->area_addr, addr.area_addr, addr.addr_len))
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_dynhostname_set(area, yang_dnode_get_bool(dnode, NULL));
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
attached = yang_dnode_get_bool(dnode, NULL);
isis_area_attached_bit_set(area, attached);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
overload = yang_dnode_get_bool(dnode, NULL);
isis_area_overload_bit_set(area, overload);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
old_metric = (metric_style == ISIS_WIDE_METRIC) ? false : true;
new_metric = (metric_style == ISIS_NARROW_METRIC) ? false : true;
isis_area_metricstyle_set(area, old_metric, new_metric);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
area->purge_originator = yang_dnode_get_bool(dnode, NULL);
return NB_OK;
switch (event) {
case NB_EV_VALIDATE:
- area = yang_dnode_get_entry(dnode, false);
+ area = nb_running_get_entry(dnode, NULL, false);
if (!area)
break;
for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_lsp_mtu_set(area, lsp_mtu);
break;
}
return NB_OK;
refr_int = yang_dnode_get_uint16(dnode, NULL);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_lsp_refresh_set(area, IS_LEVEL_1, refr_int);
return NB_OK;
return NB_OK;
refr_int = yang_dnode_get_uint16(dnode, NULL);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_lsp_refresh_set(area, IS_LEVEL_2, refr_int);
return NB_OK;
return NB_OK;
max_lt = yang_dnode_get_uint16(dnode, NULL);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_max_lsp_lifetime_set(area, IS_LEVEL_1, max_lt);
return NB_OK;
return NB_OK;
max_lt = yang_dnode_get_uint16(dnode, NULL);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_max_lsp_lifetime_set(area, IS_LEVEL_2, max_lt);
return NB_OK;
return NB_OK;
gen_int = yang_dnode_get_uint16(dnode, NULL);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
area->lsp_gen_interval[0] = gen_int;
return NB_OK;
return NB_OK;
gen_int = yang_dnode_get_uint16(dnode, NULL);
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
area->lsp_gen_interval[1] = gen_int;
return NB_OK;
long long_delay = yang_dnode_get_uint16(dnode, "./long-delay");
long holddown = yang_dnode_get_uint16(dnode, "./hold-down");
long timetolearn = yang_dnode_get_uint16(dnode, "./time-to-learn");
- struct isis_area *area = yang_dnode_get_entry(dnode, true);
+ struct isis_area *area = nb_running_get_entry(dnode, NULL, true);
size_t bufsiz = strlen(area->area_tag) + sizeof("IS-IS Lx");
char *buf = XCALLOC(MTYPE_TMP, bufsiz);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
spf_backoff_free(area->spf_delay_ietf[0]);
spf_backoff_free(area->spf_delay_ietf[1]);
area->spf_delay_ietf[0] = NULL;
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
area->min_spf_interval[0] = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
area->min_spf_interval[1] = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
static void area_password_apply_finish(const struct lyd_node *dnode)
{
const char *password = yang_dnode_get_string(dnode, "./password");
- struct isis_area *area = yang_dnode_get_entry(dnode, true);
+ struct isis_area *area = nb_running_get_entry(dnode, NULL, true);
int pass_type = yang_dnode_get_enum(dnode, "./password-type");
uint8_t snp_auth = yang_dnode_get_enum(dnode, "./authenticate-snp");
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_passwd_unset(area, IS_LEVEL_1);
return NB_OK;
static void domain_password_apply_finish(const struct lyd_node *dnode)
{
const char *password = yang_dnode_get_string(dnode, "./password");
- struct isis_area *area = yang_dnode_get_entry(dnode, true);
+ struct isis_area *area = nb_running_get_entry(dnode, NULL, true);
int pass_type = yang_dnode_get_enum(dnode, "./password-type");
uint8_t snp_auth = yang_dnode_get_enum(dnode, "./authenticate-snp");
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
isis_area_passwd_unset(area, IS_LEVEL_2);
return NB_OK;
int originate_type = DEFAULT_ORIGINATE;
unsigned long metric = 0;
const char *routemap = NULL;
- struct isis_area *area = yang_dnode_get_entry(dnode, true);
+ struct isis_area *area = nb_running_get_entry(dnode, NULL, true);
int level = yang_dnode_get_enum(dnode, "./level");
if (yang_dnode_get_bool(dnode, "./always")) {
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
level = yang_dnode_get_enum(dnode, "./level");
isis_redist_unset(area, level, AF_INET, DEFAULT_ROUTE);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
level = yang_dnode_get_enum(dnode, "./level");
isis_redist_unset(area, level, AF_INET6, DEFAULT_ROUTE);
type = yang_dnode_get_enum(dnode, "./protocol");
level = yang_dnode_get_enum(dnode, "./level");
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
if (yang_dnode_exists(dnode, "./metric"))
metric = yang_dnode_get_uint32(dnode, "./metric");
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
level = yang_dnode_get_enum(dnode, "./level");
type = yang_dnode_get_enum(dnode, "./protocol");
isis_redist_unset(area, level, AF_INET, type);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
level = yang_dnode_get_enum(dnode, "./level");
type = yang_dnode_get_enum(dnode, "./protocol");
isis_redist_unset(area, level, AF_INET6, type);
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
setting = area_get_mt_setting(area, mtid);
setting->enabled = create;
lsp_regenerate_schedule(area, IS_LEVEL_1 | IS_LEVEL_2, 0);
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
setting = area_get_mt_setting(area, mtid);
setting->overload = yang_dnode_get_bool(dnode, NULL);
if (setting->enabled)
if (event != NB_EV_APPLY)
return NB_OK;
- area = yang_dnode_get_entry(dnode, true);
+ area = nb_running_get_entry(dnode, NULL, true);
area->log_adj_changes = log ? 1 : 0;
return NB_OK;
}
/*
- * XPath: /frr-isisd:isis/mpls-te
+ * XPath: /frr-isisd:isis/instance/mpls-te
*/
-static int isis_mpls_te_create(enum nb_event event,
+static int isis_instance_mpls_te_create(enum nb_event event,
const struct lyd_node *dnode,
union nb_resource *resource)
{
struct listnode *node;
+ struct isis_area *area;
struct isis_circuit *circuit;
if (event != NB_EV_APPLY)
return NB_OK;
- isisMplsTE.status = enable;
+ area = nb_running_get_entry(dnode, NULL, true);
+ if (area->mta == NULL) {
+
+ struct mpls_te_area *new;
+
+ zlog_debug("ISIS MPLS-TE: Initialize area %s",
+ area->area_tag);
+
+ new = XCALLOC(MTYPE_ISIS_MPLS_TE, sizeof(struct mpls_te_area));
+
+ /* Initialize MPLS_TE structure */
+ new->status = enable;
+ new->level = 0;
+ new->inter_as = off;
+ new->interas_areaid.s_addr = 0;
+ new->router_id.s_addr = 0;
+
+ area->mta = new;
+ } else {
+ area->mta->status = enable;
+ }
/*
* Following code is intended to handle two cases;
* MPLS_TE flag
* 2) MPLS-TE was once enabled then disabled, and now enabled again.
*/
- for (ALL_LIST_ELEMENTS_RO(isisMplsTE.cir_list, node, circuit)) {
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
if (circuit->mtc == NULL || IS_FLOOD_AS(circuit->mtc->type))
continue;
- if ((circuit->mtc->status == disable)
+ if (!IS_MPLS_TE(circuit->mtc)
&& HAS_LINK_PARAMS(circuit->interface))
circuit->mtc->status = enable;
else
return NB_OK;
}
-static int isis_mpls_te_destroy(enum nb_event event,
+static int isis_instance_mpls_te_destroy(enum nb_event event,
const struct lyd_node *dnode)
{
struct listnode *node;
+ struct isis_area *area;
struct isis_circuit *circuit;
if (event != NB_EV_APPLY)
return NB_OK;
- isisMplsTE.status = disable;
+ area = nb_running_get_entry(dnode, NULL, true);
+ if (IS_MPLS_TE(area->mta))
+ area->mta->status = disable;
+ else
+ return NB_OK;
/* Flush LSP if circuit engage */
- for (ALL_LIST_ELEMENTS_RO(isisMplsTE.cir_list, node, circuit)) {
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, node, circuit)) {
if (circuit->mtc == NULL || (circuit->mtc->status == disable))
continue;
}
/*
- * XPath: /frr-isisd:isis/mpls-te/router-address
+ * XPath: /frr-isisd:isis/instance/mpls-te/router-address
*/
-static int isis_mpls_te_router_address_modify(enum nb_event event,
+static int isis_instance_mpls_te_router_address_modify(enum nb_event event,
const struct lyd_node *dnode,
union nb_resource *resource)
{
struct in_addr value;
- struct listnode *node;
struct isis_area *area;
if (event != NB_EV_APPLY)
return NB_OK;
- yang_dnode_get_ipv4(&value, dnode, NULL);
- isisMplsTE.router_id.s_addr = value.s_addr;
+ area = nb_running_get_entry(dnode, NULL, true);
/* only proceed if MPLS-TE is enabled */
- if (isisMplsTE.status == disable)
+ if (!IS_MPLS_TE(area->mta))
return NB_OK;
- /* Update main Router ID in isis global structure */
- isis->router_id = value.s_addr;
+ /* Update Area Router ID */
+ yang_dnode_get_ipv4(&value, dnode, NULL);
+ area->mta->router_id.s_addr = value.s_addr;
+
/* And re-schedule LSP update */
- for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area))
- if (listcount(area->area_addrs) > 0)
- lsp_regenerate_schedule(area, area->is_type, 0);
+ if (listcount(area->area_addrs) > 0)
+ lsp_regenerate_schedule(area, area->is_type, 0);
return NB_OK;
}
-static int isis_mpls_te_router_address_destroy(enum nb_event event,
+static int isis_instance_mpls_te_router_address_destroy(enum nb_event event,
const struct lyd_node *dnode)
{
- struct listnode *node;
struct isis_area *area;
if (event != NB_EV_APPLY)
return NB_OK;
- isisMplsTE.router_id.s_addr = INADDR_ANY;
+ area = nb_running_get_entry(dnode, NULL, true);
/* only proceed if MPLS-TE is enabled */
- if (isisMplsTE.status == disable)
+ if (!IS_MPLS_TE(area->mta))
return NB_OK;
- /* Update main Router ID in isis global structure */
- isis->router_id = 0;
+ /* Reset Area Router ID */
+ area->mta->router_id.s_addr = INADDR_ANY;
+
/* And re-schedule LSP update */
- for (ALL_LIST_ELEMENTS_RO(isis->area_list, node, area))
- if (listcount(area->area_addrs) > 0)
- lsp_regenerate_schedule(area, area->is_type, 0);
+ if (listcount(area->area_addrs) > 0)
+ lsp_regenerate_schedule(area, area->is_type, 0);
return NB_OK;
}
abort();
}
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
circuit = isis_circuit_create(area, ifp);
assert(circuit->state == C_STATE_CONF || circuit->state == C_STATE_UP);
- yang_dnode_set_entry(dnode, circuit);
+ nb_running_set_entry(dnode, circuit);
return NB_OK;
}
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_unset_entry(dnode);
if (!circuit)
return NB_ERR_INCONSISTENCY;
/* delete circuit through csm changes */
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
isis_circuit_is_type_set(circuit, circ_type);
break;
}
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
ipv4 = yang_dnode_get_bool(dnode, NULL);
ipv6 = yang_dnode_get_bool(dnode, "../ipv6-routing");
isis_circuit_af_set(circuit, ipv4, ipv6);
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
ipv4 = yang_dnode_exists(dnode, "../ipv4-routing");
ipv6 = yang_dnode_get_bool(dnode, NULL);
isis_circuit_af_set(circuit, ipv4, ipv6);
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->csnp_interval[0] = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->csnp_interval[1] = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->psnp_interval[0] = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->psnp_interval[1] = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->pad_hellos = yang_dnode_get_bool(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
interval = yang_dnode_get_uint32(dnode, NULL);
circuit->hello_interval[0] = interval;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
interval = yang_dnode_get_uint32(dnode, NULL);
circuit->hello_interval[1] = interval;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
multi = yang_dnode_get_uint16(dnode, NULL);
circuit->hello_multiplier[0] = multi;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
multi = yang_dnode_get_uint16(dnode, NULL);
circuit->hello_multiplier[1] = multi;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
met = yang_dnode_get_uint32(dnode, NULL);
isis_circuit_metric_set(circuit, IS_LEVEL_1, met);
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
met = yang_dnode_get_uint32(dnode, NULL);
isis_circuit_metric_set(circuit, IS_LEVEL_2, met);
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->priority[0] = yang_dnode_get_uint8(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->priority[1] = yang_dnode_get_uint8(dnode, NULL);
return NB_OK;
switch (event) {
case NB_EV_VALIDATE:
- circuit = yang_dnode_get_entry(dnode, false);
+ circuit = nb_running_get_entry(dnode, NULL, false);
if (!circuit)
break;
if (circuit->circ_type == CIRCUIT_T_LOOPBACK) {
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
isis_circuit_circ_type_set(circuit, net_type);
break;
}
/* validation only applies if we are setting passive to false */
if (!passive && event == NB_EV_VALIDATE) {
- circuit = yang_dnode_get_entry(dnode, false);
+ circuit = nb_running_get_entry(dnode, NULL, false);
if (!circuit)
return NB_OK;
ifp = circuit->interface;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
if (circuit->state != C_STATE_UP) {
circuit->is_passive = passive;
} else {
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
isis_circuit_passwd_unset(circuit);
return NB_OK;
return NB_OK;
password = yang_dnode_get_string(dnode, NULL);
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
isis_circuit_passwd_set(circuit, circuit->passwd.type, password);
return NB_OK;
pass_type = yang_dnode_get_enum(dnode, NULL);
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->passwd.type = pass_type;
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
circuit->disable_threeway_adj = yang_dnode_get_bool(dnode, NULL);
return NB_OK;
switch (event) {
case NB_EV_VALIDATE:
- circuit = yang_dnode_get_entry(dnode, false);
+ circuit = nb_running_get_entry(dnode, NULL, false);
if (circuit && circuit->area && circuit->area->oldmetric) {
flog_warn(
EC_LIB_NB_CB_CONFIG_VALIDATE,
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- circuit = yang_dnode_get_entry(dnode, true);
+ circuit = nb_running_get_entry(dnode, NULL, true);
value = yang_dnode_get_bool(dnode, NULL);
isis_circuit_mt_enabled_set(circuit, mtid, value);
break;
.cbs.cli_show = cli_show_isis_log_adjacency,
},
{
- .xpath = "/frr-isisd:isis/mpls-te",
- .cbs.create = isis_mpls_te_create,
- .cbs.destroy = isis_mpls_te_destroy,
+ .xpath = "/frr-isisd:isis/instance/mpls-te",
+ .cbs.create = isis_instance_mpls_te_create,
+ .cbs.destroy = isis_instance_mpls_te_destroy,
.cbs.cli_show = cli_show_isis_mpls_te,
},
{
- .xpath = "/frr-isisd:isis/mpls-te/router-address",
- .cbs.modify = isis_mpls_te_router_address_modify,
- .cbs.destroy = isis_mpls_te_router_address_destroy,
+ .xpath = "/frr-isisd:isis/instance/mpls-te/router-address",
+ .cbs.modify = isis_instance_mpls_te_router_address_modify,
+ .cbs.destroy = isis_instance_mpls_te_router_address_destroy,
.cbs.cli_show = cli_show_isis_mpls_te_router_addr,
},
{
adj);
/* Update MPLS TE Remote IP address parameter if possible */
- if (IS_MPLS_TE(isisMplsTE) && iih->circuit->mtc
- && IS_CIRCUIT_TE(iih->circuit->mtc) && adj->ipv4_address_count)
+ if (IS_MPLS_TE(iih->circuit->area->mta)
+ && IS_MPLS_TE(iih->circuit->mtc)
+ && adj->ipv4_address_count)
set_circuitparams_rmt_ipaddr(iih->circuit->mtc,
adj->ipv4_addresses[0]);
#include "isisd/isis_spf.h"
#include "isisd/isis_te.h"
-/* Global varial for MPLS TE management */
-struct isis_mpls_te isisMplsTE;
-
const char *mode2text[] = {"Disable", "Area", "AS", "Emulate"};
/*------------------------------------------------------------------------*
/* Finally Update LSP */
#if 0
- if (IS_MPLS_TE(isisMplsTE) && circuit->area)
+ if (circuit->area && IS_MPLS_TE(circuit->area->mta))
lsp_regenerate_schedule (circuit->area, circuit->is_type, 0);
#endif
return;
isis_link_params_update(circuit, ifp);
/* ... and LSP */
- if (IS_MPLS_TE(isisMplsTE) && circuit->area)
+ if (circuit->area && IS_MPLS_TE(circuit->area->mta))
lsp_regenerate_schedule(circuit->area, circuit->is_type, 0);
return;
return;
}
-/* Specific MPLS TE router parameters write function */
-void isis_mpls_te_config_write_router(struct vty *vty)
-{
- if (IS_MPLS_TE(isisMplsTE)) {
- vty_out(vty, " mpls-te on\n");
- vty_out(vty, " mpls-te router-address %s\n",
- inet_ntoa(isisMplsTE.router_id));
- }
-
- return;
-}
-
-
/*------------------------------------------------------------------------*
* Followings are vty command functions.
*------------------------------------------------------------------------*/
#ifndef FABRICD
-/* Search MPLS TE Circuit context from Interface */
-static struct mpls_te_circuit *lookup_mpls_params_by_ifp(struct interface *ifp)
-{
- struct isis_circuit *circuit;
-
- if ((circuit = circuit_scan_by_ifp(ifp)) == NULL)
- return NULL;
-
- return circuit->mtc;
-}
-
DEFUN (show_isis_mpls_te_router,
show_isis_mpls_te_router_cmd,
"show " PROTO_NAME " mpls-te router",
MPLS_TE_STR
"Router information\n")
{
- if (IS_MPLS_TE(isisMplsTE)) {
- vty_out(vty, "--- MPLS-TE router parameters ---\n");
- if (ntohs(isisMplsTE.router_id.s_addr) != 0)
- vty_out(vty, " Router-Address: %s\n",
- inet_ntoa(isisMplsTE.router_id));
+ struct listnode *anode;
+ struct isis_area *area;
+
+ if (!isis) {
+ vty_out(vty, "IS-IS Routing Process not enabled\n");
+ return CMD_SUCCESS;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, anode, area)) {
+
+ if (!IS_MPLS_TE(area->mta))
+ continue;
+
+ vty_out(vty, "Area %s:\n", area->area_tag);
+ if (ntohs(area->mta->router_id.s_addr) != 0)
+ vty_out(vty, " MPLS-TE Router-Address: %s\n",
+ inet_ntoa(area->mta->router_id));
else
vty_out(vty, " N/A\n");
- } else
- vty_out(vty, " MPLS-TE is disable on this router\n");
+ }
return CMD_SUCCESS;
}
-static void show_mpls_te_sub(struct vty *vty, struct interface *ifp)
+static void show_mpls_te_sub(struct vty *vty, char *name,
+ struct mpls_te_circuit *mtc)
{
- struct mpls_te_circuit *mtc;
struct sbuf buf;
sbuf_init(&buf, NULL, 0);
- if ((IS_MPLS_TE(isisMplsTE))
- && ((mtc = lookup_mpls_params_by_ifp(ifp)) != NULL)) {
- /* Continue only if interface is not passive or support Inter-AS
- * TEv2 */
- if (mtc->status != enable) {
- if (IS_INTER_AS(mtc->type)) {
- vty_out(vty,
- "-- Inter-AS TEv2 link parameters for %s --\n",
- ifp->name);
- } else {
- /* MPLS-TE is not activate on this interface */
- /* or this interface is passive and Inter-AS
- * TEv2 is not activate */
- vty_out(vty,
- " %s: MPLS-TE is disabled on this interface\n",
- ifp->name);
- return;
- }
- } else {
- vty_out(vty, "-- MPLS-TE link parameters for %s --\n",
- ifp->name);
- }
-
- sbuf_reset(&buf);
- print_subtlv_admin_grp(&buf, 4, &mtc->admin_grp);
+ if (mtc->status != enable)
+ return;
- if (SUBTLV_TYPE(mtc->local_ipaddr) != 0)
- print_subtlv_local_ipaddr(&buf, 4, &mtc->local_ipaddr);
- if (SUBTLV_TYPE(mtc->rmt_ipaddr) != 0)
- print_subtlv_rmt_ipaddr(&buf, 4, &mtc->rmt_ipaddr);
+ vty_out(vty, "-- MPLS-TE link parameters for %s --\n", name);
- print_subtlv_max_bw(&buf, 4, &mtc->max_bw);
- print_subtlv_max_rsv_bw(&buf, 4, &mtc->max_rsv_bw);
- print_subtlv_unrsv_bw(&buf, 4, &mtc->unrsv_bw);
- print_subtlv_te_metric(&buf, 4, &mtc->te_metric);
+ sbuf_reset(&buf);
+ print_subtlv_admin_grp(&buf, 4, &mtc->admin_grp);
- if (IS_INTER_AS(mtc->type)) {
- if (SUBTLV_TYPE(mtc->ras) != 0)
- print_subtlv_ras(&buf, 4, &mtc->ras);
- if (SUBTLV_TYPE(mtc->rip) != 0)
- print_subtlv_rip(&buf, 4, &mtc->rip);
- }
+ if (SUBTLV_TYPE(mtc->local_ipaddr) != 0)
+ print_subtlv_local_ipaddr(&buf, 4, &mtc->local_ipaddr);
+ if (SUBTLV_TYPE(mtc->rmt_ipaddr) != 0)
+ print_subtlv_rmt_ipaddr(&buf, 4, &mtc->rmt_ipaddr);
+
+ print_subtlv_max_bw(&buf, 4, &mtc->max_bw);
+ print_subtlv_max_rsv_bw(&buf, 4, &mtc->max_rsv_bw);
+ print_subtlv_unrsv_bw(&buf, 4, &mtc->unrsv_bw);
+ print_subtlv_te_metric(&buf, 4, &mtc->te_metric);
+
+ if (IS_INTER_AS(mtc->type)) {
+ if (SUBTLV_TYPE(mtc->ras) != 0)
+ print_subtlv_ras(&buf, 4, &mtc->ras);
+ if (SUBTLV_TYPE(mtc->rip) != 0)
+ print_subtlv_rip(&buf, 4, &mtc->rip);
+ }
- print_subtlv_av_delay(&buf, 4, &mtc->av_delay);
- print_subtlv_mm_delay(&buf, 4, &mtc->mm_delay);
- print_subtlv_delay_var(&buf, 4, &mtc->delay_var);
- print_subtlv_pkt_loss(&buf, 4, &mtc->pkt_loss);
- print_subtlv_res_bw(&buf, 4, &mtc->res_bw);
- print_subtlv_ava_bw(&buf, 4, &mtc->ava_bw);
- print_subtlv_use_bw(&buf, 4, &mtc->use_bw);
+ print_subtlv_av_delay(&buf, 4, &mtc->av_delay);
+ print_subtlv_mm_delay(&buf, 4, &mtc->mm_delay);
+ print_subtlv_delay_var(&buf, 4, &mtc->delay_var);
+ print_subtlv_pkt_loss(&buf, 4, &mtc->pkt_loss);
+ print_subtlv_res_bw(&buf, 4, &mtc->res_bw);
+ print_subtlv_ava_bw(&buf, 4, &mtc->ava_bw);
+ print_subtlv_use_bw(&buf, 4, &mtc->use_bw);
- vty_multiline(vty, "", "%s", sbuf_buf(&buf));
- vty_out(vty, "---------------\n\n");
- } else {
- vty_out(vty, " %s: MPLS-TE is disabled on this interface\n",
- ifp->name);
- }
+ vty_multiline(vty, "", "%s", sbuf_buf(&buf));
+ vty_out(vty, "---------------\n\n");
sbuf_free(&buf);
return;
"Interface information\n"
"Interface name\n")
{
- struct vrf *vrf = vrf_lookup_by_id(VRF_DEFAULT);
- int idx_interface = 4;
+ struct listnode *anode, *cnode;
+ struct isis_area *area;
+ struct isis_circuit *circuit;
struct interface *ifp;
+ int idx_interface = 4;
- /* Show All Interfaces. */
- if (argc == 4) {
- FOR_ALL_INTERFACES (vrf, ifp)
- show_mpls_te_sub(vty, ifp);
+ if (!isis) {
+ vty_out(vty, "IS-IS Routing Process not enabled\n");
+ return CMD_SUCCESS;
}
- /* Interface name is specified. */
- else {
- if ((ifp = if_lookup_by_name(argv[idx_interface]->arg,
- VRF_DEFAULT))
- == NULL)
+
+ if (argc == idx_interface) {
+ /* Show All Interfaces. */
+ for (ALL_LIST_ELEMENTS_RO(isis->area_list, anode, area)) {
+
+ if (!IS_MPLS_TE(area->mta))
+ continue;
+
+ vty_out(vty, "Area %s:\n", area->area_tag);
+
+ for (ALL_LIST_ELEMENTS_RO(area->circuit_list, cnode,
+ circuit))
+ show_mpls_te_sub(vty, circuit->interface->name,
+ circuit->mtc);
+ }
+ } else {
+ /* Interface name is specified. */
+ ifp = if_lookup_by_name(argv[idx_interface]->arg, VRF_DEFAULT);
+ if (ifp == NULL)
vty_out(vty, "No such interface name\n");
- else
- show_mpls_te_sub(vty, ifp);
+ else {
+ circuit = circuit_scan_by_ifp(ifp);
+ if (!circuit)
+ vty_out(vty,
+ "ISIS is not enabled on circuit %s\n",
+ ifp->name);
+ else
+ show_mpls_te_sub(vty, ifp->name, circuit->mtc);
+ }
}
return CMD_SUCCESS;
void isis_mpls_te_init(void)
{
- zlog_debug("ISIS MPLS-TE: Initialize");
-
- /* Initialize MPLS_TE structure */
- isisMplsTE.status = disable;
- isisMplsTE.level = 0;
- isisMplsTE.inter_as = off;
- isisMplsTE.interas_areaid.s_addr = 0;
- isisMplsTE.cir_list = list_new();
- isisMplsTE.router_id.s_addr = 0;
-
#ifndef FABRICD
/* Register new VTY commands */
install_element(VIEW_NODE, &show_isis_mpls_te_router_cmd);
/* Mode for Inter-AS LSP */ /* TODO: Check how if LSP is flooded in RFC5316 */
typedef enum _interas_mode_t { off, region, as, emulate } interas_mode_t;
-#define IS_MPLS_TE(m) (m.status == enable)
-#define IS_CIRCUIT_TE(c) (c->status == enable)
+#define IS_MPLS_TE(m) (m && m->status == enable)
-/* Following structure are internal use only. */
-struct isis_mpls_te {
+/* Per area MPLS-TE parameters */
+struct mpls_te_area {
/* Status of MPLS-TE: enable or disable */
status_t status;
interas_mode_t inter_as;
struct in_addr interas_areaid;
- /* Circuit list on which TE are enable */
- struct list *cir_list;
-
/* MPLS_TE router ID */
struct in_addr router_id;
};
-extern struct isis_mpls_te isisMplsTE;
-
+/* Per circuit MPLS-TE parameters */
struct mpls_te_circuit {
/* Status of MPLS-TE on this interface */
uint8_t build_te_subtlvs(uint8_t *, struct isis_circuit *);
void isis_link_params_update(struct isis_circuit *, struct interface *);
void isis_mpls_te_update(struct interface *);
-void isis_mpls_te_config_write_router(struct vty *);
#endif /* _ZEBRA_ISIS_MPLS_TE_H */
struct listnode *node;
struct prefix router_id;
- /*
- * If ISIS TE is enable, TE Router ID is set through specific command.
- * See mpls_te_router_addr() command in isis_te.c
- */
- if (IS_MPLS_TE(isisMplsTE))
- return 0;
-
zebra_router_id_update_read(zclient->ibuf, &router_id);
if (isis->router_id == router_id.u.prefix4.s_addr)
return 0;
* uncomment the next line for full debugs
*/
/* isis->debugs = 0xFFFF; */
- isisMplsTE.status = disable; /* Only support TE metric */
QOBJ_REG(isis, isis);
}
if (fabricd)
fabricd_finish(area->fabricd);
+	/* Disable MPLS-TE if necessary before flooding LSP */
+ if (IS_MPLS_TE(area->mta))
+ area->mta->status = disable;
+
if (area->circuit_list) {
for (ALL_LIST_ELEMENTS(area->circuit_list, node, nnode,
circuit)) {
write += area_write_mt_settings(area, vty);
write += fabricd_write_settings(area, vty);
}
- isis_mpls_te_config_write_router(vty);
}
return write;
uint8_t log_adj_changes;
/* multi topology settings */
struct list *mt_settings;
+ /* MPLS-TE settings */
+ struct mpls_te_area *mta;
int ipv6_circuits;
bool purge_originator;
/* Counters */
}
/* Does interface up ? */
-int if_is_up(struct interface *ifp)
+int if_is_up(const struct interface *ifp)
{
return ifp->flags & IFF_UP;
}
/* Is interface running? */
-int if_is_running(struct interface *ifp)
+int if_is_running(const struct interface *ifp)
{
return ifp->flags & IFF_RUNNING;
}
/* Is the interface operative, eg. either UP & RUNNING
or UP & !ZEBRA_INTERFACE_LINK_DETECTION and
if ptm checking is enabled, then ptm check has passed */
-int if_is_operative(struct interface *ifp)
+int if_is_operative(const struct interface *ifp)
{
return ((ifp->flags & IFF_UP)
&& (((ifp->flags & IFF_RUNNING)
/* Is the interface operative, eg. either UP & RUNNING
or UP & !ZEBRA_INTERFACE_LINK_DETECTION, without PTM check */
-int if_is_no_ptm_operative(struct interface *ifp)
+int if_is_no_ptm_operative(const struct interface *ifp)
{
return ((ifp->flags & IFF_UP)
&& ((ifp->flags & IFF_RUNNING)
}
/* Is this loopback interface ? */
-int if_is_loopback(struct interface *ifp)
+int if_is_loopback(const struct interface *ifp)
{
/* XXX: Do this better, eg what if IFF_WHATEVER means X on platform M
* but Y on platform N?
}
/* Check interface is VRF */
-int if_is_vrf(struct interface *ifp)
+int if_is_vrf(const struct interface *ifp)
{
return CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_VRF_LOOPBACK);
}
-bool if_is_loopback_or_vrf(struct interface *ifp)
+bool if_is_loopback_or_vrf(const struct interface *ifp)
{
if (if_is_loopback(ifp) || if_is_vrf(ifp))
return true;
}
/* Does this interface support broadcast ? */
-int if_is_broadcast(struct interface *ifp)
+int if_is_broadcast(const struct interface *ifp)
{
return ifp->flags & IFF_BROADCAST;
}
/* Does this interface support broadcast ? */
-int if_is_pointopoint(struct interface *ifp)
+int if_is_pointopoint(const struct interface *ifp)
{
return ifp->flags & IFF_POINTOPOINT;
}
/* Does this interface support multicast ? */
-int if_is_multicast(struct interface *ifp)
+int if_is_multicast(const struct interface *ifp)
{
return ifp->flags & IFF_MULTICAST;
}
#else
ifp = if_get_by_name(ifname, vrf->vrf_id);
#endif /* SUNOS_5 */
- yang_dnode_set_entry(dnode, ifp);
+ nb_running_set_entry(dnode, ifp);
break;
}
{
struct interface *ifp;
- ifp = yang_dnode_get_entry(dnode, true);
switch (event) {
case NB_EV_VALIDATE:
+ ifp = nb_running_get_entry(dnode, NULL, true);
if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) {
zlog_warn("%s: only inactive interfaces can be deleted",
__func__);
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
+ ifp = nb_running_unset_entry(dnode);
if_delete(ifp);
break;
}
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
XFREE(MTYPE_TMP, ifp->desc);
description = yang_dnode_get_string(dnode, NULL);
ifp->desc = XSTRDUP(MTYPE_TMP, description);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
XFREE(MTYPE_TMP, ifp->desc);
return NB_OK;
deletes it from the interface list and frees the structure. */
extern void if_delete(struct interface *);
-extern int if_is_up(struct interface *);
-extern int if_is_running(struct interface *);
-extern int if_is_operative(struct interface *);
-extern int if_is_no_ptm_operative(struct interface *);
-extern int if_is_loopback(struct interface *);
-extern int if_is_vrf(struct interface *ifp);
-extern bool if_is_loopback_or_vrf(struct interface *ifp);
-extern int if_is_broadcast(struct interface *);
-extern int if_is_pointopoint(struct interface *);
-extern int if_is_multicast(struct interface *);
+extern int if_is_up(const struct interface *ifp);
+extern int if_is_running(const struct interface *ifp);
+extern int if_is_operative(const struct interface *ifp);
+extern int if_is_no_ptm_operative(const struct interface *ifp);
+extern int if_is_loopback(const struct interface *ifp);
+extern int if_is_vrf(const struct interface *ifp);
+extern bool if_is_loopback_or_vrf(const struct interface *ifp);
+extern int if_is_broadcast(const struct interface *ifp);
+extern int if_is_pointopoint(const struct interface *ifp);
+extern int if_is_multicast(const struct interface *ifp);
struct vrf;
extern void if_terminate(struct vrf *vrf);
extern void if_dump_all(void);
void if_rmap_ctx_delete(struct if_rmap_ctx *ctx)
{
+ listnode_delete(if_rmap_ctx_list, ctx);
hash_clean(ctx->ifrmaphash, (void (*)(void *))if_rmap_free);
if (ctx->name)
XFREE(MTYPE_IF_RMAP_CTX_NAME, ctx);
#include "lib_errors.h"
#include "db.h"
#include "northbound_cli.h"
+#include "northbound_db.h"
DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm))
DEFINE_KOOH(frr_early_fini, (), ())
yang_init();
nb_init(master, di->yang_modules, di->n_yang_modules);
+ if (nb_db_init() != NB_OK)
+ flog_warn(EC_LIB_NB_DATABASE,
+ "%s: failed to initialize northbound database",
+ __func__);
return master;
}
XFREE(MTYPE_LINK_NODE, node);
}
-void listnode_add(struct list *list, void *val)
+struct listnode *listnode_add(struct list *list, void *val)
{
struct listnode *node;
list->tail = node;
list->count++;
+
+ return node;
}
void listnode_add_head(struct list *list, void *val)
XFREE(MTYPE_TMP, items);
}
-void listnode_add_force(struct list **list, void *val)
+struct listnode *listnode_add_force(struct list **list, void *val)
{
if (*list == NULL)
*list = list_new();
* data
* element to add
*/
-extern void listnode_add(struct list *list, void *data);
+extern struct listnode *listnode_add(struct list *list, void *data);
/*
* Add a new element to the beginning of a list.
extern struct listnode *listnode_lookup_nocheck(struct list *list, void *data);
-extern void listnode_add_force(struct list **list, void *val);
+/*
+ * Add a node to *list, if non-NULL. Otherwise, allocate a new list, store
+ * it back in *list, and add a new node.
+ *
+ * Return: the new node.
+ */
+extern struct listnode *listnode_add_force(struct list **list, void *val);
#ifdef __cplusplus
}
DESC_ENTRY(ZEBRA_IPTABLE_DELETE),
DESC_ENTRY(ZEBRA_IPTABLE_NOTIFY_OWNER),
DESC_ENTRY(ZEBRA_VXLAN_FLOOD_CONTROL),
+ DESC_ENTRY(ZEBRA_VXLAN_SG_ADD),
+ DESC_ENTRY(ZEBRA_VXLAN_SG_DEL),
};
#undef DESC_ENTRY
extern void zlog_info(const char *format, ...) PRINTF_ATTRIBUTE(1, 2);
extern void zlog_notice(const char *format, ...) PRINTF_ATTRIBUTE(1, 2);
extern void zlog_debug(const char *format, ...) PRINTF_ATTRIBUTE(1, 2);
+extern void zlog(int priority, const char *format, ...) PRINTF_ATTRIBUTE(2, 3);
/* For logs which have error codes associated with them */
#define flog_err(ferr_id, format, ...) \
flog_err(ferr_id, format, ##__VA_ARGS__)
#define flog_warn(ferr_id, format, ...) \
zlog_warn("[EC %" PRIu32 "] " format, ferr_id, ##__VA_ARGS__)
-
+#define flog(priority, ferr_id, format, ...) \
+ zlog(priority, "[EC %" PRIu32 "] " format, ferr_id, ##__VA_ARGS__)
extern void zlog_thread_info(int log_level);
/* Generic function for zlog. */
extern void vzlog(int priority, const char *format, va_list args);
-extern void zlog(int priority, const char *format, ...) PRINTF_ATTRIBUTE(2, 3);
#ifdef __cplusplus
}
#define NEXTHOP_FLAG_RECURSIVE (1 << 2) /* Recursive nexthop. */
#define NEXTHOP_FLAG_ONLINK (1 << 3) /* Nexthop should be installed onlink. */
#define NEXTHOP_FLAG_MATCHED (1 << 4) /* Already matched vs a nexthop */
-#define NEXTHOP_FLAG_FILTERED (1 << 5) /* rmap filtered, used by static only */
-#define NEXTHOP_FLAG_DUPLICATE (1 << 6) /* nexthop duplicates another active one */
+#define NEXTHOP_FLAG_DUPLICATE (1 << 5) /* nexthop duplicates another active one */
+#define NEXTHOP_FLAG_RNH_FILTERED (1 << 6) /* rmap filtered, used by rnh */
#define NEXTHOP_IS_ACTIVE(flags) \
(CHECK_FLAG(flags, NEXTHOP_FLAG_ACTIVE) \
&& !CHECK_FLAG(flags, NEXTHOP_FLAG_DUPLICATE))
return strcmp(nhgc1->name, nhgc2->name);
}
+uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg)
+{
+ struct nexthop *nhop;
+ uint8_t num = 0;
+
+ for (ALL_NEXTHOPS_PTR(nhg, nhop))
+ num++;
+
+ return num;
+}
+
+uint8_t nexthop_group_active_nexthop_num(const struct nexthop_group *nhg)
+{
+ struct nexthop *nhop;
+ uint8_t num = 0;
+
+ for (ALL_NEXTHOPS_PTR(nhg, nhop)) {
+ if (CHECK_FLAG(nhop->flags, NEXTHOP_FLAG_ACTIVE))
+ num++;
+ }
+
+ return num;
+}
+
struct nexthop *nexthop_exists(struct nexthop_group *nhg, struct nexthop *nh)
{
struct nexthop *nexthop;
extern void nexthop_group_write_nexthop(struct vty *vty, struct nexthop *nh);
+/* Return the number of nexthops in this nhg */
+extern uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg);
+extern uint8_t
+nexthop_group_active_nexthop_num(const struct nexthop_group *nhg);
+
#ifdef __cplusplus
}
#endif
#include "libfrr.h"
#include "log.h"
#include "lib_errors.h"
+#include "hash.h"
#include "command.h"
#include "debug.h"
#include "db.h"
DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
+DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;
+/* Hash table of user pointers associated with configuration entries. */
+static struct hash *running_config_entries;
+
/*
* Global lock used to prevent multiple configuration transactions from
* happening concurrently.
struct lyd_node *dnode;
char xpath_edit[XPATH_MAXLEN];
- if (!nb_operation_is_valid(operation, nb_node->snode)) {
- flog_warn(EC_LIB_NB_CANDIDATE_EDIT_ERROR,
- "%s: %s operation not valid for %s", __func__,
- nb_operation_name(operation), xpath);
- return NB_ERR;
- }
-
/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
if (nb_node->snode->nodetype == LYS_LEAFLIST)
snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
return NB_OK;
}
-/*
- * The northbound configuration callbacks use the 'priv' pointer present in the
- * libyang lyd_node structure to store pointers to FRR internal variables
- * associated to YANG lists and presence containers. Before commiting a
- * candidate configuration, we must restore the 'priv' pointers stored in the
- * running configuration since they might be lost while editing the candidate.
- */
-static void nb_candidate_restore_priv_pointers(struct nb_config *candidate)
-{
- struct lyd_node *root, *next, *dnode_iter;
-
- LY_TREE_FOR (running_config->dnode, root) {
- LY_TREE_DFS_BEGIN (root, next, dnode_iter) {
- struct lyd_node *dnode_candidate;
- char xpath[XPATH_MAXLEN];
-
- if (!dnode_iter->priv)
- goto next;
-
- yang_dnode_get_path(dnode_iter, xpath, sizeof(xpath));
- dnode_candidate =
- yang_dnode_get(candidate->dnode, xpath);
- if (dnode_candidate)
- yang_dnode_set_entry(dnode_candidate,
- dnode_iter->priv);
-
- next:
- LY_TREE_DFS_END(root, next, dnode_iter);
- }
- }
-}
-
/*
* Perform YANG syntactic and semantic validation.
*
{
struct nb_config_cb *cb;
- nb_candidate_restore_priv_pointers(candidate);
RB_FOREACH (cb, nb_config_cbs, changes) {
struct nb_config_change *change = (struct nb_config_change *)cb;
int ret;
ret = (*nb_node->cbs.move)(event, dnode);
break;
default:
- break;
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown operation (%u) [xpath %s]", __func__,
+ operation, xpath);
+ exit(1);
}
if (ret != NB_OK) {
- enum lib_log_refs ref = 0;
+ int priority;
+ enum lib_log_refs ref;
switch (event) {
case NB_EV_VALIDATE:
+ priority = LOG_WARNING;
ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
break;
case NB_EV_PREPARE:
+ priority = LOG_WARNING;
ref = EC_LIB_NB_CB_CONFIG_PREPARE;
break;
case NB_EV_ABORT:
+ priority = LOG_WARNING;
ref = EC_LIB_NB_CB_CONFIG_ABORT;
break;
case NB_EV_APPLY:
+ priority = LOG_ERR;
ref = EC_LIB_NB_CB_CONFIG_APPLY;
break;
+ default:
+ flog_err(EC_LIB_DEVELOPMENT,
+ "%s: unknown event (%u) [xpath %s]",
+ __func__, event, xpath);
+ exit(1);
}
- if (event == NB_EV_VALIDATE || event == NB_EV_PREPARE)
- flog_warn(
- ref,
- "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
- __func__, nb_err_name(ret),
- nb_event_name(event),
- nb_operation_name(operation), xpath);
- else
- flog_err(
- ref,
- "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
- __func__, nb_err_name(ret),
- nb_event_name(event),
- nb_operation_name(operation), xpath);
+
+ flog(priority, ref,
+ "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
+ __func__, nb_err_name(ret), nb_event_name(event),
+ nb_operation_name(operation), xpath);
}
return ret;
return ret;
}
+/* Running configuration user pointers management. */
+struct nb_config_entry {
+ char xpath[XPATH_MAXLEN];
+ void *entry;
+};
+
+static bool running_config_entry_cmp(const void *value1, const void *value2)
+{
+ const struct nb_config_entry *c1 = value1;
+ const struct nb_config_entry *c2 = value2;
+
+ return strmatch(c1->xpath, c2->xpath);
+}
+
+static unsigned int running_config_entry_key_make(void *value)
+{
+ return string_hash_make(value);
+}
+
/*
 * hash_get() allocation callback: create a zeroed nb_config_entry whose key
 * is the XPath copied from the lookup key 'p'.  The 'entry' user pointer is
 * left NULL here and filled in by the caller (nb_running_set_entry()).
 */
+static void *running_config_entry_alloc(void *p)
+{
+ struct nb_config_entry *new, *key = p;
+
+ new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
+ strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
+
+ return new;
+}
+
/*
 * Free one nb_config_entry.  Used both after hash_release() in the unset
 * path and as the per-element destructor passed to hash_clean() at
 * termination time.
 */
+static void running_config_entry_free(void *arg)
+{
+ XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
+}
+
/*
 * Associate the user pointer 'entry' with the running-configuration node
 * 'dnode', keyed by the node's XPath.  hash_get() with the alloc callback
 * creates the hash entry on first use; on subsequent calls the existing
 * entry's pointer is simply overwritten.
 */
+void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
+{
+ struct nb_config_entry *config, s;
+
+ yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
+ config = hash_get(running_config_entries, &s,
+ running_config_entry_alloc);
+ config->entry = entry;
+}
+
/*
 * Remove and return the user pointer keyed by 'dnode''s XPath, then recurse
 * into the children of lists and containers so stale entries for an entire
 * deleted subtree are cleared in one pass.  Returns NULL when this node had
 * no entry of its own; pointers released from child nodes are discarded,
 * not returned.
 */
+static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
+{
+ struct nb_config_entry *config, s;
+ struct lyd_node *child;
+ void *entry = NULL;
+
+ yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
+ config = hash_release(running_config_entries, &s);
+ if (config) {
+ entry = config->entry;
+ running_config_entry_free(config);
+ }
+
+ /* Unset user pointers from the child nodes. */
+ if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
+ LY_TREE_FOR (dnode->child, child) {
+ (void)nb_running_unset_entry_helper(child);
+ }
+ }
+
+ return entry;
+}
+
/*
 * Unset and return the user pointer associated with 'dnode', clearing the
 * pointers of its child nodes as well (via the recursive helper).  Asserts
 * that the node itself had a pointer associated - callers use this in
 * 'destroy' callbacks, where a missing entry indicates a bug.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);
	return entry;
}
+
/*
 * Look up the user pointer for a running-configuration node, identified
 * either by 'dnode' or, when 'dnode' is NULL, by 'xpath' resolved against
 * the running configuration.  If the node itself has no entry, the parent
 * chain is walked toward the root until one is found.  When nothing is
 * found and 'abort_if_not_found' is set, the failing XPath is logged and
 * the process abort()s - callers in that mode rely on the entry existing.
 */
+void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
+ bool abort_if_not_found)
+{
+ const struct lyd_node *orig_dnode = dnode;
+ char xpath_buf[XPATH_MAXLEN];
+
+ assert(dnode || xpath);
+
+ if (!dnode)
+ dnode = yang_dnode_get(running_config->dnode, xpath);
+
/* Ascend toward the root until an associated pointer is found. */
+ while (dnode) {
+ struct nb_config_entry *config, s;
+
+ yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
+ config = hash_lookup(running_config_entries, &s);
+ if (config)
+ return config->entry;
+
+ dnode = dnode->parent;
+ }
+
+ if (!abort_if_not_found)
+ return NULL;
+
+ yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
+ flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
+ "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
+ zlog_backtrace(LOG_ERR);
+ abort();
+}
+
+/* Logging functions. */
const char *nb_event_name(enum nb_event event)
{
switch (event) {
exit(1);
}
- /* Initialize the northbound database (used for the rollback log). */
- if (nb_db_init() != NB_OK)
- flog_warn(EC_LIB_NB_DATABASE,
- "%s: failed to initialize northbound database",
- __func__);
-
/* Create an empty running configuration. */
running_config = nb_config_new(NULL);
+ running_config_entries = hash_create(running_config_entry_key_make,
+ running_config_entry_cmp,
+ "Running Configuration Entries");
/* Initialize the northbound CLI. */
nb_cli_init(tm);
nb_nodes_delete();
/* Delete the running configuration. */
+ hash_clean(running_config_entries, running_config_entry_free);
+ hash_free(running_config_entries);
nb_config_free(running_config);
}
*/
extern int nb_notification_send(const char *xpath, struct list *arguments);
+/*
+ * Associate a user pointer to a configuration node.
+ *
+ * This should be called by northbound 'create' callbacks in the NB_EV_APPLY
+ * phase only.
+ *
+ * dnode
+ * libyang data node - only its XPath is used.
+ *
+ * entry
+ * Arbitrary user-specified pointer.
+ */
+extern void nb_running_set_entry(const struct lyd_node *dnode, void *entry);
+
+/*
+ * Unset the user pointer associated to a configuration node.
+ *
+ * This should be called by northbound 'destroy' callbacks in the NB_EV_APPLY
+ * phase only.
+ *
+ * dnode
+ * libyang data node - only its XPath is used.
+ *
+ * Returns:
+ * The user pointer that was unset.
+ */
+extern void *nb_running_unset_entry(const struct lyd_node *dnode);
+
+/*
+ * Find the user pointer (if any) associated to a configuration node.
+ *
+ * The XPath associated to the configuration node can be provided directly or
+ * indirectly through a libyang data node.
+ *
+ * If a user pointer is not found, this function follows the parent nodes in the
+ * running configuration until a user pointer is found or until the root node
+ * is reached.
+ *
+ * dnode
+ * libyang data node - only its XPath is used (can be NULL if 'xpath' is
+ * provided).
+ *
+ * xpath
+ * XPath of the configuration node (can be NULL if 'dnode' is provided).
+ *
+ * abort_if_not_found
+ * When set to true, abort the program if no user pointer is found.
+ *
+ * As a rule of thumb, this parameter should be set to true in the following
+ * scenarios:
+ * - Calling this function from any northbound configuration callback during
+ * the NB_EV_APPLY phase.
+ * - Calling this function from a 'delete' northbound configuration callback
+ * during any phase.
+ *
+ * In both the above cases, the given configuration node should contain a
+ * user pointer except when there's a bug in the code, in which case it's
+ * better to abort the program right away and eliminate the need for
+ * unnecessary NULL checks.
+ *
+ * In all other cases, this parameter should be set to false and the caller
+ * should check if the function returned NULL or not.
+ *
+ * Returns:
+ * User pointer if found, NULL otherwise.
+ */
+extern void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
+ bool abort_if_not_found);
+
/*
* Return a human-readable string representing a northbound event.
*
/* Callback for state retrieval. */
static int frr_sr_state_cb(const char *xpath, sr_val_t **values,
size_t *values_cnt, uint64_t request_id,
- void *private_ctx)
+ const char *original_xpath, void *private_ctx)
{
struct list *elements;
struct yang_data *data;
return str;
}
/*
 * Render a multicast IPv4 address into 'buf' (of 'buf_size' bytes).
 *
 * INADDR_ANY prints as the wildcard "*"; any other address is formatted
 * with inet_ntop().  If inet_ntop() fails and 'onfail' is non-NULL,
 * 'onfail' is copied into 'buf' instead.  NOTE(review): if inet_ntop()
 * fails and 'onfail' is NULL, 'buf' is left unmodified - callers should
 * pre-initialize it.
 *
 * errno is saved and restored so this helper is safe to call from error
 * paths without clobbering the caller's errno.
 *
 * Fix: the wildcard case used an unbounded strcpy(), which could overrun a
 * buffer smaller than 2 bytes; it is now bounded by snprintf().
 */
void prefix_mcast_inet4_dump(const char *onfail, struct in_addr addr,
			     char *buf, int buf_size)
{
	int save_errno = errno;

	if (addr.s_addr == INADDR_ANY)
		snprintf(buf, buf_size, "*");
	else {
		if (!inet_ntop(AF_INET, &addr, buf, buf_size)) {
			if (onfail)
				snprintf(buf, buf_size, "%s", onfail);
		}
	}

	errno = save_errno;
}
+
/*
 * Format an (S,G) pair as "(src,grp)" into 'sg_str', which must be at
 * least PREFIX_SG_STR_LEN bytes.  Zero (wildcard) addresses render as "*"
 * via prefix_mcast_inet4_dump(); addresses inet_ntop() cannot format
 * render as "<src?>" / "<grp?>".  Returns 'sg_str' so the call can be used
 * directly inside printf-style argument lists.
 */
+const char *prefix_sg2str(const struct prefix_sg *sg, char *sg_str)
+{
+ char src_str[INET_ADDRSTRLEN];
+ char grp_str[INET_ADDRSTRLEN];
+
+ prefix_mcast_inet4_dump("<src?>", sg->src, src_str, sizeof(src_str));
+ prefix_mcast_inet4_dump("<grp?>", sg->grp, grp_str, sizeof(grp_str));
+ snprintf(sg_str, PREFIX_SG_STR_LEN, "(%s,%s)", src_str, grp_str);
+
+ return sg_str;
+}
+
struct prefix *prefix_new(void)
{
struct prefix *p;
/* Maximum string length of the result of prefix2str */
#define PREFIX_STRLEN 80
+/*
+ * Longest possible length of a (S,G) string is 34 bytes
+ * 123.123.123.123 = 15 * 2
+ * (,) = 3
+ * NULL Character at end = 1
+ * (123.123.123.123,123.123.123.123)
+ */
+#define PREFIX_SG_STR_LEN 34
+
/* Max bit/byte length of IPv4 address. */
#define IPV4_MAX_BYTELEN 4
#define IPV4_MAX_BITLEN 32
#define PREFIX2STR_BUFFER PREFIX_STRLEN
+extern void prefix_mcast_inet4_dump(const char *onfail, struct in_addr addr,
+ char *buf, int buf_size);
+extern const char *prefix_sg2str(const struct prefix_sg *sg, char *str);
extern const char *prefix2str(union prefixconstptr, char *, int);
extern int prefix_match(const struct prefix *, const struct prefix *);
extern int prefix_match_network_statement(const struct prefix *,
#include "privs.h"
#include "memory.h"
#include "lib_errors.h"
+#include "lib/queue.h"
+DEFINE_MTYPE_STATIC(LIB, PRIVS, "Privilege information")
+
+/*
+ * Different capabilities/privileges apis have different characteristics: some
+ * are process-wide, and some are per-thread.
+ */
#ifdef HAVE_CAPABILITIES
+#ifdef HAVE_LCAPS
+static const bool privs_per_process; /* = false */
+#elif defined(HAVE_SOLARIS_CAPABILITIES)
+static const bool privs_per_process = true;
+#endif
+#else
+static const bool privs_per_process = true;
+#endif /* HAVE_CAPABILITIES */
-DEFINE_MTYPE_STATIC(LIB, PRIVS, "Privilege information")
+#ifdef HAVE_CAPABILITIES
/* sort out some generic internal types for:
*
}
#endif /* HAVE_GETGROUPLIST */
+/*
+ * Helper function that locates a refcounting object to use: a process-wide
+ * object or a per-pthread object.
+ */
/*
 * Return the privilege refcounting object to use for the current context:
 * the single process-wide object when privileges are process-wide, or a
 * per-pthread object (created on first use) otherwise.
 *
 * NOTE(review): callers (_zprivs_raise()/_zprivs_lower()) hold
 * privs->mutex around this call, which is what serializes the thread_refs
 * list walk and insertion - confirm no lock-free callers are added.
 */
+static struct zebra_privs_refs_t *get_privs_refs(struct zebra_privs_t *privs)
+{
+ struct zebra_privs_refs_t *temp, *refs = NULL;
+ pthread_t tid;
+
+ if (privs_per_process)
+ refs = &(privs->process_refs);
+ else {
+ /* Locate - or create - the object for the current pthread. */
+ tid = pthread_self();
+
+ STAILQ_FOREACH(temp, &(privs->thread_refs), entry) {
+ if (pthread_equal(temp->tid, tid)) {
+ refs = temp;
+ break;
+ }
+ }
+
+ /* Need to create a new refcounting object. */
+ if (refs == NULL) {
+ refs = XCALLOC(MTYPE_PRIVS,
+ sizeof(struct zebra_privs_refs_t));
+ refs->tid = tid;
+ STAILQ_INSERT_TAIL(&(privs->thread_refs), refs, entry);
+ }
+ }
+
+ return refs;
+}
+
struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
const char *funcname)
{
int save_errno = errno;
+ struct zebra_privs_refs_t *refs;
if (!privs)
return NULL;
- /* If we're already elevated, just return */
+ /*
+ * Serialize 'raise' operations; particularly important for
+ * OSes where privs are process-wide.
+ */
pthread_mutex_lock(&(privs->mutex));
{
- if (++(privs->refcount) == 1) {
+ /* Locate ref-counting object to use */
+ refs = get_privs_refs(privs);
+
+ if (++(refs->refcount) == 1) {
errno = 0;
if (privs->change(ZPRIVS_RAISE)) {
zlog_err("%s: Failed to raise privileges (%s)",
funcname, safe_strerror(errno));
}
errno = save_errno;
- privs->raised_in_funcname = funcname;
+ refs->raised_in_funcname = funcname;
}
}
pthread_mutex_unlock(&(privs->mutex));
void _zprivs_lower(struct zebra_privs_t **privs)
{
int save_errno = errno;
+ struct zebra_privs_refs_t *refs;
if (!*privs)
return;
- /* Don't lower privs if there's another caller */
+ /* Serialize 'lower privs' operation - particularly important
+ * when OS privs are process-wide.
+ */
pthread_mutex_lock(&(*privs)->mutex);
{
- if (--((*privs)->refcount) == 0) {
+ refs = get_privs_refs(*privs);
+
+ if (--(refs->refcount) == 0) {
errno = 0;
if ((*privs)->change(ZPRIVS_LOWER)) {
zlog_err("%s: Failed to lower privileges (%s)",
- (*privs)->raised_in_funcname,
+ refs->raised_in_funcname,
safe_strerror(errno));
}
errno = save_errno;
- (*privs)->raised_in_funcname = NULL;
+ refs->raised_in_funcname = NULL;
}
}
pthread_mutex_unlock(&(*privs)->mutex);
}
pthread_mutex_init(&(zprivs->mutex), NULL);
- zprivs->refcount = 0;
+ zprivs->process_refs.refcount = 0;
+ zprivs->process_refs.raised_in_funcname = NULL;
+ STAILQ_INIT(&zprivs->thread_refs);
if (zprivs->vty_group) {
/* in a "NULL" setup, this is allowed to fail too, but still
void zprivs_terminate(struct zebra_privs_t *zprivs)
{
+ struct zebra_privs_refs_t *refs;
+
if (!zprivs) {
fprintf(stderr, "%s: no privs struct given, terminating",
__func__);
}
#endif /* HAVE_LCAPS */
+ while ((refs = STAILQ_FIRST(&(zprivs->thread_refs))) != NULL) {
+ STAILQ_REMOVE_HEAD(&(zprivs->thread_refs), entry);
+ XFREE(MTYPE_PRIVS, refs);
+ }
+
zprivs->change = zprivs_change_null;
zprivs->current_state = zprivs_state_null;
zprivs_null_state = ZPRIVS_LOWERED;
#define _ZEBRA_PRIVS_H
#include <pthread.h>
+#include "lib/queue.h"
#ifdef __cplusplus
extern "C" {
ZPRIVS_LOWER,
} zebra_privs_ops_t;
/*
 * Privilege raise/lower refcounting state.  One process-wide instance is
 * embedded in struct zebra_privs_t (process_refs); when privileges are
 * per-thread, additional per-pthread instances live on the thread_refs
 * list.
 */
+struct zebra_privs_refs_t {
+ STAILQ_ENTRY(zebra_privs_refs_t) entry; /* linkage on privs->thread_refs */
+ pthread_t tid; /* owning thread (per-thread mode only) */
+ uint32_t refcount; /* nested raise() depth; lower at 0 */
+ const char *raised_in_funcname; /* function that first raised privs */
+};
+
struct zebra_privs_t {
zebra_capabilities_t *caps_p; /* caps required for operation */
zebra_capabilities_t *caps_i; /* caps to allow inheritance of */
int cap_num_i;
/* Mutex and counter used to avoid race conditions in multi-threaded
- * processes. The privs elevation is process-wide, so we need to
- * avoid changing the privilege status across threads.
+ * processes. If privs status is process-wide, we need to
+ * control changes to the privilege status among threads.
+ * If privs changes are per-thread, we need to be able to
+ * manage that too.
*/
pthread_mutex_t mutex;
- uint32_t refcount;
+ struct zebra_privs_refs_t process_refs;
+
+ STAILQ_HEAD(thread_refs_q, zebra_privs_refs_t) thread_refs;
const char *user; /* user and group to run as */
const char *group;
int (*change)(zebra_privs_ops_t); /* change privileges, 0 on success */
zebra_privs_current_t (*current_state)(
void); /* current privilege state */
- const char *raised_in_funcname;
};
struct zprivs_ids_t {
enum vxlan_flood_control {
VXLAN_FLOOD_HEAD_END_REPL = 0,
VXLAN_FLOOD_DISABLED,
+ VXLAN_FLOOD_PIM_SM,
};
#ifdef __cplusplus
if (item->ran > wq->spec.max_retries) {
/* run error handler, if any */
if (wq->spec.errorfunc)
- wq->spec.errorfunc(wq, item->data);
+ wq->spec.errorfunc(wq, item);
work_queue_item_remove(wq, item);
continue;
}
#include <zebra.h>
#include "log.h"
-#include "log_int.h"
#include "lib_errors.h"
#include "yang.h"
#include "yang_translator.h"
lyd_change_leaf((struct lyd_node_leaf_list *)dnode, value);
}
-void yang_dnode_set_entry(const struct lyd_node *dnode, void *entry)
-{
- assert(CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER));
- lyd_set_private(dnode, entry);
-}
-
-void *yang_dnode_get_entry(const struct lyd_node *dnode,
- bool abort_if_not_found)
-{
- const struct lyd_node *orig_dnode = dnode;
- char xpath[XPATH_MAXLEN];
-
- while (dnode) {
- switch (dnode->schema->nodetype) {
- case LYS_CONTAINER:
- case LYS_LIST:
- if (dnode->priv)
- return dnode->priv;
- break;
- default:
- break;
- }
-
- dnode = dnode->parent;
- }
-
- if (!abort_if_not_found)
- return NULL;
-
- yang_dnode_get_path(orig_dnode, xpath, sizeof(xpath));
- flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
- "%s: failed to find entry [xpath %s]", __func__, xpath);
- zlog_backtrace(LOG_ERR);
- abort();
-}
-
struct lyd_node *yang_dnode_new(struct ly_ctx *ly_ctx, bool config_only)
{
struct lyd_node *dnode;
return NULL;
}
-static void *ly_dup_cb(const void *priv)
-{
- /* Make a shallow copy of the priv pointer. */
- return (void *)priv;
-}
-
/* Make libyang log its errors using FRR logging infrastructure. */
static void ly_log_cb(LY_LOG_LEVEL level, const char *msg, const char *path)
{
flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__);
exit(1);
}
- ly_ctx_set_priv_dup_clb(ly_native_ctx, ly_dup_cb);
#ifndef LIBYANG_EXT_BUILTIN
/* Detect if the required libyang plugin(s) were loaded successfully. */
*/
extern void yang_dnode_change_leaf(struct lyd_node *dnode, const char *value);
-/*
- * Set the libyang private pointer to a user pointer. Can only be used on YANG
- * lists and containers.
- *
- * dnode
- * libyang data node to operate on.
- *
- * entry
- * Arbitrary user-specified pointer.
- */
-extern void yang_dnode_set_entry(const struct lyd_node *dnode, void *entry);
-
-/*
- * Find the user pointer associated to the given libyang data node.
- *
- * The data node is traversed by following the parent pointers until an user
- * pointer is found or until the root node is reached.
- *
- * dnode
- * libyang data node to operate on.
- *
- * abort_if_not_found
- * When set to true, abort the program if no user pointer is found.
- *
- * As a rule of thumb, this parameter should be set to true in the following
- * scenarios:
- * - Calling this function from any northbound configuration callback during
- * the NB_EV_APPLY phase.
- * - Calling this function from a 'delete' northbound configuration callback
- * during any phase.
- *
- * In both the above cases, the libyang data node should contain an user
- * pointer except when there's a bug in the code, in which case it's better
- * to abort the program right away and eliminate the need for unnecessary
- * NULL checks.
- *
- * In all other cases, this parameter should be set to false and the caller
- * should check if the function returned NULL or not.
- *
- * Returns:
- * User pointer if found, NULL otherwise.
- */
-extern void *yang_dnode_get_entry(const struct lyd_node *dnode,
- bool abort_if_not_found);
-
/*
* Create a new libyang data node.
*
(*zclient->iptable_notify_owner)(command,
zclient, length,
vrf_id);
+ break;
+ case ZEBRA_VXLAN_SG_ADD:
+ if (zclient->vxlan_sg_add)
+ (*zclient->vxlan_sg_add)(command, zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_VXLAN_SG_DEL:
+ if (zclient->vxlan_sg_del)
+ (*zclient->vxlan_sg_del)(command, zclient, length,
+ vrf_id);
+ break;
default:
break;
}
ZEBRA_IPTABLE_DELETE,
ZEBRA_IPTABLE_NOTIFY_OWNER,
ZEBRA_VXLAN_FLOOD_CONTROL,
+ ZEBRA_VXLAN_SG_ADD,
+ ZEBRA_VXLAN_SG_DEL,
} zebra_message_types_t;
struct redist_proto {
struct zclient *zclient,
uint16_t length,
vrf_id_t vrf_id);
+ int (*vxlan_sg_add)(int command, struct zclient *client,
+ uint16_t length, vrf_id_t vrf_id);
+ int (*vxlan_sg_del)(int command, struct zclient *client,
+ uint16_t length, vrf_id_t vrf_id_t);
};
/* Zebra API message flag. */
#include "pim_ssm.h"
#include "pim_nht.h"
#include "pim_bfd.h"
+#include "pim_vxlan.h"
#include "bfd.h"
#ifndef VTYSH_EXTRACT_PL
}
}
-static void pim_show_join(struct pim_instance *pim, struct vty *vty, bool uj)
+static void pim_show_join(struct pim_instance *pim, struct vty *vty,
+ struct prefix_sg *sg, bool uj)
{
struct pim_interface *pim_ifp;
struct pim_ifchannel *ch;
continue;
RB_FOREACH (ch, pim_ifchannel_rb, &pim_ifp->ifchannel_rb) {
+ if (sg->grp.s_addr != 0
+ && sg->grp.s_addr != ch->sg.grp.s_addr)
+ continue;
+ if (sg->src.s_addr != 0
+ && sg->src.s_addr != ch->sg.src.s_addr)
+ continue;
pim_show_join_helper(vty, pim_ifp, ch, json, now, uj);
} /* scan interface channels */
}
json = json_object_new_object();
} else {
vty_out(vty,
- "Codes: J -> Pim Join, I -> IGMP Report, S -> Source, * -> Inherited from (*,G)");
+ "Codes: J -> Pim Join, I -> IGMP Report, S -> Source, * -> Inherited from (*,G), V -> VxLAN");
vty_out(vty,
"\nInstalled Source Group IIF OIL\n");
}
} else {
if (first_oif) {
first_oif = 0;
- vty_out(vty, "%s(%c%c%c%c)", out_ifname,
+ vty_out(vty, "%s(%c%c%c%c%c)", out_ifname,
(c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_IGMP)
? 'I'
& PIM_OIF_FLAG_PROTO_PIM)
? 'J'
: ' ',
+ (c_oil->oif_flags[oif_vif_index]
+ & PIM_OIF_FLAG_PROTO_VXLAN)
+ ? 'V'
+ : ' ',
(c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_SOURCE)
? 'S'
? '*'
: ' ');
} else
- vty_out(vty, ", %s(%c%c%c%c)",
+ vty_out(vty, ", %s(%c%c%c%c%c)",
out_ifname,
(c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_IGMP)
& PIM_OIF_FLAG_PROTO_PIM)
? 'J'
: ' ',
+ (c_oil->oif_flags[oif_vif_index]
+ & PIM_OIF_FLAG_PROTO_VXLAN)
+ ? 'V'
+ : ' ',
(c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_SOURCE)
? 'S'
/* XXX: need to print ths flag in the plain text display as well */
if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
json_object_boolean_true_add(json, "sourceMsdp");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
+ json_object_boolean_true_add(json, "sendSGRptPrune");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+ json_object_boolean_true_add(json, "lastHopRouter");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+ json_object_boolean_true_add(json, "disableKATExpiry");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+ json_object_boolean_true_add(json, "staticIncomingInterface");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+ json_object_boolean_true_add(json,
+ "allowIncomingInterfaceinOil");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+ json_object_boolean_true_add(json, "noPimRegistrationData");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+ json_object_boolean_true_add(json, "forcePimRegistration");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+ json_object_boolean_true_add(json, "sourceVxlanOrigination");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+ json_object_boolean_true_add(json, "sourceVxlanTermination");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+ json_object_boolean_true_add(json, "mlagVxlan");
+
+ if (up->flags & PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
+ json_object_boolean_true_add(json,
+ "mlagNonDesignatedForwarder");
}
static const char *
}
static void pim_show_upstream(struct pim_instance *pim, struct vty *vty,
- bool uj)
+ struct prefix_sg *sg, bool uj)
{
struct listnode *upnode;
struct pim_upstream *up;
char msdp_reg_timer[10];
char state_str[PIM_REG_STATE_STR_LEN];
+ if (sg->grp.s_addr != 0 && sg->grp.s_addr != up->sg.grp.s_addr)
+ continue;
+ if (sg->src.s_addr != 0 && sg->src.s_addr != up->sg.src.s_addr)
+ continue;
+
pim_inet4_dump("<src?>", up->sg.src, src_str, sizeof(src_str));
pim_inet4_dump("<grp?>", up->sg.grp, grp_str, sizeof(grp_str));
pim_time_uptime(uptime, sizeof(uptime),
return CMD_SUCCESS;
}
-DEFUN (show_ip_pim_join,
+DEFPY (show_ip_pim_join,
show_ip_pim_join_cmd,
- "show ip pim [vrf NAME] join [json]",
+ "show ip pim [vrf NAME] join [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
"PIM interface join information\n"
+ "The Source or Group\n"
+ "The Group\n"
JSON_STR)
{
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
- bool uj = use_json(argc, argv);
+ struct prefix_sg sg = {0};
+ struct vrf *v;
+ bool uj = !!json;
+ struct pim_instance *pim;
- if (!vrf)
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v) {
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
return CMD_WARNING;
+ }
+ pim = pim_get_pim_instance(v->vrf_id);
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ if (s_or_g.s_addr != 0) {
+ if (g.s_addr != 0) {
+ sg.src = s_or_g;
+ sg.grp = g;
+ } else
+ sg.grp = s_or_g;
+ }
- pim_show_join(vrf->info, vty, uj);
+ pim_show_join(pim, vty, &sg, uj);
return CMD_SUCCESS;
}
"PIM interface join information\n"
JSON_STR)
{
+ struct prefix_sg sg = {0};
bool uj = use_json(argc, argv);
struct vrf *vrf;
bool first = true;
first = false;
} else
vty_out(vty, "VRF: %s\n", vrf->name);
- pim_show_join(vrf->info, vty, uj);
+ pim_show_join(vrf->info, vty, &sg, uj);
}
if (uj)
vty_out(vty, "}\n");
return CMD_SUCCESS;
}
-DEFUN (show_ip_pim_upstream,
+DEFPY (show_ip_pim_upstream,
show_ip_pim_upstream_cmd,
- "show ip pim [vrf NAME] upstream [json]",
+ "show ip pim [vrf NAME] upstream [A.B.C.D$s_or_g [A.B.C.D$g]] [json$json]",
SHOW_STR
IP_STR
PIM_STR
VRF_CMD_HELP_STR
"PIM upstream information\n"
+ "The Source or Group\n"
+ "The Group\n"
JSON_STR)
{
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
- bool uj = use_json(argc, argv);
+ struct prefix_sg sg = {0};
+ struct vrf *v;
+ bool uj = !!json;
+ struct pim_instance *pim;
- if (!vrf)
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v) {
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
+ return CMD_WARNING;
+ }
+ pim = pim_get_pim_instance(v->vrf_id);
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
return CMD_WARNING;
+ }
- pim_show_upstream(vrf->info, vty, uj);
+ if (s_or_g.s_addr != 0) {
+ if (g.s_addr != 0) {
+ sg.src = s_or_g;
+ sg.grp = g;
+ } else
+ sg.grp = s_or_g;
+ }
+ pim_show_upstream(pim, vty, &sg, uj);
return CMD_SUCCESS;
}
"PIM upstream information\n"
JSON_STR)
{
+ struct prefix_sg sg = {0};
bool uj = use_json(argc, argv);
struct vrf *vrf;
bool first = true;
first = false;
} else
vty_out(vty, "VRF: %s\n", vrf->name);
- pim_show_upstream(vrf->info, vty, uj);
+ pim_show_upstream(vrf->info, vty, &sg, uj);
}
return CMD_SUCCESS;
return CMD_SUCCESS;
}
-static void show_mroute(struct pim_instance *pim, struct vty *vty, bool fill,
- bool uj)
+static void show_mroute(struct pim_instance *pim, struct vty *vty,
+ struct prefix_sg *sg, bool fill, bool uj)
{
struct listnode *node;
struct channel_oil *c_oil;
if (!c_oil->installed && !uj)
continue;
+ if (sg->grp.s_addr != 0 &&
+ sg->grp.s_addr != c_oil->oil.mfcc_mcastgrp.s_addr)
+ continue;
+ if (sg->src.s_addr != 0 &&
+ sg->src.s_addr != c_oil->oil.mfcc_origin.s_addr)
+ continue;
+
pim_inet4_dump("<group?>", c_oil->oil.mfcc_mcastgrp, grp_str,
sizeof(grp_str));
pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, src_str,
json_object_boolean_true_add(
json_ifp_out, "protocolIgmp");
+ if (c_oil->oif_flags[oif_vif_index]
+ & PIM_OIF_FLAG_PROTO_VXLAN)
+ json_object_boolean_true_add(
+ json_ifp_out, "protocolVxlan");
+
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_SOURCE)
json_object_boolean_true_add(
strcpy(proto, "IGMP");
}
+ if (c_oil->oif_flags[oif_vif_index]
+ & PIM_OIF_FLAG_PROTO_VXLAN) {
+ strcpy(proto, "VxLAN");
+ }
+
if (c_oil->oif_flags[oif_vif_index]
& PIM_OIF_FLAG_PROTO_SOURCE) {
strcpy(proto, "SRC");
}
}
-DEFUN (show_ip_mroute,
+DEFPY (show_ip_mroute,
show_ip_mroute_cmd,
- "show ip mroute [vrf NAME] [fill] [json]",
+ "show ip mroute [vrf NAME] [A.B.C.D$s_or_g [A.B.C.D$g]] [fill$fill] [json$json]",
SHOW_STR
IP_STR
MROUTE_STR
VRF_CMD_HELP_STR
+ "The Source or Group\n"
+ "The Group\n"
"Fill in Assumed data\n"
JSON_STR)
{
- bool uj = use_json(argc, argv);
- bool fill = false;
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ struct prefix_sg sg = {0};
+ struct pim_instance *pim;
+ struct vrf *v;
- if (!vrf)
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v) {
+ vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
return CMD_WARNING;
+ }
+ pim = pim_get_pim_instance(v->vrf_id);
- if (argv_find(argv, argc, "fill", &idx))
- fill = true;
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
- show_mroute(vrf->info, vty, fill, uj);
+ if (s_or_g.s_addr != 0) {
+ if (g.s_addr != 0) {
+ sg.src = s_or_g;
+ sg.grp = g;
+ } else
+ sg.grp = s_or_g;
+ }
+ show_mroute(pim, vty, &sg, !!fill, !!json);
return CMD_SUCCESS;
}
"Fill in Assumed data\n"
JSON_STR)
{
+ struct prefix_sg sg = {0};
bool uj = use_json(argc, argv);
int idx = 4;
struct vrf *vrf;
first = false;
} else
vty_out(vty, "VRF: %s\n", vrf->name);
- show_mroute(vrf->info, vty, fill, uj);
+ show_mroute(vrf->info, vty, &sg, fill, uj);
}
if (uj)
vty_out(vty, "}\n");
pim_ifp = ifp->info;
if (!pim_ifp) {
- pim_ifp = pim_if_new(ifp, true, false, false);
+ pim_ifp = pim_if_new(ifp, true, false, false,
+ false /*vxlan_term*/);
if (!pim_ifp) {
vty_out(vty, "Could not enable IGMP on interface %s\n",
ifp->name);
struct pim_interface *pim_ifp = ifp->info;
if (!pim_ifp) {
- pim_ifp = pim_if_new(ifp, false, true, false);
+ pim_ifp = pim_if_new(ifp, false, true, false,
+ false /*vxlan_term*/);
if (!pim_ifp) {
return 0;
}
return CMD_SUCCESS;
}
+DEFUN (debug_pim_vxlan,
+ debug_pim_vxlan_cmd,
+ "debug pim vxlan",
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_VXLAN_STR)
+{
+ PIM_DO_DEBUG_VXLAN;
+ return CMD_SUCCESS;
+}
+
+DEFUN (no_debug_pim_vxlan,
+ no_debug_pim_vxlan_cmd,
+ "no debug pim vxlan",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIM_STR
+ DEBUG_PIM_VXLAN_STR)
+{
+ PIM_DONT_DEBUG_VXLAN;
+ return CMD_SUCCESS;
+}
+
DEFUN (debug_msdp,
debug_msdp_cmd,
"debug msdp",
return CMD_SUCCESS;
}
+struct pim_sg_cache_walk_data {
+ struct vty *vty;
+ json_object *json;
+ json_object *json_group;
+ struct in_addr addr;
+ bool addr_match;
+};
+
+static void pim_show_vxlan_sg_entry(struct pim_vxlan_sg *vxlan_sg,
+ struct pim_sg_cache_walk_data *cwd)
+{
+ struct vty *vty = cwd->vty;
+ json_object *json = cwd->json;
+ char src_str[INET_ADDRSTRLEN];
+ char grp_str[INET_ADDRSTRLEN];
+ json_object *json_row;
+ bool installed = (vxlan_sg->up)?TRUE:FALSE;
+ const char *iif_name = vxlan_sg->iif?vxlan_sg->iif->name:"-";
+ const char *oif_name;
+
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ oif_name = vxlan_sg->orig_oif?vxlan_sg->orig_oif->name:"";
+ else
+ oif_name = vxlan_sg->term_oif?vxlan_sg->term_oif->name:"";
+
+ if (cwd->addr_match && (vxlan_sg->sg.src.s_addr != cwd->addr.s_addr) &&
+ (vxlan_sg->sg.grp.s_addr != cwd->addr.s_addr)) {
+ return;
+ }
+ pim_inet4_dump("<src?>", vxlan_sg->sg.src, src_str, sizeof(src_str));
+ pim_inet4_dump("<grp?>", vxlan_sg->sg.grp, grp_str, sizeof(grp_str));
+ if (json) {
+ json_object_object_get_ex(json, grp_str, &cwd->json_group);
+
+ if (!cwd->json_group) {
+ cwd->json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ cwd->json_group);
+ }
+
+ json_row = json_object_new_object();
+ json_object_string_add(json_row, "source", src_str);
+ json_object_string_add(json_row, "group", grp_str);
+ json_object_string_add(json_row, "input", iif_name);
+ json_object_string_add(json_row, "output", oif_name);
+ if (installed)
+ json_object_boolean_true_add(json_row, "installed");
+ else
+ json_object_boolean_false_add(json_row, "installed");
+ json_object_object_add(cwd->json_group, src_str, json_row);
+ } else {
+ vty_out(vty, "%-15s %-15s %-15s %-15s %-5s\n",
+ src_str, grp_str, iif_name, oif_name,
+ installed?"I":"");
+ }
+}
+
+static void pim_show_vxlan_sg_hash_entry(struct hash_backet *backet, void *arg)
+{
+ pim_show_vxlan_sg_entry((struct pim_vxlan_sg *)backet->data,
+ (struct pim_sg_cache_walk_data *)arg);
+}
+
+static void pim_show_vxlan_sg(struct pim_instance *pim,
+ struct vty *vty, bool uj)
+{
+ json_object *json = NULL;
+ struct pim_sg_cache_walk_data cwd;
+
+ if (uj) {
+ json = json_object_new_object();
+ } else {
+ vty_out(vty, "Codes: I -> installed\n");
+ vty_out(vty,
+ "Source Group Input Output Flags\n");
+ }
+
+ memset(&cwd, 0, sizeof(cwd));
+ cwd.vty = vty;
+ cwd.json = json;
+ hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+static void pim_show_vxlan_sg_match_addr(struct pim_instance *pim,
+ struct vty *vty, char *addr_str, bool uj)
+{
+ json_object *json = NULL;
+ struct pim_sg_cache_walk_data cwd;
+ int result = 0;
+
+ memset(&cwd, 0, sizeof(cwd));
+ result = inet_pton(AF_INET, addr_str, &cwd.addr);
+ if (result <= 0) {
+ vty_out(vty, "Bad address %s: errno=%d: %s\n", addr_str,
+ errno, safe_strerror(errno));
+ return;
+ }
+
+ if (uj) {
+ json = json_object_new_object();
+ } else {
+ vty_out(vty, "Codes: I -> installed\n");
+ vty_out(vty,
+ "Source Group Input Output Flags\n");
+ }
+
+ cwd.vty = vty;
+ cwd.json = json;
+ cwd.addr_match = TRUE;
+ hash_iterate(pim->vxlan.sg_hash, pim_show_vxlan_sg_hash_entry, &cwd);
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+static void pim_show_vxlan_sg_one(struct pim_instance *pim,
+ struct vty *vty, char *src_str, char *grp_str, bool uj)
+{
+ json_object *json = NULL;
+ struct prefix_sg sg;
+ int result = 0;
+ struct pim_vxlan_sg *vxlan_sg;
+ const char *iif_name;
+ bool installed;
+ const char *oif_name;
+
+ result = inet_pton(AF_INET, src_str, &sg.src);
+ if (result <= 0) {
+ vty_out(vty, "Bad src address %s: errno=%d: %s\n", src_str,
+ errno, safe_strerror(errno));
+ return;
+ }
+ result = inet_pton(AF_INET, grp_str, &sg.grp);
+ if (result <= 0) {
+ vty_out(vty, "Bad grp address %s: errno=%d: %s\n", grp_str,
+ errno, safe_strerror(errno));
+ return;
+ }
+
+ sg.family = AF_INET;
+ sg.prefixlen = IPV4_MAX_BITLEN;
+ if (uj)
+ json = json_object_new_object();
+
+ vxlan_sg = pim_vxlan_sg_find(pim, &sg);
+ if (vxlan_sg) {
+ installed = (vxlan_sg->up)?TRUE:FALSE;
+ iif_name = vxlan_sg->iif?vxlan_sg->iif->name:"-";
+
+ if (pim_vxlan_is_orig_mroute(vxlan_sg))
+ oif_name =
+ vxlan_sg->orig_oif?vxlan_sg->orig_oif->name:"";
+ else
+ oif_name =
+ vxlan_sg->term_oif?vxlan_sg->term_oif->name:"";
+
+ if (uj) {
+ json_object_string_add(json, "source", src_str);
+ json_object_string_add(json, "group", grp_str);
+ json_object_string_add(json, "input", iif_name);
+ json_object_string_add(json, "output", oif_name);
+ if (installed)
+ json_object_boolean_true_add(json, "installed");
+ else
+ json_object_boolean_false_add(json,
+ "installed");
+ } else {
+ vty_out(vty, "SG : %s\n", vxlan_sg->sg_str);
+ vty_out(vty, " Input : %s\n", iif_name);
+ vty_out(vty, " Output : %s\n", oif_name);
+ vty_out(vty, " installed : %s\n",
+ installed?"yes":"no");
+ }
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+DEFUN (show_ip_pim_vxlan_sg,
+ show_ip_pim_vxlan_sg_cmd,
+ "show ip pim [vrf NAME] vxlan-groups [A.B.C.D [A.B.C.D]] [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "VxLAN BUM groups\n"
+ "source or group ip\n"
+ "group ip\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ int idx = 2;
+
+ vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ char *src_ip = argv_find(argv, argc, "A.B.C.D", &idx) ?
+ argv[idx++]->arg:NULL;
+ char *grp_ip = idx < argc && argv_find(argv, argc, "A.B.C.D", &idx) ?
+ argv[idx]->arg:NULL;
+
+ if (src_ip && grp_ip)
+ pim_show_vxlan_sg_one(vrf->info, vty, src_ip, grp_ip, uj);
+ else if (src_ip)
+ pim_show_vxlan_sg_match_addr(vrf->info, vty, src_ip, uj);
+ else
+ pim_show_vxlan_sg(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+static void pim_show_vxlan_sg_work(struct pim_instance *pim,
+ struct vty *vty, bool uj)
+{
+ json_object *json = NULL;
+ struct pim_sg_cache_walk_data cwd;
+ struct listnode *node;
+ struct pim_vxlan_sg *vxlan_sg;
+
+ if (uj) {
+ json = json_object_new_object();
+ } else {
+ vty_out(vty, "Codes: I -> installed\n");
+ vty_out(vty,
+ "Source Group Input Flags\n");
+ }
+
+ memset(&cwd, 0, sizeof(cwd));
+ cwd.vty = vty;
+ cwd.json = json;
+ for (ALL_LIST_ELEMENTS_RO(pim_vxlan_p->work_list, node, vxlan_sg))
+ pim_show_vxlan_sg_entry(vxlan_sg, &cwd);
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+}
+
+DEFUN_HIDDEN (show_ip_pim_vxlan_sg_work,
+ show_ip_pim_vxlan_sg_work_cmd,
+ "show ip pim [vrf NAME] vxlan-work [json]",
+ SHOW_STR
+ IP_STR
+ PIM_STR
+ VRF_CMD_HELP_STR
+ "VxLAN work list\n"
+ JSON_STR)
+{
+ bool uj = use_json(argc, argv);
+ struct vrf *vrf;
+ int idx = 2;
+
+ vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+
+ if (!vrf)
+ return CMD_WARNING;
+
+ pim_show_vxlan_sg_work(vrf->info, vty, uj);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN_HIDDEN (no_ip_pim_mlag,
+ no_ip_pim_mlag_cmd,
+ "no ip pim mlag",
+ NO_STR
+ IP_STR
+ PIM_STR
+ "MLAG\n")
+{
+ struct in_addr addr;
+
+ addr.s_addr = 0;
+ pim_vxlan_mlag_update(TRUE /*mlag_enable*/,
+ FALSE /*peer_state*/, PIM_VXLAN_MLAG_ROLE_SECONDARY,
+ NULL/*peerlink*/, &addr);
+
+ return CMD_SUCCESS;
+}
+
+DEFUN_HIDDEN (ip_pim_mlag,
+ ip_pim_mlag_cmd,
+ "ip pim mlag INTERFACE role [primary|secondary] state [up|down] addr A.B.C.D",
+ IP_STR
+ PIM_STR
+ "MLAG\n"
+ "peerlink sub interface\n"
+ "MLAG role\n"
+ "MLAG role primary\n"
+ "MLAG role secondary\n"
+ "peer session state\n"
+ "peer session state up\n"
+ "peer session state down\n"
+ "configure PIP\n"
+ "unique ip address\n")
+{
+ struct interface *ifp;
+ const char *peerlink;
+ uint32_t role;
+ int idx;
+ bool peer_state;
+ int result;
+ struct in_addr reg_addr;
+
+ idx = 3;
+ peerlink = argv[idx]->arg;
+ ifp = if_lookup_by_name(peerlink, VRF_DEFAULT);
+ if (!ifp) {
+ vty_out(vty, "No such interface name %s\n", peerlink);
+ return CMD_WARNING;
+ }
+
+ idx += 2;
+ if (!strcmp(argv[idx]->arg, "primary")) {
+ role = PIM_VXLAN_MLAG_ROLE_PRIMARY;
+ } else if (!strcmp(argv[idx]->arg, "secondary")) {
+ role = PIM_VXLAN_MLAG_ROLE_SECONDARY;
+ } else {
+ vty_out(vty, "unknown MLAG role %s\n", argv[idx]->arg);
+ return CMD_WARNING;
+ }
+
+ idx += 2;
+ if (!strcmp(argv[idx]->arg, "up")) {
+ peer_state = TRUE;
+ } else if (!strcmp(argv[idx]->arg, "down")) {
+ peer_state = FALSE;
+ } else {
+ vty_out(vty, "unknown MLAG state %s\n", argv[idx]->arg);
+ return CMD_WARNING;
+ }
+
+ idx += 2;
+ result = inet_pton(AF_INET, argv[idx]->arg, &reg_addr);
+ if (result <= 0) {
+ vty_out(vty, "%% Bad reg address %s: errno=%d: %s\n",
+ argv[idx]->arg,
+ errno, safe_strerror(errno));
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ pim_vxlan_mlag_update(TRUE, peer_state, role, ifp, &reg_addr);
+
+ return CMD_SUCCESS;
+}
+
void pim_cmd_init(void)
{
install_node(&interface_node,
install_element(VRF_NODE, &ip_pim_ecmp_rebalance_cmd);
install_element(CONFIG_NODE, &no_ip_pim_ecmp_rebalance_cmd);
install_element(VRF_NODE, &no_ip_pim_ecmp_rebalance_cmd);
+ install_element(CONFIG_NODE, &ip_pim_mlag_cmd);
+ install_element(CONFIG_NODE, &no_ip_pim_mlag_cmd);
install_element(INTERFACE_NODE, &interface_ip_igmp_cmd);
install_element(INTERFACE_NODE, &interface_no_ip_igmp_cmd);
install_element(ENABLE_NODE, &no_debug_ssmpingd_cmd);
install_element(ENABLE_NODE, &debug_pim_zebra_cmd);
install_element(ENABLE_NODE, &no_debug_pim_zebra_cmd);
+ install_element(ENABLE_NODE, &debug_pim_vxlan_cmd);
+ install_element(ENABLE_NODE, &no_debug_pim_vxlan_cmd);
install_element(ENABLE_NODE, &debug_msdp_cmd);
install_element(ENABLE_NODE, &no_debug_msdp_cmd);
install_element(ENABLE_NODE, &debug_msdp_events_cmd);
install_element(CONFIG_NODE, &no_debug_ssmpingd_cmd);
install_element(CONFIG_NODE, &debug_pim_zebra_cmd);
install_element(CONFIG_NODE, &no_debug_pim_zebra_cmd);
+ install_element(CONFIG_NODE, &debug_pim_vxlan_cmd);
+ install_element(CONFIG_NODE, &no_debug_pim_vxlan_cmd);
install_element(CONFIG_NODE, &debug_msdp_cmd);
install_element(CONFIG_NODE, &no_debug_msdp_cmd);
install_element(CONFIG_NODE, &debug_msdp_events_cmd);
install_element(VIEW_NODE, &show_ip_msdp_mesh_group_vrf_all_cmd);
install_element(VIEW_NODE, &show_ip_pim_ssm_range_cmd);
install_element(VIEW_NODE, &show_ip_pim_group_type_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_cmd);
+ install_element(VIEW_NODE, &show_ip_pim_vxlan_sg_work_cmd);
install_element(INTERFACE_NODE, &interface_pim_use_source_cmd);
install_element(INTERFACE_NODE, &interface_no_pim_use_source_cmd);
/* Install BFD command */
#define DEBUG_PIM_PACKETDUMP_RECV_STR "Dump received packets\n"
#define DEBUG_PIM_TRACE_STR "PIM internal daemon activity\n"
#define DEBUG_PIM_ZEBRA_STR "ZEBRA protocol activity\n"
+#define DEBUG_PIM_VXLAN_STR "PIM VxLAN events\n"
#define DEBUG_SSMPINGD_STR "ssmpingd activity\n"
#define CLEAR_IP_IGMP_STR "IGMP clear commands\n"
#define CLEAR_IP_PIM_STR "PIM clear commands\n"
#include "pim_nht.h"
#include "pim_jp_agg.h"
#include "pim_igmp_join.h"
+#include "pim_vxlan.h"
static void pim_if_igmp_join_del_all(struct interface *ifp);
static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
}
struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
- bool ispimreg)
+ bool ispimreg, bool is_vxlan_term)
{
struct pim_interface *pim_ifp;
pim_sock_reset(ifp);
- pim_if_add_vif(ifp, ispimreg);
+ pim_if_add_vif(ifp, ispimreg, is_vxlan_term);
return pim_ifp;
}
address assigned, then try to create a vif_index.
*/
if (pim_ifp->mroute_vif_index < 0) {
- pim_if_add_vif(ifp, false);
+ pim_if_add_vif(ifp, false, false /*vxlan_term*/);
}
pim_ifchannel_scan_forward_start(ifp);
}
* address assigned, then try to create a vif_index.
*/
if (pim_ifp->mroute_vif_index < 0) {
- pim_if_add_vif(ifp, false);
+ pim_if_add_vif(ifp, false, false /*vxlan_term*/);
}
pim_ifchannel_scan_forward_start(ifp);
see also pim_if_find_vifindex_by_ifindex()
*/
-int pim_if_add_vif(struct interface *ifp, bool ispimreg)
+int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term)
{
struct pim_interface *pim_ifp = ifp->info;
struct in_addr ifaddr;
}
ifaddr = pim_ifp->primary_address;
- if (!ispimreg && PIM_INADDR_IS_ANY(ifaddr)) {
+ if (!ispimreg && !is_vxlan_term && PIM_INADDR_IS_ANY(ifaddr)) {
zlog_warn(
"%s: could not get address for interface %s ifindex=%d",
__PRETTY_FUNCTION__, ifp->name, ifp->ifindex);
}
pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 1;
+
+ /* if the device qualifies as pim_vxlan iif/oif update vxlan entries */
+ pim_vxlan_add_vif(ifp);
+
return 0;
}
return -1;
}
+ /* if the device was a pim_vxlan iif/oif update vxlan mroute entries */
+ pim_vxlan_del_vif(ifp);
+
pim_mroute_del_vif(ifp);
/*
pim->regiface = if_create(pimreg_name, pim->vrf_id);
pim->regiface->ifindex = PIM_OIF_PIM_REGISTER_VIF;
- pim_if_new(pim->regiface, false, false, true);
+ pim_if_new(pim->regiface, false, false, true,
+ false /*vxlan_term*/);
}
}
void pim_if_terminate(struct pim_instance *pim);
struct pim_interface *pim_if_new(struct interface *ifp, bool igmp, bool pim,
- bool ispimreg);
+ bool ispimreg, bool is_vxlan_term);
void pim_if_delete(struct interface *ifp);
void pim_if_addr_add(struct connected *ifc);
void pim_if_addr_del(struct connected *ifc, int force_prim_as_any);
void pim_if_addr_del_all_igmp(struct interface *ifp);
void pim_if_addr_del_all_pim(struct interface *ifp);
-int pim_if_add_vif(struct interface *ifp, bool ispimreg);
+int pim_if_add_vif(struct interface *ifp, bool ispimreg, bool is_vxlan_term);
int pim_if_del_vif(struct interface *ifp);
void pim_if_add_vif_all(struct pim_instance *pim);
void pim_if_del_vif_all(struct pim_instance *pim);
static void pim_instance_terminate(struct pim_instance *pim)
{
+ pim_vxlan_exit(pim);
+
if (pim->ssm_info) {
pim_ssm_terminate(pim->ssm_info);
pim->ssm_info = NULL;
pim->spt.plist = NULL;
pim_msdp_init(pim, router->master);
+ pim_vxlan_init(pim);
snprintf(hash_name, 64, "PIM %s RPF Hash", vrf->name);
pim->rpf_hash = hash_create_size(256, pim_rpf_hash_key, pim_rpf_equal,
#include "pim_str.h"
#include "pim_msdp.h"
#include "pim_assert.h"
+#include "pim_vxlan_instance.h"
#if defined(HAVE_LINUX_MROUTE_H)
#include <linux/mroute.h>
struct hash *channel_oil_hash;
struct pim_msdp msdp;
+ struct pim_vxlan_instance vxlan;
struct list *ssmpingd_list;
struct in_addr ssmpingd_group_addr;
DEFINE_MTYPE(PIMD, PIM_NEXTHOP_CACHE, "PIM nexthop cache state")
DEFINE_MTYPE(PIMD, PIM_SSM_INFO, "PIM SSM configuration")
DEFINE_MTYPE(PIMD, PIM_SPT_PLIST_NAME, "PIM SPT Prefix List Name")
+DEFINE_MTYPE(PIMD, PIM_VXLAN_SG, "PIM VxLAN mroute cache")
DECLARE_MTYPE(PIM_NEXTHOP_CACHE)
DECLARE_MTYPE(PIM_SSM_INFO)
DECLARE_MTYPE(PIM_SPT_PLIST_NAME);
+DECLARE_MTYPE(PIM_VXLAN_SG)
#endif /* _QUAGGA_PIM_MEMORY_H */
#include "pim_zlookup.h"
#include "pim_ssm.h"
#include "pim_sock.h"
+#include "pim_vxlan.h"
static void mroute_read_on(struct pim_instance *pim);
int err;
int orig = 0;
int orig_iif_vif = 0;
+ struct pim_interface *pim_reg_ifp;
+ int orig_pimreg_ttl;
+ bool pimreg_ttl_reset = false;
+ struct pim_interface *vxlan_ifp;
+ int orig_term_ttl;
+ bool orig_term_ttl_reset = false;
pim->mroute_add_last = pim_time_monotonic_sec();
++pim->mroute_add_events;
c_oil->oil.mfcc_ttls[c_oil->oil.mfcc_parent] = 1;
}
+ if (c_oil->up) {
+ /* suppress pimreg in the OIL if the mroute is not supposed to
+ * trigger register encapsulated data
+ */
+ if (PIM_UPSTREAM_FLAG_TEST_NO_PIMREG_DATA(c_oil->up->flags)) {
+ pim_reg_ifp = pim->regiface->info;
+ orig_pimreg_ttl =
+ c_oil->oil.mfcc_ttls[pim_reg_ifp->mroute_vif_index];
+ c_oil->oil.mfcc_ttls[pim_reg_ifp->mroute_vif_index] = 0;
+ /* remember to flip it back after MFC programming */
+ pimreg_ttl_reset = true;
+ }
+
+ vxlan_ifp = pim_vxlan_get_term_ifp(pim);
+ /* 1. vxlan termination device must never be added to the
+ * origination mroute (and that can actually happen because
+ * of XG inheritance from the termination mroute) otherwise
+ * traffic will end up looping.
+ * 2. vxlan termination device should be removed from the non-DF
+ * to prevent duplicates to the overlay rxer
+ */
+ if (vxlan_ifp &&
+ (PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(c_oil->up->flags) ||
+ PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(c_oil->up->flags))) {
+ orig_term_ttl_reset = true;
+ orig_term_ttl =
+ c_oil->oil.mfcc_ttls[vxlan_ifp->mroute_vif_index];
+ c_oil->oil.mfcc_ttls[vxlan_ifp->mroute_vif_index] = 0;
+ }
+ }
+
/*
* If we have an unresolved cache entry for the S,G
* it is owned by the pimreg for the incoming IIF
if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY)
c_oil->oil.mfcc_ttls[c_oil->oil.mfcc_parent] = orig;
+ if (pimreg_ttl_reset)
+ c_oil->oil.mfcc_ttls[pim_reg_ifp->mroute_vif_index] =
+ orig_pimreg_ttl;
+
+ if (orig_term_ttl_reset)
+ c_oil->oil.mfcc_ttls[vxlan_ifp->mroute_vif_index] =
+ orig_term_ttl;
+
if (err) {
zlog_warn(
"%s %s: failure: setsockopt(fd=%d,IPPROTO_IP,MRT_ADD_MFC): errno=%d: %s",
{
struct pim_interface *pim_ifp;
int old_ttl;
+ bool allow_iif_in_oil = false;
/*
* If we've gotten here we've gone bad, but let's
by both source and receiver attached to the same interface. See
TODO T22.
*/
- if (pim_ifp->mroute_vif_index == channel_oil->oil.mfcc_parent) {
+ if (channel_oil->up &&
+ PIM_UPSTREAM_FLAG_TEST_ALLOW_IIF_IN_OIL(
+ channel_oil->up->flags)) {
+ allow_iif_in_oil = true;
+ }
+
+ if (!allow_iif_in_oil &&
+ pim_ifp->mroute_vif_index == channel_oil->oil.mfcc_parent) {
channel_oil->oil_inherited_rescan = 1;
if (PIM_DEBUG_MROUTE) {
char group_str[INET_ADDRSTRLEN];
#define PIM_OIF_FLAG_PROTO_PIM (1 << 1)
#define PIM_OIF_FLAG_PROTO_SOURCE (1 << 2)
#define PIM_OIF_FLAG_PROTO_STAR (1 << 3)
-#define PIM_OIF_FLAG_PROTO_ANY \
- (PIM_OIF_FLAG_PROTO_IGMP | PIM_OIF_FLAG_PROTO_PIM \
- | PIM_OIF_FLAG_PROTO_SOURCE | PIM_OIF_FLAG_PROTO_STAR)
+#define PIM_OIF_FLAG_PROTO_VXLAN (1 << 4)
+#define PIM_OIF_FLAG_PROTO_ANY \
+ (PIM_OIF_FLAG_PROTO_IGMP | PIM_OIF_FLAG_PROTO_PIM \
+ | PIM_OIF_FLAG_PROTO_SOURCE | PIM_OIF_FLAG_PROTO_STAR \
+ | PIM_OIF_FLAG_PROTO_VXLAN)
/*
* We need a pimreg vif id from the kernel.
#include "pim_join.h"
#include "pim_util.h"
#include "pim_ssm.h"
+#include "pim_vxlan.h"
struct thread *send_test_packet_timer = NULL;
pim_channel_add_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM);
up->reg_state = PIM_REG_JOIN;
+ pim_vxlan_update_sg_reg_state(pim, up, TRUE /*reg_join*/);
}
void pim_register_stop_send(struct interface *ifp, struct prefix_sg *sg,
pim_channel_del_oif(upstream->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM);
pim_upstream_start_register_stop_timer(upstream, 0);
+ pim_vxlan_update_sg_reg_state(pim, upstream,
+ FALSE /*reg_join*/);
break;
case PIM_REG_JOIN_PENDING:
upstream->reg_state = PIM_REG_PRUNE;
}
}
+void pim_null_register_send(struct pim_upstream *up)
+{
+ struct ip ip_hdr;
+ struct pim_interface *pim_ifp;
+ struct pim_rpf *rpg;
+ struct in_addr src;
+
+ pim_ifp = up->rpf.source_nexthop.interface->info;
+ if (!pim_ifp) {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug(
+ "%s: Cannot send null-register for %s no valid iif",
+ __PRETTY_FUNCTION__, up->sg_str);
+ return;
+ }
+
+ rpg = RP(pim_ifp->pim, up->sg.grp);
+ if (!rpg) {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug(
+ "%s: Cannot send null-register for %s no RPF to the RP",
+ __PRETTY_FUNCTION__, up->sg_str);
+ return;
+ }
+
+ memset(&ip_hdr, 0, sizeof(struct ip));
+ ip_hdr.ip_p = PIM_IP_PROTO_PIM;
+ ip_hdr.ip_hl = 5;
+ ip_hdr.ip_v = 4;
+ ip_hdr.ip_src = up->sg.src;
+ ip_hdr.ip_dst = up->sg.grp;
+ ip_hdr.ip_len = htons(20);
+
+ /* checksum is broken */
+ src = pim_ifp->primary_address;
+ if (PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(up->flags)) {
+ if (!pim_vxlan_get_register_src(pim_ifp->pim, up, &src)) {
+ if (PIM_DEBUG_TRACE)
+ zlog_debug(
+ "%s: Cannot send null-register for %s vxlan-aa PIP unavailable",
+ __PRETTY_FUNCTION__, up->sg_str);
+ return;
+ }
+ }
+ pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip),
+ src, rpg, 1, up);
+}
+
/*
* 4.4.2 Receiving Register Messages at the RP
*
void pim_register_stop_send(struct interface *ifp, struct prefix_sg *sg,
struct in_addr src, struct in_addr originator);
void pim_register_join(struct pim_upstream *up);
+void pim_null_register_send(struct pim_upstream *up);
#endif
struct prefix src, grp;
bool neigh_needed = true;
+ if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags))
+ return PIM_RPF_OK;
+
if (up->upstream_addr.s_addr == INADDR_ANY) {
zlog_debug("%s: RP is not configured yet for %s",
__PRETTY_FUNCTION__, up->sg_str);
errno = save_errno;
}
-void pim_inet4_dump(const char *onfail, struct in_addr addr, char *buf,
- int buf_size)
-{
- int save_errno = errno;
-
- if (addr.s_addr == INADDR_ANY)
- strcpy(buf, "*");
- else {
- if (!inet_ntop(AF_INET, &addr, buf, buf_size)) {
- zlog_warn(
- "pim_inet4_dump: inet_ntop(AF_INET,buf_size=%d): errno=%d: %s",
- buf_size, errno, safe_strerror(errno));
- if (onfail)
- snprintf(buf, buf_size, "%s", onfail);
- }
- }
-
- errno = save_errno;
-}
-
char *pim_str_sg_dump(const struct prefix_sg *sg)
{
- char src_str[INET_ADDRSTRLEN];
- char grp_str[INET_ADDRSTRLEN];
static char sg_str[PIM_SG_LEN];
- pim_inet4_dump("<src?>", sg->src, src_str, sizeof(src_str));
- pim_inet4_dump("<grp?>", sg->grp, grp_str, sizeof(grp_str));
- snprintf(sg_str, PIM_SG_LEN, "(%s,%s)", src_str, grp_str);
+ pim_str_sg_set(sg, sg_str);
return sg_str;
}
-char *pim_str_sg_set(const struct prefix_sg *sg, char *sg_str)
-{
- char src_str[INET_ADDRSTRLEN];
- char grp_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<src?>", sg->src, src_str, sizeof(src_str));
- pim_inet4_dump("<grp?>", sg->grp, grp_str, sizeof(grp_str));
- snprintf(sg_str, PIM_SG_LEN, "(%s,%s)", src_str, grp_str);
-
- return sg_str;
-}
* NULL Character at end = 1
* (123.123.123.123,123,123,123,123)
*/
-#define PIM_SG_LEN 36
+#define PIM_SG_LEN PREFIX_SG_STR_LEN
+#define pim_inet4_dump prefix_mcast_inet4_dump
+#define pim_str_sg_set prefix_sg2str
void pim_addr_dump(const char *onfail, struct prefix *p, char *buf,
int buf_size);
void pim_inet4_dump(const char *onfail, struct in_addr addr, char *buf,
int buf_size);
char *pim_str_sg_dump(const struct prefix_sg *sg);
-char *pim_str_sg_set(const struct prefix_sg *sg, char *sg_str);
#endif
#include "pim_jp_agg.h"
#include "pim_nht.h"
#include "pim_ssm.h"
+#include "pim_vxlan.h"
static void join_timer_stop(struct pim_upstream *up);
static void
{
struct pim_interface *pim_ifp = NULL;
+ /* FORCE_PIMREG is a generic flag to let an app like VxLAN-AA register
+ * a source on an upstream entry even if the source is not directly
+ * connected on the IIF.
+ */
+ if (PIM_UPSTREAM_FLAG_TEST_FORCE_PIMREG(up->flags))
+ return 1;
+
if (up->rpf.source_nexthop.interface)
pim_ifp = up->rpf.source_nexthop.interface->info;
else {
pim_upstream_update_assert_tracking_desired(up);
if (new_state == PIM_UPSTREAM_JOINED) {
+ pim_upstream_inherited_olist_decide(pim, up);
if (old_state != PIM_UPSTREAM_JOINED) {
int old_fhr = PIM_UPSTREAM_FLAG_TEST_FHR(up->flags);
- forward_on(up);
+
pim_msdp_up_join_state_changed(pim, up);
if (pim_upstream_could_register(up)) {
PIM_UPSTREAM_FLAG_SET_FHR(up->flags);
pim_upstream_send_join(up);
join_timer_start(up);
}
- } else {
- forward_on(up);
}
} else {
return 0;
}
+void pim_upstream_fill_static_iif(struct pim_upstream *up,
+ struct interface *incoming)
+{
+ up->rpf.source_nexthop.interface = incoming;
+
+ /* reset other parameters to matched a connected incoming interface */
+ up->rpf.source_nexthop.mrib_nexthop_addr.family = AF_INET;
+ up->rpf.source_nexthop.mrib_nexthop_addr.u.prefix4.s_addr =
+ PIM_NET_INADDR_ANY;
+ up->rpf.source_nexthop.mrib_metric_preference =
+ ZEBRA_CONNECT_DISTANCE_DEFAULT;
+ up->rpf.source_nexthop.mrib_route_metric = 0;
+ up->rpf.rpf_addr.family = AF_INET;
+ up->rpf.rpf_addr.u.prefix4.s_addr = PIM_NET_INADDR_ANY;
+
+}
+
static struct pim_upstream *pim_upstream_new(struct pim_instance *pim,
struct prefix_sg *sg,
struct interface *incoming,
if (up->sg.src.s_addr != INADDR_ANY)
wheel_add_item(pim->upstream_sg_wheel, up);
- if (up->upstream_addr.s_addr == INADDR_ANY)
+ if (PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
+ pim_upstream_fill_static_iif(up, incoming);
+ pim_ifp = up->rpf.source_nexthop.interface->info;
+ assert(pim_ifp);
+ up->channel_oil = pim_channel_oil_add(pim,
+ &up->sg, pim_ifp->mroute_vif_index);
+ } else if (up->upstream_addr.s_addr == INADDR_ANY) {
/* Create a dummmy channel oil with incoming ineterface MAXVIFS,
* since RP is not configured
*/
up->channel_oil = pim_channel_oil_add(pim, &up->sg, MAXVIFS);
- else {
+ } else {
rpf_result = pim_rpf_update(pim, up, NULL, 1);
if (rpf_result == PIM_RPF_FAILURE) {
if (PIM_DEBUG_TRACE)
* KAT expiry indicates that flow is inactive. If the flow was created or
* maintained by activity now is the time to deref it.
*/
-static int pim_upstream_keep_alive_timer(struct thread *t)
+struct pim_upstream *pim_upstream_keep_alive_timer_proc(
+ struct pim_upstream *up)
{
- struct pim_upstream *up;
struct pim_instance *pim;
- up = THREAD_ARG(t);
pim = up->channel_oil->pim;
+ if (PIM_UPSTREAM_FLAG_TEST_DISABLE_KAT_EXPIRY(up->flags)) {
+ /* if the router is a PIM vxlan encapsulator we prevent expiry
+ * of KAT as the mroute is pre-setup without any traffic
+ */
+ pim_upstream_keep_alive_timer_start(up, pim->keep_alive_time);
+ return up;
+ }
+
if (I_am_RP(pim, up->sg.grp)) {
pim_br_clear_pmbr(&up->sg);
/*
"kat expired on %s[%s]; remove stream reference",
up->sg_str, pim->vrf->name);
PIM_UPSTREAM_FLAG_UNSET_SRC_STREAM(up->flags);
- pim_upstream_del(pim, up, __PRETTY_FUNCTION__);
+ up = pim_upstream_del(pim, up, __PRETTY_FUNCTION__);
} else if (PIM_UPSTREAM_FLAG_TEST_SRC_LHR(up->flags)) {
struct pim_upstream *parent = up->parent;
PIM_UPSTREAM_FLAG_UNSET_SRC_LHR(up->flags);
- pim_upstream_del(pim, up, __PRETTY_FUNCTION__);
+ up = pim_upstream_del(pim, up, __PRETTY_FUNCTION__);
if (parent) {
pim_jp_agg_single_upstream_send(&parent->rpf, parent,
}
}
+ return up;
+}
+static int pim_upstream_keep_alive_timer(struct thread *t)
+{
+ struct pim_upstream *up;
+
+ up = THREAD_ARG(t);
+
+ pim_upstream_keep_alive_timer_proc(up);
return 0;
}
struct pim_interface *pim_ifp;
struct pim_instance *pim;
struct pim_upstream *up;
- struct pim_rpf *rpg;
- struct ip ip_hdr;
up = THREAD_ARG(t);
pim = up->channel_oil->pim;
up->reg_state = PIM_REG_JOIN;
pim_channel_add_oif(up->channel_oil, pim->regiface,
PIM_OIF_FLAG_PROTO_PIM);
+ pim_vxlan_update_sg_reg_state(pim, up, TRUE /*reg_join*/);
break;
case PIM_REG_JOIN:
break;
__PRETTY_FUNCTION__);
return 0;
}
- rpg = RP(pim_ifp->pim, up->sg.grp);
- if (!rpg) {
- if (PIM_DEBUG_TRACE)
- zlog_debug(
- "%s: Cannot send register for %s no RPF to the RP",
- __PRETTY_FUNCTION__, up->sg_str);
- return 0;
- }
- memset(&ip_hdr, 0, sizeof(struct ip));
- ip_hdr.ip_p = PIM_IP_PROTO_PIM;
- ip_hdr.ip_hl = 5;
- ip_hdr.ip_v = 4;
- ip_hdr.ip_src = up->sg.src;
- ip_hdr.ip_dst = up->sg.grp;
- ip_hdr.ip_len = htons(20);
- // checksum is broken
- pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip),
- pim_ifp->primary_address, rpg, 1, up);
+ pim_null_register_send(up);
break;
default:
break;
#define PIM_UPSTREAM_FLAG_MASK_SRC_MSDP (1 << 6)
#define PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE (1 << 7)
#define PIM_UPSTREAM_FLAG_MASK_SRC_LHR (1 << 8)
+/* In the case of pim vxlan we prime the pump by registering the
+ * vxlan source and keeping the SPT (FHR-RP) alive by sending periodic
+ * NULL registers. So we need to prevent KAT expiry because of the
+ * lack of BUM traffic.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY (1 << 9)
+/* for pim vxlan we need to pin the IIF to lo or MLAG-ISL on the
+ * originating VTEP. This flag allows that by setting IIF to the
+ * value specified and preventing next-hop-tracking on the entry
+ */
+#define PIM_UPSTREAM_FLAG_MASK_STATIC_IIF (1 << 10)
+#define PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL (1 << 11)
+/* Disable pimreg encasulation for a flow */
+#define PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA (1 << 12)
+/* For some MDTs we need to register the router as a source even
+ * if the not DR or directly connected on the IIF. This is typically
+ * needed on a VxLAN-AA (MLAG) setup.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG (1 << 13)
+/* VxLAN origination mroute - SG was registered by EVPN where S is the
+ * local VTEP IP and G is the BUM multicast group address
+ */
+#define PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG (1 << 14)
+/* VxLAN termination mroute - *G entry where G is the BUM multicast group
+ * address
+ */
+#define PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM (1 << 15)
+/* MLAG mroute - synced to the MLAG peer and subject to DF (designated
+ * forwarder) election
+ */
+#define PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN (1 << 16)
+/* MLAG mroute that lost the DF election with peer and is installed in
+ * a dormant state i.e. MLAG OIFs are removed from the MFC.
+ * In most cases the OIL is empty (but not not always) simply
+ * blackholing the traffic pulled down to the LHR.
+ */
+#define PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF (1 << 17)
#define PIM_UPSTREAM_FLAG_ALL 0xFFFFFFFF
#define PIM_UPSTREAM_FLAG_TEST_DR_JOIN_DESIRED(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)
#define PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
#define PIM_UPSTREAM_FLAG_TEST_SEND_SG_RPT_PRUNE(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
#define PIM_UPSTREAM_FLAG_TEST_SRC_LHR(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+#define PIM_UPSTREAM_FLAG_TEST_DISABLE_KAT_EXPIRY(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+#define PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+#define PIM_UPSTREAM_FLAG_TEST_ALLOW_IIF_IN_OIL(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+#define PIM_UPSTREAM_FLAG_TEST_NO_PIMREG_DATA(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+#define PIM_UPSTREAM_FLAG_TEST_FORCE_PIMREG(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_ORIG(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN_TERM(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+#define PIM_UPSTREAM_FLAG_TEST_SRC_VXLAN(flags) ((flags) & (PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG | PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM))
+#define PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+#define PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(flags) ((flags) & PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
#define PIM_UPSTREAM_FLAG_SET_DR_JOIN_DESIRED(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)
#define PIM_UPSTREAM_FLAG_SET_DR_JOIN_DESIRED_UPDATED(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED)
#define PIM_UPSTREAM_FLAG_SET_SRC_MSDP(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
#define PIM_UPSTREAM_FLAG_SET_SEND_SG_RPT_PRUNE(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
#define PIM_UPSTREAM_FLAG_SET_SRC_LHR(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+#define PIM_UPSTREAM_FLAG_SET_DISABLE_KAT_EXPIRY(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+#define PIM_UPSTREAM_FLAG_SET_STATIC_IIF(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+#define PIM_UPSTREAM_FLAG_SET_ALLOW_IIF_IN_OIL(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+#define PIM_UPSTREAM_FLAG_SET_NO_PIMREG_DATA(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+#define PIM_UPSTREAM_FLAG_SET_FORCE_PIMREG(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+#define PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_ORIG(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+#define PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_TERM(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+#define PIM_UPSTREAM_FLAG_SET_MLAG_VXLAN(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+#define PIM_UPSTREAM_FLAG_SET_MLAG_NON_DF(flags) ((flags) |= PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
#define PIM_UPSTREAM_FLAG_UNSET_DR_JOIN_DESIRED(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED)
#define PIM_UPSTREAM_FLAG_UNSET_DR_JOIN_DESIRED_UPDATED(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_DR_JOIN_DESIRED_UPDATED)
#define PIM_UPSTREAM_FLAG_UNSET_SRC_MSDP(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_MSDP)
#define PIM_UPSTREAM_FLAG_UNSET_SEND_SG_RPT_PRUNE(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SEND_SG_RPT_PRUNE)
#define PIM_UPSTREAM_FLAG_UNSET_SRC_LHR(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_LHR)
+#define PIM_UPSTREAM_FLAG_UNSET_DISABLE_KAT_EXPIRY(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY)
+#define PIM_UPSTREAM_FLAG_UNSET_STATIC_IIF(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_STATIC_IIF)
+#define PIM_UPSTREAM_FLAG_UNSET_ALLOW_IIF_IN_OIL(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL)
+#define PIM_UPSTREAM_FLAG_UNSET_NO_PIMREG_DATA(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA)
+#define PIM_UPSTREAM_FLAG_UNSET_FORCE_PIMREG(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_VXLAN_ORIG(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG)
+#define PIM_UPSTREAM_FLAG_UNSET_SRC_VXLAN_TERM(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM)
+#define PIM_UPSTREAM_FLAG_UNSET_MLAG_VXLAN(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN)
+#define PIM_UPSTREAM_FLAG_UNSET_MLAG_NON_DF(flags) ((flags) &= ~PIM_UPSTREAM_FLAG_MASK_MLAG_NON_DF)
enum pim_upstream_state {
PIM_UPSTREAM_NOTJOINED,
unsigned int pim_upstream_hash_key(void *arg);
bool pim_upstream_equal(const void *arg1, const void *arg2);
+struct pim_upstream *pim_upstream_keep_alive_timer_proc(
+ struct pim_upstream *up);
+void pim_upstream_fill_static_iif(struct pim_upstream *up,
+ struct interface *incoming);
#endif /* PIM_UPSTREAM_H */
#include "pim_msdp.h"
#include "pim_ssm.h"
#include "pim_bfd.h"
+#include "pim_vxlan.h"
int pim_debug_config_write(struct vty *vty)
{
++writes;
}
+ if (PIM_DEBUG_VXLAN) {
+ vty_out(vty, "debug pim vxlan\n");
+ ++writes;
+ }
+
if (PIM_DEBUG_SSMPINGD) {
vty_out(vty, "debug ssmpingd\n");
++writes;
}
}
+ pim_vxlan_config_write(vty, spaces, &writes);
+
return writes;
}
--- /dev/null
+/* PIM support for VxLAN BUM flooding
+ *
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <zebra.h>
+
+#include <hash.h>
+#include <jhash.h>
+#include <log.h>
+#include <prefix.h>
+#include <vrf.h>
+
+#include "pimd.h"
+#include "pim_iface.h"
+#include "pim_memory.h"
+#include "pim_oil.h"
+#include "pim_register.h"
+#include "pim_str.h"
+#include "pim_upstream.h"
+#include "pim_ifchannel.h"
+#include "pim_nht.h"
+#include "pim_zebra.h"
+#include "pim_vxlan.h"
+
+/* pim-vxlan global info */
+struct pim_vxlan vxlan_info, *pim_vxlan_p = &vxlan_info;
+
+static void pim_vxlan_work_timer_setup(bool start);
+static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim,
+ struct interface *ifp);
+
+/*************************** vxlan work list **********************************
+ * A work list is maintained for staggered generation of pim null register
+ * messages for vxlan SG entries that are in a reg_join state.
+ *
+ * A max of 500 NULL registers are generated at one shot. If paused reg
+ * generation continues on the next second and so on till all register
+ * messages have been sent out. And the process is restarted every 60s.
+ *
+ * The purpose of this null register generation is to set up the SPT and
+ * maintain it independently of the presence of overlay BUM traffic.
+ ****************************************************************************/
+/* Walk the work list and send a NULL register for every vxlan SG whose
+ * upstream entry is in PIM_REG_JOIN state. At most max_work_cnt entries
+ * are processed per tick; next_work remembers where to resume. Every
+ * PIM_VXLAN_NULL_REG_INTERVAL seconds the walk restarts from the head.
+ */
+static void pim_vxlan_do_reg_work(void)
+{
+	struct listnode *listnode;
+	int work_cnt = 0;
+	struct pim_vxlan_sg *vxlan_sg;
+	static int sec_count;
+
+	++sec_count;
+
+	if (sec_count > PIM_VXLAN_NULL_REG_INTERVAL) {
+		sec_count = 0;
+		/* interval expired: resume a paused walk or restart from
+		 * the list head
+		 */
+		listnode = vxlan_info.next_work ?
+			vxlan_info.next_work :
+			vxlan_info.work_list->head;
+		if (PIM_DEBUG_VXLAN && listnode)
+			zlog_debug("vxlan SG work %s",
+				vxlan_info.next_work ? "continues" : "starts");
+	} else {
+		listnode = vxlan_info.next_work;
+	}
+
+	for (; listnode; listnode = listnode->next) {
+		vxlan_sg = (struct pim_vxlan_sg *)listnode->data;
+		if (vxlan_sg->up && (vxlan_sg->up->reg_state == PIM_REG_JOIN)) {
+			if (PIM_DEBUG_VXLAN)
+				zlog_debug("vxlan SG %s periodic NULL register",
+						vxlan_sg->sg_str);
+			pim_null_register_send(vxlan_sg->up);
+			++work_cnt;
+		}
+
+		/* pause once the per-tick budget is exhausted; note the
+		 * strict ">" lets max_work_cnt + 1 items through per tick
+		 */
+		if (work_cnt > vxlan_info.max_work_cnt) {
+			vxlan_info.next_work = listnode->next;
+			if (PIM_DEBUG_VXLAN)
+				zlog_debug("vxlan SG %d work items proc and pause",
+					work_cnt);
+			return;
+		}
+	}
+
+	if (work_cnt) {
+		if (PIM_DEBUG_VXLAN)
+			zlog_debug("vxlan SG %d work items proc", work_cnt);
+	}
+	vxlan_info.next_work = NULL;
+}
+
+/* Staggered work related info is initialized when the first work comes
+ * along
+ */
+static void pim_vxlan_init_work(void)
+{
+	if (vxlan_info.flags & PIM_VXLANF_WORK_INITED)
+		return;
+
+	vxlan_info.max_work_cnt = PIM_VXLAN_WORK_MAX;
+	vxlan_info.flags |= PIM_VXLANF_WORK_INITED;
+	vxlan_info.work_list = list_new();
+	/* use stdbool "true" consistently; the rest of this file does not
+	 * use the legacy TRUE macro
+	 */
+	pim_vxlan_work_timer_setup(true /* start */);
+}
+
+/* Queue an SG on the periodic NULL-register work list. No-op if the
+ * entry is being deleted or is already queued.
+ */
+static void pim_vxlan_add_work(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (vxlan_sg->flags & PIM_VXLAN_SGF_DEL_IN_PROG) {
+		if (PIM_DEBUG_VXLAN)
+			zlog_debug("vxlan SG %s skip work list; del-in-prog",
+					vxlan_sg->sg_str);
+		return;
+	}
+
+	pim_vxlan_init_work();
+
+	/* already a part of the work list */
+	if (vxlan_sg->work_node)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s work list add",
+				vxlan_sg->sg_str);
+	vxlan_sg->work_node = listnode_add(vxlan_info.work_list, vxlan_sg);
+	/* XXX: adjust max_work_cnt if needed */
+}
+
+/* Remove an SG from the work list; advances the resume cursor first so
+ * it never dangles on the node being removed.
+ */
+static void pim_vxlan_del_work(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (!vxlan_sg->work_node)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s work list del",
+				vxlan_sg->sg_str);
+
+	/* keep next_work valid across the deletion */
+	if (vxlan_sg->work_node == vxlan_info.next_work)
+		vxlan_info.next_work = vxlan_sg->work_node->next;
+
+	list_delete_node(vxlan_info.work_list, vxlan_sg->work_node);
+	vxlan_sg->work_node = NULL;
+}
+
+/* React to register-state changes on a vxlan SG's upstream entry:
+ * keep it on the work list only while in PIM_REG_JOIN.
+ */
+void pim_vxlan_update_sg_reg_state(struct pim_instance *pim,
+		struct pim_upstream *up, bool reg_join)
+{
+	struct pim_vxlan_sg *vxlan_sg;
+
+	vxlan_sg = pim_vxlan_sg_find(pim, &up->sg);
+	if (!vxlan_sg)
+		return;
+
+	/* add the vxlan sg entry to a work list for periodic reg joins.
+	 * the entry will stay in the list as long as the register state is
+	 * PIM_REG_JOIN
+	 */
+	if (reg_join)
+		pim_vxlan_add_work(vxlan_sg);
+	else
+		pim_vxlan_del_work(vxlan_sg);
+}
+
+/* 1s periodic timer callback: process one slice of NULL-register work
+ * and re-arm.
+ */
+static int pim_vxlan_work_timer_cb(struct thread *t)
+{
+	pim_vxlan_do_reg_work();
+	pim_vxlan_work_timer_setup(true /* start */);
+	return 0;
+}
+
+/* global 1second timer used for periodic processing; any pending timer
+ * is always cancelled before (optionally) re-arming
+ */
+static void pim_vxlan_work_timer_setup(bool start)
+{
+	THREAD_OFF(vxlan_info.work_timer);
+	if (start)
+		thread_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
+			PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
+}
+
+/**************************** vxlan origination mroutes ***********************
+ * For every (local-vtep-ip, bum-mcast-grp) registered by evpn an origination
+ * mroute is setup by pimd. The purpose of this mroute is to forward vxlan
+ * encapsulated BUM (broadcast, unknown-unicast and unknown-multicast)
+ * packets over the underlay.
+ *
+ * Sample mroute (single VTEP):
+ * (27.0.0.7, 239.1.1.100) Iif: lo Oifs: uplink-1
+ *
+ * Sample mroute (anycast VTEP):
+ * (36.0.0.9, 239.1.1.100) Iif: peerlink-3.4094\
+ * Oifs: peerlink-3.4094 uplink-1
+ ***************************************************************************/
+/* Release the upstream entry backing an origination mroute. Clears the
+ * forced "vxlan source" flags and then drops the reference created at
+ * add time (via KAT expiry or, unexpectedly, direct delete).
+ */
+static void pim_vxlan_orig_mr_up_del(struct pim_vxlan_sg *vxlan_sg)
+{
+	struct pim_upstream *up = vxlan_sg->up;
+
+	if (!up)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s orig mroute-up del",
+				vxlan_sg->sg_str);
+
+	vxlan_sg->up = NULL;
+	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG) {
+		/* clear out all the vxlan properties */
+		up->flags &= ~(PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_ORIG |
+			PIM_UPSTREAM_FLAG_MASK_STATIC_IIF |
+			PIM_UPSTREAM_FLAG_MASK_DISABLE_KAT_EXPIRY |
+			PIM_UPSTREAM_FLAG_MASK_FORCE_PIMREG |
+			PIM_UPSTREAM_FLAG_MASK_NO_PIMREG_DATA |
+			PIM_UPSTREAM_FLAG_MASK_ALLOW_IIF_IN_OIL);
+
+		/* We bring things to a grinding halt by force expirying
+		 * the kat. Doing this will also remove the reference we
+		 * created as a "vxlan" source and delete the upstream entry
+		 * if there are no other references.
+		 */
+		if (PIM_UPSTREAM_FLAG_TEST_SRC_STREAM(up->flags)) {
+			THREAD_OFF(up->t_ka_timer);
+			up = pim_upstream_keep_alive_timer_proc(up);
+		} else {
+			/* this is really unexpected as we force vxlan
+			 * origination mroutes active sources but just in
+			 * case
+			 */
+			up = pim_upstream_del(vxlan_sg->pim, up,
+				__PRETTY_FUNCTION__);
+		}
+		/* if there are other references register the source
+		 * for nht
+		 */
+		if (up)
+			pim_rpf_update(vxlan_sg->pim, up, NULL, 1 /* is_new */);
+	}
+}
+
+/* Re-pin the upstream's static IIF to the current vxlan_sg->iif and
+ * refresh the MFC entry for that vif.
+ */
+static void pim_vxlan_orig_mr_up_iif_update(struct pim_vxlan_sg *vxlan_sg)
+{
+	int vif_index;
+
+	/* update MFC with the new IIF */
+	pim_upstream_fill_static_iif(vxlan_sg->up, vxlan_sg->iif);
+	vif_index = pim_if_find_vifindex_by_ifindex(vxlan_sg->pim,
+			vxlan_sg->iif->ifindex);
+	/* NOTE(review): vif_index 0 is skipped here — presumably reserved
+	 * (e.g. pimreg); confirm against vif allocation
+	 */
+	if (vif_index > 0)
+		pim_scan_individual_oil(vxlan_sg->up->channel_oil,
+				vif_index);
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s orig mroute-up updated with iif %s vifi %d",
+			vxlan_sg->sg_str,
+			vxlan_sg->iif?vxlan_sg->iif->name:"-", vif_index);
+
+}
+
+/* For every VxLAN BUM multicast group we setup a SG-up that has the following
+ * "forced properties" -
+ * 1. Directly connected on a DR interface i.e. we must act as an FHR
+ * 2. We prime the pump i.e. no multicast data is needed to register this
+ * source with the FHR. To do that we send periodic null registers if
+ * the SG entry is in a register-join state. We also prevent expiry of
+ * KAT.
+ * 3. As this SG is setup without data there is no need to register encapsulate
+ * data traffic. This encapsulation is explicitly skipped for the following
+ * reasons -
+ * a) Many levels of encapsulation are needed creating MTU disc challenges.
+ * Overlay BUM is encapsulated in a vxlan/UDP/IP header and then
+ * encapsulated again in a pim-register header.
+ * b) On a vxlan-aa setup both switches rx a copy of each BUM packet. if
+ * they both reg encapsulated traffic the RP will accept the duplicates
+ * as there are no RPF checks for this encapsulated data.
+ * a), b) can be workarounded if needed, but there is really no need because
+ * of (2) i.e. the pump is primed without data.
+ */
+/* Create (or claim) the upstream entry driving the vxlan origination
+ * mroute. The flag cocktail forces the entry active without data
+ * traffic; see the block comment above for the rationale.
+ */
+static void pim_vxlan_orig_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
+{
+	struct pim_upstream *up;
+	int flags = 0;
+	struct prefix nht_p;
+
+	if (vxlan_sg->up) {
+		/* nothing to do */
+		return;
+	}
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s orig mroute-up add with iif %s",
+			vxlan_sg->sg_str,
+			vxlan_sg->iif?vxlan_sg->iif->name:"-");
+
+	PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_ORIG(flags);
+	/* pin the IIF to lo or peerlink-subinterface and disable NHT */
+	PIM_UPSTREAM_FLAG_SET_STATIC_IIF(flags);
+	/* Fake traffic by setting SRC_STREAM and starting KAT */
+	/* We intentionally skip updating ref count for SRC_STREAM/FHR.
+	 * Setting SRC_VXLAN should have already created a reference
+	 * preventing the entry from being deleted
+	 */
+	PIM_UPSTREAM_FLAG_SET_FHR(flags);
+	PIM_UPSTREAM_FLAG_SET_SRC_STREAM(flags);
+	/* Force pimreg even if non-DR. This is needed on a MLAG setup for
+	 * VxLAN AA
+	 */
+	PIM_UPSTREAM_FLAG_SET_FORCE_PIMREG(flags);
+	/* prevent KAT expiry. we want the MDT setup even if there is no BUM
+	 * traffic
+	 */
+	PIM_UPSTREAM_FLAG_SET_DISABLE_KAT_EXPIRY(flags);
+	/* SPT for vxlan BUM groups is primed and maintained via NULL
+	 * registers so there is no need to reg-encapsulate
+	 * vxlan-encapsulated overlay data traffic
+	 */
+	PIM_UPSTREAM_FLAG_SET_NO_PIMREG_DATA(flags);
+	/* On a MLAG setup we force a copy to the MLAG peer while also
+	 * accepting traffic from the peer. To do this we set peerlink-rif as
+	 * the IIF and also add it to the OIL
+	 */
+	PIM_UPSTREAM_FLAG_SET_ALLOW_IIF_IN_OIL(flags);
+
+	/* XXX: todo: defer pim_upstream add if pim is not enabled on the iif */
+	up = pim_upstream_find(vxlan_sg->pim, &vxlan_sg->sg);
+	if (up) {
+		/* if the iif is set to something other than the vxlan_sg->iif
+		 * we must dereg the old nexthop and force to new "static"
+		 * iif
+		 */
+		if (!PIM_UPSTREAM_FLAG_TEST_STATIC_IIF(up->flags)) {
+			nht_p.family = AF_INET;
+			nht_p.prefixlen = IPV4_MAX_BITLEN;
+			nht_p.u.prefix4 = up->upstream_addr;
+			pim_delete_tracked_nexthop(vxlan_sg->pim,
+				&nht_p, up, NULL);
+		}
+		pim_upstream_ref(up, flags, __PRETTY_FUNCTION__);
+		vxlan_sg->up = up;
+		pim_vxlan_orig_mr_up_iif_update(vxlan_sg);
+	} else {
+		up = pim_upstream_add(vxlan_sg->pim, &vxlan_sg->sg,
+				vxlan_sg->iif, flags,
+				__PRETTY_FUNCTION__, NULL);
+		vxlan_sg->up = up;
+	}
+
+	if (!up) {
+		if (PIM_DEBUG_VXLAN)
+			zlog_debug("vxlan SG %s orig mroute-up add failed",
+					vxlan_sg->sg_str);
+		return;
+	}
+
+	/* KAT keeps the "fake stream" reference alive */
+	pim_upstream_keep_alive_timer_start(up, vxlan_sg->pim->keep_alive_time);
+
+	/* register the source with the RP */
+	if (up->reg_state == PIM_REG_NOINFO) {
+		pim_register_join(up);
+		pim_null_register_send(up);
+	}
+
+	/* update the inherited OIL */
+	pim_upstream_inherited_olist(vxlan_sg->pim, up);
+}
+
+/* Install the MLAG peerlink OIF on the origination mroute (no-op until
+ * both the upstream entry and the oif exist).
+ */
+static void pim_vxlan_orig_mr_oif_add(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (!vxlan_sg->up || !vxlan_sg->orig_oif)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s oif %s add",
+			vxlan_sg->sg_str, vxlan_sg->orig_oif->name);
+
+	vxlan_sg->flags |= PIM_VXLAN_SGF_OIF_INSTALLED;
+	pim_channel_add_oif(vxlan_sg->up->channel_oil,
+		vxlan_sg->orig_oif, PIM_OIF_FLAG_PROTO_VXLAN);
+}
+
+/* Clear orig_oif and, if it had been installed, remove it from the
+ * origination mroute's OIL.
+ */
+static void pim_vxlan_orig_mr_oif_del(struct pim_vxlan_sg *vxlan_sg)
+{
+	struct interface *orig_oif;
+
+	/* capture before clearing; the log and the OIL removal need it */
+	orig_oif = vxlan_sg->orig_oif;
+	vxlan_sg->orig_oif = NULL;
+
+	if (!(vxlan_sg->flags & PIM_VXLAN_SGF_OIF_INSTALLED))
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s oif %s del",
+			vxlan_sg->sg_str, orig_oif->name);
+
+	vxlan_sg->flags &= ~PIM_VXLAN_SGF_OIF_INSTALLED;
+	pim_channel_del_oif(vxlan_sg->up->channel_oil,
+		orig_oif, PIM_OIF_FLAG_PROTO_VXLAN);
+}
+
+/* OIF for origination mroutes: the MLAG peerlink when MLAG is enabled,
+ * none otherwise.
+ */
+static inline struct interface *pim_vxlan_orig_mr_oif_get(
+		struct pim_instance *pim)
+{
+	return (vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) ?
+		pim->vxlan.peerlink_rif : NULL;
+}
+
+/* Single VTEPs: IIF for the vxlan-origination-mroutes is lo or vrf-dev (if
+ * the mroute is in a non-default vrf).
+ * Anycast VTEPs: IIF is the MLAG ISL/peerlink.
+ * Falls back to the default iif if MLAG is enabled but the peerlink is
+ * not (yet) known.
+ */
+static inline struct interface *pim_vxlan_orig_mr_iif_get(
+		struct pim_instance *pim)
+{
+	return ((vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) &&
+			pim->vxlan.peerlink_rif) ?
+		pim->vxlan.peerlink_rif : pim->vxlan.default_iif;
+}
+
+/* Resolve and validate the IIF for an origination mroute; returns true
+ * only if the IIF exists, runs PIM and has a valid mroute vif.
+ * Side effect: stores the resolved IIF in vxlan_sg->iif.
+ */
+static bool pim_vxlan_orig_mr_add_is_ok(struct pim_vxlan_sg *vxlan_sg)
+{
+	struct pim_interface *pim_ifp;
+
+	vxlan_sg->iif = pim_vxlan_orig_mr_iif_get(vxlan_sg->pim);
+	if (!vxlan_sg->iif)
+		return false;
+
+	pim_ifp = (struct pim_interface *)vxlan_sg->iif->info;
+	if (!pim_ifp || (pim_ifp->mroute_vif_index < 0))
+		return false;
+
+	return true;
+}
+
+/* Install the origination mroute: upstream entry first, then the
+ * (optional) MLAG peerlink OIF.
+ */
+static void pim_vxlan_orig_mr_install(struct pim_vxlan_sg *vxlan_sg)
+{
+	pim_vxlan_orig_mr_up_add(vxlan_sg);
+
+	vxlan_sg->orig_oif = pim_vxlan_orig_mr_oif_get(vxlan_sg->pim);
+	pim_vxlan_orig_mr_oif_add(vxlan_sg);
+}
+
+/* Add an origination mroute if a usable IIF is available; otherwise the
+ * add is implicitly deferred until an iif-update event.
+ */
+static void pim_vxlan_orig_mr_add(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (!pim_vxlan_orig_mr_add_is_ok(vxlan_sg))
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s orig-mr add", vxlan_sg->sg_str);
+
+	pim_vxlan_orig_mr_install(vxlan_sg);
+}
+
+/* Tear down an origination mroute: OIF first, then the upstream ref. */
+static void pim_vxlan_orig_mr_del(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s orig-mr del", vxlan_sg->sg_str);
+
+	pim_vxlan_orig_mr_oif_del(vxlan_sg);
+	pim_vxlan_orig_mr_up_del(vxlan_sg);
+}
+
+/* hash_iterate callback: re-evaluate an origination mroute after the
+ * candidate IIF (lo/vrf-dev or peerlink) changed; installs, re-pins or
+ * removes the mroute as appropriate.
+ */
+static void pim_vxlan_orig_mr_iif_update(struct hash_backet *backet, void *arg)
+{
+	struct interface *ifp = (struct interface *)arg;
+	struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+	struct interface *old_iif = vxlan_sg->iif;
+
+	if (!pim_vxlan_is_orig_mroute(vxlan_sg))
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s iif changed from %s to %s",
+				vxlan_sg->sg_str,
+				old_iif ? old_iif->name : "-",
+				ifp ? ifp->name : "-");
+
+	/* add_is_ok refreshes vxlan_sg->iif as a side effect */
+	if (pim_vxlan_orig_mr_add_is_ok(vxlan_sg)) {
+		if (vxlan_sg->up) {
+			/* upstream exists but iif changed */
+			pim_vxlan_orig_mr_up_iif_update(vxlan_sg);
+		} else {
+			/* install mroute */
+			pim_vxlan_orig_mr_install(vxlan_sg);
+		}
+	} else {
+		pim_vxlan_orig_mr_del(vxlan_sg);
+	}
+}
+
+/**************************** vxlan termination mroutes ***********************
+ * For every bum-mcast-grp registered by evpn a *G termination
+ * mroute is setup by pimd. The purpose of this mroute is to pull down vxlan
+ * packets with the bum-mcast-grp dip from the underlay and terminate the
+ * tunnel. This is done by including the vxlan termination device (ipmr-lo) in
+ * its OIL. The vxlan de-capsulated packets are subject to subsequent overlay
+ * bridging.
+ *
+ * Sample mroute:
+ * (0.0.0.0, 239.1.1.100) Iif: uplink-1 Oifs: ipmr-lo, uplink-1
+ *****************************************************************************/
+/* Return the pim_interface of the vxlan termination device, or NULL if
+ * no termination device has been registered.
+ */
+struct pim_interface *pim_vxlan_get_term_ifp(struct pim_instance *pim)
+{
+	return pim->vxlan.term_if ?
+		(struct pim_interface *)pim->vxlan.term_if->info : NULL;
+}
+
+/* Add the termination device to the *,G mroute via local membership;
+ * the OIF_INSTALLED flag tracks success.
+ */
+static void pim_vxlan_term_mr_oif_add(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (vxlan_sg->flags & PIM_VXLAN_SGF_OIF_INSTALLED)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s term-oif %s add",
+			vxlan_sg->sg_str, vxlan_sg->term_oif->name);
+
+	if (pim_ifchannel_local_membership_add(vxlan_sg->term_oif,
+				&vxlan_sg->sg)) {
+		vxlan_sg->flags |= PIM_VXLAN_SGF_OIF_INSTALLED;
+	} else {
+		zlog_warn("vxlan SG %s term-oif %s add failed",
+			vxlan_sg->sg_str, vxlan_sg->term_oif->name);
+	}
+}
+
+/* Withdraw the termination device's local membership if it had been
+ * installed.
+ */
+static void pim_vxlan_term_mr_oif_del(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (!(vxlan_sg->flags & PIM_VXLAN_SGF_OIF_INSTALLED))
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s oif %s del",
+			vxlan_sg->sg_str, vxlan_sg->term_oif->name);
+
+	vxlan_sg->flags &= ~PIM_VXLAN_SGF_OIF_INSTALLED;
+	pim_ifchannel_local_membership_del(vxlan_sg->term_oif, &vxlan_sg->sg);
+}
+
+/* Create the upstream entry for a termination mroute; flagged for MLAG
+ * DF election and as a vxlan termination source.
+ */
+static void pim_vxlan_term_mr_up_add(struct pim_vxlan_sg *vxlan_sg)
+{
+	struct pim_upstream *up;
+	int flags = 0;
+
+	if (vxlan_sg->up) {
+		/* nothing to do */
+		return;
+	}
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s term mroute-up add",
+			vxlan_sg->sg_str);
+
+	PIM_UPSTREAM_FLAG_SET_SRC_VXLAN_TERM(flags);
+	/* enable MLAG designated-forwarder election on termination mroutes */
+	PIM_UPSTREAM_FLAG_SET_MLAG_VXLAN(flags);
+
+	up = pim_upstream_add(vxlan_sg->pim, &vxlan_sg->sg,
+			NULL /* iif */, flags,
+			__PRETTY_FUNCTION__, NULL);
+	vxlan_sg->up = up;
+
+	if (!up) {
+		zlog_warn("vxlan SG %s term mroute-up add failed",
+			vxlan_sg->sg_str);
+	}
+}
+
+/* Release the upstream entry backing a termination mroute, clearing the
+ * vxlan-termination flags before the delete.
+ */
+static void pim_vxlan_term_mr_up_del(struct pim_vxlan_sg *vxlan_sg)
+{
+	struct pim_upstream *up = vxlan_sg->up;
+
+	if (!up)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s term mroute-up del",
+			vxlan_sg->sg_str);
+	vxlan_sg->up = NULL;
+	if (up->flags & PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM) {
+		/* clear out all the vxlan related flags */
+		up->flags &= ~(PIM_UPSTREAM_FLAG_MASK_SRC_VXLAN_TERM |
+			PIM_UPSTREAM_FLAG_MASK_MLAG_VXLAN);
+
+		pim_upstream_del(vxlan_sg->pim, up,
+			__PRETTY_FUNCTION__);
+	}
+}
+
+/* Add a termination mroute; deferred (silently) until a termination
+ * device is available.
+ */
+static void pim_vxlan_term_mr_add(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s term mroute add", vxlan_sg->sg_str);
+
+	vxlan_sg->term_oif = vxlan_sg->pim->vxlan.term_if;
+	if (!vxlan_sg->term_oif)
+		/* defer termination mroute till we have a termination device */
+		return;
+
+	pim_vxlan_term_mr_up_add(vxlan_sg);
+	/* set up local membership for the term-oif */
+	pim_vxlan_term_mr_oif_add(vxlan_sg);
+}
+
+/* Tear down a termination mroute: local membership first, then the
+ * upstream reference.
+ */
+static void pim_vxlan_term_mr_del(struct pim_vxlan_sg *vxlan_sg)
+{
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s term mroute del", vxlan_sg->sg_str);
+
+	/* remove local membership associated with the term oif */
+	pim_vxlan_term_mr_oif_del(vxlan_sg);
+	/* remove references to the upstream entry */
+	pim_vxlan_term_mr_up_del(vxlan_sg);
+}
+
+/************************** vxlan SG cache management ************************/
+/* Hash key for the per-instance vxlan SG cache: jhash of (S, G). */
+static unsigned int pim_vxlan_sg_hash_key_make(void *p)
+{
+	struct pim_vxlan_sg *vxlan_sg = p;
+
+	return (jhash_2words(vxlan_sg->sg.src.s_addr,
+				vxlan_sg->sg.grp.s_addr, 0));
+}
+
+/* Hash equality: two entries match iff source and group both match. */
+static bool pim_vxlan_sg_hash_eq(const void *p1, const void *p2)
+{
+	const struct pim_vxlan_sg *sg1 = p1;
+	const struct pim_vxlan_sg *sg2 = p2;
+
+	return ((sg1->sg.src.s_addr == sg2->sg.src.s_addr)
+			&& (sg1->sg.grp.s_addr == sg2->sg.grp.s_addr));
+}
+
+/* Allocate a vxlan SG entry and insert it into the instance's SG hash.
+ * Caller guarantees the SG is not already present.
+ */
+static struct pim_vxlan_sg *pim_vxlan_sg_new(struct pim_instance *pim,
+		struct prefix_sg *sg)
+{
+	struct pim_vxlan_sg *vxlan_sg;
+
+	vxlan_sg = XCALLOC(MTYPE_PIM_VXLAN_SG, sizeof(*vxlan_sg));
+
+	vxlan_sg->pim = pim;
+	vxlan_sg->sg = *sg;
+	pim_str_sg_set(sg, vxlan_sg->sg_str);
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s alloc", vxlan_sg->sg_str);
+
+	/* hash_alloc_intern returns the entry itself on insert */
+	vxlan_sg = hash_get(pim->vxlan.sg_hash, vxlan_sg, hash_alloc_intern);
+
+	return vxlan_sg;
+}
+
+/* Look up a vxlan SG entry; returns NULL if not cached. Only the sg
+ * member of the lookup key is used by hash key/eq.
+ */
+struct pim_vxlan_sg *pim_vxlan_sg_find(struct pim_instance *pim,
+		struct prefix_sg *sg)
+{
+	struct pim_vxlan_sg lookup;
+
+	lookup.sg = *sg;
+	return hash_lookup(pim->vxlan.sg_hash, &lookup);
+}
+
+/* Idempotent add of a vxlan SG: creates the cache entry and installs
+ * the matching origination (S,G) or termination (*,G) mroute.
+ */
+struct pim_vxlan_sg *pim_vxlan_sg_add(struct pim_instance *pim,
+		struct prefix_sg *sg)
+{
+	struct pim_vxlan_sg *vxlan_sg;
+
+	vxlan_sg = pim_vxlan_sg_find(pim, sg);
+	if (vxlan_sg)
+		return vxlan_sg;
+
+	vxlan_sg = pim_vxlan_sg_new(pim, sg);
+
+	if (pim_vxlan_is_orig_mroute(vxlan_sg))
+		pim_vxlan_orig_mr_add(vxlan_sg);
+	else
+		pim_vxlan_term_mr_add(vxlan_sg);
+
+	return vxlan_sg;
+}
+
+/* Remove a vxlan SG: marks DEL_IN_PROG (so no re-queue races), dequeues
+ * any pending work, tears down the mroute and frees the entry.
+ */
+void pim_vxlan_sg_del(struct pim_instance *pim, struct prefix_sg *sg)
+{
+	struct pim_vxlan_sg *vxlan_sg;
+
+	vxlan_sg = pim_vxlan_sg_find(pim, sg);
+	if (!vxlan_sg)
+		return;
+
+	vxlan_sg->flags |= PIM_VXLAN_SGF_DEL_IN_PROG;
+
+	pim_vxlan_del_work(vxlan_sg);
+
+	if (pim_vxlan_is_orig_mroute(vxlan_sg))
+		pim_vxlan_orig_mr_del(vxlan_sg);
+	else
+		pim_vxlan_term_mr_del(vxlan_sg);
+
+	hash_release(vxlan_sg->pim->vxlan.sg_hash, vxlan_sg);
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s free", vxlan_sg->sg_str);
+
+	XFREE(MTYPE_PIM_VXLAN_SG, vxlan_sg);
+}
+
+/******************************* MLAG handling *******************************/
+/* The peerlink sub-interface is added as an OIF to the origination-mroute.
+ * This is done to send a copy of the multicast-vxlan encapsulated traffic
+ * to the MLAG peer which may mroute it over the underlay if there are any
+ * interested receivers.
+ */
+/* hash_iterate callback: swap the peerlink OIF on an origination mroute
+ * when the MLAG peerlink changed.
+ */
+static void pim_vxlan_sg_peerlink_update(struct hash_backet *backet, void *arg)
+{
+	struct interface *new_oif = (struct interface *)arg;
+	struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+
+	if (!pim_vxlan_is_orig_mroute(vxlan_sg))
+		return;
+
+	if (vxlan_sg->orig_oif == new_oif)
+		return;
+
+	pim_vxlan_orig_mr_oif_del(vxlan_sg);
+
+	vxlan_sg->orig_oif = new_oif;
+	pim_vxlan_orig_mr_oif_add(vxlan_sg);
+}
+
+/* In the case of anycast VTEPs the VTEP-PIP must be used as the
+ * register source.
+ */
+/* In the case of anycast VTEPs the VTEP-PIP must be used as the
+ * register source. Returns false to suppress the register when MLAG is
+ * enabled but no PIP is known yet; *src_p is only written when MLAG is
+ * enabled and an address is available.
+ */
+bool pim_vxlan_get_register_src(struct pim_instance *pim,
+		struct pim_upstream *up, struct in_addr *src_p)
+{
+	if (!(vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED))
+		return true;
+
+	/* if address is not available suppress the pim-register */
+	if (vxlan_mlag.reg_addr.s_addr == INADDR_ANY)
+		return false;
+
+	*src_p = vxlan_mlag.reg_addr;
+	return true;
+}
+
+/* Apply an MLAG state update (from zebra/clag): record the new config,
+ * re-resolve the peerlink RIF and, if the effective origination OIF
+ * changed, walk all SG entries to swap it.
+ */
+void pim_vxlan_mlag_update(bool enable, bool peer_state, uint32_t role,
+		struct interface *peerlink_rif,
+		struct in_addr *reg_addr)
+{
+	struct pim_instance *pim;
+	struct interface *old_oif;
+	struct interface *new_oif;
+	char addr_buf[INET_ADDRSTRLEN];
+	struct pim_interface *pim_ifp = NULL;
+
+	if (PIM_DEBUG_VXLAN) {
+		inet_ntop(AF_INET, reg_addr,
+				addr_buf, INET_ADDRSTRLEN);
+		zlog_debug("vxlan MLAG update %s state %s role %d rif %s addr %s",
+				enable ? "enable" : "disable",
+				peer_state ? "up" : "down",
+				role,
+				peerlink_rif ? peerlink_rif->name : "-",
+				addr_buf);
+	}
+
+	/* XXX: for now vxlan termination is only possible in the default VRF
+	 * when that changes this will need to change to iterate all VRFs
+	 */
+	pim = pim_get_pim_instance(VRF_DEFAULT);
+
+	/* snapshot the effective OIF before mutating the MLAG state */
+	old_oif = pim_vxlan_orig_mr_oif_get(pim);
+
+	if (enable)
+		vxlan_mlag.flags |= PIM_VXLAN_MLAGF_ENABLED;
+	else
+		vxlan_mlag.flags &= ~PIM_VXLAN_MLAGF_ENABLED;
+
+	if (vxlan_mlag.peerlink_rif != peerlink_rif)
+		vxlan_mlag.peerlink_rif = peerlink_rif;
+
+	vxlan_mlag.reg_addr = *reg_addr;
+	vxlan_mlag.peer_state = peer_state;
+	vxlan_mlag.role = role;
+
+	/* process changes */
+	if (vxlan_mlag.peerlink_rif)
+		pim_ifp = (struct pim_interface *)vxlan_mlag.peerlink_rif->info;
+	/* the peerlink is only usable once it has a valid mroute vif */
+	if ((vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) &&
+			pim_ifp && (pim_ifp->mroute_vif_index > 0))
+		pim_vxlan_set_peerlink_rif(pim, peerlink_rif);
+	else
+		pim_vxlan_set_peerlink_rif(pim, NULL);
+
+	new_oif = pim_vxlan_orig_mr_oif_get(pim);
+	if (old_oif != new_oif)
+		hash_iterate(pim->vxlan.sg_hash, pim_vxlan_sg_peerlink_update,
+				new_oif);
+}
+
+/****************************** misc callbacks *******************************/
+/* Emit the running-config line for the MLAG vxlan state (only when MLAG
+ * is enabled and a peerlink is known); bumps *writes on output.
+ */
+void pim_vxlan_config_write(struct vty *vty, char *spaces, int *writes)
+{
+	char addr_buf[INET_ADDRSTRLEN];
+
+	if ((vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED) &&
+			vxlan_mlag.peerlink_rif) {
+
+		inet_ntop(AF_INET, &vxlan_mlag.reg_addr,
+				addr_buf, sizeof(addr_buf));
+		vty_out(vty,
+			"%sip pim mlag %s role %s state %s addr %s\n",
+			spaces,
+			vxlan_mlag.peerlink_rif->name,
+			(vxlan_mlag.role == PIM_VXLAN_MLAG_ROLE_PRIMARY) ?
+				"primary":"secondary",
+			vxlan_mlag.peer_state ? "up" : "down",
+			addr_buf);
+		*writes += 1;
+	}
+}
+
+/* Record the default origination IIF (lo/vrf-dev); if the effective
+ * orig IIF changed as a result, re-evaluate every SG entry.
+ */
+static void pim_vxlan_set_default_iif(struct pim_instance *pim,
+		struct interface *ifp)
+{
+	struct interface *old_iif;
+
+	if (pim->vxlan.default_iif == ifp)
+		return;
+
+	old_iif = pim->vxlan.default_iif;
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("%s: vxlan default iif changed from %s to %s",
+				__PRETTY_FUNCTION__,
+				old_iif ? old_iif->name : "-",
+				ifp ? ifp->name : "-");
+
+	/* compare the *effective* IIF before and after the change; the
+	 * peerlink may mask the default iif entirely
+	 */
+	old_iif = pim_vxlan_orig_mr_iif_get(pim);
+	pim->vxlan.default_iif = ifp;
+	ifp = pim_vxlan_orig_mr_iif_get(pim);
+	if (old_iif == ifp)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("%s: vxlan orig iif changed from %s to %s",
+			__PRETTY_FUNCTION__, old_iif ? old_iif->name : "-",
+			ifp ? ifp->name : "-");
+
+	/* add/del upstream entries for the existing vxlan SG when the
+	 * interface becomes available
+	 */
+	if (pim->vxlan.sg_hash)
+		hash_iterate(pim->vxlan.sg_hash,
+				pim_vxlan_orig_mr_iif_update, ifp);
+}
+
+/* Record the MLAG peerlink RIF; if the effective orig IIF changed as a
+ * result, re-evaluate every SG entry (mirror of set_default_iif).
+ */
+static void pim_vxlan_set_peerlink_rif(struct pim_instance *pim,
+		struct interface *ifp)
+{
+	struct interface *old_iif;
+
+	if (pim->vxlan.peerlink_rif == ifp)
+		return;
+
+	old_iif = pim->vxlan.peerlink_rif;
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("%s: vxlan peerlink_rif changed from %s to %s",
+			__PRETTY_FUNCTION__, old_iif ? old_iif->name : "-",
+			ifp ? ifp->name : "-");
+
+	/* compare the *effective* IIF before and after the change */
+	old_iif = pim_vxlan_orig_mr_iif_get(pim);
+	pim->vxlan.peerlink_rif = ifp;
+	ifp = pim_vxlan_orig_mr_iif_get(pim);
+	if (old_iif == ifp)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("%s: vxlan orig iif changed from %s to %s",
+			__PRETTY_FUNCTION__, old_iif ? old_iif->name : "-",
+			ifp ? ifp->name : "-");
+
+	/* add/del upstream entries for the existing vxlan SG when the
+	 * interface becomes available
+	 */
+	if (pim->vxlan.sg_hash)
+		hash_iterate(pim->vxlan.sg_hash,
+				pim_vxlan_orig_mr_iif_update, ifp);
+}
+
+/* A mroute vif became usable: if it is lo/vrf-dev or the configured
+ * peerlink, wire it up as a vxlan IIF candidate (default VRF only).
+ */
+void pim_vxlan_add_vif(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct pim_instance *pim = pim_ifp->pim;
+
+	if (pim->vrf_id != VRF_DEFAULT)
+		return;
+
+	if (if_is_loopback_or_vrf(ifp))
+		pim_vxlan_set_default_iif(pim, ifp);
+
+	if (vxlan_mlag.flags & PIM_VXLAN_MLAGF_ENABLED &&
+			(ifp == vxlan_mlag.peerlink_rif))
+		pim_vxlan_set_peerlink_rif(pim, ifp);
+}
+
+/* A mroute vif went away: drop it from the vxlan IIF candidates if it
+ * was in use (default VRF only).
+ */
+void pim_vxlan_del_vif(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct pim_instance *pim = pim_ifp->pim;
+
+	if (pim->vrf_id != VRF_DEFAULT)
+		return;
+
+	if (pim->vxlan.default_iif == ifp)
+		pim_vxlan_set_default_iif(pim, NULL);
+
+	if (pim->vxlan.peerlink_rif == ifp)
+		pim_vxlan_set_peerlink_rif(pim, NULL);
+}
+
+/* hash_iterate callback: rebuild a termination mroute against the new
+ * termination device (arg may be NULL when the device is removed).
+ */
+static void pim_vxlan_term_mr_oif_update(struct hash_backet *backet, void *arg)
+{
+	struct interface *ifp = (struct interface *)arg;
+	struct pim_vxlan_sg *vxlan_sg = (struct pim_vxlan_sg *)backet->data;
+
+	if (pim_vxlan_is_orig_mroute(vxlan_sg))
+		return;
+
+	if (vxlan_sg->term_oif == ifp)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan SG %s term oif changed from %s to %s",
+				vxlan_sg->sg_str,
+				vxlan_sg->term_oif ? vxlan_sg->term_oif->name : "-",
+				ifp ? ifp->name : "-");
+
+	/* full del/add cycle; pim_vxlan_term_mr_add re-reads term_if */
+	pim_vxlan_term_mr_del(vxlan_sg);
+	vxlan_sg->term_oif = ifp;
+	pim_vxlan_term_mr_add(vxlan_sg);
+}
+
+/* Register the vxlan termination device (e.g. ipmr-lo) with pimd,
+ * enable PIM on it, and re-evaluate the term-oif of all existing vxlan
+ * SG entries.
+ */
+void pim_vxlan_add_term_dev(struct pim_instance *pim,
+		struct interface *ifp)
+{
+	struct pim_interface *pim_ifp;
+
+	if (pim->vxlan.term_if == ifp)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan term oif changed from %s to %s",
+			pim->vxlan.term_if ? pim->vxlan.term_if->name : "-",
+			ifp->name);
+
+	/* enable pim on the term ifp */
+	pim_ifp = (struct pim_interface *)ifp->info;
+	if (pim_ifp) {
+		PIM_IF_DO_PIM(pim_ifp->options);
+	} else {
+		pim_ifp = pim_if_new(ifp, false /*igmp*/, true /*pim*/,
+				false /*pimreg*/, true /*vxlan_term*/);
+		/* ensure that pimreg exists before using the newly created
+		 * vxlan termination device
+		 */
+		pim_if_create_pimreg(pim);
+	}
+
+	pim->vxlan.term_if = ifp;
+
+	/* iterate the same hash handle that the guard checks
+	 * (pim_ifp->pim is the same instance, but be consistent)
+	 */
+	if (pim->vxlan.sg_hash)
+		hash_iterate(pim->vxlan.sg_hash,
+				pim_vxlan_term_mr_oif_update, ifp);
+}
+
+/* Clear the vxlan termination device: tear down the term-oif of all SG
+ * entries and disable PIM on the device (deleting it if IGMP is not
+ * also enabled on it).
+ */
+void pim_vxlan_del_term_dev(struct pim_instance *pim)
+{
+	struct interface *ifp = pim->vxlan.term_if;
+	struct pim_interface *pim_ifp;
+
+	/* guard: ifp->name below would otherwise deref NULL when no
+	 * termination device is currently set
+	 */
+	if (!ifp)
+		return;
+
+	if (PIM_DEBUG_VXLAN)
+		zlog_debug("vxlan term oif changed from %s to -", ifp->name);
+
+	pim->vxlan.term_if = NULL;
+
+	if (pim->vxlan.sg_hash)
+		hash_iterate(pim->vxlan.sg_hash,
+				pim_vxlan_term_mr_oif_update, NULL);
+
+	pim_ifp = (struct pim_interface *)ifp->info;
+	if (pim_ifp) {
+		PIM_IF_DONT_PIM(pim_ifp->options);
+		if (!PIM_IF_TEST_IGMP(pim_ifp->options))
+			pim_if_delete(ifp);
+	}
+}
+
+/* Per-instance vxlan init: create the SG hash used to track vxlan
+ * origination/termination mroutes for this PIM instance.
+ */
+void pim_vxlan_init(struct pim_instance *pim)
+{
+	char hash_name[64];
+
+	snprintf(hash_name, sizeof(hash_name),
+		"PIM %s vxlan SG hash", pim->vrf->name);
+	pim->vxlan.sg_hash = hash_create(pim_vxlan_sg_hash_key_make,
+		pim_vxlan_sg_hash_eq, hash_name);
+}
+
+/* Per-instance vxlan cleanup: flush and release the SG hash. */
+void pim_vxlan_exit(struct pim_instance *pim)
+{
+	if (pim->vxlan.sg_hash) {
+		hash_clean(pim->vxlan.sg_hash, NULL);
+		hash_free(pim->vxlan.sg_hash);
+		pim->vxlan.sg_hash = NULL;
+	}
+}
--- /dev/null
+/* PIM support for VxLAN BUM flooding
+ *
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ *
+ * This file is part of FRR.
+ *
+ * FRR is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * FRR is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef PIM_VXLAN_H
+#define PIM_VXLAN_H
+
+/* global timer used for miscellaneous staggered processing */
+#define PIM_VXLAN_WORK_TIME 1
+/* number of SG entries processed at one shot */
+#define PIM_VXLAN_WORK_MAX 500
+/* frequency of periodic NULL registers */
+#define PIM_VXLAN_NULL_REG_INTERVAL 60 /* seconds */
+
+#define vxlan_mlag (vxlan_info.mlag)
+
+enum pim_vxlan_sg_flags {
+ PIM_VXLAN_SGF_NONE = 0,
+ PIM_VXLAN_SGF_DEL_IN_PROG = (1 << 0),
+ PIM_VXLAN_SGF_OIF_INSTALLED = (1 << 1)
+};
+
+struct pim_vxlan_sg {
+ struct pim_instance *pim;
+
+ /* key */
+ struct prefix_sg sg;
+ char sg_str[PIM_SG_LEN];
+
+ enum pim_vxlan_sg_flags flags;
+ struct pim_upstream *up;
+ struct listnode *work_node; /* to pim_vxlan.work_list */
+
+	/* termination info (only applicable to termination (*, G) mroutes)
+	 * term_oif - termination device (ipmr-lo) is added to the OIL
+	 * as local/IGMP membership to allow termination of vxlan traffic
+	 */
+ struct interface *term_oif;
+
+	/* origination info
+	 * iif - lo/vrf or peerlink (on MLAG setups)
+	 * orig_oif - added to the OIL to send encapsulated BUM traffic to
+	 * the MLAG peer switch
+	 */
+ struct interface *iif;
+ /* on a MLAG setup the peerlink is added as a static OIF */
+ struct interface *orig_oif;
+};
+
+enum pim_vxlan_mlag_flags {
+ PIM_VXLAN_MLAGF_NONE = 0,
+ PIM_VXLAN_MLAGF_ENABLED = (1 << 0)
+};
+
+enum pim_vxlan_mlag_role {
+ PIM_VXLAN_MLAG_ROLE_SECONDARY = 0,
+ PIM_VXLAN_MLAG_ROLE_PRIMARY
+};
+
+struct pim_vxlan_mlag {
+ enum pim_vxlan_mlag_flags flags;
+ enum pim_vxlan_mlag_role role;
+ bool peer_state;
+ /* routed interface setup on top of MLAG peerlink */
+ struct interface *peerlink_rif;
+ struct in_addr reg_addr;
+};
+
+enum pim_vxlan_flags {
+ PIM_VXLANF_NONE = 0,
+ PIM_VXLANF_WORK_INITED = (1 << 0)
+};
+
+struct pim_vxlan {
+ enum pim_vxlan_flags flags;
+
+ struct thread *work_timer;
+ struct list *work_list;
+ struct listnode *next_work;
+ int max_work_cnt;
+
+ struct pim_vxlan_mlag mlag;
+};
+
+/* zebra adds-
+ * 1. one (S, G) entry where S=local-VTEP-IP and G==BUM-mcast-grp for
+ * each BUM MDT. This is the origination entry.
+ * 2. and one (*, G) entry each MDT. This is the termination place holder.
+ *
+ * Note: This doesn't mean that only (*, G) mroutes are used for tunnel
+ * termination. (S, G) mroutes with ipmr-lo in the OIL can also be
+ * used for tunnel termination if SPT switchover happens; however such
+ * SG entries are created by traffic and will NOT be a part of the vxlan SG
+ * database.
+ */
+/* origination entries are keyed by a non-zero source i.e. (S, G) */
+static inline bool pim_vxlan_is_orig_mroute(struct pim_vxlan_sg *vxlan_sg)
+{
+	return (vxlan_sg->sg.src.s_addr != 0);
+}
+
+extern struct pim_vxlan *pim_vxlan_p;
+extern struct pim_vxlan_sg *pim_vxlan_sg_find(struct pim_instance *pim,
+ struct prefix_sg *sg);
+extern struct pim_vxlan_sg *pim_vxlan_sg_add(struct pim_instance *pim,
+ struct prefix_sg *sg);
+extern void pim_vxlan_sg_del(struct pim_instance *pim, struct prefix_sg *sg);
+extern void pim_vxlan_update_sg_reg_state(struct pim_instance *pim,
+ struct pim_upstream *up, bool reg_join);
+extern struct pim_interface *pim_vxlan_get_term_ifp(struct pim_instance *pim);
+extern void pim_vxlan_add_vif(struct interface *ifp);
+extern void pim_vxlan_del_vif(struct interface *ifp);
+extern void pim_vxlan_add_term_dev(struct pim_instance *pim,
+ struct interface *ifp);
+extern void pim_vxlan_del_term_dev(struct pim_instance *pim);
+extern bool pim_vxlan_get_register_src(struct pim_instance *pim,
+ struct pim_upstream *up, struct in_addr *src_p);
+extern void pim_vxlan_mlag_update(bool enable, bool peer_state, uint32_t role,
+ struct interface *peerlink_rif,
+ struct in_addr *reg_addr);
+extern void pim_vxlan_config_write(struct vty *vty, char *spaces, int *writes);
+
+#endif /* PIM_VXLAN_H */
--- /dev/null
+/* PIM support for VxLAN BUM flooding
+ *
+ * Copyright (C) 2019 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef PIM_VXLAN_INSTANCE_H
+#define PIM_VXLAN_INSTANCE_H
+
+/* pim termination device is expected to include the substring ipmr-lo */
+#define PIM_VXLAN_TERM_DEV_NAME "ipmr-lo"
+
+struct pim_vxlan_instance {
+ struct hash *sg_hash;
+
+ /* this is lo for default instance and vrf-dev for non-default
+ * instances
+ */
+ struct interface *default_iif;
+
+	/* In a MLAG/VxLAN-AA setup the peerlink sub-interface (ISL-rif) is
+	 * used as the IIF in some scenarios
+	 */
+ struct interface *peerlink_rif;
+
+ /* device used by the dataplane to terminate multicast encapsulated
+ * vxlan traffic
+ */
+ struct interface *term_if;
+};
+
+extern void pim_vxlan_init(struct pim_instance *pim);
+extern void pim_vxlan_exit(struct pim_instance *pim);
+
+#endif /* PIM_VXLAN_INSTANCE_H */
#include "pim_jp_agg.h"
#include "pim_nht.h"
#include "pim_ssm.h"
+#include "pim_vxlan.h"
#undef PIM_DEBUG_IFADDR_DUMP
#define PIM_DEBUG_IFADDR_DUMP
struct pim_interface *pim_ifp;
if (!ifp->info) {
- pim_ifp = pim_if_new(ifp, false, false, false);
+ pim_ifp = pim_if_new(ifp, false, false, false,
+ false /*vxlan_term*/);
ifp->info = pim_ifp;
}
pim_sock_add(ifp);
}
+ if (!strncmp(ifp->name, PIM_VXLAN_TERM_DEV_NAME,
+ sizeof(PIM_VXLAN_TERM_DEV_NAME)))
+ pim_vxlan_add_term_dev(pim, ifp);
+
return 0;
}
zebra_size_t length, vrf_id_t vrf_id)
{
struct interface *ifp;
+ struct pim_instance *pim;
/*
zebra api adds/dels interfaces using the same call
if_set_index(ifp, IFINDEX_INTERNAL);
+ pim = pim_get_pim_instance(vrf_id);
+ if (pim && pim->vxlan.term_if == ifp)
+ pim_vxlan_del_term_dev(pim);
+
return 0;
}
pim_upstream_update_join_desired(pim, up);
}
+/* Handle ZEBRA_VXLAN_SG_ADD/DEL from zebra: decode the (S, G) from the
+ * stream and update this instance's vxlan SG database.
+ */
+static int pim_zebra_vxlan_sg_proc(int command, struct zclient *zclient,
+				   zebra_size_t length, vrf_id_t vrf_id)
+{
+	struct stream *s;
+	struct pim_instance *pim;
+	struct prefix_sg sg;
+
+	pim = pim_get_pim_instance(vrf_id);
+	if (!pim)
+		return 0;
+
+	s = zclient->ibuf;
+
+	sg.family = AF_INET;
+	/* NOTE(review): prefixlen is used as the address byte count (4 for
+	 * IPv4); a larger value from zebra would overrun s_addr — confirm
+	 * the sender always encodes sizeof(struct in_addr).
+	 */
+	sg.prefixlen = stream_getl(s);
+	stream_get(&sg.src.s_addr, s, sg.prefixlen);
+	stream_get(&sg.grp.s_addr, s, sg.prefixlen);
+
+	if (PIM_DEBUG_ZEBRA) {
+		char sg_str[PIM_SG_LEN];
+
+		pim_str_sg_set(&sg, sg_str);
+		zlog_debug("%u:recv SG %s %s", vrf_id,
+			(command == ZEBRA_VXLAN_SG_ADD)?"add":"del",
+			sg_str);
+	}
+
+	if (command == ZEBRA_VXLAN_SG_ADD)
+		pim_vxlan_sg_add(pim, &sg);
+	else
+		pim_vxlan_sg_del(pim, &sg);
+
+	return 0;
+}
+
void pim_scan_individual_oil(struct channel_oil *c_oil, int in_vif_index)
{
struct in_addr vif_source;
zclient->interface_address_delete = pim_zebra_if_address_del;
zclient->interface_vrf_update = pim_zebra_interface_vrf_update;
zclient->nexthop_update = pim_parse_nexthop_update;
+ zclient->vxlan_sg_add = pim_zebra_vxlan_sg_proc;
+ zclient->vxlan_sg_del = pim_zebra_vxlan_sg_proc;
zclient_init(zclient, ZEBRA_ROUTE_PIM, 0, &pimd_privs);
if (PIM_DEBUG_PIM_TRACE) {
#define PIM_MASK_PIM_NHT_DETAIL (1 << 23)
#define PIM_MASK_PIM_NHT_RP (1 << 24)
#define PIM_MASK_MTRACE (1 << 25)
+#define PIM_MASK_VXLAN (1 << 26)
/* Remember 32 bits!!! */
/* PIM error codes */
#define PIM_DEBUG_PIM_NHT_DETAIL (router->debugs & PIM_MASK_PIM_NHT_DETAIL)
#define PIM_DEBUG_PIM_NHT_RP (router->debugs & PIM_MASK_PIM_NHT_RP)
#define PIM_DEBUG_MTRACE (router->debugs & PIM_MASK_MTRACE)
+#define PIM_DEBUG_VXLAN (router->debugs & PIM_MASK_VXLAN)
#define PIM_DEBUG_EVENTS \
(router->debugs \
#define PIM_DO_DEBUG_PIM_NHT (router->debugs |= PIM_MASK_PIM_NHT)
#define PIM_DO_DEBUG_PIM_NHT_RP (router->debugs |= PIM_MASK_PIM_NHT_RP)
#define PIM_DO_DEBUG_MTRACE (router->debugs |= PIM_MASK_MTRACE)
+#define PIM_DO_DEBUG_VXLAN (router->debugs |= PIM_MASK_VXLAN)
#define PIM_DONT_DEBUG_PIM_EVENTS (router->debugs &= ~PIM_MASK_PIM_EVENTS)
#define PIM_DONT_DEBUG_PIM_PACKETS (router->debugs &= ~PIM_MASK_PIM_PACKETS)
#define PIM_DONT_DEBUG_PIM_NHT (router->debugs &= ~PIM_MASK_PIM_NHT)
#define PIM_DONT_DEBUG_PIM_NHT_RP (router->debugs &= ~PIM_MASK_PIM_NHT_RP)
#define PIM_DONT_DEBUG_MTRACE (router->debugs &= ~PIM_MASK_MTRACE)
+#define PIM_DONT_DEBUG_VXLAN (router->debugs &= ~PIM_MASK_VXLAN)
void pim_router_init(void);
void pim_router_terminate(void);
pimd/pim_vty.c \
pimd/pim_zebra.c \
pimd/pim_zlookup.c \
+ pimd/pim_vxlan.c \
pimd/pimd.c \
# end
pimd/pim_vty.h \
pimd/pim_zebra.h \
pimd/pim_zlookup.h \
+ pimd/pim_vxlan.h \
+ pimd/pim_vxlan_instance.h \
pimd/pimd.h \
pimd/mtracebis_netlink.h \
pimd/mtracebis_routeget.h \
+++ /dev/null
-# New ports collection makefile for: zebra
-# Version required: 2.1.5
-# Date created: 28 Feb 1998
-# Whom: seirios@matrix.iri.co.jp
-#
-
-#DISTNAME= zebra-980224
-DISTNAME= zebra-current
-PKGNAME= zebra
-CATEGORIES= net
-MASTER_SITES= ftp://ftp.zebra.org/pub/zebra/
-
-MAINTAINER= seirios@matrix.iri.co.jp
-
-WRKSRC= ${WRKDIR}/zebra-current
-
-#### Under constructing, We cannot support md5
-NO_CHECKSUM= yes
-
-do-build:
- @(cd ${WRKSRC}; sh ./configure; make)
-
-post-install:
- @if [ ! -f ${PREFIX}/etc/rc.d/zebra.sh ]; then \
- echo "Installing ${PREFIX}/etc/rc.d/zebra.sh startup file."; \
- echo "#!/bin/sh" > ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "# zebra" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "if [ -x /usr/local/sbin/zebra -a ! -f /var/run/zebra.pid -a -f /usr/local/etc/zebra.conf ]; then" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " /usr/local/sbin/zebra -d -f /usr/local/etc/zebra.conf" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " echo -n ' zebra'" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "fi" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "# bgpd" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "if [ -x /usr/local/sbin/bgpd -a ! -f /var/run/bgpd.pid -a -f /usr/local/etc/bgpd.conf ]; then" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " /usr/local/sbin/bgpd -d -f /usr/local/etc/bgpd.conf" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " echo -n ' bgpd'" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "fi" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "# ripd" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "if [ -x /usr/local/sbin/ripd -a ! -f /var/run/ripd.pid -a -f /usr/local/etc/ripd.conf ]; then" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " /usr/local/sbin/ripd -d -f /usr/local/etc/ripd.conf" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " echo -n ' ripd'" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "fi" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "# ripngd" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "if [ -x /usr/local/sbin/ripngd -a ! -f /var/run/ripd.pid -a -f /usr/local/etc/ripd.conf ]; then" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " /usr/local/sbin/ripngd -d -f /usr/local/etc/ripd.conf" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo " echo -n ' ripngd'" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- echo "fi" >> ${PREFIX}/etc/rc.d/zebra.sh; \
- chmod 751 ${PREFIX}/etc/rc.d/zebra.sh; \
- fi
- @echo "If you will access zebra,bgpd,ripd,ripngd with telnet,";
- @echo "then you add some line (written under this line) to /etc/services";
- @echo " zebrasrv 2600/tcp # zebra service";
- @echo " zebra 2601/tcp # zebra vty";
- @echo " ripd 2602/tcp # RIPd vty";
- @echo " ripngd 2603/tcp # RIPngd vty";
- @echo " ospfd 2604/tcp # OSPFd vty";
- @echo " bgpd 2605/tcp # BGPd vty";
- @echo " pimd 2611/tcp # PIMd vty";
-
-.include <bsd.port.mk>
+++ /dev/null
-This directory contain files for making FreeBSD package.
+++ /dev/null
-MD5 (zebra-980224.tar.gz) = c6887645741200c43341156c168c7034
+++ /dev/null
-Zebra Routing protocol daemon
+++ /dev/null
-=============
-WHAT IS ZEBRA
-=============
- Zebra is a free software that manages TCP/IP based routing protocol.
- It takes multi-server and multi-thread approach to resolve the current
-complexity of the Internet.
-
- Currently zebra is still under development, so If you want to use zebra,
-I strongly recommend you to get the latest version of zebra.
- Zebra snapshot is released on every monday.
-
-===================
-SUPPORTED Protocols
-===================
- Zebra supports both IPv4 and IPv6 :-)
- For supporting IPv4 Routing protocols is here
- RIP (both version1 and version2)
- RIPv2 supports both Multicast and Broadcast
- BGP (only support BGP4)
-
- For supporting IPv6 Routing protocols is here
- RIPng
- BGP4+
-
-===================
-Supported plat-home
-===================
- Now zebra is testing on
- o FreeBSD 2.2.8
- -- without IPv6 ;-)
- -- with KAME
- -- with INRIA IPv6 protocol stack.
-
- o GNU/Linux 2.2.2
- o GNU/Linux 2.0.36
-
-===========
-ZEBRA Ports
-===========
- Each daemon has each own terminal interface. Also zebra has communication
-port which provides several services to other daemons. Below is zebra ports
-list.
-
-zebrasrv 2600/tcp # zebra service
-zebra 2601/tcp # zebra vty
-ripd 2602/tcp # RIPd vty
-ripngd 2603/tcp # RIPngd vty
-ospfd 2604/tcp # OSPFd vty
-bgpd 2605/tcp # BGPd vty
-pimd 2611/tcp # PIMd vty
-
-I recommend you to add upper list to /etc/services.
-
-====================
-For More Information
-====================
- Web page is located at:
- http://www.zebra.org/
-
- Alpha version source file can be found at:
- ftp://ftp.zebra.org/pub/zebra/
-
- Mailing List is here
- zebra@zebra.org
- zebra-jp@zebra.org
-
- If you want to join zebra mailing list, mail to
- majordomo@zebra.org
- and you write
- subscribe zebra
- -- if you want to talk with English
- subscribe zebra-jp
- -- if you want to talk with Japanese
- on Mail BODY (Not Subject).
-
-Enjoy.
+++ /dev/null
-sbin/zebra
-sbin/bgpd
-sbin/ripd
-etc/bgpd.conf.sample
-etc/ripd.conf.sample
-etc/zebra.conf.sample
-etc/rc.d/zebra.sh
-info/zebra.info
socket = -1;
rip = rip_create(vrf_name, vrf, socket);
- yang_dnode_set_entry(dnode, rip);
+ nb_running_set_entry(dnode, rip);
break;
}
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_unset_entry(dnode);
rip_clean(rip);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->ecmp = yang_dnode_get_bool(dnode, NULL);
if (!rip->ecmp)
rip_ecmp_disable(rip);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
default_information = yang_dnode_get_bool(dnode, NULL);
memset(&p, 0, sizeof(struct prefix_ipv4));
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->default_metric = yang_dnode_get_uint8(dnode, NULL);
/* rip_update_default_metric (); */
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->distance = yang_dnode_get_uint8(dnode, NULL);
return NB_OK;
apply_mask_ipv4(&prefix);
/* Get RIP distance node. */
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rn = route_node_get(rip->distance_table, (struct prefix *)&prefix);
rn->info = rip_distance_new();
- yang_dnode_set_entry(dnode, rn);
+ nb_running_set_entry(dnode, rn);
return NB_OK;
}
if (event != NB_EV_APPLY)
return NB_OK;
- rn = yang_dnode_get_entry(dnode, true);
+ rn = nb_running_unset_entry(dnode);
rdistance = rn->info;
rip_distance_free(rdistance);
rn->info = NULL;
return NB_OK;
/* Set distance value. */
- rn = yang_dnode_get_entry(dnode, true);
+ rn = nb_running_get_entry(dnode, NULL, true);
distance = yang_dnode_get_uint8(dnode, NULL);
rdistance = rn->info;
rdistance->distance = distance;
acl_name = yang_dnode_get_string(dnode, NULL);
/* Set access-list */
- rn = yang_dnode_get_entry(dnode, true);
+ rn = nb_running_get_entry(dnode, NULL, true);
rdistance = rn->info;
if (rdistance->access_list)
free(rdistance->access_list);
return NB_OK;
/* Reset access-list configuration. */
- rn = yang_dnode_get_entry(dnode, true);
+ rn = nb_running_get_entry(dnode, NULL, true);
rdistance = rn->info;
free(rdistance->access_list);
rdistance->access_list = NULL;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
p.family = AF_INET;
p.prefixlen = IPV4_MAX_BITLEN;
yang_dnode_get_ipv4(&p.prefix, dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
p.family = AF_INET;
p.prefixlen = IPV4_MAX_BITLEN;
yang_dnode_get_ipv4(&p.prefix, dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv4p(&p, dnode, NULL);
apply_mask_ipv4((struct prefix_ipv4 *)&p);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv4p(&p, dnode, NULL);
apply_mask_ipv4((struct prefix_ipv4 *)&p);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return rip_enable_if_add(rip, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return rip_enable_if_delete(rip, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, "./interface");
offset = rip_offset_list_new(rip, ifname);
- yang_dnode_set_entry(dnode, offset);
+ nb_running_set_entry(dnode, offset);
return NB_OK;
}
direct = yang_dnode_get_enum(dnode, "./direction");
- offset = yang_dnode_get_entry(dnode, true);
+ offset = nb_running_unset_entry(dnode);
if (offset->direct[direct].alist_name) {
free(offset->direct[direct].alist_name);
offset->direct[direct].alist_name = NULL;
direct = yang_dnode_get_enum(dnode, "../direction");
alist_name = yang_dnode_get_string(dnode, NULL);
- offset = yang_dnode_get_entry(dnode, true);
+ offset = nb_running_get_entry(dnode, NULL, true);
if (offset->direct[direct].alist_name)
free(offset->direct[direct].alist_name);
offset->direct[direct].alist_name = strdup(alist_name);
direct = yang_dnode_get_enum(dnode, "../direction");
metric = yang_dnode_get_uint8(dnode, NULL);
- offset = yang_dnode_get_entry(dnode, true);
+ offset = nb_running_get_entry(dnode, NULL, true);
offset->direct[direct].metric = metric;
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->passive_default = yang_dnode_get_bool(dnode, NULL);
rip_passive_nondefault_clean(rip);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return rip_passive_nondefault_set(rip, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return rip_passive_nondefault_unset(rip, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return rip_passive_nondefault_unset(rip, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return rip_passive_nondefault_set(rip, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "./protocol");
rip->redist[type].enabled = true;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "./protocol");
rip->redist[type].enabled = false;
struct rip *rip;
int type;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "./protocol");
if (rip->enabled)
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
rmap_name = yang_dnode_get_string(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
free(rip->redist[type].route_map.name);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
metric = yang_dnode_get_uint8(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
rip->redist[type].metric_config = false;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv4p(&p, dnode, NULL);
apply_mask_ipv4(&p);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv4p(&p, dnode, NULL);
apply_mask_ipv4(&p);
{
struct rip *rip;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
/* Reset update timer thread. */
rip_event(rip, RIP_UPDATE_EVENT, 0);
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->garbage_time = yang_dnode_get_uint32(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->timeout_time = yang_dnode_get_uint32(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->update_time = yang_dnode_get_uint32(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->version_recv = yang_dnode_get_enum(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- rip = yang_dnode_get_entry(dnode, true);
+ rip = nb_running_get_entry(dnode, NULL, true);
rip->version_send = yang_dnode_get_enum(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->split_horizon = yang_dnode_get_enum(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->v2_broadcast = yang_dnode_get_bool(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->ri_receive = yang_dnode_get_enum(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->ri_send = yang_dnode_get_enum(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->auth_type = yang_dnode_get_enum(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->md5_auth_len = yang_dnode_get_enum(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->md5_auth_len = yang_get_default_enum(
"%s/authentication-scheme/md5-auth-length", RIP_IFACE);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
XFREE(MTYPE_RIP_INTERFACE_STRING, ri->auth_str);
ri->auth_str = XSTRDUP(MTYPE_RIP_INTERFACE_STRING,
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
XFREE(MTYPE_RIP_INTERFACE_STRING, ri->auth_str);
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
XFREE(MTYPE_RIP_INTERFACE_STRING, ri->key_chain);
ri->key_chain = XSTRDUP(MTYPE_RIP_INTERFACE_STRING,
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
XFREE(MTYPE_RIP_INTERFACE_STRING, ri->key_chain);
void offset_list_del(struct rip_offset_list *offset)
{
listnode_delete(offset->rip->offset_list_master, offset);
+ offset_list_free(offset);
+}
+
+void offset_list_free(struct rip_offset_list *offset)
+{
if (OFFSET_LIST_IN_NAME(offset))
free(OFFSET_LIST_IN_NAME(offset));
if (OFFSET_LIST_OUT_NAME(offset))
rip->passive_nondefault = vector_init(1);
rip->offset_list_master = list_new();
rip->offset_list_master->cmp = (int (*)(void *, void *))offset_list_cmp;
- rip->offset_list_master->del = (void (*)(void *))offset_list_del;
+ rip->offset_list_master->del = (void (*)(void *))offset_list_free;
/* Distribute list install. */
rip->distribute_ctx = distribute_list_ctx_create(vrf);
extern struct rip_offset_list *rip_offset_list_new(struct rip *rip,
const char *ifname);
extern void offset_list_del(struct rip_offset_list *offset);
+extern void offset_list_free(struct rip_offset_list *offset);
extern struct rip_offset_list *rip_offset_list_lookup(struct rip *rip,
const char *ifname);
extern int rip_offset_list_apply_in(struct prefix_ipv4 *, struct interface *,
socket = -1;
ripng = ripng_create(vrf_name, vrf, socket);
- yang_dnode_set_entry(dnode, ripng);
+ nb_running_set_entry(dnode, ripng);
break;
}
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_unset_entry(dnode);
ripng_clean(ripng);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ripng->ecmp = yang_dnode_get_bool(dnode, NULL);
if (!ripng->ecmp)
ripng_ecmp_disable(ripng);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
default_information = yang_dnode_get_bool(dnode, NULL);
str2prefix_ipv6("::/0", &p);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ripng->default_metric = yang_dnode_get_uint8(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv6p(&p, dnode, NULL);
apply_mask_ipv6((struct prefix_ipv6 *)&p);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv6p(&p, dnode, NULL);
apply_mask_ipv6((struct prefix_ipv6 *)&p);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return ripng_enable_if_add(ripng, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return ripng_enable_if_delete(ripng, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, "./interface");
offset = ripng_offset_list_new(ripng, ifname);
- yang_dnode_set_entry(dnode, offset);
+ nb_running_set_entry(dnode, offset);
return NB_OK;
}
direct = yang_dnode_get_enum(dnode, "./direction");
- offset = yang_dnode_get_entry(dnode, true);
+ offset = nb_running_unset_entry(dnode);
if (offset->direct[direct].alist_name) {
free(offset->direct[direct].alist_name);
offset->direct[direct].alist_name = NULL;
direct = yang_dnode_get_enum(dnode, "../direction");
alist_name = yang_dnode_get_string(dnode, NULL);
- offset = yang_dnode_get_entry(dnode, true);
+ offset = nb_running_get_entry(dnode, NULL, true);
if (offset->direct[direct].alist_name)
free(offset->direct[direct].alist_name);
offset->direct[direct].alist_name = strdup(alist_name);
direct = yang_dnode_get_enum(dnode, "../direction");
metric = yang_dnode_get_uint8(dnode, NULL);
- offset = yang_dnode_get_entry(dnode, true);
+ offset = nb_running_get_entry(dnode, NULL, true);
offset->direct[direct].metric = metric;
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return ripng_passive_interface_set(ripng, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ifname = yang_dnode_get_string(dnode, NULL);
return ripng_passive_interface_unset(ripng, ifname);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "./protocol");
ripng->redist[type].enabled = true;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "./protocol");
ripng->redist[type].enabled = false;
struct ripng *ripng;
int type;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "./protocol");
if (ripng->enabled)
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
rmap_name = yang_dnode_get_string(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
free(ripng->redist[type].route_map.name);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
metric = yang_dnode_get_uint8(dnode, NULL);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
type = yang_dnode_get_enum(dnode, "../protocol");
ripng->redist[type].metric_config = false;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv6p(&p, dnode, NULL);
apply_mask_ipv6(&p);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv6p(&p, dnode, NULL);
apply_mask_ipv6(&p);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv6p(&p, dnode, NULL);
apply_mask_ipv6(&p);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
yang_dnode_get_ipv6p(&p, dnode, NULL);
apply_mask_ipv6(&p);
{
struct ripng *ripng;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
/* Reset update timer thread. */
ripng_event(ripng, RIPNG_UPDATE_EVENT, 0);
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ripng->garbage_time = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ripng->timeout_time = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ripng = yang_dnode_get_entry(dnode, true);
+ ripng = nb_running_get_entry(dnode, NULL, true);
ripng->update_time = yang_dnode_get_uint16(dnode, NULL);
return NB_OK;
if (event != NB_EV_APPLY)
return NB_OK;
- ifp = yang_dnode_get_entry(dnode, true);
+ ifp = nb_running_get_entry(dnode, NULL, true);
ri = ifp->info;
ri->split_horizon = yang_dnode_get_enum(dnode, NULL);
void ripng_offset_list_del(struct ripng_offset_list *offset)
{
listnode_delete(offset->ripng->offset_list_master, offset);
+ ripng_offset_list_free(offset);
+}
+
+void ripng_offset_list_free(struct ripng_offset_list *offset)
+{
if (OFFSET_LIST_IN_NAME(offset))
free(OFFSET_LIST_IN_NAME(offset));
if (OFFSET_LIST_OUT_NAME(offset))
ripng->offset_list_master->cmp =
(int (*)(void *, void *))offset_list_cmp;
ripng->offset_list_master->del =
- (void (*)(void *))ripng_offset_list_del;
+ (void (*)(void *))ripng_offset_list_free;
ripng->distribute_ctx = distribute_list_ctx_create(vrf);
distribute_list_add_hook(ripng->distribute_ctx,
ripng_distribute_update);
extern struct ripng_offset_list *ripng_offset_list_new(struct ripng *ripng,
const char *ifname);
extern void ripng_offset_list_del(struct ripng_offset_list *offset);
+extern void ripng_offset_list_free(struct ripng_offset_list *offset);
extern struct ripng_offset_list *ripng_offset_list_lookup(struct ripng *ripng,
const char *ifname);
extern int ripng_offset_list_apply_in(struct ripng *ripng,
from lutil import luCommand
luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
-luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up')
+luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
+luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
luCommand('ce4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','Adjacencies up',180)
luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
--- /dev/null
+router bgp 65000
+ neighbor 192.168.255.2 remote-as 65001
+ address-family ipv4 unicast
+ redistribute connected
--- /dev/null
+!
+interface lo
+ ip address 172.16.255.254/32
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+ip forwarding
+!
--- /dev/null
+router bgp 65001
+ neighbor 192.168.255.1 remote-as 65000
+ address-family ipv4
+ neighbor 192.168.255.1 maximum-prefix 1
--- /dev/null
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+ip forwarding
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# bgp_local_as_private_remove.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2019 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+bgp_maximum_prefix_invalid_update.py:
+Test that an unnecessary UPDATE message like the one below:
+
+[Error] Error parsing NLRI
+%NOTIFICATION: sent to neighbor X.X.X.X 3/10 (UPDATE Message Error/Invalid Network Field) 0 bytes
+
+is not sent when the maximum-prefix count is exceeded.
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+class TemplateTopo(Topo):
+    # Minimal two-router topology: r1 and r2 joined by one switch (s1).
+    def build(self, *_args, **_opts):
+        tgen = get_topogen(self)
+
+        for routern in range(1, 3):
+            tgen.add_router('r{}'.format(routern))
+
+        switch = tgen.add_switch('s1')
+        switch.add_link(tgen.gears['r1'])
+        switch.add_link(tgen.gears['r2'])
+
+def setup_module(mod):
+    # Build and start the topology, load each router's zebra/bgpd
+    # configuration from its per-router subdirectory, then start FRR.
+    tgen = Topogen(TemplateTopo, mod.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+
+    # NOTE(review): dict.iteritems() is Python 2 only, and the enumerate
+    # index 'i' is unused — confirm against the topotest framework's
+    # supported Python version.
+    for i, (rname, router) in enumerate(router_list.iteritems(), 1):
+        router.load_config(
+            TopoRouter.RD_ZEBRA,
+            os.path.join(CWD, '{}/zebra.conf'.format(rname))
+        )
+        router.load_config(
+            TopoRouter.RD_BGP,
+            os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+        )
+
+    tgen.start_router()
+
+def teardown_module(mod):
+    # Stop all routers and tear the topology down after the test run.
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+def test_bgp_maximum_prefix_invalid():
+    # Check that after r2's maximum-prefix limit has tripped (and the
+    # session has been re-established several times), the MAXPFXEXCEED
+    # log message appears WITHOUT an accompanying "Error parsing NLRI"
+    # error — i.e. no malformed/unnecessary UPDATE was received.
+    tgen = get_topogen()
+
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    def _bgp_converge(router):
+        # Poll until the session has been established more than 3 times.
+        # NOTE(review): no timeout/retry bound — this loops forever if the
+        # neighbor never reaches that reconnect count; confirm the test
+        # harness enforces an outer timeout.
+        while True:
+            output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
+            if output['192.168.255.1']['connectionsEstablished'] > 3:
+                return True
+            time.sleep(1)
+
+    def _bgp_parsing_nlri(router):
+        # Returns False only when BOTH the max-prefix-exceeded message and
+        # an NLRI parse error appear in bgpd.log; True otherwise.
+        cmd_max_exceeded = 'grep "%MAXPFXEXCEED: No. of IPv4 Unicast prefix received" bgpd.log'
+        cmdt_error_parsing_nlri = 'grep "Error parsing NLRI" bgpd.log'
+        output_max_exceeded = tgen.gears[router].run(cmd_max_exceeded)
+        output_error_parsing_nlri = tgen.gears[router].run(cmdt_error_parsing_nlri)
+
+        if len(output_max_exceeded) > 0:
+            if len(output_error_parsing_nlri) > 0:
+                return False
+        return True
+
+    if _bgp_converge('r2'):
+        assert _bgp_parsing_nlri('r2') == True
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
lines_to_add_to_del.append((ctx_keys, swpx_interface))
lines_to_add_to_del.append((tmp_ctx_keys, swpx_peergroup))
+ '''
+ Changing the bfd timers on neighbors is allowed without doing
+ a delete/add process. Since doing a "no neighbor blah bfd ..."
+ will cause the peer to bounce unnecessarily, just skip the delete
+ and just do the add.
+ '''
+ re_nbr_bfd_timers = re.search(r'neighbor (\S+) bfd (\S+) (\S+) (\S+)', line)
+
+ if re_nbr_bfd_timers:
+ nbr = re_nbr_bfd_timers.group(1)
+ bfd_nbr = "neighbor %s" % nbr
+
+ for (ctx_keys, add_line) in lines_to_add:
+ re_add_nbr_bfd_timers = re.search(r'neighbor (\S+) bfd (\S+) (\S+) (\S+)', add_line)
+
+ if re_add_nbr_bfd_timers:
+ found_add_bfd_nbr = line_exist(lines_to_add, ctx_keys, bfd_nbr, False)
+
+ if found_add_bfd_nbr:
+ lines_to_del_to_del.append((ctx_keys, line))
+
'''
We changed how we display the neighbor interface command. Older
versions of frr would display the following:
return vtysh_exit_vrf(self, vty, argc, argv);
}
-DEFUNSH(VTYSH_PBRD, vtysh_exit_nexthop_group, vtysh_exit_nexthop_group_cmd,
+DEFUNSH(VTYSH_PBRD | VTYSH_SHARPD, vtysh_exit_nexthop_group, vtysh_exit_nexthop_group_cmd,
"exit", "Exit current mode and down to previous mode\n")
{
return vtysh_exit(vty);
description
"Log changes to the IS-IS adjacencies in this area.";
}
- }
- container mpls-te {
- presence "Present if MPLS-TE is enabled.";
- description
- "Enable MPLS-TE functionality.";
- leaf router-address {
- type inet:ipv4-address;
+ container mpls-te {
+ presence "Present if MPLS-TE is enabled.";
description
- "Stable IP address of the advertising router.";
+ "Enable MPLS-TE functionality.";
+ leaf router-address {
+ type inet:ipv4-address;
+ description
+ "Stable IP address of the advertising router.";
+ }
}
}
}
vxl_info->vtep_ip = vtep_ip_in_msg;
}
+ if (attr[IFLA_VXLAN_GROUP]) {
+ vxl_info->mcast_grp =
+ *(struct in_addr *)RTA_DATA(attr[IFLA_VXLAN_GROUP]);
+ }
+
return 0;
}
}
/* Interface address modification. */
-static int netlink_address(int cmd, int family, struct interface *ifp,
- struct connected *ifc)
+static int netlink_address_ctx(const struct zebra_dplane_ctx *ctx)
{
int bytelen;
- struct prefix *p;
+ const struct prefix *p;
+ int cmd;
+ const char *label;
struct {
struct nlmsghdr n;
char buf[NL_PKT_BUF_SIZE];
} req;
- struct zebra_ns *zns;
-
- if (vrf_is_backend_netns())
- zns = zebra_ns_lookup((ns_id_t)ifp->vrf_id);
- else
- zns = zebra_ns_lookup(NS_DEFAULT);
- p = ifc->address;
- memset(&req, 0, sizeof req - NL_PKT_BUF_SIZE);
+ p = dplane_ctx_get_intf_addr(ctx);
+ memset(&req, 0, sizeof(req) - NL_PKT_BUF_SIZE);
- bytelen = (family == AF_INET ? 4 : 16);
+ bytelen = (p->family == AF_INET ? 4 : 16);
req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
req.n.nlmsg_flags = NLM_F_REQUEST;
- req.n.nlmsg_type = cmd;
- req.n.nlmsg_pid = zns->netlink_cmd.snl.nl_pid;
- req.ifa.ifa_family = family;
+ if (dplane_ctx_get_op(ctx) == DPLANE_OP_ADDR_INSTALL)
+ cmd = RTM_NEWADDR;
+ else
+ cmd = RTM_DELADDR;
+
+ req.n.nlmsg_type = cmd;
+ req.ifa.ifa_family = p->family;
- req.ifa.ifa_index = ifp->ifindex;
+ req.ifa.ifa_index = dplane_ctx_get_ifindex(ctx);
- addattr_l(&req.n, sizeof req, IFA_LOCAL, &p->u.prefix, bytelen);
+ addattr_l(&req.n, sizeof(req), IFA_LOCAL, &p->u.prefix, bytelen);
- if (family == AF_INET) {
- if (CONNECTED_PEER(ifc)) {
- p = ifc->destination;
- addattr_l(&req.n, sizeof req, IFA_ADDRESS, &p->u.prefix,
- bytelen);
- } else if (cmd == RTM_NEWADDR && ifc->destination) {
- p = ifc->destination;
- addattr_l(&req.n, sizeof req, IFA_BROADCAST,
+ if (p->family == AF_INET) {
+ if (dplane_ctx_intf_is_connected(ctx)) {
+ p = dplane_ctx_get_intf_dest(ctx);
+ addattr_l(&req.n, sizeof(req), IFA_ADDRESS,
+ &p->u.prefix, bytelen);
+ } else if (cmd == RTM_NEWADDR &&
+ dplane_ctx_intf_has_dest(ctx)) {
+ p = dplane_ctx_get_intf_dest(ctx);
+ addattr_l(&req.n, sizeof(req), IFA_BROADCAST,
&p->u.prefix, bytelen);
}
}
- /* p is now either ifc->address or ifc->destination */
+ /* p is now either address or destination/bcast addr */
req.ifa.ifa_prefixlen = p->prefixlen;
- if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
+ if (dplane_ctx_intf_is_secondary(ctx))
SET_FLAG(req.ifa.ifa_flags, IFA_F_SECONDARY);
- if (ifc->label)
- addattr_l(&req.n, sizeof req, IFA_LABEL, ifc->label,
- strlen(ifc->label) + 1);
-
- return netlink_talk(netlink_talk_filter, &req.n, &zns->netlink_cmd, zns,
- 0);
-}
-
-int kernel_address_add_ipv4(struct interface *ifp, struct connected *ifc)
-{
- return netlink_address(RTM_NEWADDR, AF_INET, ifp, ifc);
-}
-
-int kernel_address_delete_ipv4(struct interface *ifp, struct connected *ifc)
-{
- return netlink_address(RTM_DELADDR, AF_INET, ifp, ifc);
-}
+ if (dplane_ctx_intf_has_label(ctx)) {
+ label = dplane_ctx_get_intf_label(ctx);
+ addattr_l(&req.n, sizeof(req), IFA_LABEL, label,
+ strlen(label) + 1);
+ }
-int kernel_address_add_ipv6(struct interface *ifp, struct connected *ifc)
-{
- return netlink_address(RTM_NEWADDR, AF_INET6, ifp, ifc);
+ return netlink_talk_info(netlink_talk_filter, &req.n,
+ dplane_ctx_get_ns(ctx), 0);
}
-int kernel_address_delete_ipv6(struct interface *ifp, struct connected *ifc)
+enum zebra_dplane_result kernel_address_update_ctx(struct zebra_dplane_ctx *ctx)
{
- return netlink_address(RTM_DELADDR, AF_INET6, ifp, ifc);
+ return (netlink_address_ctx(ctx) == 0 ?
+ ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE);
}
int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id, int startup)
struct listnode *node, *nnode;
struct connected *ifc;
struct prefix *p;
- int ret;
+ enum zebra_dplane_result dplane_res;
for (ALL_LIST_ELEMENTS(ifp->connected, node, nnode, ifc)) {
p = ifc->address;
if_refresh(ifp);
}
- ret = if_set_prefix(ifp, ifc);
- if (ret < 0) {
+ dplane_res = dplane_intf_addr_set(ifp, ifc);
+ if (dplane_res ==
+ ZEBRA_DPLANE_REQUEST_FAILURE) {
flog_err_sys(
EC_ZEBRA_IFACE_ADDR_ADD_FAILED,
"Can't set interface's address: %s",
- safe_strerror(errno));
+ dplane_res2str(dplane_res));
continue;
}
if_refresh(ifp);
}
- ret = if_prefix_add_ipv6(ifp, ifc);
- if (ret < 0) {
+
+ dplane_res = dplane_intf_addr_set(ifp, ifc);
+ if (dplane_res ==
+ ZEBRA_DPLANE_REQUEST_FAILURE) {
flog_err_sys(
EC_ZEBRA_IFACE_ADDR_ADD_FAILED,
"Can't set interface's address: %s",
- safe_strerror(errno));
+ dplane_res2str(dplane_res));
continue;
}
vty_out(vty, " VTEP IP: %s",
inet_ntoa(vxlan_info->vtep_ip));
if (vxlan_info->access_vlan)
- vty_out(vty, " Access VLAN Id %u",
+ vty_out(vty, " Access VLAN Id %u\n",
vxlan_info->access_vlan);
+ if (vxlan_info->mcast_grp.s_addr != INADDR_ANY)
+ vty_out(vty, " Mcast Group %s",
+ inet_ntoa(vxlan_info->mcast_grp));
vty_out(vty, "\n");
}
struct connected *ifc;
struct prefix_ipv4 *p;
int ret;
+ enum zebra_dplane_result dplane_res;
if_data = ifp->info;
if_refresh(ifp);
}
- ret = if_set_prefix(ifp, ifc);
- if (ret < 0) {
+ dplane_res = dplane_intf_addr_set(ifp, ifc);
+ if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
vty_out(vty, "%% Can't set interface IP address: %s.\n",
- safe_strerror(errno));
+ dplane_res2str(dplane_res));
return CMD_WARNING_CONFIG_FAILED;
}
struct prefix_ipv4 lp, pp;
struct connected *ifc;
int ret;
+ enum zebra_dplane_result dplane_res;
/* Convert to prefix structure. */
ret = str2prefix_ipv4(addr_str, &lp);
}
/* This is real route. */
- ret = if_unset_prefix(ifp, ifc);
- if (ret < 0) {
+ dplane_res = dplane_intf_addr_unset(ifp, ifc);
+ if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
vty_out(vty, "%% Can't unset interface IP address: %s.\n",
- safe_strerror(errno));
+ dplane_res2str(dplane_res));
return CMD_WARNING_CONFIG_FAILED;
}
UNSET_FLAG(ifc->conf, ZEBRA_IFC_QUEUED);
struct connected *ifc;
struct prefix_ipv6 *p;
int ret;
+ enum zebra_dplane_result dplane_res;
if_data = ifp->info;
if_refresh(ifp);
}
- ret = if_prefix_add_ipv6(ifp, ifc);
-
- if (ret < 0) {
+ dplane_res = dplane_intf_addr_set(ifp, ifc);
+ if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
vty_out(vty, "%% Can't set interface IP address: %s.\n",
- safe_strerror(errno));
+ dplane_res2str(dplane_res));
return CMD_WARNING_CONFIG_FAILED;
}
struct prefix_ipv6 cp;
struct connected *ifc;
int ret;
+ enum zebra_dplane_result dplane_res;
/* Convert to prefix structure. */
ret = str2prefix_ipv6(addr_str, &cp);
}
/* This is real route. */
- ret = if_prefix_delete_ipv6(ifp, ifc);
- if (ret < 0) {
+ dplane_res = dplane_intf_addr_unset(ifp, ifc);
+ if (dplane_res == ZEBRA_DPLANE_REQUEST_FAILURE) {
vty_out(vty, "%% Can't unset interface IP address: %s.\n",
- safe_strerror(errno));
+ dplane_res2str(dplane_res));
return CMD_WARNING_CONFIG_FAILED;
}
#include "zebra/rt.h"
#include "zebra/interface.h"
#include "zebra/zebra_errors.h"
+#include "zebra/debug.h"
#ifndef SUNOS_5
#endif
}
-#ifdef HAVE_NETLINK
-/* Interface address setting via netlink interface. */
-int if_set_prefix(struct interface *ifp, struct connected *ifc)
-{
- return kernel_address_add_ipv4(ifp, ifc);
-}
+/*
+ * Handler for interface address programming via the zebra dplane,
+ * for non-netlink platforms. This handler dispatches to per-platform
+ * helpers, based on the operation requested.
+ */
+#ifndef HAVE_NETLINK
-/* Interface address is removed using netlink interface. */
-int if_unset_prefix(struct interface *ifp, struct connected *ifc)
+/* Prototypes: these are placed in this block so that they're only seen
+ * on non-netlink platforms.
+ */
+static int if_set_prefix_ctx(const struct zebra_dplane_ctx *ctx);
+static int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx);
+static int if_set_prefix6_ctx(const struct zebra_dplane_ctx *ctx);
+static int if_unset_prefix6_ctx(const struct zebra_dplane_ctx *ctx);
+
+/*
+ * Dispatch an interface-address dataplane update to the per-family,
+ * per-operation helper.  Any op other than ADDR_INSTALL/ADDR_UNINSTALL
+ * is logged (when dplane debugging is enabled) and reported as failure.
+ */
+enum zebra_dplane_result kernel_address_update_ctx(
+	struct zebra_dplane_ctx *ctx)
+{
+	int ret = -1;
+	const struct prefix *p;
+
+	p = dplane_ctx_get_intf_addr(ctx);
+
+	if (dplane_ctx_get_op(ctx) == DPLANE_OP_ADDR_INSTALL) {
+		if (p->family == AF_INET)
+			ret = if_set_prefix_ctx(ctx);
+		else
+			ret = if_set_prefix6_ctx(ctx);
+	} else if (dplane_ctx_get_op(ctx) == DPLANE_OP_ADDR_UNINSTALL) {
+		if (p->family == AF_INET)
+			ret = if_unset_prefix_ctx(ctx);
+		else
+			ret = if_unset_prefix6_ctx(ctx);
+	} else {
+		if (IS_ZEBRA_DEBUG_DPLANE)
+			zlog_debug("Invalid op in interface-addr install");
+	}
+
+	/* Only a helper return of 0 counts as success. */
+	return (ret == 0 ?
+		ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE);
+}
+
+#endif /* !HAVE_NETLINK */
+
+#ifdef HAVE_NETLINK
+
+/* TODO -- remove; no use of these apis with netlink any longer */
+
#else /* ! HAVE_NETLINK */
#ifdef HAVE_STRUCT_IFALIASREQ
-/* Set up interface's IP address, netmask (and broadcas? ). *BSD may
- has ifaliasreq structure. */
-int if_set_prefix(struct interface *ifp, struct connected *ifc)
+
+/*
+ * Helper for interface-addr install, non-netlink
+ */
+static int if_set_prefix_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct ifaliasreq addreq;
struct sockaddr_in addr, mask, peer;
struct prefix_ipv4 *p;
- /* don't configure PtP addresses on broadcast ifs or reverse */
- if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
- errno = EINVAL;
- return -1;
- }
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
- p = (struct prefix_ipv4 *)ifc->address;
- rib_lookup_and_pushup(p, ifp->vrf_id);
-
- memset(&addreq, 0, sizeof addreq);
- strlcpy(addreq.ifra_name, ifp->name, sizeof(addreq.ifra_name));
+ memset(&addreq, 0, sizeof(addreq));
+ strncpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
+ sizeof(addreq.ifra_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_addr = p->prefix;
#endif
memcpy(&addreq.ifra_addr, &addr, sizeof(struct sockaddr_in));
- if (CONNECTED_PEER(ifc)) {
- p = (struct prefix_ipv4 *)ifc->destination;
+ if (dplane_ctx_intf_is_connected(ctx)) {
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_dest(ctx);
memset(&mask, 0, sizeof(struct sockaddr_in));
peer.sin_addr = p->prefix;
peer.sin_family = p->family;
if (ret < 0)
return ret;
return 0;
+
}
-/* Set up interface's IP address, netmask (and broadcas? ). *BSD may
- has ifaliasreq structure. */
-int if_unset_prefix(struct interface *ifp, struct connected *ifc)
+/*
+ * Helper for interface-addr un-install, non-netlink
+ */
+static int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct ifaliasreq addreq;
struct sockaddr_in addr, mask, peer;
struct prefix_ipv4 *p;
- /* this would probably wreak havoc */
- if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
- errno = EINVAL;
- return -1;
- }
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
- p = (struct prefix_ipv4 *)ifc->address;
-
- memset(&addreq, 0, sizeof addreq);
- strlcpy(addreq.ifra_name, ifp->name, sizeof(addreq.ifra_name));
+ memset(&addreq, 0, sizeof(addreq));
+ strncpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
+ sizeof(addreq.ifra_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_addr = p->prefix;
#endif
memcpy(&addreq.ifra_addr, &addr, sizeof(struct sockaddr_in));
- if (CONNECTED_PEER(ifc)) {
- p = (struct prefix_ipv4 *)ifc->destination;
+ if (dplane_ctx_intf_is_connected(ctx)) {
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_dest(ctx);
memset(&mask, 0, sizeof(struct sockaddr_in));
peer.sin_addr = p->prefix;
peer.sin_family = p->family;
#else
/* Set up interface's address, netmask (and broadcas? ). Linux or
Solaris uses ifname:number semantics to set IP address aliases. */
-int if_set_prefix(struct interface *ifp, struct connected *ifc)
+int if_set_prefix_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct ifreq ifreq;
struct prefix_ipv4 ifaddr;
struct prefix_ipv4 *p;
- p = (struct prefix_ipv4 *)ifc->address;
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
ifaddr = *p;
- ifreq_set_name(&ifreq, ifp);
+ strlcpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
+ sizeof(ifreq.ifr_name));
addr.sin_addr = p->prefix;
addr.sin_family = p->family;
/* We need mask for make broadcast addr. */
masklen2ip(p->prefixlen, &mask.sin_addr);
- if (if_is_broadcast(ifp)) {
+ if (dplane_ctx_intf_is_broadcast(ctx)) {
apply_mask_ipv4(&ifaddr);
addr.sin_addr = ifaddr.prefix;
#ifdef SUNOS_5
memcpy(&mask, &ifreq.ifr_addr, sizeof(mask));
#else
- memcpy(&ifreq.ifr_netmask, &mask, sizeof(struct sockaddr_in));
+ memcpy(&ifreq.ifr_addr, &mask, sizeof(struct sockaddr_in));
#endif /* SUNOS5 */
ret = if_ioctl(SIOCSIFNETMASK, (caddr_t)&ifreq);
if (ret < 0)
/* Set up interface's address, netmask (and broadcas? ). Linux or
Solaris uses ifname:number semantics to set IP address aliases. */
-int if_unset_prefix(struct interface *ifp, struct connected *ifc)
+int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct ifreq ifreq;
struct sockaddr_in addr;
struct prefix_ipv4 *p;
- p = (struct prefix_ipv4 *)ifc->address;
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
- ifreq_set_name(&ifreq, ifp);
+ strlcpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
+ sizeof(ifreq.ifr_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_family = p->family;
return 0;
}
-#ifdef LINUX_IPV6
-#ifndef _LINUX_IN6_H
-/* linux/include/net/ipv6.h */
-struct in6_ifreq {
- struct in6_addr ifr6_addr;
- uint32_t ifr6_prefixlen;
- int ifr6_ifindex;
-};
-#endif /* _LINUX_IN6_H */
-/* Interface's address add/delete functions. */
-int if_prefix_add_ipv6(struct interface *ifp, struct connected *ifc)
-{
-#ifdef HAVE_NETLINK
- return kernel_address_add_ipv6(ifp, ifc);
-#endif /* HAVE_NETLINK */
-}
+#ifndef LINUX_IPV6 /* Netlink has its own code */
-int if_prefix_delete_ipv6(struct interface *ifp, struct connected *ifc)
-{
-#ifdef HAVE_NETLINK
- return kernel_address_delete_ipv6(ifp, ifc);
-#endif /* HAVE_NETLINK */
-}
-#else /* LINUX_IPV6 */
#ifdef HAVE_STRUCT_IN6_ALIASREQ
#ifndef ND6_INFINITE_LIFETIME
#define ND6_INFINITE_LIFETIME 0xffffffffL
#endif /* ND6_INFINITE_LIFETIME */
-int if_prefix_add_ipv6(struct interface *ifp, struct connected *ifc)
+
+/*
+ * Helper for interface-addr install, non-netlink
+ */
+static int if_set_prefix6_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct in6_aliasreq addreq;
struct sockaddr_in6 mask;
struct prefix_ipv6 *p;
- p = (struct prefix_ipv6 *)ifc->address;
+ p = (struct prefix_ipv6 *)dplane_ctx_get_intf_addr(ctx);
- memset(&addreq, 0, sizeof addreq);
- strlcpy(addreq.ifra_name, ifp->name, sizeof(addreq.ifra_name));
+ memset(&addreq, 0, sizeof(addreq));
+ strlcpy((char *)&addreq.ifra_name,
+ dplane_ctx_get_ifname(ctx), sizeof(addreq.ifra_name));
memset(&addr, 0, sizeof(struct sockaddr_in6));
addr.sin6_addr = p->prefix;
return 0;
}
-int if_prefix_delete_ipv6(struct interface *ifp, struct connected *ifc)
+/*
+ * Helper for interface-addr un-install, non-netlink
+ */
+static int if_unset_prefix6_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct in6_aliasreq addreq;
struct sockaddr_in6 mask;
struct prefix_ipv6 *p;
- p = (struct prefix_ipv6 *)ifc->address;
+ p = (struct prefix_ipv6 *)dplane_ctx_get_intf_addr(ctx);
- memset(&addreq, 0, sizeof addreq);
- strlcpy(addreq.ifra_name, ifp->name, sizeof(addreq.ifra_name));
+ memset(&addreq, 0, sizeof(addreq));
+ strlcpy((char *)&addreq.ifra_name,
+ dplane_ctx_get_ifname(ctx), sizeof(addreq.ifra_name));
memset(&addr, 0, sizeof(struct sockaddr_in6));
addr.sin6_addr = p->prefix;
return 0;
}
#else
-int if_prefix_add_ipv6(struct interface *ifp, struct connected *ifc)
+/* The old, pre-dataplane code here just returned, so we're retaining that
+ * choice.
+ */
+static int if_set_prefix6_ctx(const struct zebra_dplane_ctx *ctx)
{
return 0;
}
-int if_prefix_delete_ipv6(struct interface *ifp, struct connected *ifc)
+static int if_unset_prefix6_ctx(const struct zebra_dplane_ctx *ctx)
{
return 0;
}
extern int if_unset_flags(struct interface *, uint64_t);
extern void if_get_flags(struct interface *);
-extern int if_set_prefix(struct interface *, struct connected *);
-extern int if_unset_prefix(struct interface *, struct connected *);
-
extern void if_get_metric(struct interface *);
extern void if_get_mtu(struct interface *);
-extern int if_prefix_add_ipv6(struct interface *, struct connected *);
-extern int if_prefix_delete_ipv6(struct interface *, struct connected *);
-
#ifdef SOLARIS_IPV6
extern int if_ioctl_ipv6(unsigned long, caddr_t);
extern struct connected *if_lookup_linklocal(struct interface *);
#include "zebra/interface.h"
#include "zebra/ioctl_solaris.h"
#include "zebra/zebra_errors.h"
+#include "zebra/debug.h"
extern struct zebra_privs_t zserv_privs;
+/* Prototypes */
+static int if_set_prefix_ctx(const struct zebra_dplane_ctx *ctx);
+static int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx);
+static int if_set_prefix6_ctx(const struct zebra_dplane_ctx *ctx);
+static int if_unset_prefix6_ctx(const struct zebra_dplane_ctx *ctx);
+
/* clear and set interface name string */
void lifreq_set_name(struct lifreq *lifreq, const char *ifname)
{
zebra_interface_up_update(ifp);
}
+/*
+ * Handler for interface address programming via the zebra dplane
+ * (Solaris build): dispatch the install/uninstall to the matching
+ * per-family helper based on the requested operation.
+ */
+enum zebra_dplane_result kernel_address_update_ctx(
+	struct zebra_dplane_ctx *ctx)
+{
+	int ret = -1;
+	const struct prefix *p;
+
+	p = dplane_ctx_get_intf_addr(ctx);
+
+	if (dplane_ctx_get_op(ctx) == DPLANE_OP_ADDR_INSTALL) {
+		if (p->family == AF_INET)
+			ret = if_set_prefix_ctx(ctx);
+		else
+			ret = if_set_prefix6_ctx(ctx);
+	} else if (dplane_ctx_get_op(ctx) == DPLANE_OP_ADDR_UNINSTALL) {
+		if (p->family == AF_INET)
+			ret = if_unset_prefix_ctx(ctx);
+		else
+			ret = if_unset_prefix6_ctx(ctx);
+	} else {
+		if (IS_ZEBRA_DEBUG_DPLANE)
+			zlog_debug("Invalid op in interface-addr install");
+	}
+
+	return (ret == 0 ?
+		ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE);
+}
+
/* Set up interface's address, netmask (and broadcast? ).
Solaris uses ifname:number semantics to set IP address aliases. */
-int if_set_prefix(struct interface *ifp, struct connected *ifc)
+static int if_set_prefix_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct ifreq ifreq;
- struct sockaddr_in addr;
- struct sockaddr_in broad;
- struct sockaddr_in mask;
+ struct sockaddr_in addr, broad, mask;
struct prefix_ipv4 ifaddr;
struct prefix_ipv4 *p;
- p = (struct prefix_ipv4 *)ifc->address;
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
ifaddr = *p;
- strlcpy(ifreq.ifr_name, ifp->name, sizeof(ifreq.ifr_name));
+ strlcpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
+ sizeof(ifreq.ifr_name));
addr.sin_addr = p->prefix;
addr.sin_family = p->family;
/* We need mask for make broadcast addr. */
masklen2ip(p->prefixlen, &mask.sin_addr);
- if (if_is_broadcast(ifp)) {
+ if (dplane_ctx_intf_is_broadcast(ctx)) {
apply_mask_ipv4(&ifaddr);
addr.sin_addr = ifaddr.prefix;
/* Set up interface's address, netmask (and broadcast).
Solaris uses ifname:number semantics to set IP address aliases. */
-int if_unset_prefix(struct interface *ifp, struct connected *ifc)
+static int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx)
{
int ret;
struct ifreq ifreq;
struct sockaddr_in addr;
struct prefix_ipv4 *p;
- p = (struct prefix_ipv4 *)ifc->address;
+ p = (struct prefix_ipv4 *)dplane_ctx_get_intf_addr(ctx);
- strlcpy(ifreq.ifr_name, ifp->name, sizeof(ifreq.ifr_name));
+ strncpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
+ sizeof(ifreq.ifr_name));
memset(&addr, 0, sizeof(struct sockaddr_in));
addr.sin_family = p->family;
}
/* Interface's address add/delete functions. */
-int if_prefix_add_ipv6(struct interface *ifp, struct connected *ifc)
+static int if_set_prefix6_ctx(const struct zebra_dplane_ctx *ctx)
{
char addrbuf[PREFIX_STRLEN];
+ prefix2str(dplane_ctx_get_intf_addr(ctx), addrbuf, sizeof(addrbuf));
+
flog_warn(EC_LIB_DEVELOPMENT, "Can't set %s on interface %s",
- prefix2str(ifc->address, addrbuf, sizeof(addrbuf)),
- ifp->name);
+ addrbuf, dplane_ctx_get_ifname(ctx));
return 0;
}
-int if_prefix_delete_ipv6(struct interface *ifp, struct connected *ifc)
+static int if_unset_prefix6_ctx(const struct zebra_dplane_ctx *ctx)
{
char addrbuf[PREFIX_STRLEN];
+ prefix2str(dplane_ctx_get_intf_addr(ctx), addrbuf, sizeof(addrbuf));
+
flog_warn(EC_LIB_DEVELOPMENT, "Can't delete %s on interface %s",
- prefix2str(ifc->address, addrbuf, sizeof(addrbuf)),
- ifp->name);
+ addrbuf, dplane_ctx_get_ifname(ctx));
return 0;
}
enum zebra_dplane_result kernel_pw_update(struct zebra_dplane_ctx *ctx);
-extern int kernel_address_add_ipv4(struct interface *, struct connected *);
-extern int kernel_address_delete_ipv4(struct interface *, struct connected *);
-extern int kernel_address_add_ipv6(struct interface *, struct connected *);
-extern int kernel_address_delete_ipv6(struct interface *, struct connected *);
+enum zebra_dplane_result kernel_address_update_ctx(
+ struct zebra_dplane_ctx *ctx);
+
extern int kernel_neigh_update(int cmd, int ifindex, uint32_t addr, char *lla,
int llalen, ns_id_t ns_id);
extern int kernel_interface_set_master(struct interface *master,
union pw_protocol_fields fields;
};
+/*
+ * Interface/prefix info for the dataplane
+ */
+struct dplane_intf_info {
+
+	char ifname[INTERFACE_NAMSIZ];
+	ifindex_t ifindex;
+
+	uint32_t metric;
+	uint32_t flags;
+
+#define DPLANE_INTF_CONNECTED (1 << 0) /* Connected peer, p2p */
+#define DPLANE_INTF_SECONDARY (1 << 1)
+#define DPLANE_INTF_BROADCAST (1 << 2)
+#define DPLANE_INTF_HAS_DEST (1 << 3)
+#define DPLANE_INTF_HAS_LABEL (1 << 4)
+
+	/* Interface address/prefix */
+	struct prefix prefix;
+
+	/* Dest address, for p2p, or broadcast prefix */
+	struct prefix dest_prefix;
+
+	/* 'label' may point at 'label_buf'; the ctx free path only
+	 * free()s it when it points elsewhere (heap allocation).
+	 */
+	char *label;
+	char label_buf[32];
+};
+
/*
* The context block used to exchange info about route updates across
* the boundary between the zebra main context (and pthread) and the
vrf_id_t zd_vrf_id;
uint32_t zd_table_id;
- /* Support info for either route or LSP update */
+ /* Support info for different kinds of updates */
union {
struct dplane_route_info rinfo;
zebra_lsp_t lsp;
struct dplane_pw_info pw;
+ struct dplane_intf_info intf;
} u;
/* Namespace info, used especially for netlink kernel communication */
_Atomic uint32_t dg_pws_in;
_Atomic uint32_t dg_pw_errors;
+ _Atomic uint32_t dg_intf_addrs_in;
+ _Atomic uint32_t dg_intf_addr_errors;
+
_Atomic uint32_t dg_update_yields;
/* Dataplane pthread */
enum dplane_op_e op);
static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
enum dplane_op_e op);
+static enum zebra_dplane_result intf_addr_update_internal(
+ const struct interface *ifp, const struct connected *ifc,
+ enum dplane_op_e op);
/*
* Public APIs
}
break;
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ /* Maybe free label string, if allocated */
+ if ((*pctx)->u.intf.label != NULL &&
+ (*pctx)->u.intf.label != (*pctx)->u.intf.label_buf) {
+ free((*pctx)->u.intf.label);
+ (*pctx)->u.intf.label = NULL;
+ }
+ break;
+
case DPLANE_OP_NONE:
break;
}
case DPLANE_OP_SYS_ROUTE_DELETE:
ret = "SYS_ROUTE_DEL";
break;
+
+ case DPLANE_OP_ADDR_INSTALL:
+ ret = "ADDR_INSTALL";
+ break;
+ case DPLANE_OP_ADDR_UNINSTALL:
+ ret = "ADDR_UNINSTALL";
+ break;
+
}
return ret;
return &(ctx->u.pw.nhg);
}
+/* Accessors for interface information */
+const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return ctx->u.intf.ifname;
+}
+
+/* Interface index recorded when the update was enqueued */
+ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return ctx->u.intf.ifindex;
+}
+
+/* Metric associated with the interface address update */
+uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return ctx->u.intf.metric;
+}
+
+/* Is interface addr p2p? */
+bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return (ctx->u.intf.flags & DPLANE_INTF_CONNECTED);
+}
+
+/* Was the address flagged secondary? */
+bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return (ctx->u.intf.flags & DPLANE_INTF_SECONDARY);
+}
+
+/* Was the owning interface broadcast-capable? */
+bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return (ctx->u.intf.flags & DPLANE_INTF_BROADCAST);
+}
+
+/* The address/prefix being installed or removed */
+const struct prefix *dplane_ctx_get_intf_addr(
+	const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return &(ctx->u.intf.prefix);
+}
+
+/* Is a destination (peer or broadcast) prefix present? */
+bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST);
+}
+
+/* Destination prefix, or NULL if none was recorded */
+const struct prefix *dplane_ctx_get_intf_dest(
+	const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	if (ctx->u.intf.flags & DPLANE_INTF_HAS_DEST)
+		return &(ctx->u.intf.dest_prefix);
+	else
+		return NULL;
+}
+
+/* Was an address label supplied with this update? */
+bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return (ctx->u.intf.flags & DPLANE_INTF_HAS_LABEL);
+}
+
+/* Label string; NULL unless DPLANE_INTF_HAS_LABEL is set */
+const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx)
+{
+	DPLANE_CTX_VALID(ctx);
+
+	return ctx->u.intf.label;
+}
+
/*
* End of dplane context accessors
*/
return result;
}
+/*
+ * Enqueue interface address add for the dataplane.
+ * Returns ZEBRA_DPLANE_REQUEST_QUEUED on success; FAILURE if the
+ * BSD-path p2p sanity check fails or the context cannot be queued.
+ */
+enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
+					      const struct connected *ifc)
+{
+#if !defined(HAVE_NETLINK) && defined(HAVE_STRUCT_IFALIASREQ)
+	/* Extra checks for this OS path. */
+
+	/* Don't configure PtP addresses on broadcast ifs or reverse */
+	if (!(ifp->flags & IFF_POINTOPOINT) != !CONNECTED_PEER(ifc)) {
+		if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_DPLANE)
+			zlog_debug("Failed to set intf addr: mismatch p2p and connected");
+
+		return ZEBRA_DPLANE_REQUEST_FAILURE;
+	}
+
+	/* Ensure that no existing installed v4 route conflicts with
+	 * the new interface prefix. This check must be done in the
+	 * zebra pthread context, and any route delete (if needed)
+	 * is enqueued before the interface address programming attempt.
+	 */
+	if (ifc->address->family == AF_INET) {
+		struct prefix_ipv4 *p;
+
+		p = (struct prefix_ipv4 *)ifc->address;
+		rib_lookup_and_pushup(p, ifp->vrf_id);
+	}
+#endif
+
+	return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_INSTALL);
+}
+
+/*
+ * Enqueue interface address remove/uninstall for the dataplane.
+ * Returns ZEBRA_DPLANE_REQUEST_QUEUED on success, FAILURE otherwise.
+ */
+enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
+						const struct connected *ifc)
+{
+	return intf_addr_update_internal(ifp, ifc, DPLANE_OP_ADDR_UNINSTALL);
+}
+
+/*
+ * Common worker for address install/uninstall: build a dataplane
+ * context describing one connected address on 'ifp' and enqueue it
+ * for the dataplane pthread. Returns QUEUED on success; on failure
+ * the context is freed and the error counter is bumped.
+ */
+static enum zebra_dplane_result intf_addr_update_internal(
+	const struct interface *ifp, const struct connected *ifc,
+	enum dplane_op_e op)
+{
+	enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
+	int ret = EINVAL;
+	struct zebra_dplane_ctx *ctx = NULL;
+	struct zebra_ns *zns;
+
+	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+		char addr_str[PREFIX_STRLEN];
+
+		prefix2str(ifc->address, addr_str, sizeof(addr_str));
+
+		zlog_debug("init intf ctx %s: idx %d, addr %u:%s",
+			   dplane_op2str(op), ifp->ifindex, ifp->vrf_id,
+			   addr_str);
+	}
+
+	ctx = dplane_ctx_alloc();
+	if (ctx == NULL) {
+		ret = ENOMEM;
+		goto done;
+	}
+
+	ctx->zd_op = op;
+	ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
+	ctx->zd_vrf_id = ifp->vrf_id;
+
+	/* NOTE(review): ns lookup is keyed by vrf_id here -- confirm that
+	 * vrf ids and ns ids coincide for this build; they differ when
+	 * netns-backed VRFs are in use.
+	 */
+	zns = zebra_ns_lookup(ifp->vrf_id);
+	dplane_ctx_ns_init(ctx, zns, false);
+
+	/* Init the interface-addr-specific area */
+	memset(&ctx->u.intf, 0, sizeof(ctx->u.intf));
+
+	/* strncpy does not guarantee NUL-termination when the source
+	 * fills the destination; terminate explicitly so later reads of
+	 * u.intf.ifname stay in-bounds.
+	 */
+	strncpy(ctx->u.intf.ifname, ifp->name, sizeof(ctx->u.intf.ifname));
+	ctx->u.intf.ifname[sizeof(ctx->u.intf.ifname) - 1] = '\0';
+	ctx->u.intf.ifindex = ifp->ifindex;
+	ctx->u.intf.prefix = *(ifc->address);
+
+	if (if_is_broadcast(ifp))
+		ctx->u.intf.flags |= DPLANE_INTF_BROADCAST;
+
+	if (CONNECTED_PEER(ifc)) {
+		ctx->u.intf.dest_prefix = *(ifc->destination);
+		ctx->u.intf.flags |=
+			(DPLANE_INTF_CONNECTED | DPLANE_INTF_HAS_DEST);
+	} else if (ifc->destination) {
+		ctx->u.intf.dest_prefix = *(ifc->destination);
+		ctx->u.intf.flags |= DPLANE_INTF_HAS_DEST;
+	}
+
+	if (CHECK_FLAG(ifc->flags, ZEBRA_IFA_SECONDARY))
+		ctx->u.intf.flags |= DPLANE_INTF_SECONDARY;
+
+	if (ifc->label) {
+		size_t len;
+
+		ctx->u.intf.flags |= DPLANE_INTF_HAS_LABEL;
+
+		/* Use embedded buffer if it's adequate; else allocate.
+		 * (len < sizeof(buf) guarantees strncpy copies the NUL.)
+		 */
+		len = strlen(ifc->label);
+
+		if (len < sizeof(ctx->u.intf.label_buf)) {
+			strncpy(ctx->u.intf.label_buf, ifc->label,
+				sizeof(ctx->u.intf.label_buf));
+			ctx->u.intf.label = ctx->u.intf.label_buf;
+		} else {
+			ctx->u.intf.label = strdup(ifc->label);
+		}
+	}
+
+	ret = dplane_route_enqueue(ctx);
+
+done:
+
+	/* Increment counter: counts all attempts, including failures */
+	atomic_fetch_add_explicit(&zdplane_info.dg_intf_addrs_in, 1,
+				  memory_order_relaxed);
+
+	if (ret == AOK)
+		result = ZEBRA_DPLANE_REQUEST_QUEUED;
+	else {
+		/* Error counter */
+		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
+					  1, memory_order_relaxed);
+		if (ctx)
+			dplane_ctx_free(&ctx);
+	}
+
+	return result;
+}
+
/*
* Handler for 'show dplane'
*/
return res;
}
+/*
+ * Handler for kernel-facing interface address updates: dispatches the
+ * ctx to the OS-specific kernel_address_update_ctx() and bumps the
+ * error counter on any non-success result.
+ */
+static enum zebra_dplane_result
+kernel_dplane_address_update(struct zebra_dplane_ctx *ctx)
+{
+	enum zebra_dplane_result res;
+
+
+	if (IS_ZEBRA_DEBUG_DPLANE_DETAIL) {
+		char dest_str[PREFIX_STRLEN];
+
+		prefix2str(dplane_ctx_get_intf_addr(ctx), dest_str,
+			   sizeof(dest_str));
+
+		zlog_debug("Dplane intf %s, idx %u, addr %s",
+			   dplane_op2str(dplane_ctx_get_op(ctx)),
+			   dplane_ctx_get_ifindex(ctx), dest_str);
+	}
+
+	res = kernel_address_update_ctx(ctx);
+
+	if (res != ZEBRA_DPLANE_REQUEST_SUCCESS)
+		atomic_fetch_add_explicit(&zdplane_info.dg_intf_addr_errors,
+					  1, memory_order_relaxed);
+
+	return res;
+}
+
/*
* Kernel provider callback
*/
res = kernel_dplane_pw_update(ctx);
break;
+ case DPLANE_OP_ADDR_INSTALL:
+ case DPLANE_OP_ADDR_UNINSTALL:
+ res = kernel_dplane_address_update(ctx);
+ break;
+
/* Ignore system 'notifications' - the kernel already knows */
case DPLANE_OP_SYS_ROUTE_ADD:
case DPLANE_OP_SYS_ROUTE_DELETE:
/* System route notification */
DPLANE_OP_SYS_ROUTE_ADD,
DPLANE_OP_SYS_ROUTE_DELETE,
+
+ /* Interface address update */
+ DPLANE_OP_ADDR_INSTALL,
+ DPLANE_OP_ADDR_UNINSTALL,
};
/* Enable system route notifications */
const struct nexthop_group *dplane_ctx_get_pw_nhg(
const struct zebra_dplane_ctx *ctx);
+/* Accessors for interface information */
+const char *dplane_ctx_get_ifname(const struct zebra_dplane_ctx *ctx);
+ifindex_t dplane_ctx_get_ifindex(const struct zebra_dplane_ctx *ctx);
+uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx);
+/* Is interface addr p2p? */
+bool dplane_ctx_intf_is_connected(const struct zebra_dplane_ctx *ctx);
+bool dplane_ctx_intf_is_secondary(const struct zebra_dplane_ctx *ctx);
+bool dplane_ctx_intf_is_broadcast(const struct zebra_dplane_ctx *ctx);
+const struct prefix *dplane_ctx_get_intf_addr(
+ const struct zebra_dplane_ctx *ctx);
+bool dplane_ctx_intf_has_dest(const struct zebra_dplane_ctx *ctx);
+const struct prefix *dplane_ctx_get_intf_dest(
+ const struct zebra_dplane_ctx *ctx);
+bool dplane_ctx_intf_has_label(const struct zebra_dplane_ctx *ctx);
+const char *dplane_ctx_get_intf_label(const struct zebra_dplane_ctx *ctx);
+
/* Namespace info - esp. for netlink communication */
const struct zebra_dplane_info *dplane_ctx_get_ns(
const struct zebra_dplane_ctx *ctx);
enum zebra_dplane_result dplane_pw_install(struct zebra_pw *pw);
enum zebra_dplane_result dplane_pw_uninstall(struct zebra_pw *pw);
+/*
+ * Enqueue interface address changes for the dataplane.
+ */
+enum zebra_dplane_result dplane_intf_addr_set(const struct interface *ifp,
+ const struct connected *ifc);
+enum zebra_dplane_result dplane_intf_addr_unset(const struct interface *ifp,
+ const struct connected *ifc);
+
+
/* Retrieve the limit on the number of pending, unprocessed updates. */
uint32_t dplane_get_in_queue_limit(void);
{
struct zebra_if *zif;
struct in_addr old_vtep_ip;
+ uint16_t chgflags = 0;
zif = ifp->info;
assert(zif);
}
old_vtep_ip = zif->l2info.vxl.vtep_ip;
- if (IPV4_ADDR_SAME(&old_vtep_ip, &vxlan_info->vtep_ip))
- return;
- zif->l2info.vxl.vtep_ip = vxlan_info->vtep_ip;
- zebra_vxlan_if_update(ifp, ZEBRA_VXLIF_LOCAL_IP_CHANGE);
+ if (!IPV4_ADDR_SAME(&old_vtep_ip, &vxlan_info->vtep_ip)) {
+ chgflags |= ZEBRA_VXLIF_LOCAL_IP_CHANGE;
+ zif->l2info.vxl.vtep_ip = vxlan_info->vtep_ip;
+ }
+
+ if (!IPV4_ADDR_SAME(&zif->l2info.vxl.mcast_grp,
+ &vxlan_info->mcast_grp)) {
+ chgflags |= ZEBRA_VXLIF_MCAST_GRP_CHANGE;
+ zif->l2info.vxl.mcast_grp = vxlan_info->mcast_grp;
+ }
+
+ if (chgflags)
+ zebra_vxlan_if_update(ifp, chgflags);
}
/*
vni_t vni; /* VNI */
struct in_addr vtep_ip; /* Local tunnel IP */
vlanid_t access_vlan; /* Access VLAN - for VLAN-aware bridge. */
+ struct in_addr mcast_grp;
};
struct zebra_l2info_bondslave {
#include "if.h"
#include "linklist.h"
#include "log.h"
-#include "log_int.h"
#include "memory.h"
#include "mpls.h"
#include "nexthop.h"
nexthop_add(&nexthop->resolved, resolved_hop);
}
-/* If force flag is not set, do not modify falgs at all for uninstall
- the route from FIB. */
+/*
+ * Given a nexthop we need to properly recursively resolve
+ * the route. As such, do a table lookup to find and match
+ * if at all possible. Set the nexthop->ifindex as appropriate
+ */
static int nexthop_active(afi_t afi, struct route_entry *re,
- struct nexthop *nexthop, bool set,
+ struct nexthop *nexthop,
struct route_node *top)
{
struct prefix p;
|| nexthop->type == NEXTHOP_TYPE_IPV6)
nexthop->ifindex = 0;
- if (set) {
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
- nexthops_free(nexthop->resolved);
- nexthop->resolved = NULL;
- re->nexthop_mtu = 0;
- }
+ UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE);
+ nexthops_free(nexthop->resolved);
+ nexthop->resolved = NULL;
+ re->nexthop_mtu = 0;
/*
* If the kernel has sent us a route, then
re->type == ZEBRA_ROUTE_SYSTEM)
return 1;
- /* Skip nexthops that have been filtered out due to route-map */
- /* The nexthops are specific to this route and so the same */
- /* nexthop for a different route may not have this flag set */
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FILTERED)) {
- if (IS_ZEBRA_DEBUG_RIB_DETAILED)
- zlog_debug("\t%s: Nexthop Filtered",
- __PRETTY_FUNCTION__);
- return 0;
- }
-
/*
* Check to see if we should trust the passed in information
* for UNNUMBERED interfaces as that we won't find the GW
NEXTHOP_FLAG_RECURSIVE))
continue;
- if (set) {
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_RECURSIVE);
- SET_FLAG(re->status,
- ROUTE_ENTRY_NEXTHOPS_CHANGED);
- nexthop_set_resolved(afi, newhop,
- nexthop);
- }
+ SET_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_RECURSIVE);
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_NEXTHOPS_CHANGED);
+ nexthop_set_resolved(afi, newhop, nexthop);
resolved = 1;
}
- if (resolved && set)
+ if (resolved)
re->nexthop_mtu = match->mtu;
if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug("\t%s: Recursion failed to find",
NEXTHOP_FLAG_RECURSIVE))
continue;
- if (set) {
- SET_FLAG(nexthop->flags,
- NEXTHOP_FLAG_RECURSIVE);
- nexthop_set_resolved(afi, newhop,
- nexthop);
- }
+ SET_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_RECURSIVE);
+ nexthop_set_resolved(afi, newhop, nexthop);
resolved = 1;
}
- if (resolved && set)
+ if (resolved)
re->nexthop_mtu = match->mtu;
if (!resolved && IS_ZEBRA_DEBUG_RIB_DETAILED)
/* This function verifies reachability of one given nexthop, which can be
* numbered or unnumbered, IPv4 or IPv6. The result is unconditionally stored
- * in nexthop->flags field. If the 4th parameter, 'set', is non-zero,
- * nexthop->ifindex will be updated appropriately as well.
- * An existing route map can turn (otherwise active) nexthop into inactive, but
- * not vice versa.
+ * in nexthop->flags field. The nexthop->ifindex will be updated
+ * appropriately as well. An existing route map can turn
+ * (otherwise active) nexthop into inactive, but not vice versa.
*
* The return value is the final value of 'ACTIVE' flag.
*/
-
static unsigned nexthop_active_check(struct route_node *rn,
struct route_entry *re,
- struct nexthop *nexthop, bool set)
+ struct nexthop *nexthop)
{
struct interface *ifp;
route_map_result_t ret = RMAP_MATCH;
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
family = AFI_IP;
- if (nexthop_active(AFI_IP, re, nexthop, set, rn))
+ if (nexthop_active(AFI_IP, re, nexthop, rn))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
break;
case NEXTHOP_TYPE_IPV6:
family = AFI_IP6;
- if (nexthop_active(AFI_IP6, re, nexthop, set, rn))
+ if (nexthop_active(AFI_IP6, re, nexthop, rn))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
} else {
- if (nexthop_active(AFI_IP6, re, nexthop, set, rn))
+ if (nexthop_active(AFI_IP6, re, nexthop, rn))
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
else
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
return CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
}
-/* Iterate over all nexthops of the given RIB entry and refresh their
+/*
+ * Iterate over all nexthops of the given RIB entry and refresh their
* ACTIVE flag. re->nexthop_active_num is updated accordingly. If any
* nexthop is found to toggle the ACTIVE flag, the whole re structure
- * is flagged with ROUTE_ENTRY_CHANGED. The 4th 'set' argument is
- * transparently passed to nexthop_active_check().
+ * is flagged with ROUTE_ENTRY_CHANGED.
*
* Return value is the new number of active nexthops.
*/
-
-static int nexthop_active_update(struct route_node *rn, struct route_entry *re,
- bool set)
+static int nexthop_active_update(struct route_node *rn, struct route_entry *re)
{
struct nexthop *nexthop;
union g_addr prev_src;
- unsigned int prev_active, new_active, old_num_nh;
+ unsigned int prev_active, new_active;
ifindex_t prev_index;
- old_num_nh = re->nexthop_active_num;
-
re->nexthop_active_num = 0;
UNSET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
* a multipath perpsective should not be a data plane
* decision point.
*/
- new_active = nexthop_active_check(rn, re, nexthop, set);
+ new_active = nexthop_active_check(rn, re, nexthop);
if (new_active && re->nexthop_active_num >= multipath_num) {
UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
new_active = 0;
|| ((nexthop->type >= NEXTHOP_TYPE_IPV6
&& nexthop->type < NEXTHOP_TYPE_BLACKHOLE)
&& !(IPV6_ADDR_SAME(&prev_src.ipv6,
- &nexthop->rmap_src.ipv6)))) {
+ &nexthop->rmap_src.ipv6)))
+ || CHECK_FLAG(re->status, ROUTE_ENTRY_LABELS_CHANGED)) {
SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
SET_FLAG(re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);
}
}
- if (old_num_nh != re->nexthop_active_num)
- SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
-
- if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) {
- SET_FLAG(re->status, ROUTE_ENTRY_NEXTHOPS_CHANGED);
- }
-
return re->nexthop_active_num;
}
/* Update real nexthop. This may actually determine if nexthop is active
* or not. */
- if (!nexthop_active_update(rn, new, true)) {
+ if (!nexthop_group_active_nexthop_num(&new->ng)) {
UNSET_FLAG(new->status, ROUTE_ENTRY_CHANGED);
return;
}
* down, causing the kernel to delete routes without sending DELROUTE
* notifications
*/
- if (!nexthop_active_update(rn, old, true) &&
- (RIB_KERNEL_ROUTE(old)))
+ if (RIB_KERNEL_ROUTE(old))
SET_FLAG(old->status, ROUTE_ENTRY_REMOVED);
else
UNSET_FLAG(old->status, ROUTE_ENTRY_CHANGED);
/* Update the nexthop; we could determine here that nexthop is
* inactive. */
- if (nexthop_active_update(rn, new, true))
+ if (nexthop_group_active_nexthop_num(&new->ng))
nh_active = 1;
/* If nexthop is active, install the selected route, if
}
/* Update prior route. */
- if (new != old) {
- /* Set real nexthop. */
- nexthop_active_update(rn, old, true);
+ if (new != old)
UNSET_FLAG(old->status, ROUTE_ENTRY_CHANGED);
- }
/* Clear changed flag. */
UNSET_FLAG(new->status, ROUTE_ENTRY_CHANGED);
/* Skip unreachable nexthop. */
/* This first call to nexthop_active_update is merely to
- * determine if
- * there's any change to nexthops associated with this RIB
- * entry. Now,
- * rib_process() can be invoked due to an external event such as
- * link
- * down or due to next-hop-tracking evaluation. In the latter
- * case,
+ * determine if there's any change to nexthops associated
+ * with this RIB entry. Now, rib_process() can be invoked due
+ * to an external event such as link down or due to
+ * next-hop-tracking evaluation. In the latter case,
* a decision has already been made that the NHs have changed.
- * So, no
- * need to invoke a potentially expensive call again. Further,
- * since
- * the change might be in a recursive NH which is not caught in
- * the nexthop_active_update() code. Thus, we might miss changes
- * to
- * recursive NHs.
+ * So, no need to invoke a potentially expensive call again.
+ * Further, since the change might be in a recursive NH which
+ * is not caught in the nexthop_active_update() code. Thus, we
+ * might miss changes to recursive NHs.
*/
- if (!CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)
- && !nexthop_active_update(rn, re, false)) {
+ if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)
+ && !nexthop_active_update(rn, re)) {
if (re->type == ZEBRA_ROUTE_TABLE) {
/* XXX: HERE BE DRAGONS!!!!!
* In all honesty, I have not yet figured out
- * what this part
- * does or why the ROUTE_ENTRY_CHANGED test
- * above is correct
+ * what this part does or why the
+ * ROUTE_ENTRY_CHANGED test above is correct
* or why we need to delete a route here, and
- * also not whether
- * this concerns both selected and fib route, or
- * only selected
- * or only fib */
- /* This entry was denied by the 'ip protocol
- * table' route-map, we
- * need to delete it */
+ * also not whether this concerns both selected
+ * and fib route, or only selected
+ * or only fib
+ *
+ * This entry was denied by the 'ip protocol
+ * table' route-map, we need to delete it */
if (re != old_selected) {
if (IS_ZEBRA_DEBUG_RIB)
zlog_debug(
/* Update SELECTED entry */
if (old_selected != new_selected || selected_changed) {
- if (new_selected && new_selected != new_fib) {
- nexthop_active_update(rn, new_selected, true);
+ if (new_selected && new_selected != new_fib)
UNSET_FLAG(new_selected->status, ROUTE_ENTRY_CHANGED);
- }
if (new_selected)
SET_FLAG(new_selected->flags, ZEBRA_FLAG_SELECTED);
INET6_ADDRSTRLEN);
break;
}
- zlog_debug("%s: %s %s[%u] vrf %s(%u) with flags %s%s%s", func,
- (nexthop->rparent ? " NH" : "NH"), straddr,
+ zlog_debug("%s: %s %s[%u] vrf %s(%u) with flags %s%s%s%s%s%s",
+ func, (nexthop->rparent ? " NH" : "NH"), straddr,
nexthop->ifindex, vrf ? vrf->name : "Unknown",
nexthop->vrf_id,
(CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)
? "FIB "
: ""),
(CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE)
- ? "RECURSIVE"
+ ? "RECURSIVE "
+ : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK)
+ ? "ONLINK "
+ : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_MATCHED)
+ ? "MATCHED "
+ : ""),
+ (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE)
+ ? "DUPLICATE "
: ""));
}
zlog_debug("%s: dump complete", func);
{
struct route_table *table;
struct route_node *rn;
- unsigned changed = 0;
rib_dest_t *dest;
if (NULL == (table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id))) {
* of the rest of the RE.
*/
if (dest->selected_fib) {
- changed = 1;
if (IS_ZEBRA_DEBUG_RIB) {
char buf[PREFIX_STRLEN];
route_entry_dump(&rn->p, NULL, dest->selected_fib);
}
rib_uninstall(rn, dest->selected_fib);
- }
- if (changed)
rib_queue_add(rn);
+ }
}
int rib_add_multipath(afi_t afi, safi_t safi, struct prefix *p,
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
route_entry_dump(p, src_p, re);
}
+
+ SET_FLAG(re->status, ROUTE_ENTRY_CHANGED);
rib_addnode(rn, re, 1);
ret = 1;
continue;
if (re->type != ZEBRA_ROUTE_STATIC) {
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_CHANGED);
rib_queue_add(rn);
continue;
}
* gateway, NHT will
* take care.
*/
- if (nh)
+ if (nh) {
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_CHANGED);
rib_queue_add(rn);
+ }
}
break;
* protocol in
* some cases (TODO).
*/
- if (rnode_to_ribs(rn))
+ if (rnode_to_ribs(rn)) {
+ RNODE_FOREACH_RE_SAFE (rn, re, next)
+ SET_FLAG(re->status,
+ ROUTE_ENTRY_CHANGED);
rib_queue_add(rn);
+ }
break;
default:
zebra_delete_rnh(rnh, RNH_NEXTHOP_TYPE);
}
+/* Clear the NEXTHOP_FLAG_RNH_FILTERED flags on all nexthops of 're'
+ * so a subsequent route-map evaluation starts from a clean slate.
+ * Safe to call with re == NULL.
+ */
+static void zebra_rnh_clear_nexthop_rnh_filters(struct route_entry *re)
+{
+	struct nexthop *nexthop;
+
+	if (re) {
+		for (nexthop = re->ng.nexthop; nexthop;
+		     nexthop = nexthop->next) {
+			UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_RNH_FILTERED);
+		}
+	}
+}
+
/* Apply the NHT route-map for a client to the route (and nexthops)
* resolving a NH.
*/
nexthop = nexthop->next) {
ret = zebra_nht_route_map_check(
afi, proto, &prn->p, zvrf, re, nexthop);
- if (ret != RMAP_DENYMATCH) {
- SET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ if (ret != RMAP_DENYMATCH)
at_least_one++; /* at least one valid NH */
- } else {
- UNSET_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE);
+ else {
+ SET_FLAG(nexthop->flags,
+ NEXTHOP_FLAG_RNH_FILTERED);
}
}
}
* this
* nexthop to see if it is filtered or not.
*/
+ zebra_rnh_clear_nexthop_rnh_filters(re);
num_resolving_nh = zebra_rnh_apply_nht_rmap(
afi, zvrf, prn, re, client->proto);
if (num_resolving_nh)
send_client(rnh, client, RNH_NEXTHOP_TYPE, zvrf->vrf->vrf_id);
}
+
+ if (re)
+ zebra_rnh_clear_nexthop_rnh_filters(re);
}
static void zebra_rnh_process_pbr_tables(afi_t afi, struct route_node *nrn,
const struct nexthop *nh)
{
return (CHECK_FLAG(re->status, ROUTE_ENTRY_INSTALLED)
- && CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE));
+ && CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
+ && !CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RECURSIVE)
+ && !CHECK_FLAG(nh->flags, NEXTHOP_FLAG_DUPLICATE)
+ && !CHECK_FLAG(nh->flags, NEXTHOP_FLAG_RNH_FILTERED));
}
/*
/* l3-vni info */
vni_t l3vni;
+ /* pim mroutes installed for vxlan flooding */
+ struct hash *vxlan_sg_table;
+
bool dup_addr_detect;
int dad_time;
DEFINE_MTYPE_STATIC(ZEBRA, ZVNI_VTEP, "VNI remote VTEP");
DEFINE_MTYPE_STATIC(ZEBRA, MAC, "VNI MAC");
DEFINE_MTYPE_STATIC(ZEBRA, NEIGH, "VNI Neighbor");
+DEFINE_MTYPE_STATIC(ZEBRA, ZVXLAN_SG, "zebra VxLAN multicast group");
/* definitions */
+/* PMSI strings. */
+#define VXLAN_FLOOD_STR_NO_INFO "-"
+#define VXLAN_FLOOD_STR_DEFAULT VXLAN_FLOOD_STR_NO_INFO
+static const struct message zvtep_flood_str[] = {
+ {VXLAN_FLOOD_DISABLED, VXLAN_FLOOD_STR_NO_INFO},
+ {VXLAN_FLOOD_PIM_SM, "PIM-SM"},
+ {VXLAN_FLOOD_HEAD_END_REPL, "HER"},
+ {0}
+};
+
/* static function declarations */
static int ip_prefix_send_to_client(vrf_id_t vrf_id, struct prefix *p,
static void zvni_build_hash_table(void);
static int zvni_vtep_match(struct in_addr *vtep_ip, zebra_vtep_t *zvtep);
static zebra_vtep_t *zvni_vtep_find(zebra_vni_t *zvni, struct in_addr *vtep_ip);
-static zebra_vtep_t *zvni_vtep_add(zebra_vni_t *zvni, struct in_addr *vtep_ip);
+static zebra_vtep_t *zvni_vtep_add(zebra_vni_t *zvni, struct in_addr *vtep_ip,
+ int flood_control);
static int zvni_vtep_del(zebra_vni_t *zvni, zebra_vtep_t *zvtep);
static int zvni_vtep_del_all(zebra_vni_t *zvni, int uninstall);
-static int zvni_vtep_install(zebra_vni_t *zvni, struct in_addr *vtep_ip);
+static int zvni_vtep_install(zebra_vni_t *zvni, zebra_vtep_t *zvtep);
static int zvni_vtep_uninstall(zebra_vni_t *zvni, struct in_addr *vtep_ip);
static int zvni_del_macip_for_intf(struct interface *ifp, zebra_vni_t *zvni);
static int zvni_add_macip_for_intf(struct interface *ifp, zebra_vni_t *zvni);
bool do_dad,
bool *is_dup_detect,
bool is_local);
+static unsigned int zebra_vxlan_sg_hash_key_make(void *p);
+static bool zebra_vxlan_sg_hash_eq(const void *p1, const void *p2);
+static void zebra_vxlan_sg_do_deref(struct zebra_vrf *zvrf,
+ struct in_addr sip, struct in_addr mcast_grp);
+static zebra_vxlan_sg_t *zebra_vxlan_sg_do_ref(struct zebra_vrf *vrf,
+ struct in_addr sip, struct in_addr mcast_grp);
+static void zebra_vxlan_sg_deref(struct in_addr local_vtep_ip,
+ struct in_addr mcast_grp);
+static void zebra_vxlan_sg_ref(struct in_addr local_vtep_ip,
+ struct in_addr mcast_grp);
+static void zebra_vxlan_sg_cleanup(struct hash_backet *backet, void *arg);
/* Private functions */
static int host_rb_entry_compare(const struct host_rb_entry *hle1,
vty_out(vty, " VxLAN ifIndex: %u\n", zvni->vxlan_if->ifindex);
vty_out(vty, " Local VTEP IP: %s\n",
inet_ntoa(zvni->local_vtep_ip));
+ vty_out(vty, " Mcast group: %s\n",
+ inet_ntoa(zvni->mcast_grp));
} else {
json_object_string_add(json, "vxlanInterface",
zvni->vxlan_if->name);
json_object_int_add(json, "ifindex", zvni->vxlan_if->ifindex);
json_object_string_add(json, "vtepIp",
inet_ntoa(zvni->local_vtep_ip));
+ json_object_string_add(json, "mcastGroup",
+ inet_ntoa(zvni->mcast_grp));
json_object_string_add(json, "advertiseGatewayMacip",
zvni->advertise_gw_macip ? "Yes" : "No");
json_object_int_add(json, "numMacs", num_macs);
else
json_vtep_list = json_object_new_array();
for (zvtep = zvni->vteps; zvtep; zvtep = zvtep->next) {
- if (json == NULL)
- vty_out(vty, " %s\n",
- inet_ntoa(zvtep->vtep_ip));
- else {
+ const char *flood_str = lookup_msg(zvtep_flood_str,
+ zvtep->flood_control,
+ VXLAN_FLOOD_STR_DEFAULT);
+
+ if (json == NULL) {
+ vty_out(vty, " %s flood: %s\n",
+ inet_ntoa(zvtep->vtep_ip),
+ flood_str);
+ } else {
json_ip_str = json_object_new_string(
- inet_ntoa(zvtep->vtep_ip));
+ inet_ntoa(zvtep->vtep_ip));
json_object_array_add(json_vtep_list,
- json_ip_str);
+ json_ip_str);
}
}
if (json)
zvni->vxlan_if = NULL;
+ /* Remove references to the BUM mcast grp */
+ zebra_vxlan_sg_deref(zvni->local_vtep_ip, zvni->mcast_grp);
+
/* Free the neighbor hash table. */
hash_free(zvni->neigh_table);
zvni->neigh_table = NULL;
stream_putl(s, zvni->vni);
stream_put_in_addr(s, &zvni->local_vtep_ip);
stream_put(s, &zvni->vrf_id, sizeof(vrf_id_t)); /* tenant vrf */
+ stream_put_in_addr(s, &zvni->mcast_grp);
/* Write packet size. */
stream_putw_at(s, 0, stream_get_endp(s));
return;
}
- zvni->local_vtep_ip = vxl->vtep_ip;
+ if (zvni->local_vtep_ip.s_addr != vxl->vtep_ip.s_addr ||
+ zvni->mcast_grp.s_addr != vxl->mcast_grp.s_addr) {
+ zebra_vxlan_sg_deref(zvni->local_vtep_ip,
+ zvni->mcast_grp);
+ zebra_vxlan_sg_ref(vxl->vtep_ip,
+ vxl->mcast_grp);
+ zvni->local_vtep_ip = vxl->vtep_ip;
+ zvni->mcast_grp = vxl->mcast_grp;
+ }
zvni->vxlan_if = ifp;
vlan_if = zvni_map_to_svi(vxl->access_vlan,
zif->brslave_info.br_if);
/*
* Add remote VTEP to VNI hash table.
*/
-static zebra_vtep_t *zvni_vtep_add(zebra_vni_t *zvni, struct in_addr *vtep_ip)
+static zebra_vtep_t *zvni_vtep_add(zebra_vni_t *zvni, struct in_addr *vtep_ip,
+ int flood_control)
+
{
zebra_vtep_t *zvtep;
zvtep = XCALLOC(MTYPE_ZVNI_VTEP, sizeof(zebra_vtep_t));
zvtep->vtep_ip = *vtep_ip;
+ zvtep->flood_control = flood_control;
if (zvni->vteps)
zvni->vteps->prev = zvtep;
}
/*
- * Install remote VTEP into the kernel.
+ * Install remote VTEP into the kernel if the remote VTEP has asked
+ * for head-end-replication.
*/
-static int zvni_vtep_install(zebra_vni_t *zvni, struct in_addr *vtep_ip)
+static int zvni_vtep_install(zebra_vni_t *zvni, zebra_vtep_t *zvtep)
{
- if (is_vxlan_flooding_head_end())
- return kernel_add_vtep(zvni->vni, zvni->vxlan_if, vtep_ip);
+ if (is_vxlan_flooding_head_end() &&
+ (zvtep->flood_control == VXLAN_FLOOD_HEAD_END_REPL))
+ return kernel_add_vtep(zvni->vni, zvni->vxlan_if,
+ &zvtep->vtep_ip);
return 0;
}
for (zvtep = zvni->vteps; zvtep; zvtep = zvtep->next) {
if (is_vxlan_flooding_head_end())
- zvni_vtep_install(zvni, &zvtep->vtep_ip);
+ zvni_vtep_install(zvni, zvtep);
else
zvni_vtep_uninstall(zvni, &zvtep->vtep_ip);
}
*/
zvtep = zvni_vtep_find(zvni, &vtep_ip);
if (!zvtep) {
- if (zvni_vtep_add(zvni, &vtep_ip) == NULL) {
+ zvtep = zvni_vtep_add(zvni, &vtep_ip, VXLAN_FLOOD_DISABLED);
+ if (!zvtep) {
flog_err(
EC_ZEBRA_VTEP_ADD_FAILED,
"Failed to add remote VTEP, VNI %u zvni %p upon remote MACIP ADD",
return;
}
- zvni_vtep_install(zvni, &vtep_ip);
+ zvni_vtep_install(zvni, zvtep);
}
sticky = !!CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_STICKY);
zebra_vni_t *zvni;
struct interface *ifp;
struct zebra_if *zif;
+ int flood_control;
+ zebra_vtep_t *zvtep;
if (!is_evpn_enabled()) {
zlog_debug(
STREAM_GETL(s, vni);
l += 4;
STREAM_GET(&vtep_ip.s_addr, s, IPV4_MAX_BYTELEN);
+ STREAM_GETL(s, flood_control);
l += IPV4_MAX_BYTELEN;
+ /* account for the 4-byte flood_control field just read */
+ l += 4;
if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug("Recv VTEP_ADD %s VNI %u from %s",
- inet_ntoa(vtep_ip), vni,
- zebra_route_string(client->proto));
+ zlog_debug("Recv VTEP_ADD %s VNI %u flood %d from %s",
+ inet_ntoa(vtep_ip), vni, flood_control,
+ zebra_route_string(client->proto));
/* Locate VNI hash entry - expected to exist. */
zvni = zvni_lookup(vni);
if (!if_is_operative(ifp) || !zif->brslave_info.br_if)
continue;
- /* If the remote VTEP already exists,
- there's nothing more to do. */
- if (zvni_vtep_find(zvni, &vtep_ip))
- continue;
-
- if (zvni_vtep_add(zvni, &vtep_ip) == NULL) {
- flog_err(EC_ZEBRA_VTEP_ADD_FAILED,
- "Failed to add remote VTEP, VNI %u zvni %p",
- vni, zvni);
- continue;
+ zvtep = zvni_vtep_find(zvni, &vtep_ip);
+ if (zvtep) {
+ /* If the remote VTEP already exists check if
+ * the flood mode has changed
+ */
+ if (zvtep->flood_control != flood_control) {
+ if (flood_control
+ == VXLAN_FLOOD_DISABLED)
+ /* old mode was head-end-replication but
+ * is no longer; get rid of the HER fdb
+ * entry installed before
+ */
+ zvni_vtep_uninstall(zvni, &vtep_ip);
+ zvtep->flood_control = flood_control;
+ zvni_vtep_install(zvni, zvtep);
+ }
+ } else {
+ zvtep = zvni_vtep_add(zvni, &vtep_ip, flood_control);
+ if (zvtep)
+ zvni_vtep_install(zvni, zvtep);
+ else
+ flog_err(EC_ZEBRA_VTEP_ADD_FAILED,
+ "Failed to add remote VTEP, VNI %u zvni %p",
+ vni, zvni);
}
-
- zvni_vtep_install(zvni, &vtep_ip);
}
stream_failure:
zvni_mac_del_all(zvni, 0, 1, DEL_LOCAL_MAC);
}
- zvni->local_vtep_ip = vxl->vtep_ip;
+ if (zvni->local_vtep_ip.s_addr != vxl->vtep_ip.s_addr ||
+ zvni->mcast_grp.s_addr != vxl->mcast_grp.s_addr) {
+ zebra_vxlan_sg_deref(zvni->local_vtep_ip,
+ zvni->mcast_grp);
+ zebra_vxlan_sg_ref(vxl->vtep_ip, vxl->mcast_grp);
+ zvni->local_vtep_ip = vxl->vtep_ip;
+ zvni->mcast_grp = vxl->mcast_grp;
+ }
zvni->vxlan_if = ifp;
/* Take further actions needed.
/* Inform BGP, if there is a change of interest. */
if (chgflags
- & (ZEBRA_VXLIF_MASTER_CHANGE | ZEBRA_VXLIF_LOCAL_IP_CHANGE))
+ & (ZEBRA_VXLIF_MASTER_CHANGE |
+ ZEBRA_VXLIF_LOCAL_IP_CHANGE |
+ ZEBRA_VXLIF_MCAST_GRP_CHANGE))
zvni_send_add_to_client(zvni);
/* If there is a valid new master or a VLAN mapping change,
}
}
- zvni->local_vtep_ip = vxl->vtep_ip;
+ if (zvni->local_vtep_ip.s_addr != vxl->vtep_ip.s_addr ||
+ zvni->mcast_grp.s_addr != vxl->mcast_grp.s_addr) {
+ zebra_vxlan_sg_deref(zvni->local_vtep_ip,
+ zvni->mcast_grp);
+ zebra_vxlan_sg_ref(vxl->vtep_ip, vxl->mcast_grp);
+ zvni->local_vtep_ip = vxl->vtep_ip;
+ zvni->mcast_grp = vxl->mcast_grp;
+ }
zvni->vxlan_if = ifp;
vlan_if = zvni_map_to_svi(vxl->access_vlan,
zif->brslave_info.br_if);
listnode_add_sort(zl3vni->l2vnis, zvni);
}
- if (IS_ZEBRA_DEBUG_VXLAN)
+ if (IS_ZEBRA_DEBUG_VXLAN) {
+ char addr_buf1[INET_ADDRSTRLEN];
+ char addr_buf2[INET_ADDRSTRLEN];
+
+ inet_ntop(AF_INET, &vxl->vtep_ip,
+ addr_buf1, INET_ADDRSTRLEN);
+ inet_ntop(AF_INET, &vxl->mcast_grp,
+ addr_buf2, INET_ADDRSTRLEN);
+
zlog_debug(
- "Add L2-VNI %u VRF %s intf %s(%u) VLAN %u local IP %s master %u",
+ "Add L2-VNI %u VRF %s intf %s(%u) VLAN %u local IP %s mcast_grp %s master %u",
vni,
vlan_if ? vrf_id_to_name(vlan_if->vrf_id)
: VRF_DEFAULT_NAME,
ifp->name, ifp->ifindex, vxl->access_vlan,
- inet_ntoa(vxl->vtep_ip),
+ addr_buf1, addr_buf2,
zif->brslave_info.bridge_ifindex);
+ }
/* If down or not mapped to a bridge, we're done. */
if (!if_is_operative(ifp) || !zif->brslave_info.br_if)
return -1;
}
+ if (zvrf->l3vni != vni) {
+ snprintf(err, err_str_sz,
+ "VNI %d doesn't exist in VRF: %s",
+ vni, zvrf->vrf->name);
+ return -1;
+ }
+
if (filter && !CHECK_FLAG(zl3vni->filter, PREFIX_ROUTES_ONLY)) {
snprintf(err, ERR_STR_SZ,
"prefix-routes-only is not set for the vni");
return;
zvrf->vni_table = hash_create(vni_hash_keymake, vni_hash_cmp,
"Zebra VRF VNI Table");
+ zvrf->vxlan_sg_table = hash_create(zebra_vxlan_sg_hash_key_make,
+ zebra_vxlan_sg_hash_eq, "Zebra VxLAN SG Table");
}
/* Cleanup VNI info, but don't free the table. */
if (!zvrf)
return;
hash_iterate(zvrf->vni_table, zvni_cleanup_all, zvrf);
+ hash_iterate(zvrf->vxlan_sg_table, zebra_vxlan_sg_cleanup, NULL);
}
/* Close all VNI handling */
return 0;
}
+
+/************************** vxlan SG cache management ************************/
+/* Inform PIM about the mcast group */
+static int zebra_vxlan_sg_send(struct prefix_sg *sg,
+ char *sg_str, uint16_t cmd)
+{
+ struct zserv *client = NULL;
+ struct stream *s = NULL;
+
+ client = zserv_find_client(ZEBRA_ROUTE_PIM, 0);
+ if (!client)
+ return 0;
+
+ s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+ zclient_create_header(s, cmd, VRF_DEFAULT);
+ stream_putl(s, IPV4_MAX_BYTELEN);
+ stream_put(s, &sg->src.s_addr, IPV4_MAX_BYTELEN);
+ stream_put(s, &sg->grp.s_addr, IPV4_MAX_BYTELEN);
+
+ /* Write packet size. */
+ stream_putw_at(s, 0, stream_get_endp(s));
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug(
+ "Send %s %s to %s",
+ (cmd == ZEBRA_VXLAN_SG_ADD) ? "add" : "del", sg_str,
+ zebra_route_string(client->proto));
+
+ if (cmd == ZEBRA_VXLAN_SG_ADD)
+ client->vxlan_sg_add_cnt++;
+ else
+ client->vxlan_sg_del_cnt++;
+
+ return zserv_send_message(client, s);
+}
+
+static unsigned int zebra_vxlan_sg_hash_key_make(void *p)
+{
+ zebra_vxlan_sg_t *vxlan_sg = p;
+
+ return (jhash_2words(vxlan_sg->sg.src.s_addr,
+ vxlan_sg->sg.grp.s_addr, 0));
+}
+
+static bool zebra_vxlan_sg_hash_eq(const void *p1, const void *p2)
+{
+ const zebra_vxlan_sg_t *sg1 = p1;
+ const zebra_vxlan_sg_t *sg2 = p2;
+
+ return ((sg1->sg.src.s_addr == sg2->sg.src.s_addr)
+ && (sg1->sg.grp.s_addr == sg2->sg.grp.s_addr));
+}
+
+static zebra_vxlan_sg_t *zebra_vxlan_sg_new(struct zebra_vrf *zvrf,
+ struct prefix_sg *sg)
+{
+ zebra_vxlan_sg_t *vxlan_sg;
+
+ vxlan_sg = XCALLOC(MTYPE_ZVXLAN_SG, sizeof(*vxlan_sg));
+
+ vxlan_sg->zvrf = zvrf;
+ vxlan_sg->sg = *sg;
+ prefix_sg2str(sg, vxlan_sg->sg_str);
+
+ vxlan_sg = hash_get(zvrf->vxlan_sg_table, vxlan_sg, hash_alloc_intern);
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("vxlan SG %s created", vxlan_sg->sg_str);
+
+ return vxlan_sg;
+}
+
+static zebra_vxlan_sg_t *zebra_vxlan_sg_find(struct zebra_vrf *zvrf,
+ struct prefix_sg *sg)
+{
+ zebra_vxlan_sg_t lookup;
+
+ lookup.sg = *sg;
+ return hash_lookup(zvrf->vxlan_sg_table, &lookup);
+}
+
+static zebra_vxlan_sg_t *zebra_vxlan_sg_add(struct zebra_vrf *zvrf,
+ struct prefix_sg *sg)
+{
+ zebra_vxlan_sg_t *vxlan_sg;
+ zebra_vxlan_sg_t *parent = NULL;
+ struct in_addr sip;
+
+ vxlan_sg = zebra_vxlan_sg_find(zvrf, sg);
+ if (vxlan_sg)
+ return vxlan_sg;
+
+ /* create a *G entry for every BUM group implicitly -
+ * 1. The SG entry is used by pimd to setup the vxlan-origination-mroute
+ * 2. the XG entry is used by pimd to setup the
+ * vxlan-termination-mroute
+ */
+ if (sg->src.s_addr) {
+ memset(&sip, 0, sizeof(sip));
+ parent = zebra_vxlan_sg_do_ref(zvrf, sip, sg->grp);
+ if (!parent)
+ return NULL;
+ }
+
+ vxlan_sg = zebra_vxlan_sg_new(zvrf, sg);
+ if (!vxlan_sg) {
+ if (parent)
+ zebra_vxlan_sg_do_deref(zvrf, sip, sg->grp);
+ return vxlan_sg;
+ }
+
+ zebra_vxlan_sg_send(sg, vxlan_sg->sg_str, ZEBRA_VXLAN_SG_ADD);
+
+ return vxlan_sg;
+}
+
+static void zebra_vxlan_sg_del(zebra_vxlan_sg_t *vxlan_sg)
+{
+ struct in_addr sip;
+ struct zebra_vrf *zvrf;
+
+ zvrf = vrf_info_lookup(VRF_DEFAULT);
+ if (!zvrf)
+ return;
+
+ /* On SG entry deletion remove the reference to its parent XG
+ * entry
+ */
+ if (vxlan_sg->sg.src.s_addr) {
+ memset(&sip, 0, sizeof(sip));
+ zebra_vxlan_sg_do_deref(zvrf, sip, vxlan_sg->sg.grp);
+ }
+
+ zebra_vxlan_sg_send(&vxlan_sg->sg, vxlan_sg->sg_str,
+ ZEBRA_VXLAN_SG_DEL);
+
+ hash_release(vxlan_sg->zvrf->vxlan_sg_table, vxlan_sg);
+
+ if (IS_ZEBRA_DEBUG_VXLAN)
+ zlog_debug("VXLAN SG %s deleted", vxlan_sg->sg_str);
+
+ XFREE(MTYPE_ZVXLAN_SG, vxlan_sg);
+}
+
+static void zebra_vxlan_sg_do_deref(struct zebra_vrf *zvrf,
+ struct in_addr sip, struct in_addr mcast_grp)
+{
+ zebra_vxlan_sg_t *vxlan_sg;
+ struct prefix_sg sg;
+
+ sg.family = AF_INET;
+ sg.prefixlen = IPV4_MAX_BYTELEN;
+ sg.src = sip;
+ sg.grp = mcast_grp;
+ vxlan_sg = zebra_vxlan_sg_find(zvrf, &sg);
+ if (!vxlan_sg)
+ return;
+
+ if (vxlan_sg->ref_cnt)
+ --vxlan_sg->ref_cnt;
+
+ if (!vxlan_sg->ref_cnt)
+ zebra_vxlan_sg_del(vxlan_sg);
+}
+
+static zebra_vxlan_sg_t *zebra_vxlan_sg_do_ref(struct zebra_vrf *zvrf,
+ struct in_addr sip, struct in_addr mcast_grp)
+{
+ zebra_vxlan_sg_t *vxlan_sg;
+ struct prefix_sg sg;
+
+ sg.family = AF_INET;
+ sg.prefixlen = IPV4_MAX_BYTELEN;
+ sg.src = sip;
+ sg.grp = mcast_grp;
+ vxlan_sg = zebra_vxlan_sg_add(zvrf, &sg);
+ if (vxlan_sg)
+ ++vxlan_sg->ref_cnt;
+
+ return vxlan_sg;
+}
+
+static void zebra_vxlan_sg_deref(struct in_addr local_vtep_ip,
+ struct in_addr mcast_grp)
+{
+ struct zebra_vrf *zvrf;
+
+ if (!local_vtep_ip.s_addr || !mcast_grp.s_addr)
+ return;
+
+ zvrf = vrf_info_lookup(VRF_DEFAULT);
+ if (!zvrf)
+ return;
+
+ zebra_vxlan_sg_do_deref(zvrf, local_vtep_ip, mcast_grp);
+}
+
+static void zebra_vxlan_sg_ref(struct in_addr local_vtep_ip,
+ struct in_addr mcast_grp)
+{
+ struct zebra_vrf *zvrf;
+
+ if (!local_vtep_ip.s_addr || !mcast_grp.s_addr)
+ return;
+
+ zvrf = vrf_info_lookup(VRF_DEFAULT);
+ if (!zvrf)
+ return;
+ zebra_vxlan_sg_do_ref(zvrf, local_vtep_ip, mcast_grp);
+}
+
+static void zebra_vxlan_sg_cleanup(struct hash_backet *backet, void *arg)
+{
+ zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)backet->data;
+
+ zebra_vxlan_sg_del(vxlan_sg);
+}
}
/* VxLAN interface change flags of interest. */
-#define ZEBRA_VXLIF_LOCAL_IP_CHANGE 0x1
-#define ZEBRA_VXLIF_MASTER_CHANGE 0x2
-#define ZEBRA_VXLIF_VLAN_CHANGE 0x4
+#define ZEBRA_VXLIF_LOCAL_IP_CHANGE (1 << 0)
+#define ZEBRA_VXLIF_MASTER_CHANGE (1 << 1)
+#define ZEBRA_VXLIF_VLAN_CHANGE (1 << 2)
+#define ZEBRA_VXLIF_MCAST_GRP_CHANGE (1 << 3)
+
#define VNI_STR_LEN 32
/* Remote IP. */
/* NOTE: Can only be IPv4 right now. */
struct in_addr vtep_ip;
+ /* Flood mode (one of enum vxlan_flood_control) based on the PMSI
+ * tunnel type advertised by the remote VTEP
+ */
+ int flood_control;
/* Links. */
struct zebra_vtep_t_ *next;
/* Local IP */
struct in_addr local_vtep_ip;
+ /* PIM-SM MDT group for BUM flooding */
+ struct in_addr mcast_grp;
+
/* tenant VRF, if any */
vrf_id_t vrf_id;
}
#endif
+/*
+ * Multicast hash table.
+ *
+ * This table contains -
+ * 1. The (S, G) entries used for encapsulating and forwarding BUM traffic.
+ * S is the local VTEP-IP and G is a BUM mcast group address.
+ * 2. The (X, G) entries used for terminating a BUM flow.
+ * Multiple L2-VNIs can share the same MDT hence the need to maintain
+ * an aggregated table that pimd can consume without much
+ * re-interpretation.
+ */
+typedef struct zebra_vxlan_sg_ {
+ struct zebra_vrf *zvrf;
+
+ struct prefix_sg sg;
+ char sg_str[PREFIX_SG_STR_LEN];
+
+ /* For SG - num of L2 VNIs using this entry for sending BUM traffic */
+ /* For XG - num of SG using this as parent */
+ uint32_t ref_cnt;
+} zebra_vxlan_sg_t;
+
#endif /* _ZEBRA_VXLAN_PRIVATE_H */
client->v4_nh_watch_add_cnt, 0, client->v4_nh_watch_rem_cnt);
vty_out(vty, "NHT v6 %-12d%-12d%-12d\n",
client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt);
+ vty_out(vty, "VxLAN SG %-12d%-12d%-12d\n", client->vxlan_sg_add_cnt,
+ 0, client->vxlan_sg_del_cnt);
vty_out(vty, "Interface Up Notifications: %d\n", client->ifup_cnt);
vty_out(vty, "Interface Down Notifications: %d\n", client->ifdown_cnt);
vty_out(vty, "VNI add notifications: %d\n", client->vniadd_cnt);
uint32_t v4_nh_watch_rem_cnt;
uint32_t v6_nh_watch_add_cnt;
uint32_t v6_nh_watch_rem_cnt;
+ uint32_t vxlan_sg_add_cnt;
+ uint32_t vxlan_sg_del_cnt;
time_t nh_reg_time;
time_t nh_dereg_time;