EXTRA_DIST += \
aclocal.m4 \
- update-autotools \
m4/README.txt \
\
python/clidef.py \
endif
bgpd_snmp_la_SOURCES = bgp_snmp.c
-bgpd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS)
+bgpd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
bgpd_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
bgpd_snmp_la_LIBADD = ../lib/libfrrsnmp.la
* not right.
* So do the checks later, i.e. here
*/
- struct bgp *bgp = peer->bgp;
struct aspath *aspath;
/* Confederation sanity check. */
}
/* First AS check for EBGP. */
- if (bgp != NULL && bgp_flag_check(bgp, BGP_FLAG_ENFORCE_FIRST_AS)) {
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS)) {
if (peer->sort == BGP_PEER_EBGP
&& !aspath_firstas_check(attr->aspath, peer->as)) {
zlog_err("%s incorrect first AS (must be %u)",
are present, it should. Check for any other attribute being present
instead.
*/
- if (attr->flag == ATTR_FLAG_BIT(BGP_ATTR_MP_UNREACH_NLRI))
+ if ((!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_REACH_NLRI)) &&
+ CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_MP_UNREACH_NLRI))))
return BGP_ATTR_PARSE_PROCEED;
if (!CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGIN)))
/* debug bgp pbr */
DEFUN (debug_bgp_pbr,
debug_bgp_pbr_cmd,
- "debug bgp pbr",
+ "debug bgp pbr [error]",
DEBUG_STR
BGP_STR
- "BGP policy based routing\n")
+ "BGP policy based routing\n"
+ "BGP PBR error\n")
{
+ int idx = 3;
+
+ if (argv_find(argv, argc, "error", &idx)) {
+ if (vty->node == CONFIG_NODE)
+ DEBUG_ON(pbr, PBR_ERROR);
+ else {
+ TERM_DEBUG_ON(pbr, PBR_ERROR);
+ vty_out(vty, "BGP policy based routing error is on\n");
+ }
+ return CMD_SUCCESS;
+ }
if (vty->node == CONFIG_NODE)
DEBUG_ON(pbr, PBR);
else {
DEFUN (no_debug_bgp_pbr,
no_debug_bgp_pbr_cmd,
- "no debug bgp pbr",
+ "no debug bgp pbr [error]",
NO_STR
DEBUG_STR
BGP_STR
- "BGP policy based routing\n")
+ "BGP policy based routing\n"
+ "BGP PBR Error\n")
{
+ int idx = 3;
+
+ if (argv_find(argv, argc, "error", &idx)) {
+ if (vty->node == CONFIG_NODE)
+ DEBUG_OFF(pbr, PBR_ERROR);
+ else {
+ TERM_DEBUG_OFF(pbr, PBR_ERROR);
+ vty_out(vty, "BGP policy based routing error is off\n");
+ }
+ return CMD_SUCCESS;
+ }
if (vty->node == CONFIG_NODE)
DEBUG_OFF(pbr, PBR);
else {
TERM_DEBUG_OFF(flowspec, FLOWSPEC);
TERM_DEBUG_OFF(labelpool, LABELPOOL);
TERM_DEBUG_OFF(pbr, PBR);
+ TERM_DEBUG_OFF(pbr, PBR_ERROR);
vty_out(vty, "All possible debugging has been turned off\n");
return CMD_SUCCESS;
if (BGP_DEBUG(pbr, PBR))
vty_out(vty, " BGP policy based routing debugging is on\n");
+ if (BGP_DEBUG(pbr, PBR_ERROR))
+ vty_out(vty, " BGP policy based routing error debugging is on\n");
vty_out(vty, "\n");
return CMD_SUCCESS;
if (BGP_DEBUG(pbr, PBR))
ret++;
+ if (BGP_DEBUG(pbr, PBR_ERROR))
+ ret++;
return ret;
}
vty_out(vty, "debug bgp pbr\n");
write++;
}
+ if (CONF_BGP_DEBUG(pbr, PBR_ERROR)) {
+ vty_out(vty, "debug bgp pbr error\n");
+ write++;
+ }
return write;
}
{
int i;
uint8_t *pnt;
- int type = 0;
- int sub_type = 0;
-#define ECOMMUNITY_STR_DEFAULT_LEN 27
+ uint8_t type = 0;
+ uint8_t sub_type = 0;
+#define ECOMMUNITY_STR_DEFAULT_LEN 64
int str_size;
int str_pnt;
char *str_buf;
"FS:redirect IP 0x%x", *(pnt+5));
} else
unk_ecom = 1;
- } else if (type == ECOMMUNITY_ENCODE_TRANS_EXP) {
+ } else if (type == ECOMMUNITY_ENCODE_TRANS_EXP ||
+ type == ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 ||
+ type == ECOMMUNITY_EXTENDED_COMMUNITY_PART_3) {
sub_type = *pnt++;
+ if (sub_type == ECOMMUNITY_REDIRECT_VRF) {
+ char buf[16];
- if (sub_type == ECOMMUNITY_TRAFFIC_ACTION) {
+ memset(buf, 0, sizeof(buf));
+ ecommunity_rt_soo_str(buf, (uint8_t *)pnt,
+ type &
+ ~ECOMMUNITY_ENCODE_TRANS_EXP,
+ ECOMMUNITY_ROUTE_TARGET,
+ ECOMMUNITY_FORMAT_DISPLAY);
+ len = snprintf(str_buf + str_pnt,
+ str_size - len,
+ "FS:redirect VRF %s", buf);
+ } else if (type != ECOMMUNITY_ENCODE_TRANS_EXP)
+ unk_ecom = 1;
+ else if (sub_type == ECOMMUNITY_TRAFFIC_ACTION) {
char action[64];
char *ptr = action;
len = sprintf(
str_buf + str_pnt,
"FS:rate %f", data.rate_float);
- } else if (sub_type == ECOMMUNITY_REDIRECT_VRF) {
- char buf[16];
-
- memset(buf, 0, sizeof(buf));
- ecommunity_rt_soo_str(buf, (uint8_t *)pnt,
- type &
- ~ECOMMUNITY_ENCODE_TRANS_EXP,
- ECOMMUNITY_ROUTE_TARGET,
- ECOMMUNITY_FORMAT_DISPLAY);
- len = snprintf(
- str_buf + str_pnt,
- str_size - len,
- "FS:redirect VRF %s", buf);
} else if (sub_type == ECOMMUNITY_TRAFFIC_MARKING) {
len = sprintf(
str_buf + str_pnt,
"FS:marking %u", *(pnt+5));
+ } else if (*pnt
+ == ECOMMUNITY_EVPN_SUBTYPE_ES_IMPORT_RT) {
+ struct ethaddr mac;
+
+ pnt++;
+ memcpy(&mac, pnt, ETH_ALEN);
+ len = sprintf(
+ str_buf + str_pnt,
+ "ES-Import-Rt:%02x:%02x:%02x:%02x:%02x:%02x",
+ (uint8_t)mac.octet[0],
+ (uint8_t)mac.octet[1],
+ (uint8_t)mac.octet[2],
+ (uint8_t)mac.octet[3],
+ (uint8_t)mac.octet[4],
+ (uint8_t)mac.octet[5]);
} else
unk_ecom = 1;
- } else
+ } else {
+ sub_type = *pnt++;
unk_ecom = 1;
+ }
if (unk_ecom)
- len = sprintf(str_buf + str_pnt, "?");
+ len = sprintf(str_buf + str_pnt, "UNK:%d, %d",
+ type, sub_type);
str_pnt += len;
first = 0;
extern struct zclient *zclient;
DEFINE_QOBJ_TYPE(bgpevpn)
+DEFINE_QOBJ_TYPE(evpnes)
/*
* Static function declarations
*/
-static void delete_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
- afi_t afi, safi_t safi, struct bgp_node *rn,
+static void delete_evpn_route_entry(struct bgp *bgp,
+ afi_t afi, safi_t safi,
+ struct bgp_node *rn,
struct bgp_info **ri);
static int delete_all_vni_routes(struct bgp *bgp, struct bgpevpn *vpn);
* Private functions.
*/
+/* Compare two IPv4 VTEP IPs for sorted-list insertion.
+ *
+ * This is registered as the cmp callback used by listnode_add_sort()
+ * on es->vtep_list, so it must follow the linked-list comparator
+ * convention (<0 / 0 / >0), NOT the hash-table boolean convention.
+ * The previous version returned 1 for "equal" and 0 otherwise, which
+ * corrupts the sort order on insertion.
+ */
+static int evpn_vtep_ip_cmp(const void *p1, const void *p2)
+{
+	const struct in_addr *ip1 = p1;
+	const struct in_addr *ip2 = p2;
+
+	/* A NULL entry sorts before any address; two NULLs are equal. */
+	if (!ip1 && !ip2)
+		return 0;
+	if (!ip1)
+		return -1;
+	if (!ip2)
+		return 1;
+	/* Compare raw (network-order) addresses; only a total order is
+	 * needed, not numeric host-order ordering.
+	 */
+	if (ip1->s_addr < ip2->s_addr)
+		return -1;
+	if (ip1->s_addr > ip2->s_addr)
+		return 1;
+	return 0;
+}
+
+/*
+ * Make hash key for ESI.
+ */
+static unsigned int esi_hash_keymake(void *p)
+{
+	struct evpnes *pes = p;
+	const void *pnt = (void *)pes->esi.val;
+
+	/* Hash the raw ESI bytes; 0xa5a5a55a is an arbitrary fixed seed. */
+	return jhash(pnt, ESI_BYTES, 0xa5a5a55a);
+}
+
+/*
+ * Compare two ESIs.
+ *
+ * Hash-table equality convention: returns 1 when the two ESIs are
+ * byte-for-byte equal, 0 otherwise (unlike a sort comparator).
+ */
+static int esi_cmp(const void *p1, const void *p2)
+{
+	const struct evpnes *pes1 = p1;
+	const struct evpnes *pes2 = p2;
+
+	/* Two NULL entries compare equal; one NULL does not. */
+	if (pes1 == NULL && pes2 == NULL)
+		return 1;
+
+	if (pes1 == NULL || pes2 == NULL)
+		return 0;
+
+	return (memcmp(pes1->esi.val, pes2->esi.val, ESI_BYTES) == 0);
+}
+
/*
* Make vni hash key.
*/
}
hash_release(bgp_def->vrf_import_rt_hash, irt);
+ list_delete_and_null(&irt->vrfs);
XFREE(MTYPE_BGP_EVPN_VRF_IMPORT_RT, irt);
}
static void import_rt_free(struct bgp *bgp, struct irt_node *irt)
{
hash_release(bgp->import_rt_hash, irt);
+ list_delete_and_null(&irt->vnis);
XFREE(MTYPE_BGP_EVPN_IMPORT_RT, irt);
}
/* Delete VRF from list for this RT. */
listnode_delete(irt->vrfs, bgp_vrf);
if (!listnode_head(irt->vrfs)) {
- list_delete_and_null(&irt->vrfs);
vrf_import_rt_free(irt);
}
}
mask_ecom_global_admin(&eval_tmp, eval);
irt = lookup_import_rt(bgp, &eval_tmp);
- if (irt && irt->vnis)
+ if (irt)
if (is_vni_present_in_irt_vnis(irt->vnis, vpn))
/* Already mapped. */
return;
/* Delete VNI from hash list for this RT. */
listnode_delete(irt->vnis, vpn);
if (!listnode_head(irt->vnis)) {
- list_delete_and_null(&irt->vnis);
import_rt_free(bgp, irt);
}
}
return zclient_send_message(zclient);
}
+/*
+ * Build extended community for EVPN ES (type-4) route.
+ *
+ * Attaches two extended communities to 'attr':
+ *  - a VXLAN encapsulation ecommunity, and
+ *  - an ES-Import Route Target derived from the system MAC embedded
+ *    in the ESI (RFC 7432 section 7.6).
+ * The stack-local ecommunity structs are copied by ecommunity_dup()/
+ * ecommunity_merge(), so they need no cleanup here.
+ */
+static void build_evpn_type4_route_extcomm(struct evpnes *es,
+					   struct attr *attr)
+{
+	struct ecommunity ecom_encap;
+	struct ecommunity ecom_es_rt;
+	struct ecommunity_val eval;
+	struct ecommunity_val eval_es_rt;
+	bgp_encap_types tnl_type;
+	struct ethaddr mac;
+
+	/* Encap */
+	tnl_type = BGP_ENCAP_TYPE_VXLAN;
+	memset(&ecom_encap, 0, sizeof(ecom_encap));
+	encode_encap_extcomm(tnl_type, &eval);
+	ecom_encap.size = 1;
+	ecom_encap.val = (uint8_t *)eval.val;
+	attr->ecommunity = ecommunity_dup(&ecom_encap);
+
+	/* ES import RT: system MAC is extracted from the ESI value. */
+	memset(&mac, 0, sizeof(struct ethaddr));
+	memset(&ecom_es_rt, 0, sizeof(ecom_es_rt));
+	es_get_system_mac(&es->esi, &mac);
+	encode_es_rt_extcomm(&eval_es_rt, &mac);
+	ecom_es_rt.size = 1;
+	ecom_es_rt.val = (uint8_t *)eval_es_rt.val;
+	attr->ecommunity =
+		ecommunity_merge(attr->ecommunity, &ecom_es_rt);
+
+	/* Mark the attribute as carrying extended communities. */
+	attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
+}
+
/*
* Build extended communities for EVPN prefix route.
*/
/* Add the export RTs for L3VNI/VRF */
vrf_export_rtl = bgp_vrf->vrf_export_rtl;
- if (vrf_export_rtl && !list_isempty(vrf_export_rtl)) {
- for (ALL_LIST_ELEMENTS(vrf_export_rtl, node, nnode, ecom))
- attr->ecommunity =
- ecommunity_merge(attr->ecommunity, ecom);
- }
+ for (ALL_LIST_ELEMENTS(vrf_export_rtl, node, nnode, ecom))
+ attr->ecommunity =
+ ecommunity_merge(attr->ecommunity, ecom);
/* add the router mac extended community */
if (!is_zero_mac(&attr->rmac)) {
ecom_tmp.size = 1;
ecom_tmp.val = (uint8_t *)eval.val;
- attr->ecommunity =
- ecommunity_merge(attr->ecommunity, &ecom_tmp);
+ if (attr->ecommunity)
+ attr->ecommunity =
+ ecommunity_merge(attr->ecommunity, &ecom_tmp);
+ else
+ attr->ecommunity = ecommunity_dup(&ecom_tmp);
}
}
(struct prefix *)&rn->p, &vpn->prd);
if (global_rn) {
/* Delete route entry in the global EVPN table. */
- delete_evpn_route_entry(bgp, vpn, afi, safi, global_rn, &ri);
+ delete_evpn_route_entry(bgp, afi, safi, global_rn, &ri);
/* Schedule for processing - withdraws to peers happen from
* this table.
bgp_info_delete(rn, old_local);
}
+/* Allocate a heap copy of a VTEP IP for storage on an ES VTEP list.
+ * Caller owns the returned pointer (freed via es_vtep_free()).
+ */
+static struct in_addr *es_vtep_new(struct in_addr vtep)
+{
+	struct in_addr *ip;
+
+	ip = XCALLOC(MTYPE_BGP_EVPN_ES_VTEP, sizeof(struct in_addr));
+	if (!ip)
+		return NULL;
+
+	ip->s_addr = vtep.s_addr;
+	return ip;
+}
+
+/* Release a VTEP IP allocated by es_vtep_new(). */
+static void es_vtep_free(struct in_addr *ip)
+{
+	XFREE(MTYPE_BGP_EVPN_ES_VTEP, ip);
+}
+
+/* check if VTEP is already part of the list.
+ * Linear scan; returns 1 if an entry with the same address exists,
+ * 0 otherwise.
+ */
+static int is_vtep_present_in_list(struct list *list,
+				   struct in_addr vtep)
+{
+	struct listnode *node = NULL;
+	struct in_addr *tmp;
+
+	for (ALL_LIST_ELEMENTS_RO(list, node, tmp)) {
+		if (tmp->s_addr == vtep.s_addr)
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Best path for ES route was changed,
+ * update the list of VTEPs for this ES.
+ *
+ * Adds 'rvtep' to es->vtep_list (sorted insert) unless it is already
+ * present; duplicates are suppressed here. Always returns 0.
+ */
+static int evpn_es_install_vtep(struct bgp *bgp,
+				struct evpnes *es,
+				struct prefix_evpn *p,
+				struct in_addr rvtep)
+{
+	struct in_addr *vtep_ip;
+
+	if (is_vtep_present_in_list(es->vtep_list, rvtep))
+		return 0;
+
+
+	vtep_ip = es_vtep_new(rvtep);
+	if (vtep_ip)
+		listnode_add_sort(es->vtep_list, vtep_ip);
+	return 0;
+}
+
+/*
+ * Best path for ES route was changed,
+ * update the list of VTEPs for this ES.
+ *
+ * Removes the entry for 'rvtep' from es->vtep_list. Always returns 0.
+ * NOTE(review): if the list ever contained duplicates, only the last
+ * matching node would be unlinked while earlier matches are freed —
+ * safe today because evpn_es_install_vtep() suppresses duplicates.
+ */
+static int evpn_es_uninstall_vtep(struct bgp *bgp,
+				  struct evpnes *es,
+				  struct prefix_evpn *p,
+				  struct in_addr rvtep)
+{
+	struct listnode *node, *nnode, *node_to_del = NULL;
+	struct in_addr *tmp;
+
+	for (ALL_LIST_ELEMENTS(es->vtep_list, node, nnode, tmp)) {
+		if (tmp->s_addr == rvtep.s_addr) {
+			es_vtep_free(tmp);
+			node_to_del = node;
+		}
+	}
+
+	if (node_to_del)
+		list_delete_node(es->vtep_list, node_to_del);
+
+	return 0;
+}
+
+/*
+ * Calculate the best path for a ES(type-4) route.
+ *
+ * Runs bgp_best_selection() on the ES-table node and reflects the
+ * outcome onto the ES VTEP list: the nexthop of an imported best path
+ * is installed as a VTEP, and a displaced imported best path has its
+ * VTEP uninstalled. Mirrors the structure of the per-VNI route
+ * selection path.
+ */
+static int evpn_es_route_select_install(struct bgp *bgp,
+					struct evpnes *es,
+					struct bgp_node *rn)
+{
+	int ret = 0;
+	afi_t afi = AFI_L2VPN;
+	safi_t safi = SAFI_EVPN;
+	struct bgp_info *old_select; /* old best */
+	struct bgp_info *new_select; /* new best */
+	struct bgp_info_pair old_and_new;
+
+	/* Compute the best path. */
+	bgp_best_selection(bgp, rn, &bgp->maxpaths[afi][safi],
+			   &old_and_new, afi, safi);
+	old_select = old_and_new.old;
+	new_select = old_and_new.new;
+
+	/*
+	 * If the best path hasn't changed - see if something needs to be
+	 * updated
+	 */
+	if (old_select && old_select == new_select
+	    && old_select->type == ZEBRA_ROUTE_BGP
+	    && old_select->sub_type == BGP_ROUTE_IMPORTED
+	    && !CHECK_FLAG(rn->flags, BGP_NODE_USER_CLEAR)
+	    && !CHECK_FLAG(old_select->flags, BGP_INFO_ATTR_CHANGED)
+	    && !bgp->addpath_tx_used[afi][safi]) {
+		if (bgp_zebra_has_route_changed(rn, old_select)) {
+			ret = evpn_es_install_vtep(bgp, es,
+						   (struct prefix_evpn *)&rn->p,
+						   old_select->attr->nexthop);
+		}
+		UNSET_FLAG(old_select->flags, BGP_INFO_MULTIPATH_CHG);
+		bgp_zebra_clear_route_change_flags(rn);
+		return ret;
+	}
+
+	/* If the user did a "clear" this flag will be set */
+	UNSET_FLAG(rn->flags, BGP_NODE_USER_CLEAR);
+
+	/*
+	 * bestpath has changed; update relevant fields and install or uninstall
+	 * into the zebra RIB.
+	 */
+	if (old_select || new_select)
+		bgp_bump_version(rn);
+
+	if (old_select)
+		bgp_info_unset_flag(rn, old_select, BGP_INFO_SELECTED);
+	if (new_select) {
+		bgp_info_set_flag(rn, new_select, BGP_INFO_SELECTED);
+		bgp_info_unset_flag(rn, new_select, BGP_INFO_ATTR_CHANGED);
+		UNSET_FLAG(new_select->flags, BGP_INFO_MULTIPATH_CHG);
+	}
+
+	/* Only imported (remote) paths drive VTEP list membership. */
+	if (new_select && new_select->type == ZEBRA_ROUTE_BGP
+	    && new_select->sub_type == BGP_ROUTE_IMPORTED) {
+		ret = evpn_es_install_vtep(bgp, es,
+					   (struct prefix_evpn *)&rn->p,
+					   new_select->attr->nexthop);
+	} else {
+		if (old_select && old_select->type == ZEBRA_ROUTE_BGP
+		    && old_select->sub_type == BGP_ROUTE_IMPORTED)
+			ret = evpn_es_uninstall_vtep(
+				bgp, es, (struct prefix_evpn *)&rn->p,
+				old_select->attr->nexthop);
+	}
+
+	/* Clear any route change flags. */
+	bgp_zebra_clear_route_change_flags(rn);
+
+	/* Reap old select bgp_info, if it has been removed */
+	if (old_select && CHECK_FLAG(old_select->flags, BGP_INFO_REMOVED))
+		bgp_info_reap(rn, old_select);
+
+	return ret;
+}
+
/*
* Calculate the best path for an EVPN route. Install/update best path in zebra,
* if appropriate.
return local_ri->attr->sticky;
}
+/*
+ * create or update EVPN type4 route entry.
+ * This could be in the ES table or the global table.
+ * TODO: handle remote ES (type4) routes as well
+ *
+ * On success returns 0 with *ri pointing at the local (self-originated,
+ * BGP_ROUTE_STATIC) entry and *route_changed set to 1 when the entry is
+ * new or its attribute changed. Returns -1 if the same (ESI, VTEP) key
+ * was unexpectedly learnt from a remote peer, leaving *ri NULL.
+ */
+static int update_evpn_type4_route_entry(struct bgp *bgp,
+					 struct evpnes *es,
+					 afi_t afi, safi_t safi,
+					 struct bgp_node *rn,
+					 struct attr *attr,
+					 int add,
+					 struct bgp_info **ri,
+					 int *route_changed)
+{
+	char buf[ESI_STR_LEN];
+	char buf1[INET6_ADDRSTRLEN];
+	struct bgp_info *tmp_ri = NULL;
+	struct bgp_info *local_ri = NULL; /* local route entry if any */
+	struct bgp_info *remote_ri = NULL; /* remote route entry if any */
+	struct attr *attr_new = NULL;
+	struct prefix_evpn *evp = NULL;
+
+	*ri = NULL;
+	*route_changed = 1;
+	evp = (struct prefix_evpn *)&rn->p;
+
+	/* locate the local and remote entries if any */
+	for (tmp_ri = rn->info; tmp_ri; tmp_ri = tmp_ri->next) {
+		if (tmp_ri->peer == bgp->peer_self &&
+		    tmp_ri->type == ZEBRA_ROUTE_BGP &&
+		    tmp_ri->sub_type == BGP_ROUTE_STATIC)
+			local_ri = tmp_ri;
+		if (tmp_ri->type == ZEBRA_ROUTE_BGP &&
+		    tmp_ri->sub_type == BGP_ROUTE_IMPORTED &&
+		    CHECK_FLAG(tmp_ri->flags, BGP_INFO_VALID))
+			remote_ri = tmp_ri;
+	}
+
+	/* we don't expect to see a remote_ri at this point.
+	 * An ES route has esi + vtep_ip as the key,
+	 * We shouldn't see the same route from any other vtep.
+	 */
+	if (remote_ri) {
+		zlog_err(
+			"%u ERROR: local es route for ESI: %s Vtep %s also learnt from remote",
+			bgp->vrf_id,
+			esi_to_str(&evp->prefix.es_addr.esi, buf, sizeof(buf)),
+			ipaddr2str(&es->originator_ip, buf1, sizeof(buf1)));
+		return -1;
+	}
+
+	/* Nothing to delete/refresh if no local entry exists. */
+	if (!local_ri && !add)
+		return 0;
+
+	/* create or update the entry */
+	if (!local_ri) {
+
+		/* Add or update attribute to hash */
+		attr_new = bgp_attr_intern(attr);
+
+		/* Create new route with its attribute. */
+		tmp_ri = info_make(ZEBRA_ROUTE_BGP, BGP_ROUTE_STATIC,
+				   0, bgp->peer_self, attr_new, rn);
+		SET_FLAG(tmp_ri->flags, BGP_INFO_VALID);
+
+		/* add the newly created path to the route-node */
+		bgp_info_add(rn, tmp_ri);
+	} else {
+		tmp_ri = local_ri;
+		if (attrhash_cmp(tmp_ri->attr, attr)
+		    && !CHECK_FLAG(tmp_ri->flags, BGP_INFO_REMOVED))
+			*route_changed = 0;
+		else {
+			/* The attribute has changed.
+			 * Add (or update) attribute to hash. */
+			attr_new = bgp_attr_intern(attr);
+			bgp_info_set_flag(rn, tmp_ri, BGP_INFO_ATTR_CHANGED);
+
+			/* Restore route, if needed. */
+			if (CHECK_FLAG(tmp_ri->flags, BGP_INFO_REMOVED))
+				bgp_info_restore(rn, tmp_ri);
+
+			/* Unintern existing, set to new. */
+			bgp_attr_unintern(&tmp_ri->attr);
+			tmp_ri->attr = attr_new;
+			tmp_ri->uptime = bgp_clock();
+		}
+	}
+
+	/* Return back the route entry. */
+	*ri = tmp_ri;
+	return 0;
+}
+
+/* update evpn es (type-4) route.
+ *
+ * Builds the path attribute (nexthop = ES originator IP, VXLAN encap +
+ * ES-Import RT ecommunities), creates/updates the entry in the per-ES
+ * table and, if the route is new or changed, exports it to the global
+ * EVPN table for advertisement.
+ */
+static int update_evpn_type4_route(struct bgp *bgp,
+				   struct evpnes *es,
+				   struct prefix_evpn *p)
+{
+	int ret = 0;
+	int route_changed = 0;
+	char buf[ESI_STR_LEN];
+	char buf1[INET6_ADDRSTRLEN];
+	afi_t afi = AFI_L2VPN;
+	safi_t safi = SAFI_EVPN;
+	struct attr attr;
+	struct attr *attr_new = NULL;
+	struct bgp_node *rn = NULL;
+	struct bgp_info *ri = NULL;
+
+	memset(&attr, 0, sizeof(struct attr));
+
+	/* Build path-attribute for this route. */
+	bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
+	attr.nexthop = es->originator_ip.ipaddr_v4;
+	attr.mp_nexthop_global_in = es->originator_ip.ipaddr_v4;
+	attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV4;
+
+	/* Set up extended community. */
+	build_evpn_type4_route_extcomm(es, &attr);
+
+	/* First, create (or fetch) route node within the ESI. */
+	/* NOTE: There is no RD here. */
+	rn = bgp_node_get(es->route_table, (struct prefix *)p);
+
+	/* Create or update route entry. */
+	ret = update_evpn_type4_route_entry(bgp, es, afi, safi, rn,
+					    &attr, 1, &ri,
+					    &route_changed);
+	if (ret != 0) {
+		zlog_err("%u ERROR: Failed to updated ES route ESI: %s VTEP %s",
+			 bgp->vrf_id,
+			 esi_to_str(&p->prefix.es_addr.esi, buf, sizeof(buf)),
+			 ipaddr2str(&es->originator_ip, buf1, sizeof(buf1)));
+		/* On failure 'ri' is NULL; previously execution fell
+		 * through to assert(ri) and crashed. Bail out instead,
+		 * releasing the node lock and the temporary attribute.
+		 */
+		bgp_unlock_node(rn);
+		aspath_unintern(&attr.aspath);
+		return ret;
+	}
+
+	assert(ri);
+	attr_new = ri->attr;
+
+	/* Perform route selection;
+	 * this is just to set the flags correctly
+	 * as local route in the ES always wins.
+	 */
+	evpn_es_route_select_install(bgp, es, rn);
+	bgp_unlock_node(rn);
+
+	/* If this is a new route or some attribute has changed, export the
+	 * route to the global table. The route will be advertised to peers
+	 * from there. Note that this table is a 2-level tree (RD-level +
+	 * Prefix-level) similar to L3VPN routes.
+	 */
+	if (route_changed) {
+		struct bgp_info *global_ri;
+
+		rn = bgp_afi_node_get(bgp->rib[afi][safi], afi, safi,
+				      (struct prefix *)p, &es->prd);
+		update_evpn_type4_route_entry(bgp, es, afi, safi,
+					      rn, attr_new,
+					      1, &global_ri,
+					      &route_changed);
+
+		/* Schedule for processing and unlock node. */
+		bgp_process(bgp, rn, afi, safi);
+		bgp_unlock_node(rn);
+	}
+
+	/* Unintern temporary. */
+	aspath_unintern(&attr.aspath);
+	return 0;
+}
+
static int update_evpn_type5_route_entry(struct bgp *bgp_def,
struct bgp *bgp_vrf, afi_t afi,
safi_t safi, struct bgp_node *rn,
local_ri = tmp_ri;
}
- /* create a new route entry if one doesnt exist.
- Otherwise see if route attr has changed
+ /*
+ * create a new route entry if one doesnt exist.
+ * Otherwise see if route attr has changed
*/
if (!local_ri) {
if (!local_ri) {
/* When learnt locally for the first time but already known from
* remote, we have to initiate appropriate MAC mobility steps.
- * This
- * is applicable when updating the VNI routing table.
+ * This is applicable when updating the VNI routing table.
* We need to skip mobility steps for g/w macs (local mac on g/w
* SVI) advertised in EVPN.
* This will ensure that local routes are preferred for g/w macs
return 0;
}
-/* Delete EVPN type5 route entry from global table */
-static void delete_evpn_type5_route_entry(struct bgp *bgp_def,
- struct bgp *bgp_vrf, afi_t afi,
- safi_t safi, struct bgp_node *rn,
- struct bgp_info **ri)
+/*
+ * Delete EVPN route entry.
+ * The entry can be in ESI/VNI table or the global table.
+ */
+static void delete_evpn_route_entry(struct bgp *bgp,
+ afi_t afi, safi_t safi,
+ struct bgp_node *rn,
+ struct bgp_info **ri)
{
- struct bgp_info *tmp_ri = NULL;
+ struct bgp_info *tmp_ri;
*ri = NULL;
- /* find the matching route entry */
+ /* Now, find matching route. */
for (tmp_ri = rn->info; tmp_ri; tmp_ri = tmp_ri->next)
- if (tmp_ri->peer == bgp_def->peer_self
+ if (tmp_ri->peer == bgp->peer_self
&& tmp_ri->type == ZEBRA_ROUTE_BGP
&& tmp_ri->sub_type == BGP_ROUTE_STATIC)
break;
bgp_info_delete(rn, tmp_ri);
}
+
+
+/* Delete EVPN ES (type-4) route.
+ *
+ * Withdraws the route from the global EVPN table (peers are notified
+ * via bgp_process) and removes it immediately from the per-ES table.
+ */
+static int delete_evpn_type4_route(struct bgp *bgp,
+				   struct evpnes *es,
+				   struct prefix_evpn *p)
+{
+	afi_t afi = AFI_L2VPN;
+	safi_t safi = SAFI_EVPN;
+	struct bgp_info *ri;
+	struct bgp_node *rn = NULL; /* rn in esi table */
+	struct bgp_node *global_rn = NULL; /* rn in global table */
+
+	/* First, locate the route node within the ESI.
+	 * If it doesn't exist, there is nothing to do.
+	 * Note: there is no RD here.
+	 */
+	rn = bgp_node_lookup(es->route_table, (struct prefix *)p);
+	if (!rn)
+		return 0;
+
+	/* Next, locate route node in the global EVPN routing table.
+	 * Note that this table is a 2-level tree (RD-level + Prefix-level)
+	 */
+	global_rn = bgp_afi_node_lookup(bgp->rib[afi][safi], afi, safi,
+					(struct prefix *)p, &es->prd);
+	if (global_rn) {
+
+		/* Delete route entry in the global EVPN table. */
+		delete_evpn_route_entry(bgp, afi, safi,
+					global_rn, &ri);
+
+		/* Schedule for processing - withdraws to peers happen from
+		 * this table.
+		 */
+		if (ri)
+			bgp_process(bgp, global_rn, afi, safi);
+		bgp_unlock_node(global_rn);
+	}
+
+	/*
+	 * Delete route entry in the ESI route table.
+	 * This can just be removed.
+	 */
+	delete_evpn_route_entry(bgp, afi, safi, rn, &ri);
+	if (ri)
+		bgp_info_reap(rn, ri);
+	bgp_unlock_node(rn);
+	return 0;
+}
+
/* Delete EVPN type5 route */
static int delete_evpn_type5_route(struct bgp *bgp_vrf, struct prefix_evpn *evp)
{
if (!rn)
return 0;
- delete_evpn_type5_route_entry(bgp_def, bgp_vrf, afi, safi, rn, &ri);
+ delete_evpn_route_entry(bgp_def, afi, safi, rn, &ri);
if (ri)
bgp_process(bgp_def, rn, afi, safi);
bgp_unlock_node(rn);
return 0;
}
-/*
- * Delete EVPN route entry. This could be in the VNI route table
- * or the global route table.
- */
-static void delete_evpn_route_entry(struct bgp *bgp, struct bgpevpn *vpn,
- afi_t afi, safi_t safi, struct bgp_node *rn,
- struct bgp_info **ri)
-{
- struct bgp_info *tmp_ri;
-
- *ri = NULL;
-
- /* Now, find matching route. */
- for (tmp_ri = rn->info; tmp_ri; tmp_ri = tmp_ri->next)
- if (tmp_ri->peer == bgp->peer_self
- && tmp_ri->type == ZEBRA_ROUTE_BGP
- && tmp_ri->sub_type == BGP_ROUTE_STATIC)
- break;
-
- *ri = tmp_ri;
-
- /* Mark route for delete. */
- if (tmp_ri)
- bgp_info_delete(rn, tmp_ri);
-}
-
/*
* Delete EVPN route (of type based on prefix) for specified VNI and
* schedule for processing.
(struct prefix *)p, &vpn->prd);
if (global_rn) {
/* Delete route entry in the global EVPN table. */
- delete_evpn_route_entry(bgp, vpn, afi, safi, global_rn, &ri);
+ delete_evpn_route_entry(bgp, afi, safi, global_rn, &ri);
/* Schedule for processing - withdraws to peers happen from
* this table.
/* Delete route entry in the VNI route table. This can just be removed.
*/
- delete_evpn_route_entry(bgp, vpn, afi, safi, rn, &ri);
+ delete_evpn_route_entry(bgp, afi, safi, rn, &ri);
if (ri)
bgp_info_reap(rn, ri);
bgp_unlock_node(rn);
if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE)
continue;
- delete_evpn_route_entry(bgp, vpn, afi, safi, rn, &ri);
+ delete_evpn_route_entry(bgp, afi, safi, rn, &ri);
if (ri)
bgp_process(bgp, rn, afi, safi);
}
if (evp->prefix.route_type != BGP_EVPN_MAC_IP_ROUTE)
continue;
- delete_evpn_route_entry(bgp, vpn, afi, safi, rn, &ri);
+ delete_evpn_route_entry(bgp, afi, safi, rn, &ri);
/* Route entry in local table gets deleted immediately. */
if (ri)
return 0;
}
+/*
+ * Delete all routes in per ES route-table.
+ *
+ * Walks every node of es->route_table and reaps every path entry
+ * immediately (no withdraw processing — callers handle the global
+ * table separately). Always returns 0.
+ */
+static int delete_all_es_routes(struct bgp *bgp, struct evpnes *es)
+{
+	struct bgp_node *rn;
+	struct bgp_info *ri, *nextri;
+
+	/* Walk this ES's route table and delete all routes. */
+	for (rn = bgp_table_top(es->route_table); rn;
+	     rn = bgp_route_next(rn)) {
+		/* Capture 'nextri' before reaping frees 'ri'. */
+		for (ri = rn->info; (ri != NULL) && (nextri = ri->next, 1);
+		     ri = nextri) {
+			bgp_info_delete(rn, ri);
+			bgp_info_reap(rn, ri);
+		}
+	}
+
+	return 0;
+}
+
/*
* Delete all routes in the per-VNI route table.
*/
return update_all_type2_routes(bgp, vpn);
}
+/* Delete (and withdraw) local routes for specified ES from global and ES table.
+ * Also remove all other routes from the per ES table.
+ * Invoked when ES is deleted.
+ *
+ * A failure to delete the locally-originated type-4 route is logged
+ * but does not abort the per-ES table cleanup.
+ */
+static int delete_routes_for_es(struct bgp *bgp, struct evpnes *es)
+{
+	int ret;
+	char buf[ESI_STR_LEN];
+	struct prefix_evpn p;
+
+	/* Delete and withdraw locally learnt ES route */
+	build_evpn_type4_prefix(&p, &es->esi, es->originator_ip.ipaddr_v4);
+	ret = delete_evpn_type4_route(bgp, es, &p);
+	if (ret) {
+		zlog_err(
+			"%u failed to delete type-4 route for ESI %s",
+			bgp->vrf_id,
+			esi_to_str(&es->esi, buf, sizeof(buf)));
+	}
+
+	/* Delete all routes from per ES table */
+	return delete_all_es_routes(bgp, es);
+}
+
/*
* Delete (and withdraw) local routes for specified VNI from the global
* table and per-VNI table. After this, remove all other routes from
return 0;
}
+/* Install EVPN route entry in ES.
+ *
+ * Imports a remote route ('parent_ri') into the per-ES route table,
+ * either creating a new BGP_ROUTE_IMPORTED entry linked back to the
+ * parent or refreshing the existing one, then re-runs ES route
+ * selection.
+ */
+static int install_evpn_route_entry_in_es(struct bgp *bgp,
+					  struct evpnes *es,
+					  struct prefix_evpn *p,
+					  struct bgp_info *parent_ri)
+{
+	int ret = 0;
+	struct bgp_node *rn = NULL;
+	struct bgp_info *ri = NULL;
+	struct attr *attr_new = NULL;
+
+	/* Create (or fetch) route node within the ES table.
+	 * NOTE: There is no RD here.
+	 */
+	rn = bgp_node_get(es->route_table, (struct prefix *)p);
+
+	/* Check if route entry is already present. */
+	for (ri = rn->info; ri; ri = ri->next)
+		if (ri->extra &&
+		    (struct bgp_info *)ri->extra->parent == parent_ri)
+			break;
+
+	if (!ri) {
+		/* Add (or update) attribute to hash. */
+		attr_new = bgp_attr_intern(parent_ri->attr);
+
+		/* Create new route with its attribute. */
+		ri = info_make(parent_ri->type, BGP_ROUTE_IMPORTED, 0,
+			       parent_ri->peer, attr_new, rn);
+		SET_FLAG(ri->flags, BGP_INFO_VALID);
+		bgp_info_extra_get(ri);
+		ri->extra->parent = parent_ri;
+		bgp_info_add(rn, ri);
+	} else {
+		/* Unchanged attribute and entry not removed: nothing to do. */
+		if (attrhash_cmp(ri->attr, parent_ri->attr)
+		    && !CHECK_FLAG(ri->flags, BGP_INFO_REMOVED)) {
+			bgp_unlock_node(rn);
+			return 0;
+		}
+		/* The attribute has changed. */
+		/* Add (or update) attribute to hash. */
+		attr_new = bgp_attr_intern(parent_ri->attr);
+
+		/* Restore route, if needed. */
+		if (CHECK_FLAG(ri->flags, BGP_INFO_REMOVED))
+			bgp_info_restore(rn, ri);
+
+		/* Mark if nexthop has changed. */
+		if (!IPV4_ADDR_SAME(&ri->attr->nexthop, &attr_new->nexthop))
+			SET_FLAG(ri->flags, BGP_INFO_IGP_CHANGED);
+
+		/* Unintern existing, set to new. */
+		bgp_attr_unintern(&ri->attr);
+		ri->attr = attr_new;
+		ri->uptime = bgp_clock();
+	}
+
+	/* Perform route selection and update zebra, if required. */
+	ret = evpn_es_route_select_install(bgp, es, rn);
+	return ret;
+}
+
/*
* Install route entry into the VRF routing table and invoke route selection.
*/
if (!IPV4_ADDR_SAME(&ri->attr->nexthop, &attr_new->nexthop))
SET_FLAG(ri->flags, BGP_INFO_IGP_CHANGED);
- /* Unintern existing, set to new. */
- bgp_attr_unintern(&ri->attr);
- ri->attr = attr_new;
- ri->uptime = bgp_clock();
- }
+ /* Unintern existing, set to new. */
+ bgp_attr_unintern(&ri->attr);
+ ri->attr = attr_new;
+ ri->uptime = bgp_clock();
+ }
+
+ /* Perform route selection and update zebra, if required. */
+ ret = evpn_route_select_install(bgp, vpn, rn);
+
+ return ret;
+}
+
+/* Uninstall EVPN route entry from ES route table.
+ *
+ * Removes the imported entry that was derived from 'parent_ri' (if
+ * any) from the per-ES table and re-runs ES route selection.
+ */
+static int uninstall_evpn_route_entry_in_es(struct bgp *bgp,
+					    struct evpnes *es,
+					    struct prefix_evpn *p,
+					    struct bgp_info *parent_ri)
+{
+	int ret;
+	struct bgp_node *rn;
+	struct bgp_info *ri;
+
+	if (!es->route_table)
+		return 0;
+
+	/* Locate route within the ESI.
+	 * NOTE: There is no RD here.
+	 */
+	rn = bgp_node_lookup(es->route_table, (struct prefix *)p);
+	if (!rn)
+		return 0;
+
+	/* Find matching route entry. */
+	for (ri = rn->info; ri; ri = ri->next)
+		if (ri->extra &&
+		    (struct bgp_info *)ri->extra->parent == parent_ri)
+			break;
+
+	if (!ri) {
+		/* bgp_node_lookup() returned a locked node; drop that
+		 * reference before bailing out (previously leaked here).
+		 */
+		bgp_unlock_node(rn);
+		return 0;
+	}
+
+	/* Mark entry for deletion */
+	bgp_info_delete(rn, ri);
+	/* Perform route selection and update zebra, if required. */
+	ret = evpn_es_route_select_install(bgp, es, rn);
+
+	/* Unlock route node. */
+	bgp_unlock_node(rn);
+
+	return ret;
+}
return ret;
}
+/*
+ * Given a prefix, see if it belongs to ES.
+ *
+ * Returns 1 only for a type-4 (ES) route whose ESI matches es->esi
+ * byte-for-byte; 0 otherwise.
+ */
+static int is_prefix_matching_for_es(struct prefix_evpn *p,
+				     struct evpnes *es)
+{
+	/* if not an ES route return false */
+	if (p->prefix.route_type != BGP_EVPN_ES_ROUTE)
+		return 0;
+
+	if (memcmp(&p->prefix.es_addr.esi, &es->esi, sizeof(esi_t)) == 0)
+		return 1;
+
+	return 0;
+}
+
/*
* Given a route entry and a VRF, see if this route entry should be
* imported into the VRF i.e., RTs match.
/* See if this RT matches specified VNIs import RTs */
irt = lookup_vrf_import_rt(eval);
- if (irt && irt->vrfs)
+ if (irt)
if (is_vrf_present_in_irt_vrfs(irt->vrfs, bgp_vrf))
return 1;
mask_ecom_global_admin(&eval_tmp, eval);
irt = lookup_vrf_import_rt(&eval_tmp);
}
- if (irt && irt->vrfs)
+ if (irt)
if (is_vrf_present_in_irt_vrfs(irt->vrfs, bgp_vrf))
return 1;
}
/* See if this RT matches specified VNIs import RTs */
irt = lookup_import_rt(bgp, eval);
- if (irt && irt->vnis)
+ if (irt)
if (is_vni_present_in_irt_vnis(irt->vnis, vpn))
return 1;
mask_ecom_global_admin(&eval_tmp, eval);
irt = lookup_import_rt(bgp, &eval_tmp);
}
- if (irt && irt->vnis)
+ if (irt)
if (is_vni_present_in_irt_vnis(irt->vnis, vpn))
return 1;
}
return 0;
}
+/* Walk the entire global EVPN table and (un)install into the given ES
+ * every valid remote route whose ESI matches. Unlike VNI/VRF import,
+ * ES matching is by ESI in the prefix, not by route-target.
+ * Stops and returns the error on the first failed (un)install.
+ */
+static int install_uninstall_routes_for_es(struct bgp *bgp,
+					   struct evpnes *es,
+					   int install)
+{
+	int ret;
+	afi_t afi;
+	safi_t safi;
+	char buf[PREFIX_STRLEN];
+	char buf1[ESI_STR_LEN];
+	struct bgp_node *rd_rn, *rn;
+	struct bgp_table *table;
+	struct bgp_info *ri;
+
+	afi = AFI_L2VPN;
+	safi = SAFI_EVPN;
+
+	/*
+	 * Walk the entire global routing table (outer level is per-RD);
+	 * every route must be examined to find the ones matching this ES.
+	 */
+	for (rd_rn = bgp_table_top(bgp->rib[afi][safi]); rd_rn;
+	     rd_rn = bgp_route_next(rd_rn)) {
+		table = (struct bgp_table *)(rd_rn->info);
+		if (!table)
+			continue;
+
+		for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) {
+			struct prefix_evpn *evp = (struct prefix_evpn *)&rn->p;
+
+			for (ri = rn->info; ri; ri = ri->next) {
+				/*
+				 * Consider "valid" remote routes applicable for
+				 * this ES.
+				 */
+				if (!(CHECK_FLAG(ri->flags, BGP_INFO_VALID)
+				      && ri->type == ZEBRA_ROUTE_BGP
+				      && ri->sub_type == BGP_ROUTE_NORMAL))
+					continue;
+
+				if (!is_prefix_matching_for_es(evp, es))
+					continue;
+
+				if (install)
+					ret = install_evpn_route_entry_in_es(
+						bgp, es, evp, ri);
+				else
+					ret = uninstall_evpn_route_entry_in_es(
+						bgp, es, evp, ri);
+
+				if (ret) {
+					zlog_err(
+						"Failed to %s EVPN %s route in ESI %s",
+						install ? "install"
+							: "uninstall",
+						prefix2str(evp, buf,
+							   sizeof(buf)),
+						esi_to_str(&es->esi, buf1,
+							   sizeof(buf1)));
+					return ret;
+				}
+			}
+		}
+	}
+	return 0;
+}
+
/*
* Install or uninstall mac-ip routes are appropriate for this
* particular VRF.
return 0;
}
+/* Install any existing remote ES routes applicable for this ES into its routing
+ * table. This is invoked when ES comes up.
+ * Thin wrapper over install_uninstall_routes_for_es() (install == 1).
+ */
+static int install_routes_for_es(struct bgp *bgp, struct evpnes *es)
+{
+	return install_uninstall_routes_for_es(bgp, es, 1);
+}
+
+
+
+
/* Install any existing remote routes applicable for this VRF into VRF RIB. This
* is invoked upon l3vni-add or l3vni import rt change
*/
0);
}
+/* Install or uninstall a single route in the given ES table.
+ * Dispatches to the (un)install helper and logs (then propagates) any
+ * failure; returns 0 on success.
+ */
+static int install_uninstall_route_in_es(struct bgp *bgp, struct evpnes *es,
+					 afi_t afi, safi_t safi,
+					 struct prefix_evpn *evp,
+					 struct bgp_info *ri,
+					 int install)
+{
+	int ret = 0;
+	char buf[ESI_STR_LEN];
+
+	if (install)
+		ret = install_evpn_route_entry_in_es(bgp, es, evp, ri);
+	else
+		ret = uninstall_evpn_route_entry_in_es(bgp, es, evp, ri);
+
+	if (ret) {
+		zlog_err("%u: Failed to %s EVPN %s route in ESI %s",
+			 bgp->vrf_id, install ? "install" : "uninstall",
+			 "ES",
+			 esi_to_str(&evp->prefix.es_addr.esi, buf,
+				    sizeof(buf)));
+		return ret;
+	}
+	return 0;
+}
+
/*
* Install or uninstall route in matching VRFs (list).
*/
}
/*
- * Install or uninstall route for appropriate VNIs.
+ * Install or uninstall route for appropriate VNIs/ESIs.
*/
static int install_uninstall_evpn_route(struct bgp *bgp, afi_t afi, safi_t safi,
struct prefix *p, struct bgp_info *ri,
assert(attr);
- /* Only type-2 and type-3 and type-5 are supported currently */
+ /* Only type-2, type-3, type-4 and type-5 are supported currently */
if (!(evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE
|| evp->prefix.route_type == BGP_EVPN_IMET_ROUTE
+ || evp->prefix.route_type == BGP_EVPN_ES_ROUTE
|| evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE))
return 0;
if (!ecom || !ecom->size)
return -1;
- /* For each extended community RT, see which VNIs/VRFs match and import
- * the route into matching VNIs/VRFs.
- */
+ /* An EVPN route belongs to a VNI or a VRF or an ESI based on the RTs
+ * attached to the route */
for (i = 0; i < ecom->size; i++) {
uint8_t *pnt;
uint8_t type, sub_type;
struct ecommunity_val eval_tmp;
struct irt_node *irt; /* import rt for l2vni */
struct vrf_irt_node *vrf_irt; /* import rt for l3vni */
+ struct evpnes *es;
/* Only deal with RTs */
pnt = (ecom->val + (i * ECOMMUNITY_SIZE));
if (sub_type != ECOMMUNITY_ROUTE_TARGET)
continue;
- /* Import route into matching l2-vnis (type-2/type-3 routes go
- * into l2vni table)
+ /*
+ * macip routes (type-2) are imported into VNI and VRF tables.
+ * IMET route is imported into VNI table.
+ * prefix routes are imported into VRF table.
*/
- irt = lookup_import_rt(bgp, eval);
- if (irt && irt->vnis)
- install_uninstall_route_in_vnis(bgp, afi, safi, evp, ri,
- irt->vnis, import);
+ if (evp->prefix.route_type == BGP_EVPN_MAC_IP_ROUTE ||
+ evp->prefix.route_type == BGP_EVPN_IMET_ROUTE ||
+ evp->prefix.route_type == BGP_EVPN_IP_PREFIX_ROUTE) {
- /* Import route into matching l3-vnis (type-2/type-5 routes go
- * into l3vni/vrf table)
- */
- vrf_irt = lookup_vrf_import_rt(eval);
- if (vrf_irt && vrf_irt->vrfs)
- install_uninstall_route_in_vrfs(bgp, afi, safi, evp, ri,
- vrf_irt->vrfs, import);
-
- /* Also check for non-exact match. In this,
- * we mask out the AS and
- * only check on the local-admin sub-field.
- * This is to facilitate using
- * VNI as the RT for EBGP peering too.
- */
- irt = NULL;
- vrf_irt = NULL;
- if (type == ECOMMUNITY_ENCODE_AS
- || type == ECOMMUNITY_ENCODE_AS4
- || type == ECOMMUNITY_ENCODE_IP) {
- memcpy(&eval_tmp, eval, ECOMMUNITY_SIZE);
- mask_ecom_global_admin(&eval_tmp, eval);
- irt = lookup_import_rt(bgp, &eval_tmp);
- vrf_irt = lookup_vrf_import_rt(&eval_tmp);
+ irt = lookup_import_rt(bgp, eval);
+ if (irt)
+ install_uninstall_route_in_vnis(bgp, afi, safi,
+ evp, ri,
+ irt->vnis,
+ import);
+
+ vrf_irt = lookup_vrf_import_rt(eval);
+ if (vrf_irt)
+ install_uninstall_route_in_vrfs(bgp, afi, safi,
+ evp, ri,
+ vrf_irt->vrfs,
+ import);
+
+ /* Also check for non-exact match.
+ * In this, we mask out the AS and
+ * only check on the local-admin sub-field.
+ * This is to facilitate using
+ * VNI as the RT for EBGP peering too.
+ */
+ irt = NULL;
+ vrf_irt = NULL;
+ if (type == ECOMMUNITY_ENCODE_AS
+ || type == ECOMMUNITY_ENCODE_AS4
+ || type == ECOMMUNITY_ENCODE_IP) {
+ memcpy(&eval_tmp, eval, ECOMMUNITY_SIZE);
+ mask_ecom_global_admin(&eval_tmp, eval);
+ irt = lookup_import_rt(bgp, &eval_tmp);
+ vrf_irt = lookup_vrf_import_rt(&eval_tmp);
+ }
+
+ if (irt)
+ install_uninstall_route_in_vnis(bgp, afi, safi,
+ evp, ri,
+ irt->vnis,
+ import);
+ if (vrf_irt)
+ install_uninstall_route_in_vrfs(bgp, afi, safi,
+ evp, ri,
+ vrf_irt->vrfs,
+ import);
+ }
+
+ /* es route is imported into the es table */
+ if (evp->prefix.route_type == BGP_EVPN_ES_ROUTE) {
+
+ /* we will match based on the entire esi to avoid
+		 * import of an es route for esi2 into esi1
+ */
+ es = bgp_evpn_lookup_es(bgp, &evp->prefix.es_addr.esi);
+ if (es && is_es_local(es))
+ install_uninstall_route_in_es(bgp, es,
+ afi, safi,
+ evp, ri, import);
}
- if (irt && irt->vnis)
- install_uninstall_route_in_vnis(bgp, afi, safi, evp, ri,
- irt->vnis, import);
- if (vrf_irt && vrf_irt->vrfs)
- install_uninstall_route_in_vrfs(bgp, afi, safi, evp, ri,
- vrf_irt->vrfs, import);
}
return 0;
}
-/* delete and withdraw all ipv4 and ipv6 routes in the vrf table as type-5
- * routes */
+/*
+ * delete and withdraw all ipv4 and ipv6 routes in the vrf table as type-5
+ * routes
+ */
static void delete_withdraw_vrf_routes(struct bgp *bgp_vrf)
{
/* delete all ipv4 routes and withdraw from peers */
bgp_evpn_withdraw_type5_routes(bgp_vrf, AFI_IP6, SAFI_UNICAST);
}
-/* update and advertise all ipv4 and ipv6 routes in thr vrf table as type-5
- * routes */
+/*
+ * update and advertise all ipv4 and ipv6 routes in the vrf table as type-5
+ * routes
+ */
static void update_advertise_vrf_routes(struct bgp *bgp_vrf)
{
/* update all ipv4 routes */
(struct prefix *)&p, &vpn->prd);
if (global_rn) {
/* Delete route entry in the global EVPN table. */
- delete_evpn_route_entry(bgp, vpn, afi, safi, global_rn, &ri);
+ delete_evpn_route_entry(bgp, afi, safi, global_rn, &ri);
/* Schedule for processing - withdraws to peers happen from
* this table.
/* Make EVPN prefix. */
memset(&p, 0, sizeof(struct prefix_evpn));
p.family = AF_EVPN;
- p.prefixlen = EVPN_TYPE_2_ROUTE_PREFIXLEN;
+ p.prefixlen = EVPN_ROUTE_PREFIXLEN;
p.prefix.route_type = BGP_EVPN_MAC_IP_ROUTE;
/* Copy Ethernet Seg Identifier */
/* Make EVPN prefix. */
memset(&p, 0, sizeof(struct prefix_evpn));
p.family = AF_EVPN;
- p.prefixlen = EVPN_TYPE_3_ROUTE_PREFIXLEN;
+ p.prefixlen = EVPN_ROUTE_PREFIXLEN;
p.prefix.route_type = BGP_EVPN_IMET_ROUTE;
/* Copy Ethernet Tag */
return ret;
}
+/*
+ * Process received EVPN type-4 route (advertise or withdraw).
+ */
+static int process_type4_route(struct peer *peer, afi_t afi, safi_t safi,
+ struct attr *attr, uint8_t *pfx, int psize,
+ uint32_t addpath_id)
+{
+ int ret;
+ esi_t esi;
+ uint8_t ipaddr_len;
+ struct in_addr vtep_ip;
+ struct prefix_rd prd;
+ struct prefix_evpn p;
+
+ /* Type-4 route should be either 23 or 35 bytes
+ * RD (8), ESI (10), ip-len (1), ip (4 or 16)
+ */
+ if (psize != 23 && psize != 35) {
+ zlog_err("%u:%s - Rx EVPN Type-4 NLRI with invalid length %d",
+ peer->bgp->vrf_id, peer->host, psize);
+ return -1;
+ }
+
+ /* Make prefix_rd */
+ prd.family = AF_UNSPEC;
+ prd.prefixlen = 64;
+ memcpy(&prd.val, pfx, 8);
+ pfx += 8;
+
+ /* get the ESI */
+ memcpy(&esi, pfx, ESI_BYTES);
+ pfx += ESI_BYTES;
+
+
+ /* Get the IP. */
+ ipaddr_len = *pfx++;
+ if (ipaddr_len == IPV4_MAX_BITLEN) {
+ memcpy(&vtep_ip, pfx, IPV4_MAX_BYTELEN);
+ } else {
+ zlog_err(
+ "%u:%s - Rx EVPN Type-4 NLRI with unsupported IP address length %d",
+ peer->bgp->vrf_id, peer->host, ipaddr_len);
+ return -1;
+ }
+
+ build_evpn_type4_prefix(&p, &esi, vtep_ip);
+ /* Process the route. */
+ if (attr) {
+ ret = bgp_update(peer, (struct prefix *)&p, addpath_id, attr,
+ afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
+ &prd, NULL, 0, 0, NULL);
+ } else {
+ ret = bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr,
+ afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
+ &prd, NULL, 0, NULL);
+ }
+ return ret;
+}
+
+
/*
* Process received EVPN type-5 route (advertise or withdraw).
*/
/* Make EVPN prefix. */
memset(&p, 0, sizeof(struct prefix_evpn));
p.family = AF_EVPN;
- p.prefixlen = EVPN_TYPE_5_ROUTE_PREFIXLEN;
+ p.prefixlen = EVPN_ROUTE_PREFIXLEN;
p.prefix.route_type = BGP_EVPN_IP_PREFIX_ROUTE;
/* Additional information outside of prefix - ESI and GW IP */
if (node_to_del)
list_delete_node(bgp_vrf->vrf_export_rtl, node_to_del);
+ /*
+ * Temporary assert to make SA happy.
+ * The ALL_LIST_ELEMENTS macro above has a NULL check
+ * which means that SA is going to complain about
+ * the list_isempty call, which doesn't NULL check.
+ * So until we get this situation cleaned up, here
+ * we are.
+ */
+ assert(bgp_vrf->vrf_export_rtl);
+
/* fall back to auto-generated RT if this was the last RT */
- if (bgp_vrf->vrf_export_rtl && list_isempty(bgp_vrf->vrf_export_rtl)) {
+ if (list_isempty(bgp_vrf->vrf_export_rtl)) {
UNSET_FLAG(bgp_vrf->vrf_flags, BGP_VRF_EXPORT_RT_CFGD);
evpn_auto_rt_export_add_for_vrf(bgp_vrf);
}
{
char buf1[ETHER_ADDR_STRLEN];
char buf2[PREFIX2STR_BUFFER];
+ char buf3[ESI_STR_LEN];
if (p->prefix.route_type == BGP_EVPN_IMET_ROUTE) {
snprintf(buf, len, "[%d]:[%d]:[%d]:[%s]", p->prefix.route_type,
is_evpn_prefix_ipaddr_v4(p)
? inet_ntoa(p->prefix.prefix_addr.ip.ipaddr_v4)
: inet6_ntoa(p->prefix.prefix_addr.ip.ipaddr_v6));
+ } else if (p->prefix.route_type == BGP_EVPN_ES_ROUTE) {
+ snprintf(buf, len, "[%d]:[%s]:[%d]:[%s]",
+ p->prefix.route_type,
+ esi_to_str(&p->prefix.es_addr.esi, buf3, sizeof(buf3)),
+ is_evpn_prefix_ipaddr_v4(p) ? IPV4_MAX_BITLEN
+ : IPV6_MAX_BITLEN,
+ inet_ntoa(p->prefix.es_addr.ip.ipaddr_v4));
} else {
/* For EVPN route types not supported yet. */
snprintf(buf, len, "(unsupported route type %d)",
stream_put_in_addr(s, &evp->prefix.imet_addr.ip.ipaddr_v4);
break;
+ case BGP_EVPN_ES_ROUTE:
+ stream_putc(s, 23); /* TODO: length: assumes ipv4 VTEP */
+ stream_put(s, prd->val, 8); /* RD */
+ stream_put(s, evp->prefix.es_addr.esi.val, 10); /* ESI */
+ stream_putc(s, IPV4_MAX_BITLEN); /* IP address Length - bits */
+ /* VTEP IP */
+ stream_put_in_addr(s, &evp->prefix.es_addr.ip.ipaddr_v4);
+ break;
+
case BGP_EVPN_IP_PREFIX_ROUTE:
/* TODO: AddPath support. */
evpn_mpattr_encode_type5(s, p, prd, label, num_labels, attr);
}
break;
+ case BGP_EVPN_ES_ROUTE:
+ if (process_type4_route(peer, afi, safi,
+ withdraw ? NULL : attr, pnt,
+ psize, addpath_id)) {
+ zlog_err(
+ "%u:%s - Error in processing EVPN type-4 NLRI size %d",
+ peer->bgp->vrf_id, peer->host, psize);
+ return -1;
+ }
+ break;
+
case BGP_EVPN_IP_PREFIX_ROUTE:
if (process_type5_route(peer, afi, safi, attr, pnt,
psize, addpath_id, withdraw)) {
}
/*
- * Import route into matching VNI(s).
+ * Lookup local ES.
+ */
+struct evpnes *bgp_evpn_lookup_es(struct bgp *bgp, esi_t *esi)
+{
+ struct evpnes *es;
+ struct evpnes tmp;
+
+ memset(&tmp, 0, sizeof(struct evpnes));
+ memcpy(&tmp.esi, esi, sizeof(esi_t));
+ es = hash_lookup(bgp->esihash, &tmp);
+ return es;
+}
+
+/*
+ * Create a new local es - invoked upon zebra notification.
+ */
+struct evpnes *bgp_evpn_es_new(struct bgp *bgp,
+ esi_t *esi,
+ struct ipaddr *originator_ip)
+{
+ char buf[100];
+ struct evpnes *es;
+
+ if (!bgp)
+ return NULL;
+
+ es = XCALLOC(MTYPE_BGP_EVPN_ES, sizeof(struct evpnes));
+ if (!es)
+ return NULL;
+
+ /* set the ESI and originator_ip */
+ memcpy(&es->esi, esi, sizeof(esi_t));
+ memcpy(&es->originator_ip, originator_ip, sizeof(struct ipaddr));
+
+ /* Initialise the VTEP list */
+ es->vtep_list = list_new();
+ es->vtep_list->cmp = (int (*)(void *, void *))evpn_vtep_ip_cmp;
+
+ /* auto derive RD for this es */
+ bf_assign_index(bm->rd_idspace, es->rd_id);
+ es->prd.family = AF_UNSPEC;
+ es->prd.prefixlen = 64;
+ sprintf(buf, "%s:%hu", inet_ntoa(bgp->router_id), es->rd_id);
+ (void)str2prefix_rd(buf, &es->prd);
+
+ /* Initialize the ES route table */
+ es->route_table = bgp_table_init(bgp, AFI_L2VPN, SAFI_EVPN);
+
+ /* Add to hash */
+ if (!hash_get(bgp->esihash, es, hash_alloc_intern)) {
+ XFREE(MTYPE_BGP_EVPN_ES, es);
+ return NULL;
+ }
+
+ QOBJ_REG(es, evpnes);
+ return es;
+}
+
+/*
+ * Free a given ES -
+ * This just frees appropriate memory, caller should have taken other
+ * needed actions.
+ */
+void bgp_evpn_es_free(struct bgp *bgp, struct evpnes *es)
+{
+ list_delete_and_null(&es->vtep_list);
+ bgp_table_unlock(es->route_table);
+ bf_release_index(bm->rd_idspace, es->rd_id);
+ hash_release(bgp->esihash, es);
+ QOBJ_UNREG(es);
+ XFREE(MTYPE_BGP_EVPN_ES, es);
+}
+
+/*
+ * Import evpn route from global table to VNI/VRF/ESI.
*/
int bgp_evpn_import_route(struct bgp *bgp, afi_t afi, safi_t safi,
struct prefix *p, struct bgp_info *ri)
}
/*
- * Unimport route from matching VNI(s).
+ * Unimport evpn route from VNI/VRF/ESI.
*/
int bgp_evpn_unimport_route(struct bgp *bgp, afi_t afi, safi_t safi,
struct prefix *p, struct bgp_info *ri)
struct bgpevpn *vpn;
struct prefix_evpn p;
- if (!bgp->vnihash) {
- zlog_err("%u: VNI hash not created", bgp->vrf_id);
- return -1;
- }
-
/* Lookup VNI hash - should exist. */
vpn = bgp_evpn_lookup_vni(bgp, vni);
if (!vpn || !is_vni_live(vpn)) {
struct bgpevpn *vpn;
struct prefix_evpn p;
- if (!bgp->vnihash) {
- zlog_err("%u: VNI hash not created", bgp->vrf_id);
- return -1;
- }
-
/* Lookup VNI hash - should exist. */
vpn = bgp_evpn_lookup_vni(bgp, vni);
if (!vpn || !is_vni_live(vpn)) {
memset(&bgp_vrf->rmac, 0, sizeof(struct ethaddr));
/* delete RD/RT */
- if (bgp_vrf->vrf_import_rtl && !list_isempty(bgp_vrf->vrf_import_rtl)) {
+ if (!list_isempty(bgp_vrf->vrf_import_rtl)) {
bgp_evpn_unmap_vrf_from_its_rts(bgp_vrf);
list_delete_all_node(bgp_vrf->vrf_import_rtl);
}
- if (bgp_vrf->vrf_export_rtl && !list_isempty(bgp_vrf->vrf_export_rtl)) {
+ if (!list_isempty(bgp_vrf->vrf_export_rtl)) {
list_delete_all_node(bgp_vrf->vrf_export_rtl);
}
{
struct bgpevpn *vpn;
- if (!bgp->vnihash) {
- zlog_err("%u: VNI hash not created", bgp->vrf_id);
- return -1;
- }
-
/* Locate VNI hash */
vpn = bgp_evpn_lookup_vni(bgp, vni);
if (!vpn) {
struct bgpevpn *vpn;
struct prefix_evpn p;
- if (!bgp->vnihash) {
- zlog_err("%u: VNI hash not created", bgp->vrf_id);
- return -1;
- }
-
/* Lookup VNI. If present and no change, exit. */
vpn = bgp_evpn_lookup_vni(bgp, vni);
if (vpn) {
return 0;
}
+/*
+ * bgp_evpn_local_es_del
+ */
+int bgp_evpn_local_es_del(struct bgp *bgp,
+ esi_t *esi,
+ struct ipaddr *originator_ip)
+{
+ char buf[ESI_STR_LEN];
+ struct evpnes *es = NULL;
+
+ if (!bgp->esihash) {
+ zlog_err("%u: ESI hash not yet created", bgp->vrf_id);
+ return -1;
+ }
+
+ /* Lookup ESI hash - should exist. */
+ es = bgp_evpn_lookup_es(bgp, esi);
+ if (!es) {
+ zlog_warn("%u: ESI hash entry for ESI %s at Local ES DEL",
+ bgp->vrf_id,
+ esi_to_str(esi, buf, sizeof(buf)));
+ return -1;
+ }
+
+ /* Delete all local EVPN ES routes from ESI table
+	 * and schedule for processing (to withdraw from peers)
+ */
+ delete_routes_for_es(bgp, es);
+
+ /* free the hash entry */
+ bgp_evpn_es_free(bgp, es);
+
+ return 0;
+}
+
+/*
+ * bgp_evpn_local_es_add
+ */
+int bgp_evpn_local_es_add(struct bgp *bgp,
+ esi_t *esi,
+ struct ipaddr *originator_ip)
+{
+ char buf[ESI_STR_LEN];
+ struct evpnes *es = NULL;
+ struct prefix_evpn p;
+
+ if (!bgp->esihash) {
+ zlog_err("%u: ESI hash not yet created", bgp->vrf_id);
+ return -1;
+ }
+
+ /* create the new es */
+ es = bgp_evpn_lookup_es(bgp, esi);
+ if (!es) {
+ es = bgp_evpn_es_new(bgp, esi, originator_ip);
+ if (!es) {
+ zlog_err(
+ "%u: Failed to allocate ES entry for ESI %s - at Local ES Add",
+ bgp->vrf_id, esi_to_str(esi, buf, sizeof(buf)));
+ return -1;
+ }
+ }
+ UNSET_FLAG(es->flags, EVPNES_REMOTE);
+ SET_FLAG(es->flags, EVPNES_LOCAL);
+
+ build_evpn_type4_prefix(&p, esi, originator_ip->ipaddr_v4);
+ if (update_evpn_type4_route(bgp, es, &p)) {
+ zlog_err("%u: Type4 route creation failure for ESI %s",
+ bgp->vrf_id, esi_to_str(esi, buf, sizeof(buf)));
+ return -1;
+ }
+
+	/* import all remote ES routes in the ES table */
+ install_routes_for_es(bgp, es);
+
+ return 0;
+}
+
/*
* Cleanup EVPN information on disable - Need to delete and withdraw
* EVPN routes from peers.
*/
void bgp_evpn_cleanup(struct bgp *bgp)
{
- if (bgp->vnihash)
- hash_iterate(bgp->vnihash, (void (*)(struct hash_backet *,
- void *))free_vni_entry,
- bgp);
- if (bgp->import_rt_hash)
- hash_free(bgp->import_rt_hash);
+ hash_iterate(bgp->vnihash,
+ (void (*)(struct hash_backet *, void *))free_vni_entry,
+ bgp);
+
+ hash_free(bgp->import_rt_hash);
bgp->import_rt_hash = NULL;
- if (bgp->vrf_import_rt_hash)
- hash_free(bgp->vrf_import_rt_hash);
+
+ hash_free(bgp->vrf_import_rt_hash);
bgp->vrf_import_rt_hash = NULL;
- if (bgp->vnihash)
- hash_free(bgp->vnihash);
+
+ hash_free(bgp->vnihash);
bgp->vnihash = NULL;
- if (bgp->vrf_import_rtl)
- list_delete_and_null(&bgp->vrf_import_rtl);
- if (bgp->vrf_export_rtl)
- list_delete_and_null(&bgp->vrf_export_rtl);
- if (bgp->l2vnis)
- list_delete_and_null(&bgp->l2vnis);
+ if (bgp->esihash)
+ hash_free(bgp->esihash);
+ bgp->esihash = NULL;
+
+ list_delete_and_null(&bgp->vrf_import_rtl);
+ list_delete_and_null(&bgp->vrf_export_rtl);
+ list_delete_and_null(&bgp->l2vnis);
}
/*
{
bgp->vnihash =
hash_create(vni_hash_key_make, vni_hash_cmp, "BGP VNI Hash");
+ bgp->esihash =
+ hash_create(esi_hash_keymake, esi_cmp,
+ "BGP EVPN Local ESI Hash");
bgp->import_rt_hash =
hash_create(import_rt_hash_key_make, import_rt_hash_cmp,
"BGP Import RT Hash");
extern int bgp_evpn_local_vni_add(struct bgp *bgp, vni_t vni,
struct in_addr originator_ip,
vrf_id_t tenant_vrf_id);
+extern int bgp_evpn_local_es_add(struct bgp *bgp, esi_t *esi,
+ struct ipaddr *originator_ip);
+extern int bgp_evpn_local_es_del(struct bgp *bgp, esi_t *esi,
+ struct ipaddr *originator_ip);
extern void bgp_evpn_cleanup_on_disable(struct bgp *bgp);
extern void bgp_evpn_cleanup(struct bgp *bgp);
extern void bgp_evpn_init(struct bgp *bgp);
#define RT_ADDRSTRLEN 28
/* EVPN prefix lengths. This reprsent the sizeof struct prefix_evpn */
-#define EVPN_TYPE_2_ROUTE_PREFIXLEN 224
-#define EVPN_TYPE_3_ROUTE_PREFIXLEN 224
-#define EVPN_TYPE_5_ROUTE_PREFIXLEN 224
+#define EVPN_ROUTE_PREFIXLEN 224
/* EVPN route types. */
typedef enum {
DECLARE_QOBJ_TYPE(bgpevpn)
+struct evpnes {
+
+ /* Ethernet Segment Identifier */
+ esi_t esi;
+
+ /* es flags */
+ uint16_t flags;
+#define EVPNES_LOCAL 0x01
+#define EVPNES_REMOTE 0x02
+
+ /*
+ * Id for deriving the RD
+ * automatically for this ESI
+ */
+ uint16_t rd_id;
+
+	/* RD for this ES. */
+ struct prefix_rd prd;
+
+ /* originator ip address */
+ struct ipaddr originator_ip;
+
+ /* list of VTEPs in the same site */
+ struct list *vtep_list;
+
+ /*
+ * Route table for EVPN routes for
+ * this ESI. - type4 routes
+ */
+ struct bgp_table *route_table;
+
+ QOBJ_FIELDS
+};
+
+DECLARE_QOBJ_TYPE(evpnes)
+
/* Mapping of Import RT to VNIs.
* The Import RTs of all VNIs are maintained in a hash table with each
* RT linking to all VNIs that will import routes matching this RT.
|| is_export_rt_configured(vpn));
}
+static inline void encode_es_rt_extcomm(struct ecommunity_val *eval,
+ struct ethaddr *mac)
+{
+ memset(eval, 0, sizeof(struct ecommunity_val));
+ eval->val[0] = ECOMMUNITY_ENCODE_EVPN;
+ eval->val[1] = ECOMMUNITY_EVPN_SUBTYPE_ES_IMPORT_RT;
+ memcpy(&eval->val[2], mac, ETH_ALEN);
+}
+
static inline void encode_rmac_extcomm(struct ecommunity_val *eval,
struct ethaddr *rmac)
{
{
memset(p, 0, sizeof(struct prefix_evpn));
p->family = AF_EVPN;
- p->prefixlen = EVPN_TYPE_2_ROUTE_PREFIXLEN;
+ p->prefixlen = EVPN_ROUTE_PREFIXLEN;
p->prefix.route_type = BGP_EVPN_MAC_IP_ROUTE;
memcpy(&p->prefix.macip_addr.mac.octet, mac->octet, ETH_ALEN);
p->prefix.macip_addr.ip.ipa_type = IPADDR_NONE;
memset(evp, 0, sizeof(struct prefix_evpn));
evp->family = AF_EVPN;
- evp->prefixlen = EVPN_TYPE_5_ROUTE_PREFIXLEN;
+ evp->prefixlen = EVPN_ROUTE_PREFIXLEN;
evp->prefix.route_type = BGP_EVPN_IP_PREFIX_ROUTE;
evp->prefix.prefix_addr.ip_prefix_length = ip_prefix->prefixlen;
evp->prefix.prefix_addr.ip.ipa_type = ip.ipa_type;
{
memset(p, 0, sizeof(struct prefix_evpn));
p->family = AF_EVPN;
- p->prefixlen = EVPN_TYPE_3_ROUTE_PREFIXLEN;
+ p->prefixlen = EVPN_ROUTE_PREFIXLEN;
p->prefix.route_type = BGP_EVPN_IMET_ROUTE;
p->prefix.imet_addr.ip.ipa_type = IPADDR_V4;
p->prefix.imet_addr.ip.ipaddr_v4 = originator_ip;
}
+static inline void build_evpn_type4_prefix(struct prefix_evpn *p,
+ esi_t *esi,
+ struct in_addr originator_ip)
+{
+ memset(p, 0, sizeof(struct prefix_evpn));
+ p->family = AF_EVPN;
+ p->prefixlen = EVPN_ROUTE_PREFIXLEN;
+ p->prefix.route_type = BGP_EVPN_ES_ROUTE;
+ p->prefix.es_addr.ip_prefix_length = IPV4_MAX_BITLEN;
+ p->prefix.es_addr.ip.ipa_type = IPADDR_V4;
+ p->prefix.es_addr.ip.ipaddr_v4 = originator_ip;
+ memcpy(&p->prefix.es_addr.esi, esi, sizeof(esi_t));
+}
+
static inline int evpn_default_originate_set(struct bgp *bgp, afi_t afi,
safi_t safi)
{
return 0;
}
+static inline void es_get_system_mac(esi_t *esi,
+ struct ethaddr *mac)
+{
+ /*
+ * for type-1 and type-3 ESIs,
+ * the system mac starts at val[1]
+ */
+ memcpy(mac, &esi->val[1], ETH_ALEN);
+}
+
+static inline int is_es_local(struct evpnes *es)
+{
+ return CHECK_FLAG(es->flags, EVPNES_LOCAL) ? 1 : 0;
+}
+
extern void evpn_rt_delete_auto(struct bgp *, vni_t, struct list *);
extern void bgp_evpn_configure_export_rt_for_vrf(struct bgp *bgp_vrf,
struct ecommunity *ecomadd);
struct in_addr originator_ip,
vrf_id_t tenant_vrf_id);
extern void bgp_evpn_free(struct bgp *bgp, struct bgpevpn *vpn);
+extern struct evpnes *bgp_evpn_lookup_es(struct bgp *bgp, esi_t *esi);
+extern struct evpnes *bgp_evpn_es_new(struct bgp *bgp, esi_t *esi,
+ struct ipaddr *originator_ip);
+extern void bgp_evpn_es_free(struct bgp *bgp, struct evpnes *es);
#endif /* _BGP_EVPN_PRIVATE_H */
vty_out(vty,
"EVPN type-2 prefix: [2]:[EthTag]:[MAClen]:[MAC]:[IPlen]:[IP]\n");
vty_out(vty, "EVPN type-3 prefix: [3]:[EthTag]:[IPlen]:[OrigIP]\n");
+ vty_out(vty, "EVPN type-4 prefix: [4]:[ESI]:[IPlen]:[OrigIP]\n");
vty_out(vty, "EVPN type-5 prefix: [5]:[EthTag]:[IPlen]:[IP]\n\n");
vty_out(vty, "%s", ri_header);
}
json_object_object_add(json, "exportRts", json_export_rtl);
}
+static void display_es(struct vty *vty, struct evpnes *es, json_object *json)
+{
+ struct in_addr *vtep;
+ char buf[ESI_STR_LEN];
+ char buf1[RD_ADDRSTRLEN];
+ char buf2[INET6_ADDRSTRLEN];
+ struct listnode *node = NULL;
+ json_object *json_vteps = NULL;
+
+ if (json) {
+ json_vteps = json_object_new_array();
+ json_object_string_add(json, "esi",
+ esi_to_str(&es->esi, buf, sizeof(buf)));
+ json_object_string_add(json, "rd",
+ prefix_rd2str(&es->prd, buf1,
+ sizeof(buf1)));
+ json_object_string_add(
+ json, "originatorIp",
+ ipaddr2str(&es->originator_ip, buf2, sizeof(buf2)));
+ if (es->vtep_list) {
+ for (ALL_LIST_ELEMENTS_RO(es->vtep_list, node, vtep))
+ json_object_array_add(
+ json_vteps, json_object_new_string(
+ inet_ntoa(*vtep)));
+ }
+ json_object_object_add(json, "vteps", json_vteps);
+ } else {
+ vty_out(vty, "ESI: %s\n",
+ esi_to_str(&es->esi, buf, sizeof(buf)));
+ vty_out(vty, " RD: %s\n", prefix_rd2str(&es->prd, buf1,
+ sizeof(buf1)));
+ vty_out(vty, " Originator-IP: %s\n",
+ ipaddr2str(&es->originator_ip, buf2, sizeof(buf2)));
+ if (es->vtep_list) {
+ vty_out(vty, " VTEP List:\n");
+ for (ALL_LIST_ELEMENTS_RO(es->vtep_list, node, vtep))
+ vty_out(vty, " %s\n", inet_ntoa(*vtep));
+ }
+ }
+}
+
static void display_vni(struct vty *vty, struct bgpevpn *vpn, json_object *json)
{
char buf1[RD_ADDRSTRLEN];
json_object_object_add(json, "exportRts", json_export_rtl);
}
+static void show_esi_routes(struct bgp *bgp,
+ struct evpnes *es,
+ struct vty *vty,
+ json_object *json)
+{
+ int header = 1;
+ struct bgp_node *rn;
+ struct bgp_info *ri;
+ uint32_t prefix_cnt, path_cnt;
+ uint64_t tbl_ver;
+
+ prefix_cnt = path_cnt = 0;
+
+ tbl_ver = es->route_table->version;
+ for (rn = bgp_table_top(es->route_table); rn;
+ rn = bgp_route_next(rn)) {
+ int add_prefix_to_json = 0;
+ char prefix_str[BUFSIZ];
+ json_object *json_paths = NULL;
+ json_object *json_prefix = NULL;
+
+ bgp_evpn_route2str((struct prefix_evpn *)&rn->p, prefix_str,
+ sizeof(prefix_str));
+
+ if (json)
+ json_prefix = json_object_new_object();
+
+ if (rn->info) {
+ /* Overall header/legend displayed once. */
+ if (header) {
+ bgp_evpn_show_route_header(vty, bgp,
+ tbl_ver, json);
+ header = 0;
+ }
+
+ prefix_cnt++;
+ }
+
+ if (json)
+ json_paths = json_object_new_array();
+
+ /* For EVPN, the prefix is displayed for each path (to fit in
+ * with code that already exists).
+ */
+ for (ri = rn->info; ri; ri = ri->next) {
+ json_object *json_path = NULL;
+
+ if (json)
+ json_path = json_object_new_array();
+
+ route_vty_out(vty, &rn->p, ri, 0, SAFI_EVPN, json_path);
+
+ if (json)
+ json_object_array_add(json_paths, json_path);
+
+ path_cnt++;
+ add_prefix_to_json = 1;
+ }
+
+ if (json && add_prefix_to_json) {
+ json_object_string_add(json_prefix, "prefix",
+ prefix_str);
+ json_object_int_add(json_prefix, "prefixLen",
+ rn->p.prefixlen);
+ json_object_object_add(json_prefix, "paths",
+ json_paths);
+ json_object_object_add(json, prefix_str, json_prefix);
+ }
+ }
+
+ if (json) {
+ json_object_int_add(json, "numPrefix", prefix_cnt);
+ json_object_int_add(json, "numPaths", path_cnt);
+ } else {
+ if (prefix_cnt == 0)
+ vty_out(vty, "No EVPN prefixes exist for this ESI");
+ else
+ vty_out(vty, "\nDisplayed %u prefixes (%u paths)\n",
+ prefix_cnt, path_cnt);
+ }
+}
+
static void show_vni_routes(struct bgp *bgp, struct bgpevpn *vpn, int type,
struct vty *vty, struct in_addr vtep_ip,
json_object *json)
vty_out(vty, "No EVPN prefixes %sexist for this VNI",
type ? "(of requested type) " : "");
else
- vty_out(vty, "\nDisplayed %u prefixes (%u paths)%s",
+ vty_out(vty, "\nDisplayed %u prefixes (%u paths)%s\n",
prefix_cnt, path_cnt,
type ? " (of requested type)" : "");
}
}
}
+static void show_es_entry(struct hash_backet *backet, void *args[])
+{
+ char buf[ESI_STR_LEN];
+ char buf1[RD_ADDRSTRLEN];
+ char buf2[INET6_ADDRSTRLEN];
+ struct in_addr *vtep = NULL;
+ struct vty *vty = args[0];
+ json_object *json = args[1];
+ json_object *json_vteps = NULL;
+ struct listnode *node = NULL;
+ struct evpnes *es = (struct evpnes *)backet->data;
+
+ if (json) {
+ json_vteps = json_object_new_array();
+ json_object_string_add(json, "esi",
+ esi_to_str(&es->esi, buf, sizeof(buf)));
+ json_object_string_add(json, "type",
+ is_es_local(es) ? "Local" : "Remote");
+ json_object_string_add(json, "rd",
+ prefix_rd2str(&es->prd, buf1,
+ sizeof(buf1)));
+ json_object_string_add(
+ json, "originatorIp",
+ ipaddr2str(&es->originator_ip, buf2, sizeof(buf2)));
+ if (es->vtep_list) {
+ for (ALL_LIST_ELEMENTS_RO(es->vtep_list, node, vtep))
+ json_object_array_add(json_vteps,
+ json_object_new_string(
+ inet_ntoa(*vtep)));
+ }
+ json_object_object_add(json, "vteps", json_vteps);
+ } else {
+ vty_out(vty, "%-30s %-6s %-21s %-15s %-6d\n",
+ esi_to_str(&es->esi, buf, sizeof(buf)),
+ is_es_local(es) ? "Local" : "Remote",
+ prefix_rd2str(&es->prd, buf1, sizeof(buf1)),
+ ipaddr2str(&es->originator_ip, buf2,
+ sizeof(buf2)),
+ es->vtep_list ? listcount(es->vtep_list) : 0);
+ }
+}
+
static void show_vni_entry(struct hash_backet *backet, void *args[])
{
struct vty *vty;
}
}
+/* Display EVPN routes for an ESI - VTY handler */
+static void evpn_show_routes_esi(struct vty *vty, struct bgp *bgp,
+ esi_t *esi, json_object *json)
+{
+ struct evpnes *es = NULL;
+
+ /* locate the ES */
+ es = bgp_evpn_lookup_es(bgp, esi);
+ if (!es) {
+ if (!json)
+ vty_out(vty, "ESI not found\n");
+ return;
+ }
+
+ show_esi_routes(bgp, es, vty, json);
+}
+
/*
* Display EVPN routes for a VNI - vty handler.
* If 'type' is non-zero, only routes matching that type are shown.
}
}
+/* Display specific ES */
+static void evpn_show_es(struct vty *vty, struct bgp *bgp, esi_t *esi,
+ json_object *json)
+{
+ struct evpnes *es = NULL;
+
+ es = bgp_evpn_lookup_es(bgp, esi);
+ if (es) {
+ display_es(vty, es, json);
+ } else {
+ if (json) {
+ vty_out(vty, "{}\n");
+ } else {
+ vty_out(vty, "ESI not found\n");
+ return;
+ }
+ }
+}
+
+/* Display all ESs */
+static void evpn_show_all_es(struct vty *vty, struct bgp *bgp,
+ json_object *json)
+{
+ void *args[2];
+
+ if (!json)
+ vty_out(vty, "%-30s %-6s %-21s %-15s %-6s\n",
+ "ESI", "Type", "RD", "Originator-IP", "#VTEPs");
+
+ /* print all ESs */
+ args[0] = vty;
+ args[1] = json;
+ hash_iterate(bgp->esihash,
+ (void (*)(struct hash_backet *, void *))show_es_entry,
+ args);
+}
+
/*
* Display specified VNI (vty handler)
*/
}
}
-static void write_vni_config_for_entry(struct hash_backet *backet,
- struct vty *vty)
-{
- struct bgpevpn *vpn = (struct bgpevpn *)backet->data;
- write_vni_config(vty, vpn);
-}
-
#if defined(HAVE_CUMULUS)
DEFUN (bgp_evpn_advertise_default_gw_vni,
bgp_evpn_advertise_default_gw_vni_cmd,
return CMD_SUCCESS;
}
+/* Display ES */
+DEFUN(show_bgp_l2vpn_evpn_es,
+ show_bgp_l2vpn_evpn_es_cmd,
+ "show bgp l2vpn evpn es [ESI] [json]",
+ SHOW_STR
+ BGP_STR
+ L2VPN_HELP_STR
+ EVPN_HELP_STR
+ "ethernet-Segment\n"
+ "Ethernet-Segment Identifier\n"
+ JSON_STR)
+{
+ int idx = 0;
+ uint8_t uj = 0;
+ esi_t esi = {0};
+ json_object *json = NULL;
+ struct bgp *bgp = NULL;
+
+ uj = use_json(argc, argv);
+
+ bgp = bgp_get_default();
+ if (!bgp)
+ return CMD_WARNING;
+
+ if (!argv_find(argv, argc, "evpn", &idx))
+ return CMD_WARNING;
+
+ if ((uj && argc == ((idx + 1) + 2)) ||
+ (!uj && argc == (idx + 1) + 1)) {
+
+ /* show all ESs */
+ evpn_show_all_es(vty, bgp, json);
+ } else {
+
+ /* show a specific ES */
+
+ /* get the ESI - ESI-ID is at argv[5] */
+ if (!str_to_esi(argv[idx + 2]->arg, &esi)) {
+ vty_out(vty, "%% Malformed ESI\n");
+ return CMD_WARNING;
+ }
+ evpn_show_es(vty, bgp, &esi, json);
+ }
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
+ return CMD_SUCCESS;
+}
+
/*
* Display EVPN neighbor summary.
*/
*/
DEFUN(show_bgp_l2vpn_evpn_route,
show_bgp_l2vpn_evpn_route_cmd,
- "show bgp l2vpn evpn route [type <macip|multicast|prefix>] [json]",
+ "show bgp l2vpn evpn route [type <macip|multicast|es|prefix>] [json]",
SHOW_STR
BGP_STR
L2VPN_HELP_STR
"Specify Route type\n"
"MAC-IP (Type-2) route\n"
"Multicast (Type-3) route\n"
- "Prefix route\n"
+ "Ethernet Segment (type-4) route \n"
+ "Prefix (type-5 )route\n"
JSON_STR)
{
struct bgp *bgp;
type = BGP_EVPN_MAC_IP_ROUTE;
else if (strncmp(argv[type_idx + 1]->arg, "mu", 2) == 0)
type = BGP_EVPN_IMET_ROUTE;
+ else if (strncmp(argv[type_idx + 1]->arg, "es", 2) == 0)
+ type = BGP_EVPN_ES_ROUTE;
else if (strncmp(argv[type_idx + 1]->arg, "pr", 2) == 0)
type = BGP_EVPN_IP_PREFIX_ROUTE;
else
*/
DEFUN(show_bgp_l2vpn_evpn_route_rd,
show_bgp_l2vpn_evpn_route_rd_cmd,
- "show bgp l2vpn evpn route rd ASN:NN_OR_IP-ADDRESS:NN [type <macip|multicast|prefix>] [json]",
+ "show bgp l2vpn evpn route rd ASN:NN_OR_IP-ADDRESS:NN [type <macip|multicast|es|prefix>] [json]",
SHOW_STR
BGP_STR
L2VPN_HELP_STR
"Specify Route type\n"
"MAC-IP (Type-2) route\n"
"Multicast (Type-3) route\n"
+ "Ethernet Segment route\n"
"Prefix route\n"
JSON_STR)
{
return CMD_SUCCESS;
}
+/* Display per ESI routing table */
+DEFUN(show_bgp_l2vpn_evpn_route_esi,
+ show_bgp_l2vpn_evpn_route_esi_cmd,
+ "show bgp l2vpn evpn route esi ESI [json]",
+ SHOW_STR
+ BGP_STR
+ L2VPN_HELP_STR
+ EVPN_HELP_STR
+ "EVPN route information\n"
+ "Ethernet Segment Identifier\n"
+ "ESI ID\n"
+ JSON_STR)
+{
+ int uj = 0;
+ esi_t esi = {0};
+ struct bgp *bgp = NULL;
+ json_object *json = NULL;
+
+ bgp = bgp_get_default();
+ if (!bgp)
+ return CMD_WARNING;
+
+ uj = use_json(argc, argv);
+ if (uj)
+ json = json_object_new_object();
+
+ /* get the ESI - ESI-ID is at argv[6] */
+ if (!str_to_esi(argv[6]->arg, &esi)) {
+ vty_out(vty, "%% Malformed ESI\n");
+ return CMD_WARNING;
+ }
+
+ evpn_show_routes_esi(vty, bgp, &esi, json);
+
+ if (uj) {
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY));
+ json_object_free(json);
+ }
+
+ return CMD_SUCCESS;
+}
+
+
/*
* Display per-VNI EVPN routing table.
*/
}
#if defined(HAVE_CUMULUS)
+DEFUN(test_adv_evpn_type4_route,
+ test_adv_evpn_type4_route_cmd,
+ "advertise es ESI",
+ "Advertise EVPN ES route\n"
+ "Ethernet-segment\n"
+ "Ethernet-Segment Identifier\n")
+{
+ int ret = 0;
+ esi_t esi;
+ struct bgp *bgp;
+ struct ipaddr vtep_ip;
+
+ bgp = bgp_get_default();
+ if (!bgp) {
+ vty_out(vty, "%%Default BGP instance not yet created\n");
+ return CMD_WARNING;
+ }
+
+ if (!str_to_esi(argv[2]->arg, &esi)) {
+ vty_out(vty, "%%Malformed ESI\n");
+ return CMD_WARNING;
+ }
+
+ vtep_ip.ipa_type = IPADDR_V4;
+ vtep_ip.ipaddr_v4 = bgp->router_id;
+
+ ret = bgp_evpn_local_es_add(bgp, &esi, &vtep_ip);
+ if (ret == -1) {
+ vty_out(vty, "%%Failed to EVPN advertise type-4 route\n");
+ return CMD_WARNING;
+ }
+ return CMD_SUCCESS;
+}
+
+DEFUN(test_withdraw_evpn_type4_route,
+ test_withdraw_evpn_type4_route_cmd,
+ "withdraw es ESI",
+ "Advertise EVPN ES route\n"
+ "Ethernet-segment\n"
+ "Ethernet-Segment Identifier\n")
+{
+ int ret = 0;
+ esi_t esi;
+ struct bgp *bgp;
+ struct ipaddr vtep_ip;
+
+ bgp = bgp_get_default();
+ if (!bgp) {
+ vty_out(vty, "%%Default BGP instance not yet created\n");
+ return CMD_WARNING;
+ }
+
+ if (!bgp->peer_self) {
+ vty_out(vty, "%%BGP instance doesnt have self peer\n");
+ return CMD_WARNING;
+ }
+
+ if (!str_to_esi(argv[2]->arg, &esi)) {
+ vty_out(vty, "%%Malformed ESI\n");
+ return CMD_WARNING;
+ }
+
+ vtep_ip.ipa_type = IPADDR_V4;
+ vtep_ip.ipaddr_v4 = bgp->router_id;
+ ret = bgp_evpn_local_es_del(bgp, &esi, &vtep_ip);
+ if (ret == -1) {
+ vty_out(vty, "%%Failed to withdraw EVPN type-4 route\n");
+ return CMD_WARNING;
+ }
+ return CMD_SUCCESS;
+}
+
ALIAS_HIDDEN(show_bgp_l2vpn_evpn_vni, show_bgp_evpn_vni_cmd,
"show bgp evpn vni [(1-16777215)]", SHOW_STR BGP_STR EVPN_HELP_STR
"Show VNI\n"
return CMD_SUCCESS;
}
#endif
+
+/* list_sort() comparator for bgpevpn entries keyed on VNI.
+ * NOTE(review): returning secnd - first yields *descending* VNI order
+ * under qsort-style comparator semantics — confirm that ascending
+ * output was not intended for the written configuration.
+ */
+static int vni_cmp(const void **a, const void **b)
+{
+ const struct bgpevpn *first = *a;
+ const struct bgpevpn *secnd = *b;
+
+ return secnd->vni - first->vni;
+}
+
/*
* Output EVPN configuration information.
*/
{
char buf1[RD_ADDRSTRLEN];
- if (bgp->vnihash)
- hash_iterate(bgp->vnihash,
- (void (*)(struct hash_backet *,
- void *))write_vni_config_for_entry,
- vty);
+ if (bgp->vnihash) {
+ struct list *vnilist = hash_to_list(bgp->vnihash);
+ struct listnode *ln;
+ struct bgpevpn *data;
+
+ list_sort(vnilist, vni_cmp);
+ for (ALL_LIST_ELEMENTS_RO(vnilist, ln, data))
+ write_vni_config(vty, data);
+
+ list_delete_and_null(&vnilist);
+ }
if (bgp->advertise_all_vni)
vty_out(vty, " advertise-all-vni\n");
install_element(BGP_EVPN_NODE, &bgp_evpn_default_originate_cmd);
install_element(BGP_EVPN_NODE, &no_bgp_evpn_default_originate_cmd);
+ /* test commands */
+ install_element(BGP_EVPN_NODE, &test_adv_evpn_type4_route_cmd);
+ install_element(BGP_EVPN_NODE, &test_withdraw_evpn_type4_route_cmd);
+
/* "show bgp l2vpn evpn" commands. */
+ install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_es_cmd);
install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_vni_cmd);
install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_summary_cmd);
install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_route_cmd);
install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_route_rd_cmd);
install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_route_rd_macip_cmd);
+ install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_route_esi_cmd);
install_element(VIEW_NODE, &show_bgp_l2vpn_evpn_route_vni_cmd);
install_element(VIEW_NODE,
&show_bgp_l2vpn_evpn_route_vni_multicast_cmd);
extern void route_vty_out_flowspec(struct vty *vty, struct prefix *p,
struct bgp_info *binfo,
int display, json_object *json_paths);
+extern int bgp_fs_config_write_pbr(struct vty *vty, struct bgp *bgp,
+ afi_t afi, safi_t safi);
+
#endif /* _FRR_BGP_FLOWSPEC_H */
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_flowspec_private.h"
#include "bgpd/bgp_debug.h"
+#include "bgpd/bgp_pbr.h"
/* Local Structures and variables declarations
* This code block hosts the struct declared that host the flowspec rules
XFREE(MTYPE_ECOMMUNITY_STR, s);
}
peer_uptime(binfo->uptime, timebuf, BGP_UPTIME_LEN, 0, NULL);
- if (display == NLRI_STRING_FORMAT_LARGE)
- vty_out(vty, "\tup for %8s\n", timebuf);
- else if (json_paths) {
+ if (display == NLRI_STRING_FORMAT_LARGE) {
+ vty_out(vty, "\treceived for %8s\n", timebuf);
+ } else if (json_paths) {
json_time_path = json_object_new_object();
json_object_string_add(json_time_path,
"time", timebuf);
if (display == NLRI_STRING_FORMAT_JSON)
json_object_array_add(json_paths, json_time_path);
}
+ if (display == NLRI_STRING_FORMAT_LARGE) {
+ struct bgp_info_extra *extra = bgp_info_extra_get(binfo);
+ if (extra->bgp_fs_pbr) {
+ struct bgp_pbr_match_entry *bpme;
+ struct bgp_pbr_match *bpm;
+
+ bpme = (struct bgp_pbr_match_entry *)extra->bgp_fs_pbr;
+ bpm = bpme->backpointer;
+ vty_out(vty, "\tinstalled in PBR");
+ if (bpm)
+ vty_out(vty, " (%s)\n", bpm->ipset_name);
+ else
+ vty_out(vty, "\n");
+ } else
+ vty_out(vty, "\tnot installed in PBR\n");
+ }
}
int bgp_show_table_flowspec(struct vty *vty, struct bgp *bgp, afi_t afi,
return CMD_SUCCESS;
}
+/* Write the flowspec/PBR "local-install" configuration lines for the
+ * given (afi, safi).  Only AFI_IP / SAFI_FLOWSPEC is handled.
+ * Returns 1 when non-default configuration was present (so the caller
+ * must declare the enclosing address-family node), 0 otherwise.
+ */
+int bgp_fs_config_write_pbr(struct vty *vty, struct bgp *bgp,
+ afi_t afi, safi_t safi)
+{
+ struct bgp_pbr_interface *pbr_if;
+ bool declare_node = false;
+ struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
+ struct bgp_pbr_interface_head *head;
+ bool bgp_pbr_interface_any;
+
+ if (!bgp_pbr_cfg || safi != SAFI_FLOWSPEC || afi != AFI_IP)
+ return 0;
+ head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
+ bgp_pbr_interface_any = bgp_pbr_cfg->pbr_interface_any_ipv4;
+ /* non-default when specific interfaces are listed or "any" is off */
+ if (!RB_EMPTY(bgp_pbr_interface_head, head) ||
+ !bgp_pbr_interface_any)
+ declare_node = true;
+ /* one line per explicitly configured interface */
+ RB_FOREACH (pbr_if, bgp_pbr_interface_head, head) {
+ vty_out(vty, " local-install %s\n", pbr_if->name);
+ }
+ /* "any" is the default; record its negation when disabled */
+ if (!bgp_pbr_interface_any)
+ vty_out(vty, " no local-install any\n");
+ return declare_node ? 1 : 0;
+}
+
+/* Apply "[no] local-install [INTERFACE]" for IPv4 flowspec.
+ *
+ * ifname == NULL selects the "any" form.  With "no", the named
+ * interface (or the any-interface default) is removed; otherwise it is
+ * added.  Adding a named interface clears the any-interface default.
+ *
+ * Fix vs. original: the "no <ifname>" path unlinked the entry with
+ * RB_REMOVE but never freed it, leaking the XCALLOC'd node.
+ */
+static int bgp_fs_local_install_interface(struct bgp *bgp,
+ const char *no, const char *ifname)
+{
+ struct bgp_pbr_interface *pbr_if;
+ struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
+ struct bgp_pbr_interface_head *head;
+ bool *bgp_pbr_interface_any;
+
+ if (!bgp_pbr_cfg)
+ return CMD_SUCCESS;
+ head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
+ bgp_pbr_interface_any = &(bgp_pbr_cfg->pbr_interface_any_ipv4);
+ if (no) {
+ if (!ifname) {
+ if (*bgp_pbr_interface_any) {
+ *bgp_pbr_interface_any = false;
+ /* remove all other interface list */
+ bgp_pbr_reset(bgp, AFI_IP);
+ }
+ return CMD_SUCCESS;
+ }
+ pbr_if = bgp_pbr_interface_lookup(ifname, head);
+ if (!pbr_if)
+ return CMD_SUCCESS;
+ RB_REMOVE(bgp_pbr_interface_head, head, pbr_if);
+ /* entry was allocated in the add path below; free it now
+ * that it is unlinked, to avoid leaking it
+ */
+ XFREE(MTYPE_TMP, pbr_if);
+ return CMD_SUCCESS;
+ }
+ if (ifname) {
+ pbr_if = bgp_pbr_interface_lookup(ifname, head);
+ if (pbr_if)
+ return CMD_SUCCESS;
+ pbr_if = XCALLOC(MTYPE_TMP,
+ sizeof(struct bgp_pbr_interface));
+ strlcpy(pbr_if->name, ifname, INTERFACE_NAMSIZ);
+ RB_INSERT(bgp_pbr_interface_head, head, pbr_if);
+ *bgp_pbr_interface_any = false;
+ } else {
+ /* set to default */
+ if (!*bgp_pbr_interface_any) {
+ /* remove all other interface list
+ */
+ bgp_pbr_reset(bgp, AFI_IP);
+ *bgp_pbr_interface_any = true;
+ }
+ }
+ return CMD_SUCCESS;
+}
+
+/* "[no] local-install INTERFACE": (de)activate local policy routing on
+ * a named interface in the flowspec address-family node.
+ */
+DEFUN (bgp_fs_local_install_ifname,
+ bgp_fs_local_install_ifname_cmd,
+ "[no] local-install INTERFACE",
+ NO_STR
+ "Apply local policy routing\n"
+ "Interface name\n")
+{
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+ int idx = 0;
+ /* string literal needs no (char *) cast for strmatch() */
+ const char *no = strmatch(argv[0]->text, "no") ? "no" : NULL;
+ char *ifname = argv_find(argv, argc, "INTERFACE", &idx) ?
+ argv[idx]->arg : NULL;
+
+ return bgp_fs_local_install_interface(bgp, no, ifname);
+}
+
+/* "[no] local-install any": (de)activate local policy routing on all
+ * interfaces (the default state).
+ */
+DEFUN (bgp_fs_local_install_any,
+ bgp_fs_local_install_any_cmd,
+ "[no] local-install any",
+ NO_STR
+ "Apply local policy routing\n"
+ "Any Interface\n")
+{
+ struct bgp *bgp = VTY_GET_CONTEXT(bgp);
+ /* string literal needs no (char *) cast for strmatch() */
+ const char *no = strmatch(argv[0]->text, "no") ? "no" : NULL;
+
+ return bgp_fs_local_install_interface(bgp, no, NULL);
+}
+
void bgp_flowspec_vty_init(void)
{
install_element(ENABLE_NODE, &debug_bgp_flowspec_cmd);
install_element(CONFIG_NODE, &debug_bgp_flowspec_cmd);
install_element(ENABLE_NODE, &no_debug_bgp_flowspec_cmd);
install_element(CONFIG_NODE, &no_debug_bgp_flowspec_cmd);
+ install_element(BGP_FLOWSPECV4_NODE, &bgp_fs_local_install_any_cmd);
+ install_element(BGP_FLOWSPECV4_NODE, &bgp_fs_local_install_ifname_cmd);
}
DEFINE_MTYPE(BGPD, LCOMMUNITY_VAL, "Large Community value")
DEFINE_MTYPE(BGPD, BGP_EVPN, "BGP EVPN Information")
+DEFINE_MTYPE(BGPD, BGP_EVPN_ES_VTEP, "BGP EVPN ES VTEP Ip")
+DEFINE_MTYPE(BGPD, BGP_EVPN_ES, "BGP EVPN ESI Information")
DEFINE_MTYPE(BGPD, BGP_EVPN_IMPORT_RT, "BGP EVPN Import RT")
DEFINE_MTYPE(BGPD, BGP_EVPN_VRF_IMPORT_RT, "BGP EVPN VRF Import RT")
DEFINE_MTYPE(BGPD, BGP_EVPN_MACIP, "BGP EVPN MAC IP")
DECLARE_MTYPE(LCOMMUNITY_STR)
DECLARE_MTYPE(LCOMMUNITY_VAL)
+DECLARE_MTYPE(BGP_EVPN_ES)
+DECLARE_MTYPE(BGP_EVPN_ES_VTEP)
+
DECLARE_MTYPE(BGP_EVPN)
DECLARE_MTYPE(BGP_EVPN_IMPORT_RT)
DECLARE_MTYPE(BGP_EVPN_VRF_IMPORT_RT)
if (bgpd_privs.change(ZPRIVS_RAISE))
zlog_err("Can't raise privileges");
sock = vrf_socket(ainfo->ai_family, ainfo->ai_socktype,
- ainfo->ai_protocol, bgp->vrf_id, NULL);
+ ainfo->ai_protocol, bgp->vrf_id,
+ (bgp->inst_type == BGP_INSTANCE_TYPE_VRF ?
+ bgp->name : NULL));
if (bgpd_privs.change(ZPRIVS_LOWER))
zlog_err("Can't lower privileges");
if (sock < 0) {
DEFINE_MTYPE_STATIC(BGPD, PBR_MATCH_ENTRY, "PBR match entry")
DEFINE_MTYPE_STATIC(BGPD, PBR_MATCH, "PBR match")
DEFINE_MTYPE_STATIC(BGPD, PBR_ACTION, "PBR action")
+DEFINE_MTYPE_STATIC(BGPD, PBR, "BGP PBR Context")
+
+RB_GENERATE(bgp_pbr_interface_head, bgp_pbr_interface,
+ id_entry, bgp_pbr_interface_compare);
+struct bgp_pbr_interface_head ifaces_by_name_ipv4 =
+ RB_INITIALIZER(&ifaces_by_name_ipv4);
static int bgp_pbr_match_counter_unique;
static int bgp_pbr_match_entry_counter_unique;
_cnt++; \
} while (0)
-/* return 1 if OK, 0 if validation should stop) */
+/* Extracted port bound(s); zero means unset.  An exact match sets only
+ * min_port.
+ */
+struct bgp_pbr_range_port {
+ uint16_t min_port;
+ uint16_t max_port;
+};
+
+/* Extract a port value or port range from a flowspec match-value list.
+ *
+ * Accepted shapes: a single exact match (==), or one/two relational
+ * operators (<, <=, >, >=) forming a range.  More than two values, or
+ * == combined with any other value, is rejected.  Strict < / > bounds
+ * are converted to inclusive max/min.  'range' may be NULL when the
+ * caller only needs validation.
+ *
+ * return true if extraction ok
+ */
+static bool bgp_pbr_extract(struct bgp_pbr_match_val list[],
+ int num,
+ struct bgp_pbr_range_port *range)
+{
+ int i = 0;
+ bool exact_match = false;
+
+ if (range)
+ memset(range, 0, sizeof(struct bgp_pbr_range_port));
+
+ if (num > 2)
+ return false;
+ for (i = 0; i < num; i++) {
+ /* == is only valid as the first (and only) value */
+ if (i != 0 && (list[i].compare_operator ==
+ OPERATOR_COMPARE_EQUAL_TO))
+ return false;
+ if (i == 0 && (list[i].compare_operator ==
+ OPERATOR_COMPARE_EQUAL_TO)) {
+ if (range)
+ range->min_port = list[i].value;
+ exact_match = true;
+ }
+ /* reject any second value after an exact match */
+ if (exact_match == true && i > 0)
+ return false;
+ if (list[i].compare_operator ==
+ (OPERATOR_COMPARE_GREATER_THAN +
+ OPERATOR_COMPARE_EQUAL_TO)) {
+ if (range)
+ range->min_port = list[i].value;
+ } else if (list[i].compare_operator ==
+ (OPERATOR_COMPARE_LESS_THAN +
+ OPERATOR_COMPARE_EQUAL_TO)) {
+ if (range)
+ range->max_port = list[i].value;
+ } else if (list[i].compare_operator ==
+ OPERATOR_COMPARE_LESS_THAN) {
+ /* strict bound -> inclusive bound */
+ if (range)
+ range->max_port = list[i].value - 1;
+ } else if (list[i].compare_operator ==
+ OPERATOR_COMPARE_GREATER_THAN) {
+ /* strict bound -> inclusive bound */
+ if (range)
+ range->min_port = list[i].value + 1;
+ }
+ }
+ return true;
+}
+
static int bgp_pbr_validate_policy_route(struct bgp_pbr_entry_main *api)
{
/* because bgp pbr entry may contain unsupported
* - combination src/dst => redirect nexthop [ + rate]
* - combination src/dst => redirect VRF [ + rate]
* - combination src/dst => drop
+ * - combination srcport + @IP
*/
- if (api->match_src_port_num || api->match_dst_port_num
- || api->match_port_num || api->match_protocol_num
- || api->match_icmp_type_num || api->match_icmp_type_num
- || api->match_packet_length_num || api->match_dscp_num
- || api->match_tcpflags_num) {
+ if (api->match_icmp_type_num || api->match_packet_length_num
+ || api->match_dscp_num || api->match_tcpflags_num) {
if (BGP_DEBUG(pbr, PBR)) {
bgp_pbr_print_policy_route(api);
zlog_debug("BGP: some SET actions not supported by Zebra. ignoring.");
+ zlog_debug("BGP: case icmp or length or dscp or tcp flags");
}
return 0;
}
+
+ if (api->match_protocol_num > 1) {
+ if (BGP_DEBUG(pbr, PBR))
+ zlog_debug("BGP: match protocol operations:"
+ "multiple protocols ( %d). ignoring.",
+ api->match_protocol_num);
+ return 0;
+ }
+ if (api->match_protocol_num == 1 &&
+ api->protocol[0].value != PROTOCOL_UDP &&
+ api->protocol[0].value != PROTOCOL_TCP) {
+ if (BGP_DEBUG(pbr, PBR))
+ zlog_debug("BGP: match protocol operations:"
+ "protocol (%d) not supported. ignoring",
+ api->match_protocol_num);
+ return 0;
+ }
+ if (!bgp_pbr_extract(api->src_port, api->match_src_port_num, NULL)) {
+ if (BGP_DEBUG(pbr, PBR))
+ zlog_debug("BGP: match src port operations:"
+ "too complex. ignoring.");
+ return 0;
+ }
+ if (!bgp_pbr_extract(api->dst_port, api->match_dst_port_num, NULL)) {
+ if (BGP_DEBUG(pbr, PBR))
+ zlog_debug("BGP: match dst port operations:"
+ "too complex. ignoring.");
+ return 0;
+ }
+ if (!bgp_pbr_extract(api->port, api->match_port_num, NULL)) {
+ if (BGP_DEBUG(pbr, PBR))
+ zlog_debug("BGP: match port operations:"
+ "too complex. ignoring.");
+ return 0;
+ }
+ /* no combinations with both src_port and dst_port
+ * or port with src_port and dst_port
+ */
+ if (api->match_src_port_num + api->match_dst_port_num +
+ api->match_port_num > 3) {
+ if (BGP_DEBUG(pbr, PBR))
+ zlog_debug("BGP: match multiple port operations:"
+ " too complex. ignoring.");
+ return 0;
+ }
if (!(api->match_bitmask & PREFIX_SRC_PRESENT) &&
!(api->match_bitmask & PREFIX_DST_PRESENT)) {
if (BGP_DEBUG(pbr, PBR)) {
ecom = info->attr->ecommunity;
for (i = 0; i < ecom->size; i++) {
ecom_eval = (struct ecommunity_val *)
- ecom->val + (i * ECOMMUNITY_SIZE);
-
+ (ecom->val + (i * ECOMMUNITY_SIZE));
+ action_count++;
if (action_count > ACTIONS_MAX_NUM) {
if (BGP_DEBUG(pbr, PBR_ERROR))
zlog_err("%s: flowspec actions exceeds limit (max %u)",
__func__, action_count);
break;
}
- api_action = &api->actions[action_count];
+ api_action = &api->actions[action_count - 1];
if ((ecom_eval->val[1] ==
(char)ECOMMUNITY_REDIRECT_VRF) &&
AFI_IP,
bpa->table_id,
false);
+ bpa->installed = false;
}
}
XFREE(MTYPE_PBR_ACTION, bpa);
pbme = (struct bgp_pbr_match_entry *)arg;
key = prefix_hash_key(&pbme->src);
key = jhash_1word(prefix_hash_key(&pbme->dst), key);
+ key = jhash(&pbme->dst_port_min, 2, key);
+ key = jhash(&pbme->src_port_min, 2, key);
+ key = jhash(&pbme->dst_port_max, 2, key);
+ key = jhash(&pbme->src_port_max, 2, key);
+ key = jhash(&pbme->proto, 1, key);
return key;
}
if (!prefix_same(&r1->dst, &r2->dst))
return 0;
+ if (r1->src_port_min != r2->src_port_min)
+ return 0;
+
+ if (r1->dst_port_min != r2->dst_port_min)
+ return 0;
+
+ if (r1->src_port_max != r2->src_port_max)
+ return 0;
+
+ if (r1->dst_port_max != r2->dst_port_max)
+ return 0;
+
+ if (r1->proto != r2->proto)
+ return 0;
+
return 1;
}
/* unique value is self calculated
* table and fwmark is self calculated
+ * rate is ignored
*/
- if (r1->rate != r2->rate)
- return 0;
-
if (r1->vrf_id != r2->vrf_id)
return 0;
hash_free(bgp->pbr_action_hash);
bgp->pbr_action_hash = NULL;
}
+ if (bgp->bgp_pbr_cfg == NULL)
+ return;
+ bgp_pbr_reset(bgp, AFI_IP);
+ XFREE(MTYPE_PBR, bgp->bgp_pbr_cfg);
+ bgp->bgp_pbr_cfg = NULL;
}
void bgp_pbr_init(struct bgp *bgp)
hash_create_size(8, bgp_pbr_action_hash_key,
bgp_pbr_action_hash_equal,
"Match Hash Entry");
+
+ bgp->bgp_pbr_cfg = XCALLOC(MTYPE_PBR, sizeof(struct bgp_pbr_config));
+ bgp->bgp_pbr_cfg->pbr_interface_any_ipv4 = true;
}
void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api)
bgp_send_pbr_ipset_entry_match(bpme, false);
bpme->installed = false;
bpme->backpointer = NULL;
+ if (bpme->bgp_info) {
+ struct bgp_info *bgp_info;
+ struct bgp_info_extra *extra;
+
+ /* unlink bgp_info to bpme */
+ bgp_info = (struct bgp_info *)bpme->bgp_info;
+ extra = bgp_info_extra_get(bgp_info);
+ extra->bgp_fs_pbr = NULL;
+ bpme->bgp_info = NULL;
+ }
}
hash_release(bpm->entry_hash, bpme);
if (hashcount(bpm->entry_hash) == 0) {
AFI_IP,
bpa->table_id,
false);
+ bpa->installed = false;
}
}
}
}
static void bgp_pbr_policyroute_remove_from_zebra(struct bgp *bgp,
- struct bgp_info *binfo,
- vrf_id_t vrf_id,
- struct prefix *src,
- struct prefix *dst)
+ struct bgp_info *binfo,
+ vrf_id_t vrf_id,
+ struct prefix *src,
+ struct prefix *dst,
+ uint8_t protocol,
+ struct bgp_pbr_range_port *src_port,
+ struct bgp_pbr_range_port *dst_port)
{
struct bgp_pbr_match temp;
struct bgp_pbr_match_entry temp2;
prefix_copy(&temp2.dst, dst);
} else
temp2.dst.family = AF_INET;
-
- if (src == NULL || dst == NULL)
- temp.type = IPSET_NET;
- else
- temp.type = IPSET_NET_NET;
+ if (src_port) {
+ temp.flags |= MATCH_PORT_SRC_SET;
+ temp2.src_port_min = src_port->min_port;
+ if (src_port->max_port) {
+ temp.flags |= MATCH_PORT_SRC_RANGE_SET;
+ temp2.src_port_max = src_port->max_port;
+ }
+ }
+ if (dst_port) {
+ temp.flags |= MATCH_PORT_DST_SET;
+ temp2.dst_port_min = dst_port->min_port;
+ if (dst_port->max_port) {
+ temp.flags |= MATCH_PORT_DST_RANGE_SET;
+ temp2.dst_port_max = dst_port->max_port;
+ }
+ }
+ temp2.proto = protocol;
+
+ if (src == NULL || dst == NULL) {
+ if (temp.flags & (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET))
+ temp.type = IPSET_NET_PORT;
+ else
+ temp.type = IPSET_NET;
+ } else {
+ if (temp.flags & (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET))
+ temp.type = IPSET_NET_PORT_NET;
+ else
+ temp.type = IPSET_NET_NET;
+ }
if (vrf_id == VRF_UNKNOWN) /* XXX case BGP destroy */
temp.vrf_id = 0;
else
}
static void bgp_pbr_policyroute_add_to_zebra(struct bgp *bgp,
- struct bgp_info *binfo,
- vrf_id_t vrf_id,
- struct prefix *src,
- struct prefix *dst,
- struct nexthop *nh,
- float *rate)
+ struct bgp_info *binfo,
+ vrf_id_t vrf_id,
+ struct prefix *src,
+ struct prefix *dst,
+ struct nexthop *nh,
+ float *rate,
+ uint8_t protocol,
+ struct bgp_pbr_range_port *src_port,
+ struct bgp_pbr_range_port *dst_port)
{
struct bgp_pbr_match temp;
struct bgp_pbr_match_entry temp2;
/* then look for bpm */
memset(&temp, 0, sizeof(temp));
- if (src == NULL || dst == NULL)
- temp.type = IPSET_NET;
- else
- temp.type = IPSET_NET_NET;
+ if (src == NULL || dst == NULL) {
+ if ((src_port && src_port->min_port) ||
+ (dst_port && dst_port->min_port))
+ temp.type = IPSET_NET_PORT;
+ else
+ temp.type = IPSET_NET;
+ } else {
+ if ((src_port && src_port->min_port) ||
+ (dst_port && dst_port->min_port))
+ temp.type = IPSET_NET_PORT_NET;
+ else
+ temp.type = IPSET_NET_NET;
+ }
temp.vrf_id = vrf_id;
if (src)
temp.flags |= MATCH_IP_SRC_SET;
if (dst)
temp.flags |= MATCH_IP_DST_SET;
+
+ if (src_port && src_port->min_port)
+ temp.flags |= MATCH_PORT_SRC_SET;
+ if (dst_port && dst_port->min_port)
+ temp.flags |= MATCH_PORT_DST_SET;
+ if (src_port && src_port->max_port)
+ temp.flags |= MATCH_PORT_SRC_RANGE_SET;
+ if (dst_port && dst_port->max_port)
+ temp.flags |= MATCH_PORT_DST_RANGE_SET;
temp.action = bpa;
bpm = hash_get(bgp->pbr_match_hash, &temp,
bgp_pbr_match_alloc_intern);
prefix_copy(&temp2.dst, dst);
else
temp2.dst.family = AF_INET;
+ temp2.src_port_min = src_port ? src_port->min_port : 0;
+ temp2.dst_port_min = dst_port ? dst_port->min_port : 0;
+ temp2.src_port_max = src_port ? src_port->max_port : 0;
+ temp2.dst_port_max = dst_port ? dst_port->max_port : 0;
+ temp2.proto = protocol;
if (bpm)
bpme = hash_get(bpm->entry_hash, &temp2,
- bgp_pbr_match_entry_alloc_intern);
+ bgp_pbr_match_entry_alloc_intern);
if (bpme && bpme->unique == 0) {
bpme->unique = ++bgp_pbr_match_entry_counter_unique;
/* 0 value is forbidden */
bpme->backpointer = bpm;
bpme->installed = false;
bpme->install_in_progress = false;
+ /* link bgp info to bpme */
+ bpme->bgp_info = (void *)binfo;
}
/* BGP FS: append entry to zebra
int continue_loop = 1;
float rate = 0;
struct prefix *src = NULL, *dst = NULL;
+ uint8_t proto = 0;
+ struct bgp_pbr_range_port *srcp = NULL, *dstp = NULL;
+ struct bgp_pbr_range_port range;
+ memset(&nh, 0, sizeof(struct nexthop));
if (api->match_bitmask & PREFIX_SRC_PRESENT)
src = &api->src_prefix;
if (api->match_bitmask & PREFIX_DST_PRESENT)
dst = &api->dst_prefix;
memset(&nh, 0, sizeof(struct nexthop));
nh.vrf_id = VRF_UNKNOWN;
-
+ if (api->match_protocol_num)
+ proto = (uint8_t)api->protocol[0].value;
+ /* if match_port is selected, then either src or dst port will be parsed
+ * but not both at the same time
+ */
+ if (api->match_port_num >= 1) {
+ bgp_pbr_extract(api->port,
+ api->match_port_num,
+ &range);
+ srcp = dstp = ⦥
+ } else if (api->match_src_port_num >= 1) {
+ bgp_pbr_extract(api->src_port,
+ api->match_src_port_num,
+ &range);
+ srcp = ⦥
+ dstp = NULL;
+ } else if (api->match_dst_port_num >= 1) {
+ bgp_pbr_extract(api->dst_port,
+ api->match_dst_port_num,
+ &range);
+ dstp = ⦥
+ srcp = NULL;
+ }
if (!add)
return bgp_pbr_policyroute_remove_from_zebra(bgp, binfo,
- api->vrf_id, src, dst);
+ api->vrf_id, src, dst,
+ proto, srcp, dstp);
/* no action for add = true */
for (i = 0; i < api->action_num; i++) {
switch (api->actions[i].action) {
nh.type = NEXTHOP_TYPE_BLACKHOLE;
bgp_pbr_policyroute_add_to_zebra(bgp, binfo,
api->vrf_id, src, dst,
- &nh, &rate);
+ &nh, &rate, proto,
+ srcp, dstp);
} else {
/* update rate. can be reentrant */
rate = api->actions[i].u.r.rate;
bgp_pbr_policyroute_add_to_zebra(bgp, binfo,
api->vrf_id,
src, dst,
- &nh, &rate);
+ &nh, &rate, proto,
+ srcp, dstp);
/* XXX combination with REDIRECT_VRF
* + REDIRECT_NH_IP not done
*/
bgp_pbr_policyroute_add_to_zebra(bgp, binfo,
api->vrf_id,
src, dst,
- &nh, &rate);
+ &nh, &rate, proto,
+ srcp, dstp);
continue_loop = 0;
break;
case ACTION_MARKING:
bool nlri_update)
{
struct bgp_pbr_entry_main api;
+ struct bgp_info_extra *extra = bgp_info_extra_get(info);
if (afi == AFI_IP6)
return; /* IPv6 not supported */
api.vrf_id = bgp->vrf_id;
api.afi = afi;
- if (bgp_pbr_build_and_validate_entry(p, info, &api) < 0) {
+ if (!bgp_zebra_tm_chunk_obtained()) {
if (BGP_DEBUG(pbr, PBR_ERROR))
- zlog_err("%s: cancel updating entry in bgp pbr",
+ zlog_err("%s: table chunk not obtained yet",
__func__);
return;
}
+ /* already installed */
+ if (nlri_update && extra->bgp_fs_pbr) {
+ if (BGP_DEBUG(pbr, PBR_ERROR))
+ zlog_err("%s: entry %p already installed in bgp pbr",
+ __func__, info);
+ return;
+ }
+
+ if (bgp_pbr_build_and_validate_entry(p, info, &api) < 0) {
+ if (BGP_DEBUG(pbr, PBR_ERROR))
+ zlog_err("%s: cancel updating entry %p in bgp pbr",
+ __func__, info);
+ return;
+ }
bgp_pbr_handle_entry(bgp, info, &api, nlri_update);
}
+
+/* RB-tree comparator: order bgp_pbr_interface entries by name. */
+int bgp_pbr_interface_compare(const struct bgp_pbr_interface *a,
+ const struct bgp_pbr_interface *b)
+{
+ return strcmp(a->name, b->name);
+}
+
+/* Look up an interface entry by name in the given RB tree head.
+ * Returns NULL when not present.  Uses a stack-local key entry, so the
+ * caller's name string is not retained.
+ */
+struct bgp_pbr_interface *bgp_pbr_interface_lookup(const char *name,
+ struct bgp_pbr_interface_head *head)
+{
+ struct bgp_pbr_interface pbr_if;
+
+ strlcpy(pbr_if.name, name, sizeof(pbr_if.name));
+ return (RB_FIND(bgp_pbr_interface_head,
+ head, &pbr_if));
+}
+
+/* this function resets to the default policy routing
+ * go back to default status
+ *
+ * Frees every per-interface entry configured for IPv4 PBR; only
+ * AFI_IP is supported.  The any-interface flag itself is left as-is;
+ * callers adjust it according to the operation being performed.
+ */
+void bgp_pbr_reset(struct bgp *bgp, afi_t afi)
+{
+ struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
+ struct bgp_pbr_interface_head *head;
+ struct bgp_pbr_interface *pbr_if;
+
+ if (!bgp_pbr_cfg || afi != AFI_IP)
+ return;
+ head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
+
+ /* pop and free entries until the tree is empty */
+ while (!RB_EMPTY(bgp_pbr_interface_head, head)) {
+ pbr_if = RB_ROOT(bgp_pbr_interface_head, head);
+ RB_REMOVE(bgp_pbr_interface_head, head, pbr_if);
+ XFREE(MTYPE_TMP, pbr_if);
+ }
+}
struct prefix src_prefix;
struct prefix dst_prefix;
+#define PROTOCOL_UDP 17
+#define PROTOCOL_TCP 6
struct bgp_pbr_match_val protocol[BGP_PBR_MATCH_VAL_MAX];
struct bgp_pbr_match_val src_port[BGP_PBR_MATCH_VAL_MAX];
struct bgp_pbr_match_val dst_port[BGP_PBR_MATCH_VAL_MAX];
vrf_id_t vrf_id;
};
+struct bgp_pbr_interface {
+ RB_ENTRY(bgp_pbr_interface) id_entry;
+ char name[INTERFACE_NAMSIZ];
+};
+
+RB_HEAD(bgp_pbr_interface_head, bgp_pbr_interface);
+RB_PROTOTYPE(bgp_pbr_interface_head, bgp_pbr_interface, id_entry,
+ bgp_pbr_interface_compare);
+
+extern int bgp_pbr_interface_compare(const struct bgp_pbr_interface *a,
+ const struct bgp_pbr_interface *b);
+
+struct bgp_pbr_config {
+ struct bgp_pbr_interface_head ifaces_by_name_ipv4;
+ bool pbr_interface_any_ipv4;
+};
+
+extern struct bgp_pbr_config *bgp_pbr_cfg;
+
struct bgp_pbr_match {
char ipset_name[ZEBRA_IPSET_NAME_SIZE];
#define MATCH_IP_SRC_SET (1 << 0)
#define MATCH_IP_DST_SET (1 << 1)
+#define MATCH_PORT_SRC_SET (1 << 2)
+#define MATCH_PORT_DST_SET (1 << 3)
+#define MATCH_PORT_SRC_RANGE_SET (1 << 4)
+#define MATCH_PORT_DST_RANGE_SET (1 << 5)
uint32_t flags;
vrf_id_t vrf_id;
struct prefix src;
struct prefix dst;
+ uint16_t src_port_min;
+ uint16_t src_port_max;
+ uint16_t dst_port_min;
+ uint16_t dst_port_max;
+ uint8_t proto;
+
+ void *bgp_info;
+
bool installed;
bool install_in_progress;
};
afi_t afi, safi_t safi,
bool nlri_update);
+/* bgp pbr utilities */
+extern struct bgp_pbr_interface *pbr_interface_lookup(const char *name);
+extern void bgp_pbr_reset(struct bgp *bgp, afi_t afi);
+extern struct bgp_pbr_interface *bgp_pbr_interface_lookup(const char *name,
+ struct bgp_pbr_interface_head *head);
+
#endif /* __BGP_PBR_H__ */
prd->family = AF_UNSPEC;
prd->prefixlen = 64;
sprintf(buf, "%s:%hu", inet_ntoa(router_id), rd_id);
- str2prefix_rd(buf, prd);
+ (void)str2prefix_rd(buf, prd);
}
{
struct bgp_info *mpinfo;
- /* If this is multipath, check all selected paths for any nexthop change
- * or
- * attribute change. Some attribute changes (e.g., community) aren't of
- * relevance to the RIB, but we'll update zebra to ensure we handle the
- * case of BGP nexthop change. This is the behavior when the best path
- * has
- * an attribute change anyway.
+ /* If this is multipath, check all selected paths for any nexthop
+ * change or attribute change. Some attribute changes (e.g., community)
+ * aren't of relevance to the RIB, but we'll update zebra to ensure
+ * we handle the case of BGP nexthop change. This is the behavior
+ * when the best path has an attribute change anyway.
*/
if (CHECK_FLAG(selected->flags, BGP_INFO_IGP_CHANGED)
|| CHECK_FLAG(selected->flags, BGP_INFO_MULTIPATH_CHG))
return 1;
- /* If this is multipath, check all selected paths for any nexthop change
+ /*
+ * If this is multipath, check all selected paths for any nexthop change
*/
for (mpinfo = bgp_info_mpath_first(selected); mpinfo;
mpinfo = bgp_info_mpath_next(mpinfo)) {
"%s Maximum-prefix restart timer expired, restore peering",
peer->host);
- peer_clear(peer, NULL);
+ if ((peer_clear(peer, NULL) < 0) && bgp_debug_neighbor_events(peer))
+ zlog_debug("%s: %s peer_clear failed",
+ __PRETTY_FUNCTION__, peer->host);
return 0;
}
json_object *json_no = NULL;
json_no = json_object_new_object();
json_object_string_add(json_no, "warning",
- "No such neighbor");
+ "No such neighbor in this view/vrf");
vty_out(vty, "%s\n",
json_object_to_json_string_ext(
json_no, JSON_C_TO_STRING_PRETTY));
json_object_free(json_no);
} else
- vty_out(vty, "No such neighbor\n");
+ vty_out(vty, "No such neighbor in this view/vrf\n");
return NULL;
}
}
static void show_adj_route(struct vty *vty, struct peer *peer, afi_t afi,
- safi_t safi, int in, const char *rmap_name,
- uint8_t use_json, json_object *json)
+ safi_t safi, enum bgp_show_adj_route_type type,
+ const char *rmap_name, uint8_t use_json,
+ json_object *json)
{
struct bgp_table *table;
struct bgp_adj_in *ain;
output_count = filtered_count = 0;
subgrp = peer_subgroup(peer, afi, safi);
- if (!in && subgrp
+ if (type == bgp_show_adj_route_advertised && subgrp
&& CHECK_FLAG(subgrp->sflags, SUBGRP_STATUS_DEFAULT_ORIGINATE)) {
if (use_json) {
json_object_int_add(json, "bgpTableVersion",
}
for (rn = bgp_table_top(table); rn; rn = bgp_route_next(rn)) {
- if (in) {
+ if (type == bgp_show_adj_route_received
+ || type == bgp_show_adj_route_filtered) {
for (ain = rn->adj_in; ain; ain = ain->next) {
- if (ain->peer != peer)
+ if (ain->peer != peer || !ain->attr)
continue;
+
if (header1) {
if (use_json) {
json_object_int_add(
vty_out(vty, BGP_SHOW_HEADER);
header2 = 0;
}
- if (ain->attr) {
- bgp_attr_dup(&attr, ain->attr);
- if (bgp_input_modifier(peer, &rn->p,
- &attr, afi, safi,
- rmap_name)
- != RMAP_DENY) {
- route_vty_out_tmp(vty, &rn->p,
- &attr, safi,
- use_json,
- json_ar);
- output_count++;
- } else
- filtered_count++;
+
+ bgp_attr_dup(&attr, ain->attr);
+ ret = bgp_input_modifier(peer, &rn->p, &attr,
+ afi, safi, rmap_name);
+
+ if (type == bgp_show_adj_route_filtered
+ && ret != RMAP_DENY) {
+ bgp_attr_undup(&attr, ain->attr);
+ continue;
}
+
+ if (type == bgp_show_adj_route_received
+ && ret == RMAP_DENY)
+ filtered_count++;
+
+ route_vty_out_tmp(vty, &rn->p, &attr, safi,
+ use_json, json_ar);
+ bgp_attr_undup(&attr, ain->attr);
+ output_count++;
}
- } else {
+ } else if (type == bgp_show_adj_route_advertised) {
for (adj = rn->adj_out; adj; adj = adj->next)
SUBGRP_FOREACH_PEER (adj->subgroup, paf) {
- if (paf->peer != peer)
+ if (paf->peer != peer || !adj->attr)
continue;
if (header1) {
}
header1 = 0;
}
-
if (header2) {
if (!use_json)
vty_out(vty,
header2 = 0;
}
- if (adj->attr) {
- bgp_attr_dup(&attr, adj->attr);
- ret = bgp_output_modifier(
- peer, &rn->p, &attr,
- afi, safi, rmap_name);
- if (ret != RMAP_DENY) {
- route_vty_out_tmp(
- vty, &rn->p,
- &attr, safi,
- use_json,
- json_ar);
- output_count++;
- } else
- filtered_count++;
+ bgp_attr_dup(&attr, adj->attr);
+ ret = bgp_output_modifier(
+ peer, &rn->p, &attr, afi, safi,
+ rmap_name);
- bgp_attr_undup(&attr,
- adj->attr);
+ if (ret != RMAP_DENY) {
+ route_vty_out_tmp(vty, &rn->p,
+ &attr, safi,
+ use_json,
+ json_ar);
+ output_count++;
+ } else {
+ filtered_count++;
}
+
+ bgp_attr_undup(&attr, adj->attr);
}
}
}
- if (use_json)
- json_object_object_add(json, "advertisedRoutes", json_ar);
- if (output_count != 0) {
- if (use_json)
- json_object_int_add(json, "totalPrefixCounter",
- output_count);
- else
- vty_out(vty, "\nTotal number of prefixes %ld\n",
- output_count);
- }
if (use_json) {
+ json_object_object_add(json, "advertisedRoutes", json_ar);
+ json_object_int_add(json, "totalPrefixCounter", output_count);
+ json_object_int_add(json, "filteredPrefixCounter",
+ filtered_count);
+
vty_out(vty, "%s\n", json_object_to_json_string_ext(
json, JSON_C_TO_STRING_PRETTY));
json_object_free(json);
+ } else if (output_count > 0) {
+ if (filtered_count > 0)
+ vty_out(vty,
+ "\nTotal number of prefixes %ld (%ld filtered)\n",
+ output_count, filtered_count);
+ else
+ vty_out(vty, "\nTotal number of prefixes %ld\n",
+ output_count);
}
}
static int peer_adj_routes(struct vty *vty, struct peer *peer, afi_t afi,
- safi_t safi, int in, const char *rmap_name,
- uint8_t use_json)
+ safi_t safi, enum bgp_show_adj_route_type type,
+ const char *rmap_name, uint8_t use_json)
{
json_object *json = NULL;
return CMD_WARNING;
}
- if (in
+ if ((type == bgp_show_adj_route_received
+ || type == bgp_show_adj_route_filtered)
&& !CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_SOFT_RECONFIG)) {
if (use_json) {
return CMD_WARNING;
}
- show_adj_route(vty, peer, afi, safi, in, rmap_name, use_json, json);
+ show_adj_route(vty, peer, afi, safi, type, rmap_name, use_json, json);
return CMD_SUCCESS;
}
DEFUN (show_ip_bgp_instance_neighbor_advertised_route,
show_ip_bgp_instance_neighbor_advertised_route_cmd,
"show [ip] bgp [<view|vrf> VIEWVRFNAME] ["BGP_AFI_CMD_STR" ["BGP_SAFI_WITH_LABEL_CMD_STR"]] "
- "neighbors <A.B.C.D|X:X::X:X|WORD> <received-routes|advertised-routes> [route-map WORD] [json]",
+ "neighbors <A.B.C.D|X:X::X:X|WORD> <advertised-routes|received-routes|filtered-routes> [route-map WORD] [json]",
SHOW_STR
IP_STR
BGP_STR
"Neighbor to display information about\n"
"Neighbor to display information about\n"
"Neighbor on BGP configured interface\n"
- "Display the received routes from neighbor\n"
"Display the routes advertised to a BGP neighbor\n"
+ "Display the received routes from neighbor\n"
+ "Display the filtered routes received from neighbor\n"
"Route-map to modify the attributes\n"
"Name of the route map\n"
JSON_STR)
safi_t safi = SAFI_UNICAST;
char *rmap_name = NULL;
char *peerstr = NULL;
- int rcvd = 0;
struct bgp *bgp = NULL;
struct peer *peer;
+ enum bgp_show_adj_route_type type = bgp_show_adj_route_advertised;
int idx = 0;
-
bgp_vty_find_and_parse_afi_safi_bgp(vty, argv, argc, &idx, &afi, &safi,
&bgp);
if (!idx)
return CMD_WARNING;
int uj = use_json(argc, argv);
+
if (uj)
argc--;
if (!peer)
return CMD_WARNING;
- if (argv_find(argv, argc, "received-routes", &idx))
- rcvd = 1;
if (argv_find(argv, argc, "advertised-routes", &idx))
- rcvd = 0;
+ type = bgp_show_adj_route_advertised;
+ else if (argv_find(argv, argc, "received-routes", &idx))
+ type = bgp_show_adj_route_received;
+ else if (argv_find(argv, argc, "filtered-routes", &idx))
+ type = bgp_show_adj_route_filtered;
+
if (argv_find(argv, argc, "route-map", &idx))
rmap_name = argv[++idx]->arg;
- return peer_adj_routes(vty, peer, afi, safi, rcvd, rmap_name, uj);
+ return peer_adj_routes(vty, peer, afi, safi, type, rmap_name, uj);
}
DEFUN (show_ip_bgp_neighbor_received_prefix_filter,
peerstr = argv[++idx]->arg;
peer = peer_lookup_in_view(vty, bgp, peerstr, uj);
- if (!peer) {
- vty_out(vty, "No such neighbor\n");
+ if (!peer)
return CMD_WARNING;
- }
if (argv_find(argv, argc, "flap-statistics", &idx))
sh_type = bgp_show_type_flap_neighbor;
bgp_show_type_detail,
};
+enum bgp_show_adj_route_type {
+ bgp_show_adj_route_advertised,
+ bgp_show_adj_route_received,
+ bgp_show_adj_route_filtered,
+};
+
#define BGP_SHOW_SCODE_HEADER \
"Status codes: s suppressed, d damped, " \
* Set nexthop_orig.family to 0 if not valid.
*/
struct prefix nexthop_orig;
+ /* presence of FS pbr entry */
+ void *bgp_fs_pbr;
};
struct bgp_info {
#endif
/* BGP global configuration. */
-
-DEFUN (bgp_multiple_instance_func,
- bgp_multiple_instance_cmd,
- "bgp multiple-instance",
- BGP_STR
- "Enable bgp multiple instance\n")
+#if defined(VERSION_TYPE_DEV) && (CONFDATE > 20190601)
+CPP_NOTICE("bgpd: time to remove deprecated bgp multiple-instance")
+CPP_NOTICE("This includes BGP_OPT_MULTIPLE_INSTANCE")
+#endif
+DEFUN_HIDDEN (bgp_multiple_instance_func,
+ bgp_multiple_instance_cmd,
+ "bgp multiple-instance",
+ BGP_STR
+ "Enable bgp multiple instance\n")
{
bgp_option_set(BGP_OPT_MULTIPLE_INSTANCE);
return CMD_SUCCESS;
}
-DEFUN (no_bgp_multiple_instance,
+DEFUN_HIDDEN (no_bgp_multiple_instance,
no_bgp_multiple_instance_cmd,
"no bgp multiple-instance",
NO_STR
{
int ret;
+ vty_out(vty, "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty, "if you are using this please let the developers know\n");
+ zlog_warn("Deprecated option: `bgp multiple-instance` being used");
ret = bgp_option_unset(BGP_OPT_MULTIPLE_INSTANCE);
if (ret < 0) {
vty_out(vty, "%% There are more than two BGP instances\n");
return CMD_SUCCESS;
}
-DEFUN (bgp_config_type,
- bgp_config_type_cmd,
- "bgp config-type <cisco|zebra>",
- BGP_STR
- "Configuration type\n"
- "cisco\n"
- "zebra\n")
+#if defined(VERSION_TYPE_DEV) && (CONFDATE > 20190601)
+CPP_NOTICE("bgpd: time to remove deprecated cli bgp config-type cisco")
+CPP_NOTICE("This includes BGP_OPT_CISCO_CONFIG")
+#endif
+DEFUN_HIDDEN (bgp_config_type,
+ bgp_config_type_cmd,
+ "bgp config-type <cisco|zebra>",
+ BGP_STR
+ "Configuration type\n"
+ "cisco\n"
+ "zebra\n")
{
int idx = 0;
- if (argv_find(argv, argc, "cisco", &idx))
+ if (argv_find(argv, argc, "cisco", &idx)) {
+ vty_out(vty, "This config option is deprecated, and is scheduled for removal.\n");
+ vty_out(vty, "if you are using this please let the developers know!\n");
+ zlog_warn("Deprecated option: `bgp config-type cisco` being used");
bgp_option_set(BGP_OPT_CONFIG_CISCO);
- else
+ } else
bgp_option_unset(BGP_OPT_CONFIG_CISCO);
return CMD_SUCCESS;
}
-DEFUN (no_bgp_config_type,
- no_bgp_config_type_cmd,
- "no bgp config-type [<cisco|zebra>]",
- NO_STR
- BGP_STR
- "Display configuration type\n"
- "cisco\n"
- "zebra\n")
+DEFUN_HIDDEN (no_bgp_config_type,
+ no_bgp_config_type_cmd,
+ "no bgp config-type [<cisco|zebra>]",
+ NO_STR
+ BGP_STR
+ "Display configuration type\n"
+ "cisco\n"
+ "zebra\n")
{
bgp_option_unset(BGP_OPT_CONFIG_CISCO);
return CMD_SUCCESS;
}
/* "bgp enforce-first-as" configuration. */
-DEFUN (bgp_enforce_first_as,
+#if defined(VERSION_TYPE_DEV) && CONFDATE > 20180517
+CPP_NOTICE("bgpd: remove deprecated '[no] bgp enforce-first-as' commands")
+#endif
+
+DEFUN_DEPRECATED (bgp_enforce_first_as,
bgp_enforce_first_as_cmd,
"bgp enforce-first-as",
BGP_STR
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
bgp_flag_set(bgp, BGP_FLAG_ENFORCE_FIRST_AS);
- bgp_clear_star_soft_in(vty, bgp->name);
return CMD_SUCCESS;
}
-DEFUN (no_bgp_enforce_first_as,
+DEFUN_DEPRECATED (no_bgp_enforce_first_as,
no_bgp_enforce_first_as_cmd,
"no bgp enforce-first-as",
NO_STR
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
bgp_flag_unset(bgp, BGP_FLAG_ENFORCE_FIRST_AS);
- bgp_clear_star_soft_in(vty, bgp->name);
return CMD_SUCCESS;
}
"Peer-group name\n")
static int peer_flag_modify_vty(struct vty *vty, const char *ip_str,
- uint16_t flag, int set)
+ uint32_t flag, int set)
{
int ret;
struct peer *peer;
return bgp_vty_return(vty, ret);
}
-static int peer_flag_set_vty(struct vty *vty, const char *ip_str, uint16_t flag)
+static int peer_flag_set_vty(struct vty *vty, const char *ip_str, uint32_t flag)
{
return peer_flag_modify_vty(vty, ip_str, flag, 1);
}
static int peer_flag_unset_vty(struct vty *vty, const char *ip_str,
- uint16_t flag)
+ uint32_t flag)
{
return peer_flag_modify_vty(vty, ip_str, flag, 0);
}
"Send Community attribute to this neighbor\n")
{
int idx_peer = 1;
+
return peer_af_flag_set_vty(vty, argv[idx_peer]->arg, bgp_node_afi(vty),
bgp_node_safi(vty),
PEER_FLAG_SEND_COMMUNITY);
"Send Community attribute to this neighbor\n")
{
int idx_peer = 2;
+
return peer_af_flag_unset_vty(vty, argv[idx_peer]->arg,
bgp_node_afi(vty), bgp_node_safi(vty),
PEER_FLAG_SEND_COMMUNITY);
"Send Standard Community attributes\n"
"Send Large Community attributes\n")
{
- int idx = 0;
+ int idx_peer = 1;
uint32_t flag = 0;
+ const char *type = argv[argc - 1]->text;
- char *peer = argv[1]->arg;
-
- if (argv_find(argv, argc, "standard", &idx))
+ if (strmatch(type, "standard")) {
SET_FLAG(flag, PEER_FLAG_SEND_COMMUNITY);
- else if (argv_find(argv, argc, "extended", &idx))
+ } else if (strmatch(type, "extended")) {
SET_FLAG(flag, PEER_FLAG_SEND_EXT_COMMUNITY);
- else if (argv_find(argv, argc, "large", &idx))
+ } else if (strmatch(type, "large")) {
SET_FLAG(flag, PEER_FLAG_SEND_LARGE_COMMUNITY);
- else if (argv_find(argv, argc, "both", &idx)) {
+ } else if (strmatch(type, "both")) {
SET_FLAG(flag, PEER_FLAG_SEND_COMMUNITY);
SET_FLAG(flag, PEER_FLAG_SEND_EXT_COMMUNITY);
- } else {
+ } else { /* if (strmatch(type, "all")) */
SET_FLAG(flag, PEER_FLAG_SEND_COMMUNITY);
SET_FLAG(flag, PEER_FLAG_SEND_EXT_COMMUNITY);
SET_FLAG(flag, PEER_FLAG_SEND_LARGE_COMMUNITY);
}
- return peer_af_flag_set_vty(vty, peer, bgp_node_afi(vty),
+ return peer_af_flag_set_vty(vty, argv[idx_peer]->arg, bgp_node_afi(vty),
bgp_node_safi(vty), flag);
}
"Send Large Community attributes\n")
{
int idx_peer = 2;
-
+ uint32_t flag = 0;
const char *type = argv[argc - 1]->text;
- if (strmatch(type, "standard"))
- return peer_af_flag_unset_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty),
- bgp_node_safi(vty), PEER_FLAG_SEND_COMMUNITY);
- if (strmatch(type, "extended"))
- return peer_af_flag_unset_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty),
- bgp_node_safi(vty), PEER_FLAG_SEND_EXT_COMMUNITY);
- if (strmatch(type, "large"))
- return peer_af_flag_unset_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty),
- bgp_node_safi(vty), PEER_FLAG_SEND_LARGE_COMMUNITY);
- if (strmatch(type, "both"))
- return peer_af_flag_unset_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty),
- bgp_node_safi(vty),
- PEER_FLAG_SEND_COMMUNITY
- | PEER_FLAG_SEND_EXT_COMMUNITY);
-
- /* if (strmatch (type, "all")) */
- return peer_af_flag_unset_vty(
- vty, argv[idx_peer]->arg, bgp_node_afi(vty), bgp_node_safi(vty),
- (PEER_FLAG_SEND_COMMUNITY | PEER_FLAG_SEND_EXT_COMMUNITY
- | PEER_FLAG_SEND_LARGE_COMMUNITY));
+ if (strmatch(type, "standard")) {
+ SET_FLAG(flag, PEER_FLAG_SEND_COMMUNITY);
+ } else if (strmatch(type, "extended")) {
+ SET_FLAG(flag, PEER_FLAG_SEND_EXT_COMMUNITY);
+ } else if (strmatch(type, "large")) {
+ SET_FLAG(flag, PEER_FLAG_SEND_LARGE_COMMUNITY);
+ } else if (strmatch(type, "both")) {
+ SET_FLAG(flag, PEER_FLAG_SEND_COMMUNITY);
+ SET_FLAG(flag, PEER_FLAG_SEND_EXT_COMMUNITY);
+ } else { /* if (strmatch(type, "all")) */
+ SET_FLAG(flag, PEER_FLAG_SEND_COMMUNITY);
+ SET_FLAG(flag, PEER_FLAG_SEND_EXT_COMMUNITY);
+ SET_FLAG(flag, PEER_FLAG_SEND_LARGE_COMMUNITY);
+ }
+
+ return peer_af_flag_unset_vty(vty, argv[idx_peer]->arg,
+ bgp_node_afi(vty), bgp_node_safi(vty),
+ flag);
}
ALIAS_HIDDEN(
PEER_FLAG_DISABLE_CONNECTED_CHECK);
}
+
+/* enforce-first-as */
+/* Per-neighbor "enforce-first-as": set PEER_FLAG_ENFORCE_FIRST_AS on the
+ * peer/group named by argv[1], replacing the deprecated per-instance
+ * "bgp enforce-first-as" knob.
+ */
+DEFUN (neighbor_enforce_first_as,
+ neighbor_enforce_first_as_cmd,
+ "neighbor <A.B.C.D|X:X::X:X|WORD> enforce-first-as",
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Enforce the first AS for EBGP routes\n")
+{
+ int idx_peer = 1;
+
+ return peer_flag_set_vty(vty, argv[idx_peer]->arg,
+ PEER_FLAG_ENFORCE_FIRST_AS);
+}
+
+/* "no neighbor ... enforce-first-as": clear PEER_FLAG_ENFORCE_FIRST_AS on
+ * the peer/group named by argv[2] (argv[0] is "no", argv[1] "neighbor").
+ */
+DEFUN (no_neighbor_enforce_first_as,
+ no_neighbor_enforce_first_as_cmd,
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> enforce-first-as",
+ NO_STR
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Enforce the first AS for EBGP routes\n")
+{
+ int idx_peer = 2;
+
+ return peer_flag_unset_vty(vty, argv[idx_peer]->arg,
+ PEER_FLAG_ENFORCE_FIRST_AS);
+}
+
+
DEFUN (neighbor_description,
neighbor_description_cmd,
"neighbor <A.B.C.D|X:X::X:X|WORD> description LINE...",
const char *direct_str)
{
int ret;
- struct peer *peer;
int direct = FILTER_IN;
+ struct peer *peer;
peer = peer_and_group_lookup_vty(vty, ip_str);
if (!peer)
if (argv_find(argv, argc, "no", &idx))
yes = 0;
+ /* If "no ...", squash trailing parameter */
+ if (!yes)
+ label_auto = NULL;
+
if (yes) {
if (!label_auto)
label = label_val; /* parser should force unsigned */
if (use_json)
json_object_boolean_true_add(json, "bgpNoSuchNeighbor");
else
- vty_out(vty, "%% No such neighbor\n");
+ vty_out(vty, "%% No such neighbor in this view/vrf\n");
}
if (use_json) {
install_element(BGP_NODE, &neighbor_disable_connected_check_cmd);
install_element(BGP_NODE, &no_neighbor_disable_connected_check_cmd);
+ /* "neighbor enforce-first-as" commands. */
+ install_element(BGP_NODE, &neighbor_enforce_first_as_cmd);
+ install_element(BGP_NODE, &no_neighbor_enforce_first_as_cmd);
+
/* "neighbor description" commands. */
install_element(BGP_NODE, &neighbor_description_cmd);
install_element(BGP_NODE, &no_neighbor_description_cmd);
static bool bgp_tm_chunk_obtained;
#define BGP_FLOWSPEC_TABLE_CHUNK 100000
static uint32_t bgp_tm_min, bgp_tm_max, bgp_tm_chunk_size;
+struct bgp *bgp_tm_bgp;
static int bgp_zebra_tm_connect(struct thread *t)
{
if (!bgp_tm_chunk_obtained) {
if (bgp_zebra_get_table_range(bgp_tm_chunk_size,
&bgp_tm_min,
- &bgp_tm_max) >= 0)
+ &bgp_tm_max) >= 0) {
bgp_tm_chunk_obtained = true;
+ /* parse non installed entries */
+ bgp_zebra_announce_table(bgp_tm_bgp, AFI_IP, SAFI_FLOWSPEC);
+ }
}
}
thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
return 0;
}
+/* Accessor: true once a table-id chunk has been obtained from the zebra
+ * table manager (set in bgp_zebra_tm_connect after a successful
+ * bgp_zebra_get_table_range).
+ */
+bool bgp_zebra_tm_chunk_obtained(void)
+{
+ return bgp_tm_chunk_obtained;
+}
+
uint32_t bgp_zebra_tm_get_id(void)
{
static int table_id;
return bgp_tm_min++;
}
-void bgp_zebra_init_tm_connect(void)
+void bgp_zebra_init_tm_connect(struct bgp *bgp)
{
int delay = 1;
bgp_tm_chunk_obtained = false;
bgp_tm_min = bgp_tm_max = 0;
bgp_tm_chunk_size = BGP_FLOWSPEC_TABLE_CHUNK;
+ bgp_tm_bgp = bgp;
thread_add_timer(bm->master, bgp_zebra_tm_connect, zclient, delay,
&bgp_tm_thread_connect);
}
return;
if (bgp_debug_zebra(p))
- prefix2str(&api.prefix, buf_prefix, sizeof(buf_prefix));
+ prefix2str(p, buf_prefix, sizeof(buf_prefix));
if (safi == SAFI_FLOWSPEC)
return bgp_pbr_update_entry(bgp, &rn->p,
tag = info->attr->tag;
- /*
- * When we create an aggregate route we must also install a
- * Null0 route in the RIB
- */
- if (info->sub_type == BGP_ROUTE_AGGREGATE)
- zapi_route_set_blackhole(&api, BLACKHOLE_NULL);
-
/* If the route's source is EVPN, flag as such. */
is_evpn = is_route_parent_evpn(info);
if (is_evpn)
&mpinfo_cp->attr->nexthop,
mpinfo_cp->attr, is_evpn, api_nh);
} else {
- ifindex_t ifindex;
+ ifindex_t ifindex = IFINDEX_INTERNAL;
struct in6_addr *nexthop;
if (bgp->table_map[afi][safi].name) {
if (has_valid_label && !(CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)))
SET_FLAG(api.message, ZAPI_MESSAGE_LABEL);
- if (info->sub_type != BGP_ROUTE_AGGREGATE)
+ /*
+ * When we create an aggregate route we must also
+ * install a Null0 route in the RIB, so overwrite
+ * what was written into api with a blackhole route
+ */
+ if (info->sub_type == BGP_ROUTE_AGGREGATE)
+ zapi_route_set_blackhole(&api, BLACKHOLE_NULL);
+ else
api.nexthop_num = valid_nh_count;
SET_FLAG(api.message, ZAPI_MESSAGE_METRIC);
zlog_debug("%s: Received RULE_INSTALLED",
__PRETTY_FUNCTION__);
break;
+ case ZAPI_RULE_FAIL_REMOVE:
case ZAPI_RULE_REMOVED:
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: Received RULE REMOVED",
bgp_pbim = bgp_pbr_match_ipset_lookup(vrf_id, unique);
if (!bgp_pbim) {
if (BGP_DEBUG(zebra, ZEBRA))
- zlog_debug("%s: Fail to look BGP match (%u)",
- __PRETTY_FUNCTION__, unique);
+ zlog_debug("%s: Fail to look BGP match ( %u %u)",
+ __PRETTY_FUNCTION__, note, unique);
return 0;
}
zlog_debug("%s: Received IPSET_INSTALLED",
__PRETTY_FUNCTION__);
break;
+ case ZAPI_IPSET_FAIL_REMOVE:
case ZAPI_IPSET_REMOVED:
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: Received IPSET REMOVED",
unique);
if (!bgp_pbime) {
if (BGP_DEBUG(zebra, ZEBRA))
- zlog_debug("%s: Fail to look BGP match entry (%u)",
- __PRETTY_FUNCTION__, unique);
+ zlog_debug("%s: Fail to look BGP match entry (%u %u)",
+ __PRETTY_FUNCTION__, note, unique);
return 0;
}
bgp_pbime->install_in_progress = false;
break;
case ZAPI_IPSET_ENTRY_INSTALLED:
- bgp_pbime->installed = true;
- bgp_pbime->install_in_progress = false;
- if (BGP_DEBUG(zebra, ZEBRA))
- zlog_debug("%s: Received IPSET_ENTRY_INSTALLED",
- __PRETTY_FUNCTION__);
+ {
+ struct bgp_info *bgp_info;
+ struct bgp_info_extra *extra;
+
+ bgp_pbime->installed = true;
+ bgp_pbime->install_in_progress = false;
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: Received IPSET_ENTRY_INSTALLED",
+ __PRETTY_FUNCTION__);
+ /* link bgp_info to bpme */
+ bgp_info = (struct bgp_info *)bgp_pbime->bgp_info;
+ extra = bgp_info_extra_get(bgp_info);
+ extra->bgp_fs_pbr = (void *)bgp_pbime;
+ }
break;
+ case ZAPI_IPSET_ENTRY_FAIL_REMOVE:
case ZAPI_IPSET_ENTRY_REMOVED:
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: Received IPSET_ENTRY_REMOVED",
bgpm = bgp_pbr_match_iptable_lookup(vrf_id, unique);
if (!bgpm) {
if (BGP_DEBUG(zebra, ZEBRA))
- zlog_debug("%s: Fail to look BGP iptable (%u)",
- __PRETTY_FUNCTION__, unique);
+ zlog_debug("%s: Fail to look BGP iptable (%u %u)",
+ __PRETTY_FUNCTION__, note, unique);
return 0;
}
switch (note) {
__PRETTY_FUNCTION__);
bgpm->action->refcnt++;
break;
+ case ZAPI_IPTABLE_FAIL_REMOVE:
case ZAPI_IPTABLE_REMOVED:
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: Received IPTABLE REMOVED",
stream_putc(s, pbime->dst.family);
stream_putc(s, pbime->dst.prefixlen);
stream_put(s, &pbime->dst.u.prefix, prefix_blen(&pbime->dst));
+
+ stream_putw(s, pbime->src_port_min);
+ stream_putw(s, pbime->src_port_max);
+ stream_putw(s, pbime->dst_port_min);
+ stream_putw(s, pbime->dst_port_max);
+ stream_putc(s, pbime->proto);
}
static void bgp_encode_pbr_iptable_match(struct stream *s,
*/
}
+/* zclient callback for ZEBRA_LOCAL_ES_ADD / ZEBRA_LOCAL_ES_DEL: decode an
+ * Ethernet Segment Id and originator IP from zebra's input buffer and hand
+ * them to the EVPN layer. Returns 0 in all cases; silently ignores
+ * messages for vrf_ids with no matching BGP instance.
+ */
+static int bgp_zebra_process_local_es(int cmd, struct zclient *zclient,
+ zebra_size_t length, vrf_id_t vrf_id)
+{
+ esi_t esi;
+ struct bgp *bgp = NULL;
+ struct stream *s = NULL;
+ char buf[ESI_STR_LEN];
+ char buf1[INET6_ADDRSTRLEN];
+ struct ipaddr originator_ip;
+
+ memset(&esi, 0, sizeof(esi_t));
+ memset(&originator_ip, 0, sizeof(struct ipaddr));
+
+ bgp = bgp_lookup_by_vrf_id(vrf_id);
+ if (!bgp)
+ return 0;
+
+ /* Fixed-layout decode: ESI followed by originator ipaddr. */
+ s = zclient->ibuf;
+ stream_get(&esi, s, sizeof(esi_t));
+ stream_get(&originator_ip, s, sizeof(struct ipaddr));
+
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("Rx %s ESI %s originator-ip %s",
+ (cmd == ZEBRA_LOCAL_ES_ADD) ? "add" : "del",
+ esi_to_str(&esi, buf, sizeof(buf)),
+ ipaddr2str(&originator_ip, buf1, sizeof(buf1)));
+
+ /* Same handler serves both commands; dispatch on cmd. */
+ if (cmd == ZEBRA_LOCAL_ES_ADD)
+ bgp_evpn_local_es_add(bgp, &esi, &originator_ip);
+ else
+ bgp_evpn_local_es_del(bgp, &esi, &originator_ip);
+ return 0;
+}
+
static int bgp_zebra_process_local_l3vni(int cmd, struct zclient *zclient,
zebra_size_t length, vrf_id_t vrf_id)
{
zclient->nexthop_update = bgp_read_nexthop_update;
zclient->import_check_update = bgp_read_import_check_update;
zclient->fec_update = bgp_read_fec_update;
+ zclient->local_es_add = bgp_zebra_process_local_es;
+ zclient->local_es_del = bgp_zebra_process_local_es;
zclient->local_vni_add = bgp_zebra_process_local_vni;
zclient->local_vni_del = bgp_zebra_process_local_vni;
zclient->local_macip_add = bgp_zebra_process_local_macip;
if (pbra->install_in_progress)
return;
- zlog_debug("%s: table %d fwmark %d %d", __PRETTY_FUNCTION__,
- pbra->table_id, pbra->fwmark, install);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: table %d fwmark %d %d",
+ __PRETTY_FUNCTION__,
+ pbra->table_id, pbra->fwmark, install);
s = zclient->obuf;
stream_reset(s);
if (pbrim->install_in_progress)
return;
- zlog_debug("%s: name %s type %d %d", __PRETTY_FUNCTION__,
- pbrim->ipset_name, pbrim->type, install);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: name %s type %d %d",
+ __PRETTY_FUNCTION__,
+ pbrim->ipset_name, pbrim->type, install);
s = zclient->obuf;
stream_reset(s);
if (pbrime->install_in_progress)
return;
- zlog_debug("%s: name %s %d %d", __PRETTY_FUNCTION__,
- pbrime->backpointer->ipset_name,
- pbrime->unique, install);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: name %s %d %d", __PRETTY_FUNCTION__,
+ pbrime->backpointer->ipset_name,
+ pbrime->unique, install);
s = zclient->obuf;
stream_reset(s);
pbrime->install_in_progress = true;
}
+/* Append to stream s the ifindex of every PBR-configured IPv4 interface
+ * of this bgp instance that currently exists in its VRF. Interfaces with
+ * no matching struct interface are skipped, mirroring the count logic in
+ * bgp_pbr_get_ifnumber — callers are expected to emit that count first.
+ */
+static void bgp_encode_pbr_interface_list(struct bgp *bgp, struct stream *s)
+{
+ struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
+ struct bgp_pbr_interface_head *head;
+ struct bgp_pbr_interface *pbr_if;
+ struct interface *ifp;
+
+ if (!bgp_pbr_cfg)
+ return;
+ head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
+
+ RB_FOREACH (pbr_if, bgp_pbr_interface_head, head) {
+ ifp = if_lookup_by_name(pbr_if->name, bgp->vrf_id);
+ if (ifp)
+ stream_putl(s, ifp->ifindex);
+ }
+}
+
+/* Count the PBR-configured IPv4 interfaces of this bgp instance that can
+ * be resolved by name in its VRF; 0 when PBR is unconfigured. Must stay
+ * in sync with bgp_encode_pbr_interface_list, which encodes exactly the
+ * interfaces counted here.
+ */
+static int bgp_pbr_get_ifnumber(struct bgp *bgp)
+{
+ struct bgp_pbr_config *bgp_pbr_cfg = bgp->bgp_pbr_cfg;
+ struct bgp_pbr_interface_head *head;
+ struct bgp_pbr_interface *pbr_if;
+ int cnt = 0;
+
+ if (!bgp_pbr_cfg)
+ return 0;
+ head = &(bgp_pbr_cfg->ifaces_by_name_ipv4);
+
+ RB_FOREACH (pbr_if, bgp_pbr_interface_head, head) {
+ if (if_lookup_by_name(pbr_if->name, bgp->vrf_id))
+ cnt++;
+ }
+ return cnt;
+}
+
void bgp_send_pbr_iptable(struct bgp_pbr_action *pba,
struct bgp_pbr_match *pbm,
bool install)
{
struct stream *s;
+ int ret = 0;
+ int nb_interface;
if (pbm->install_iptable_in_progress)
return;
- zlog_debug("%s: name %s type %d mark %d %d", __PRETTY_FUNCTION__,
- pbm->ipset_name, pbm->type, pba->fwmark, install);
+ if (BGP_DEBUG(zebra, ZEBRA))
+ zlog_debug("%s: name %s type %d mark %d %d",
+ __PRETTY_FUNCTION__, pbm->ipset_name,
+ pbm->type, pba->fwmark, install);
s = zclient->obuf;
stream_reset(s);
VRF_DEFAULT);
bgp_encode_pbr_iptable_match(s, pba, pbm);
-
+ nb_interface = bgp_pbr_get_ifnumber(pba->bgp);
+ stream_putl(s, nb_interface);
+ if (nb_interface)
+ bgp_encode_pbr_interface_list(pba->bgp, s);
stream_putw_at(s, 0, stream_get_endp(s));
- if (!zclient_send_message(zclient) && install) {
- pbm->install_iptable_in_progress = true;
- pba->refcnt++;
+ ret = zclient_send_message(zclient);
+ if (install) {
+ if (ret)
+ pba->refcnt++;
+ else
+ pbm->install_iptable_in_progress = true;
}
}
inet_ntop(AF_INET, &(nh->gate.ipv4), buff, INET_ADDRSTRLEN);
if (BGP_DEBUG(zebra, ZEBRA))
- zlog_info("BGP: sending default route to %s table %d (redirect IP)",
+ zlog_info("BGP: %s default route to %s table %d (redirect IP)",
+ announce ? "adding" : "withdrawing",
buff, table_id);
zclient_route_send(announce ? ZEBRA_ROUTE_ADD
: ZEBRA_ROUTE_DELETE,
api_nh->type = NEXTHOP_TYPE_IFINDEX;
api_nh->ifindex = ifp->ifindex;
if (BGP_DEBUG(zebra, ZEBRA))
- zlog_info("BGP: sending default route to %s table %d (redirect VRF)",
+ zlog_info("BGP: %s default route to %s table %d (redirect VRF)",
+ announce ? "adding" : "withdrawing",
vrf->name, table_id);
zclient_route_send(announce ? ZEBRA_ROUTE_ADD
: ZEBRA_ROUTE_DELETE,
#include "vxlan.h"
extern void bgp_zebra_init(struct thread_master *master);
-extern void bgp_zebra_init_tm_connect(void);
+extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
extern uint32_t bgp_zebra_tm_get_id(void);
+extern bool bgp_zebra_tm_chunk_obtained(void);
extern void bgp_zebra_destroy(void);
extern int bgp_zebra_get_table_range(uint32_t chunk_size,
uint32_t *start, uint32_t *end);
static int bgp_check_main_socket(bool create, struct bgp *bgp)
{
static int bgp_server_main_created;
- struct listnode *bgpnode, *nbgpnode;
- struct bgp *bgp_temp;
- if (bgp->inst_type == BGP_INSTANCE_TYPE_VRF &&
- vrf_is_mapped_on_netns(bgp->vrf_id))
- return 0;
if (create == true) {
if (bgp_server_main_created)
return 0;
}
if (!bgp_server_main_created)
return 0;
- /* only delete socket on some cases */
- for (ALL_LIST_ELEMENTS(bm->bgp, bgpnode, nbgpnode, bgp_temp)) {
- /* do not count with current bgp */
- if (bgp_temp == bgp)
- continue;
- /* if other instance non VRF, do not delete socket */
- if (bgp_temp->inst_type == BGP_INSTANCE_TYPE_DEFAULT)
- return 0;
- /* vrf lite, do not delete socket */
- if (!vrf_is_mapped_on_netns(bgp_temp->vrf_id))
- return 0;
- }
bgp_close();
bgp_server_main_created = 0;
return 0;
return CHECK_FLAG(peer->af_flags[afi][safi], flag);
}
-/* Return true if flag is set for the peer but not the peer-group */
-static int peergroup_af_flag_check(struct peer *peer, afi_t afi, safi_t safi,
- uint32_t flag)
+void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
+ uint32_t flag)
{
- struct peer *g_peer = NULL;
+ /* Skip if peer is not a peer-group member. */
+ if (!peer_group_active(peer))
+ return;
- if (peer_af_flag_check(peer, afi, safi, flag)) {
- if (peer_group_active(peer)) {
- g_peer = peer->group->conf;
+ /* Unset override flag to signal inheritance from peer-group. */
+ UNSET_FLAG(peer->af_flags_override[afi][safi], flag);
- /* If this flag is not set for the peer's peer-group
- * then return true */
- if (!peer_af_flag_check(g_peer, afi, safi, flag)) {
- return 1;
- }
- }
+ /* Inherit flag state from peer-group. */
+ if (CHECK_FLAG(peer->group->conf->af_flags[afi][safi], flag))
+ SET_FLAG(peer->af_flags[afi][safi], flag);
+ else
+ UNSET_FLAG(peer->af_flags[afi][safi], flag);
+}
- /* peer is not in a peer-group but the flag is set to return
- true */
- else {
- return 1;
- }
+static bool peergroup_af_flag_check(struct peer *peer, afi_t afi, safi_t safi,
+ uint32_t flag)
+{
+ if (!peer_group_active(peer)) {
+ if (CHECK_FLAG(peer->af_flags_invert[afi][safi], flag))
+ return !peer_af_flag_check(peer, afi, safi, flag);
+ else
+ return !!peer_af_flag_check(peer, afi, safi, flag);
}
- return 0;
+ return !!CHECK_FLAG(peer->af_flags_override[afi][safi], flag);
+}
+
+/* Report whether a filter of the given type/direction is effectively set
+ * on this peer. For peer-group members the per-peer override bit is the
+ * answer; otherwise check the actual filter name pointers. Unknown types
+ * yield false.
+ */
+static bool peergroup_filter_check(struct peer *peer, afi_t afi, safi_t safi,
+ uint8_t type, int direct)
+{
+ struct bgp_filter *filter;
+
+ if (peer_group_active(peer))
+ return !!CHECK_FLAG(peer->filter_override[afi][safi][direct],
+ type);
+
+ filter = &peer->filter[afi][safi];
+ switch (type) {
+ case PEER_FT_DISTRIBUTE_LIST:
+ return !!(filter->dlist[direct].name);
+ case PEER_FT_FILTER_LIST:
+ return !!(filter->aslist[direct].name);
+ case PEER_FT_PREFIX_LIST:
+ return !!(filter->plist[direct].name);
+ case PEER_FT_ROUTE_MAP:
+ return !!(filter->map[direct].name);
+ case PEER_FT_UNSUPPRESS_MAP:
+ /* unsuppress-map has no direction; single slot. */
+ return !!(filter->usmap.name);
+ default:
+ return false;
+ }
 }
/* Reset all address family specific configuration. */
PEER_FLAG_SEND_EXT_COMMUNITY);
SET_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY);
+
+ SET_FLAG(peer->af_flags_invert[afi][safi],
+ PEER_FLAG_SEND_COMMUNITY);
+ SET_FLAG(peer->af_flags_invert[afi][safi],
+ PEER_FLAG_SEND_EXT_COMMUNITY);
+ SET_FLAG(peer->af_flags_invert[afi][safi],
+ PEER_FLAG_SEND_LARGE_COMMUNITY);
}
/* Clear neighbor default_originate_rmap */
peer = peer_lock(peer); /* initial reference */
peer->password = NULL;
- /* Set default flags. */
+ /* Set default flags. */
FOREACH_AFI_SAFI (afi, safi) {
if (!bgp_option_check(BGP_OPT_CONFIG_CISCO)) {
SET_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_SEND_EXT_COMMUNITY);
SET_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_SEND_LARGE_COMMUNITY);
+
+ SET_FLAG(peer->af_flags_invert[afi][safi],
+ PEER_FLAG_SEND_COMMUNITY);
+ SET_FLAG(peer->af_flags_invert[afi][safi],
+ PEER_FLAG_SEND_EXT_COMMUNITY);
+ SET_FLAG(peer->af_flags_invert[afi][safi],
+ PEER_FLAG_SEND_LARGE_COMMUNITY);
}
- peer->orf_plist[afi][safi] = NULL;
}
/* set nexthop-unchanged for l2vpn evpn by default */
FOREACH_AFI_SAFI (afi, safi) {
peer_dst->afc[afi][safi] = peer_src->afc[afi][safi];
peer_dst->af_flags[afi][safi] = peer_src->af_flags[afi][safi];
+ peer_dst->af_flags_invert[afi][safi] =
+ peer_src->af_flags_invert[afi][safi];
peer_dst->allowas_in[afi][safi] =
peer_src->allowas_in[afi][safi];
peer_dst->weight[afi][safi] = peer_src->weight[afi][safi];
if (!peer->conf_if)
return;
+ /*
+ * Our peer structure is stored in the bgp->peerhash
+ * release it before we modify anything.
+ */
+ hash_release(peer->bgp->peerhash, peer);
+
prev_family = peer->su.sa.sa_family;
if ((ifp = if_lookup_by_name(peer->conf_if, peer->bgp->vrf_id))) {
peer->ifp = ifp;
memset(&peer->su.sin6.sin6_addr, 0, sizeof(struct in6_addr));
}
- /* Since our su changed we need to del/add peer to the peerhash */
- hash_release(peer->bgp->peerhash, peer);
+ /*
+ * Since our su changed we need to del/add peer to the peerhash
+ */
hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
}
/* peer af_flags apply */
peer->af_flags[afi][safi] = conf->af_flags[afi][safi];
+ peer->af_flags_invert[afi][safi] = conf->af_flags_invert[afi][safi];
/* maximum-prefix */
peer->pmax[afi][safi] = conf->pmax[afi][safi];
if (safi == SAFI_FLOWSPEC) {
/* connect to table manager */
- bgp_zebra_init_tm_connect();
+ bgp_zebra_init_tm_connect(bgp);
}
return ret;
}
/* Create BGP server socket, if listen mode not disabled */
if (!bgp || bgp_option_check(BGP_OPT_NO_LISTEN))
return 0;
- if (bgp->name && bgp->inst_type == BGP_INSTANCE_TYPE_VRF && vrf) {
+ if (bgp->inst_type == BGP_INSTANCE_TYPE_VRF) {
/*
* suppress vrf socket
*/
if (create == FALSE) {
- if (vrf_is_mapped_on_netns(vrf->vrf_id))
- bgp_close_vrf_socket(bgp);
- else
- ret = bgp_check_main_socket(create, bgp);
- return ret;
+ bgp_close_vrf_socket(bgp);
+ return 0;
}
+ if (vrf == NULL)
+ return BGP_ERR_INVALID_VALUE;
/* do nothing
* if vrf_id did not change
*/
*/
if (vrf->vrf_id == VRF_UNKNOWN)
return 0;
- /* if BGP VRF instance requested
- * if backend is NETNS, create BGP server socket in the NETNS
- */
- if (vrf_is_mapped_on_netns(bgp->vrf_id)) {
- ret = bgp_socket(bgp, bm->port, bm->address);
- if (ret < 0)
- return BGP_ERR_INVALID_VALUE;
- return 0;
- }
- }
- /* if BGP VRF instance requested or VRF lite backend
- * if BGP non VRF instance, create it
- * if not already done
- */
- return bgp_check_main_socket(create, bgp);
+ ret = bgp_socket(bgp, bm->port, bm->address);
+ if (ret < 0)
+ return BGP_ERR_INVALID_VALUE;
+ return 0;
+ } else
+ return bgp_check_main_socket(create, bgp);
}
/* Called from VTY commands. */
struct listnode *bgpnode, *nbgpnode;
for (ALL_LIST_ELEMENTS(bm->bgp, bgpnode, nbgpnode, bgp)) {
- /* Skip VRFs Lite only, this function will not be
- * invoked without an instance
- * when examining VRFs.
- */
- if ((bgp->inst_type == BGP_INSTANCE_TYPE_VRF)
- && !vrf_is_mapped_on_netns(bgp->vrf_id))
- continue;
-
peer = hash_lookup(bgp->peerhash, &tmp_peer);
-
if (peer)
break;
}
{PEER_FLAG_DYNAMIC_CAPABILITY, 0, peer_change_reset},
{PEER_FLAG_DISABLE_CONNECTED_CHECK, 0, peer_change_reset},
{PEER_FLAG_CAPABILITY_ENHE, 0, peer_change_reset},
+ {PEER_FLAG_ENFORCE_FIRST_AS, 0, peer_change_reset_in},
{0, 0, 0}};
static const struct peer_flag_action peer_af_flag_action_list[] = {
{PEER_FLAG_AS_PATH_UNCHANGED, 1, peer_change_reset_out},
{PEER_FLAG_NEXTHOP_UNCHANGED, 1, peer_change_reset_out},
{PEER_FLAG_MED_UNCHANGED, 1, peer_change_reset_out},
- // PEER_FLAG_DEFAULT_ORIGINATE
+ {PEER_FLAG_DEFAULT_ORIGINATE, 0, peer_change_none},
{PEER_FLAG_REMOVE_PRIVATE_AS, 1, peer_change_reset_out},
{PEER_FLAG_ALLOWAS_IN, 0, peer_change_reset_in},
{PEER_FLAG_ALLOWAS_IN_ORIGIN, 0, peer_change_reset_in},
{PEER_FLAG_ORF_PREFIX_SM, 1, peer_change_reset},
{PEER_FLAG_ORF_PREFIX_RM, 1, peer_change_reset},
- // PEER_FLAG_MAX_PREFIX
- // PEER_FLAG_MAX_PREFIX_WARNING
+ {PEER_FLAG_MAX_PREFIX, 0, peer_change_none},
+ {PEER_FLAG_MAX_PREFIX_WARNING, 0, peer_change_none},
{PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED, 0, peer_change_reset_out},
{PEER_FLAG_FORCE_NEXTHOP_SELF, 1, peer_change_reset_out},
{PEER_FLAG_REMOVE_PRIVATE_AS_ALL, 1, peer_change_reset_out},
}
static int peer_af_flag_modify(struct peer *peer, afi_t afi, safi_t safi,
- uint32_t flag, int set)
+ uint32_t flag, bool set)
{
int found;
int size;
+ int addpath_tx_used;
+ bool invert;
struct listnode *node, *nnode;
struct peer_group *group;
struct peer_flag_action action;
struct peer *tmp_peer;
struct bgp *bgp;
- int addpath_tx_used;
memset(&action, 0, sizeof(struct peer_flag_action));
size = sizeof peer_af_flag_action_list
/ sizeof(struct peer_flag_action);
+ invert = CHECK_FLAG(peer->af_flags_invert[afi][safi], flag);
found = peer_flag_action_set(peer_af_flag_action_list, size, &action,
flag);
/* When current flag configuration is same as requested one. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- if (set && CHECK_FLAG(peer->af_flags[afi][safi], flag) == flag)
+ if (set && CHECK_FLAG(peer->af_flags[afi][safi], flag)) {
+ if (invert)
+ UNSET_FLAG(peer->af_flags_override[afi][safi],
+ flag);
+ else
+ SET_FLAG(peer->af_flags_override[afi][safi],
+ flag);
return 0;
- if (!set && !CHECK_FLAG(peer->af_flags[afi][safi], flag))
+ }
+
+ if (!set && !CHECK_FLAG(peer->af_flags[afi][safi], flag)) {
+ if (invert)
+ SET_FLAG(peer->af_flags_override[afi][safi],
+ flag);
+ else
+ UNSET_FLAG(peer->af_flags_override[afi][safi],
+ flag);
return 0;
+ }
}
/*
}
}
- if (set)
- SET_FLAG(peer->af_flags[afi][safi], flag);
- else
- UNSET_FLAG(peer->af_flags[afi][safi], flag);
+ /* Set/unset flag or inherit from peer-group if appropriate. */
+ if (invert) {
+ if (!set)
+ UNSET_FLAG(peer->af_flags[afi][safi], flag);
+ else if (peer_group_active(peer))
+ peer_af_flag_inherit(peer, afi, safi, flag);
+ else
+ SET_FLAG(peer->af_flags[afi][safi], flag);
+ } else {
+ if (set)
+ SET_FLAG(peer->af_flags[afi][safi], flag);
+ else if (peer_group_active(peer))
+ peer_af_flag_inherit(peer, afi, safi, flag);
+ else
+ UNSET_FLAG(peer->af_flags[afi][safi], flag);
+ }
/* Execute action when peer is established. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)
/* Peer group member updates. */
if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
group = peer->group;
-
for (ALL_LIST_ELEMENTS(group->peer, node, nnode, tmp_peer)) {
+ if (CHECK_FLAG(tmp_peer->af_flags_override[afi][safi],
+ flag))
+ continue;
+
if (set
- && CHECK_FLAG(tmp_peer->af_flags[afi][safi], flag)
- == flag)
+ && CHECK_FLAG(tmp_peer->af_flags[afi][safi], flag))
continue;
if (!set
}
}
}
+ } else {
+ if (set != invert)
+ SET_FLAG(peer->af_flags_override[afi][safi], flag);
+ else
+ UNSET_FLAG(peer->af_flags_override[afi][safi], flag);
}
/* Track if addpath TX is in use */
int peer_default_originate_set(struct peer *peer, afi_t afi, safi_t safi,
const char *rmap)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
- if (!CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_DEFAULT_ORIGINATE)
- || (rmap && !peer->default_rmap[afi][safi].name)
- || (rmap
- && strcmp(rmap, peer->default_rmap[afi][safi].name) != 0)) {
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_DEFAULT_ORIGINATE);
-
- if (rmap) {
+ /* Set flag and configuration on peer. */
+ peer_af_flag_set(peer, afi, safi, PEER_FLAG_DEFAULT_ORIGINATE);
+ if (rmap) {
+ if (!peer->default_rmap[afi][safi].name
+ || strcmp(rmap, peer->default_rmap[afi][safi].name) != 0) {
if (peer->default_rmap[afi][safi].name)
XFREE(MTYPE_ROUTE_MAP_NAME,
peer->default_rmap[afi][safi].name);
+
peer->default_rmap[afi][safi].name =
XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap);
peer->default_rmap[afi][safi].map =
route_map_lookup_by_name(rmap);
}
+ } else if (!rmap) {
+ if (peer->default_rmap[afi][safi].name)
+ XFREE(MTYPE_ROUTE_MAP_NAME,
+ peer->default_rmap[afi][safi].name);
+
+ peer->default_rmap[afi][safi].name = NULL;
+ peer->default_rmap[afi][safi].map = NULL;
}
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Update peer route announcements. */
if (peer->status == Established && peer->afc_nego[afi][safi]) {
update_group_adjust_peer(peer_af_find(peer, afi, safi));
bgp_default_originate(peer, afi, safi, 0);
bgp_announce_route(peer, afi, safi);
}
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- /* peer-group member updates. */
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_DEFAULT_ORIGINATE);
+ /*
+ * Set flag and configuration on all peer-group members, unless they are
+ * explicitely overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->af_flags_override[afi][safi],
+ PEER_FLAG_DEFAULT_ORIGINATE))
+ continue;
+ /* Set flag and configuration on peer-group member. */
+ SET_FLAG(member->af_flags[afi][safi],
+ PEER_FLAG_DEFAULT_ORIGINATE);
if (rmap) {
- if (peer->default_rmap[afi][safi].name)
+ if (member->default_rmap[afi][safi].name)
XFREE(MTYPE_ROUTE_MAP_NAME,
- peer->default_rmap[afi][safi].name);
- peer->default_rmap[afi][safi].name =
+ member->default_rmap[afi][safi].name);
+
+ member->default_rmap[afi][safi].name =
XSTRDUP(MTYPE_ROUTE_MAP_NAME, rmap);
- peer->default_rmap[afi][safi].map =
+ member->default_rmap[afi][safi].map =
route_map_lookup_by_name(rmap);
}
- if (peer->status == Established && peer->afc_nego[afi][safi]) {
- update_group_adjust_peer(peer_af_find(peer, afi, safi));
- bgp_default_originate(peer, afi, safi, 0);
- bgp_announce_route(peer, afi, safi);
+ /* Update peer route announcements. */
+ if (member->status == Established
+ && member->afc_nego[afi][safi]) {
+ update_group_adjust_peer(
+ peer_af_find(member, afi, safi));
+ bgp_default_originate(member, afi, safi, 0);
+ bgp_announce_route(member, afi, safi);
}
}
+
return 0;
}
int peer_default_originate_unset(struct peer *peer, afi_t afi, safi_t safi)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_DEFAULT_ORIGINATE)) {
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_DEFAULT_ORIGINATE);
-
+ /* Inherit configuration from peer-group if peer is member. */
+ if (peer_group_active(peer)) {
+ peer_af_flag_inherit(peer, afi, safi,
+ PEER_FLAG_DEFAULT_ORIGINATE);
+ PEER_STR_ATTR_INHERIT(MTYPE_ROUTE_MAP_NAME, peer,
+ default_rmap[afi][safi].name);
+ PEER_ATTR_INHERIT(peer, default_rmap[afi][safi].map);
+ } else {
+ /* Otherwise remove flag and configuration from peer. */
+ peer_af_flag_unset(peer, afi, safi,
+ PEER_FLAG_DEFAULT_ORIGINATE);
if (peer->default_rmap[afi][safi].name)
XFREE(MTYPE_ROUTE_MAP_NAME,
peer->default_rmap[afi][safi].name);
peer->default_rmap[afi][safi].map = NULL;
}
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Update peer route announcements. */
if (peer->status == Established && peer->afc_nego[afi][safi]) {
update_group_adjust_peer(peer_af_find(peer, afi, safi));
bgp_default_originate(peer, afi, safi, 1);
bgp_announce_route(peer, afi, safi);
}
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
-	/* peer-group member updates. */
-	group = peer->group;
-	for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
-		UNSET_FLAG(peer->af_flags[afi][safi],
-			   PEER_FLAG_DEFAULT_ORIGINATE);
-
+	/*
+	 * Remove flag and configuration from all peer-group members, unless
+	 * they are explicitly overriding peer-group configuration.
+	 */
+	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+		/* Skip peers with overridden configuration. */
+		if (CHECK_FLAG(member->af_flags_override[afi][safi],
+			       PEER_FLAG_DEFAULT_ORIGINATE))
+			continue;
+
+		/* Remove flag and configuration on peer-group member. */
+		UNSET_FLAG(member->af_flags[afi][safi],
+			   PEER_FLAG_DEFAULT_ORIGINATE);
-		if (peer->default_rmap[afi][safi].name)
+		if (member->default_rmap[afi][safi].name)
 			XFREE(MTYPE_ROUTE_MAP_NAME,
-			      peer->default_rmap[afi][safi].name);
-		peer->default_rmap[afi][safi].name = NULL;
-		peer->default_rmap[afi][safi].map = NULL;
+			      member->default_rmap[afi][safi].name);
+		member->default_rmap[afi][safi].name = NULL;
+		member->default_rmap[afi][safi].map = NULL;
+
+		/* Update peer route announcements. */
-		if (peer->status == Established && peer->afc_nego[afi][safi]) {
-			update_group_adjust_peer(peer_af_find(peer, afi, safi));
-			bgp_default_originate(peer, afi, safi, 1);
-			bgp_announce_route(peer, afi, safi);
+		if (member->status == Established
+		    && member->afc_nego[afi][safi]) {
+			update_group_adjust_peer(
+				peer_af_find(member, afi, safi));
+			bgp_default_originate(member, afi, safi, 1);
+			bgp_announce_route(member, afi, safi);
 		}
 	}
+
return 0;
}
/* neighbor weight. */
int peer_weight_set(struct peer *peer, afi_t afi, safi_t safi, uint16_t weight)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
+ /* Set flag and configuration on peer. */
+ peer_af_flag_set(peer, afi, safi, PEER_FLAG_WEIGHT);
if (peer->weight[afi][safi] != weight) {
peer->weight[afi][safi] = weight;
- SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_WEIGHT);
peer_on_policy_change(peer, afi, safi, 0);
}
+ /* Skip peer-group mechanics for regular peers. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
return 0;
- /* peer-group member updates. */
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- if (peer->weight[afi][safi] != weight) {
- peer->weight[afi][safi] = weight;
- SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_WEIGHT);
- peer_on_policy_change(peer, afi, safi, 0);
+ /*
+ * Set flag and configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->af_flags_override[afi][safi],
+ PEER_FLAG_WEIGHT))
+ continue;
+
+ /* Set flag and configuration on peer-group member. */
+ SET_FLAG(member->af_flags[afi][safi], PEER_FLAG_WEIGHT);
+ if (member->weight[afi][safi] != weight) {
+ member->weight[afi][safi] = weight;
+ peer_on_policy_change(member, afi, safi, 0);
}
}
+
return 0;
}
int peer_weight_unset(struct peer *peer, afi_t afi, safi_t safi)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
- /* not the peer-group itself but a peer in a peer-group */
+ if (!CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_WEIGHT))
+ return 0;
+
+ /* Inherit configuration from peer-group if peer is member. */
if (peer_group_active(peer)) {
- group = peer->group;
+ peer_af_flag_inherit(peer, afi, safi, PEER_FLAG_WEIGHT);
+ PEER_ATTR_INHERIT(peer, weight[afi][safi]);
- /* inherit weight from the peer-group */
- if (CHECK_FLAG(group->conf->af_flags[afi][safi],
- PEER_FLAG_WEIGHT)) {
- peer->weight[afi][safi] =
- group->conf->weight[afi][safi];
- peer_af_flag_set(peer, afi, safi, PEER_FLAG_WEIGHT);
- peer_on_policy_change(peer, afi, safi, 0);
- } else {
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_WEIGHT)) {
- peer->weight[afi][safi] = 0;
- peer_af_flag_unset(peer, afi, safi,
- PEER_FLAG_WEIGHT);
- peer_on_policy_change(peer, afi, safi, 0);
- }
- }
+ peer_on_policy_change(peer, afi, safi, 0);
+ return 0;
}
- else {
- if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_WEIGHT)) {
- peer->weight[afi][safi] = 0;
- peer_af_flag_unset(peer, afi, safi, PEER_FLAG_WEIGHT);
- peer_on_policy_change(peer, afi, safi, 0);
- }
+ /* Remove flag and configuration from peer. */
+ peer_af_flag_unset(peer, afi, safi, PEER_FLAG_WEIGHT);
+ peer->weight[afi][safi] = 0;
+ peer_on_policy_change(peer, afi, safi, 0);
- /* peer-group member updates. */
- group = peer->group;
+ /* Skip peer-group mechanics for regular peers. */
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ return 0;
- if (group) {
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode,
- peer)) {
- if (CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_WEIGHT)) {
- peer->weight[afi][safi] = 0;
- peer_af_flag_unset(peer, afi, safi,
- PEER_FLAG_WEIGHT);
- peer_on_policy_change(peer, afi, safi,
- 0);
- }
- }
- }
+ /*
+ * Remove flag and configuration from all peer-group members, unless
+	 * they are explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->af_flags_override[afi][safi],
+ PEER_FLAG_WEIGHT))
+ continue;
+
+ /* Skip peers where flag is already disabled. */
+ if (!CHECK_FLAG(member->af_flags[afi][safi], PEER_FLAG_WEIGHT))
+ continue;
+
+ /* Remove flag and configuration on peer-group member. */
+ UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_WEIGHT);
+ member->weight[afi][safi] = 0;
+ peer_on_policy_change(member, afi, safi, 0);
}
+
return 0;
}
int peer_allowas_in_set(struct peer *peer, afi_t afi, safi_t safi,
int allow_num, int origin)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
+ if (!origin && (allow_num < 1 || allow_num > 10))
+ return BGP_ERR_INVALID_VALUE;
+
+ /* Set flag and configuration on peer. */
+ peer_af_flag_set(peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
if (origin) {
- if (peer->allowas_in[afi][safi]
- || CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN)
+ if (peer->allowas_in[afi][safi] != 0
|| !CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
- peer->allowas_in[afi][safi] = 0;
- peer_af_flag_unset(peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN);
peer_af_flag_set(peer, afi, safi,
PEER_FLAG_ALLOWAS_IN_ORIGIN);
+ peer->allowas_in[afi][safi] = 0;
peer_on_policy_change(peer, afi, safi, 0);
}
-
- if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
- return 0;
-
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- if (peer->allowas_in[afi][safi]
- || CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN)
- || !CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
- peer->allowas_in[afi][safi] = 0;
- peer_af_flag_unset(peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN);
- peer_af_flag_set(peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN_ORIGIN);
- peer_on_policy_change(peer, afi, safi, 0);
- }
- }
} else {
- if (allow_num < 1 || allow_num > 10)
- return BGP_ERR_INVALID_VALUE;
-
if (peer->allowas_in[afi][safi] != allow_num
|| CHECK_FLAG(peer->af_flags[afi][safi],
PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
- peer->allowas_in[afi][safi] = allow_num;
- peer_af_flag_set(peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
+
peer_af_flag_unset(peer, afi, safi,
PEER_FLAG_ALLOWAS_IN_ORIGIN);
+ peer->allowas_in[afi][safi] = allow_num;
peer_on_policy_change(peer, afi, safi, 0);
}
+ }
- if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
- return 0;
+ /* Skip peer-group mechanics for regular peers. */
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ return 0;
-	group = peer->group;
-	for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
-		if (peer->allowas_in[afi][safi] != allow_num
-		    || CHECK_FLAG(peer->af_flags[afi][safi],
+	/*
+	 * Set flag and configuration on all peer-group members, unless
+	 * they are explicitly overriding peer-group configuration.
+	 */
+	for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+		/* Skip peers with overridden configuration. */
+		if (CHECK_FLAG(member->af_flags_override[afi][safi],
+			       PEER_FLAG_ALLOWAS_IN))
+			continue;
+
+		/* Set flag and configuration on peer-group member. */
+		SET_FLAG(member->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN);
+		if (origin) {
+			if (member->allowas_in[afi][safi] != 0
+			    || !CHECK_FLAG(member->af_flags[afi][safi],
+					   PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
+				SET_FLAG(member->af_flags[afi][safi],
+					 PEER_FLAG_ALLOWAS_IN_ORIGIN);
+				member->allowas_in[afi][safi] = 0;
+				peer_on_policy_change(member, afi, safi, 0);
+			}
+		} else {
+			if (member->allowas_in[afi][safi] != allow_num
+			    || CHECK_FLAG(member->af_flags[afi][safi],
 					  PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
-				peer->allowas_in[afi][safi] = allow_num;
-				peer_af_flag_set(peer, afi, safi,
-						 PEER_FLAG_ALLOWAS_IN);
-				peer_af_flag_unset(peer, afi, safi,
-						   PEER_FLAG_ALLOWAS_IN_ORIGIN);
-				peer_on_policy_change(peer, afi, safi, 0);
+				UNSET_FLAG(member->af_flags[afi][safi],
+					   PEER_FLAG_ALLOWAS_IN_ORIGIN);
+				member->allowas_in[afi][safi] = allow_num;
+				peer_on_policy_change(member, afi, safi, 0);
 			}
 		}
int peer_allowas_in_unset(struct peer *peer, afi_t afi, safi_t safi)
{
- struct peer_group *group;
- struct peer *tmp_peer;
+ struct peer *member;
struct listnode *node, *nnode;
- /* If this is a peer-group we must first clear the flags for all of the
- * peer-group members
- */
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, tmp_peer)) {
- if (CHECK_FLAG(tmp_peer->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN)
- || CHECK_FLAG(tmp_peer->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
- tmp_peer->allowas_in[afi][safi] = 0;
- peer_af_flag_unset(tmp_peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN);
- peer_af_flag_unset(tmp_peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN_ORIGIN);
- peer_on_policy_change(tmp_peer, afi, safi, 0);
- }
- }
- }
+ /* Skip peer if flag is already disabled. */
+ if (!CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN))
+ return 0;
- if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN)
- || CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
- peer->allowas_in[afi][safi] = 0;
- peer_af_flag_unset(peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
- peer_af_flag_unset(peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN_ORIGIN);
+ /* Inherit configuration from peer-group if peer is member. */
+ if (peer_group_active(peer)) {
+ peer_af_flag_inherit(peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
+ peer_af_flag_inherit(peer, afi, safi,
+ PEER_FLAG_ALLOWAS_IN_ORIGIN);
+ PEER_ATTR_INHERIT(peer, allowas_in[afi][safi]);
peer_on_policy_change(peer, afi, safi, 0);
+
+ return 0;
+ }
+
+ /* Remove flag and configuration from peer. */
+ peer_af_flag_unset(peer, afi, safi, PEER_FLAG_ALLOWAS_IN);
+ peer_af_flag_unset(peer, afi, safi, PEER_FLAG_ALLOWAS_IN_ORIGIN);
+ peer->allowas_in[afi][safi] = 0;
+ peer_on_policy_change(peer, afi, safi, 0);
+
+ /* Skip peer-group mechanics if handling a regular peer. */
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP))
+ return 0;
+
+ /*
+ * Remove flags and configuration from all peer-group members, unless
+	 * they are explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->af_flags_override[afi][safi],
+ PEER_FLAG_ALLOWAS_IN))
+ continue;
+
+ /* Skip peers where flag is already disabled. */
+ if (!CHECK_FLAG(member->af_flags[afi][safi],
+ PEER_FLAG_ALLOWAS_IN))
+ continue;
+
+ /* Remove flags and configuration on peer-group member. */
+ UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_ALLOWAS_IN);
+ UNSET_FLAG(member->af_flags[afi][safi],
+ PEER_FLAG_ALLOWAS_IN_ORIGIN);
+ member->allowas_in[afi][safi] = 0;
+ peer_on_policy_change(member, afi, safi, 0);
}
return 0;
int peer_distribute_set(struct peer *peer, afi_t afi, safi_t safi, int direct,
const char *name)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
+ /* Set configuration on peer. */
filter = &peer->filter[afi][safi];
-
if (filter->plist[direct].name)
return BGP_ERR_PEER_FILTER_CONFLICT;
-
if (filter->dlist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->dlist[direct].name);
filter->dlist[direct].name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->dlist[direct].alist = access_list_lookup(afi, name);
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Set override-flag and process peer route updates. */
+ SET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_DISTRIBUTE_LIST);
peer_on_policy_change(peer, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+	 * Set configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_DISTRIBUTE_LIST))
+ continue;
+ /* Set configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->dlist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME,
filter->dlist[direct].name);
filter->dlist[direct].name =
XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->dlist[direct].alist = access_list_lookup(afi, name);
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
}
int peer_distribute_unset(struct peer *peer, afi_t afi, safi_t safi, int direct)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct bgp_filter *gfilter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
- filter = &peer->filter[afi][safi];
+ /* Unset override-flag unconditionally. */
+ UNSET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_DISTRIBUTE_LIST);
- /* apply peer-group filter */
+ /* Inherit configuration from peer-group if peer is member. */
if (peer_group_active(peer)) {
- gfilter = &peer->group->conf->filter[afi][safi];
-
- if (gfilter->dlist[direct].name) {
- if (filter->dlist[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME,
- filter->dlist[direct].name);
- filter->dlist[direct].name =
- XSTRDUP(MTYPE_BGP_FILTER_NAME,
- gfilter->dlist[direct].name);
- filter->dlist[direct].alist =
- gfilter->dlist[direct].alist;
- peer_on_policy_change(peer, afi, safi,
- (direct == FILTER_OUT) ? 1 : 0);
- return 0;
- }
- }
-
- if (filter->dlist[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME, filter->dlist[direct].name);
- filter->dlist[direct].name = NULL;
- filter->dlist[direct].alist = NULL;
+ PEER_STR_ATTR_INHERIT(MTYPE_BGP_FILTER_NAME, peer,
+ filter[afi][safi].dlist[direct].name);
+ PEER_ATTR_INHERIT(peer, filter[afi][safi].dlist[direct].alist);
+ } else {
+ /* Otherwise remove configuration from peer. */
+ filter = &peer->filter[afi][safi];
+ if (filter->dlist[direct].name)
+ XFREE(MTYPE_BGP_FILTER_NAME,
+ filter->dlist[direct].name);
+ filter->dlist[direct].name = NULL;
+ filter->dlist[direct].alist = NULL;
+ }
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Process peer route updates. */
peer_on_policy_change(peer, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Remove configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_DISTRIBUTE_LIST))
+ continue;
+ /* Remove configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->dlist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME,
filter->dlist[direct].name);
filter->dlist[direct].name = NULL;
filter->dlist[direct].alist = NULL;
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
}
int peer_prefix_list_set(struct peer *peer, afi_t afi, safi_t safi, int direct,
const char *name)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
+ /* Set configuration on peer. */
filter = &peer->filter[afi][safi];
-
if (filter->dlist[direct].name)
return BGP_ERR_PEER_FILTER_CONFLICT;
-
if (filter->plist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->plist[direct].name);
filter->plist[direct].name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->plist[direct].plist = prefix_list_lookup(afi, name);
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Set override-flag and process peer route updates. */
+ SET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_PREFIX_LIST);
peer_on_policy_change(peer, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Set configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_PREFIX_LIST))
+ continue;
+ /* Set configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->plist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME,
filter->plist[direct].name);
filter->plist[direct].name =
XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->plist[direct].plist = prefix_list_lookup(afi, name);
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
}
+
return 0;
}
int peer_prefix_list_unset(struct peer *peer, afi_t afi, safi_t safi,
int direct)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct bgp_filter *gfilter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
- filter = &peer->filter[afi][safi];
+ /* Unset override-flag unconditionally. */
+ UNSET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_PREFIX_LIST);
- /* apply peer-group filter */
+ /* Inherit configuration from peer-group if peer is member. */
if (peer_group_active(peer)) {
- gfilter = &peer->group->conf->filter[afi][safi];
-
- if (gfilter->plist[direct].name) {
- if (filter->plist[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME,
- filter->plist[direct].name);
- filter->plist[direct].name =
- XSTRDUP(MTYPE_BGP_FILTER_NAME,
- gfilter->plist[direct].name);
- filter->plist[direct].plist =
- gfilter->plist[direct].plist;
- peer_on_policy_change(peer, afi, safi,
- (direct == FILTER_OUT) ? 1 : 0);
- return 0;
- }
+ PEER_STR_ATTR_INHERIT(MTYPE_BGP_FILTER_NAME, peer,
+ filter[afi][safi].plist[direct].name);
+ PEER_ATTR_INHERIT(peer, filter[afi][safi].plist[direct].plist);
+ } else {
+ /* Otherwise remove configuration from peer. */
+ filter = &peer->filter[afi][safi];
+ if (filter->plist[direct].name)
+ XFREE(MTYPE_BGP_FILTER_NAME,
+ filter->plist[direct].name);
+ filter->plist[direct].name = NULL;
+ filter->plist[direct].plist = NULL;
}
- if (filter->plist[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME, filter->plist[direct].name);
- filter->plist[direct].name = NULL;
- filter->plist[direct].plist = NULL;
-
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Process peer route updates. */
peer_on_policy_change(peer, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Remove configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_PREFIX_LIST))
+ continue;
+ /* Remove configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->plist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME,
filter->plist[direct].name);
filter->plist[direct].name = NULL;
filter->plist[direct].plist = NULL;
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
}
int peer_aslist_set(struct peer *peer, afi_t afi, safi_t safi, int direct,
const char *name)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
+ /* Set configuration on peer. */
filter = &peer->filter[afi][safi];
-
if (filter->aslist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->aslist[direct].name);
filter->aslist[direct].name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->aslist[direct].aslist = as_list_lookup(name);
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Set override-flag and process peer route updates. */
+ SET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_FILTER_LIST);
peer_on_policy_change(peer, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Set configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_FILTER_LIST))
+ continue;
+ /* Set configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->aslist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME,
filter->aslist[direct].name);
filter->aslist[direct].name =
XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->aslist[direct].aslist = as_list_lookup(name);
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
}
+
return 0;
}
int peer_aslist_unset(struct peer *peer, afi_t afi, safi_t safi, int direct)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct bgp_filter *gfilter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != FILTER_IN && direct != FILTER_OUT)
return BGP_ERR_INVALID_VALUE;
- filter = &peer->filter[afi][safi];
+ /* Unset override-flag unconditionally. */
+ UNSET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_FILTER_LIST);
- /* apply peer-group filter */
+ /* Inherit configuration from peer-group if peer is member. */
if (peer_group_active(peer)) {
- gfilter = &peer->group->conf->filter[afi][safi];
-
- if (gfilter->aslist[direct].name) {
- if (filter->aslist[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME,
- filter->aslist[direct].name);
- filter->aslist[direct].name =
- XSTRDUP(MTYPE_BGP_FILTER_NAME,
- gfilter->aslist[direct].name);
- filter->aslist[direct].aslist =
- gfilter->aslist[direct].aslist;
- peer_on_policy_change(peer, afi, safi,
- (direct == FILTER_OUT) ? 1 : 0);
- return 0;
- }
+ PEER_STR_ATTR_INHERIT(MTYPE_BGP_FILTER_NAME, peer,
+ filter[afi][safi].aslist[direct].name);
+ PEER_ATTR_INHERIT(peer,
+ filter[afi][safi].aslist[direct].aslist);
+ } else {
+ /* Otherwise remove configuration from peer. */
+ filter = &peer->filter[afi][safi];
+ if (filter->aslist[direct].name)
+ XFREE(MTYPE_BGP_FILTER_NAME,
+ filter->aslist[direct].name);
+ filter->aslist[direct].name = NULL;
+ filter->aslist[direct].aslist = NULL;
}
- if (filter->aslist[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME, filter->aslist[direct].name);
- filter->aslist[direct].name = NULL;
- filter->aslist[direct].aslist = NULL;
-
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Process peer route updates. */
peer_on_policy_change(peer, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Remove configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_FILTER_LIST))
+ continue;
+ /* Remove configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->aslist[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME,
filter->aslist[direct].name);
filter->aslist[direct].name = NULL;
filter->aslist[direct].aslist = NULL;
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == FILTER_OUT) ? 1 : 0);
}
int peer_route_map_set(struct peer *peer, afi_t afi, safi_t safi, int direct,
const char *name)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != RMAP_IN && direct != RMAP_OUT)
return BGP_ERR_INVALID_VALUE;
+ /* Set configuration on peer. */
filter = &peer->filter[afi][safi];
-
if (filter->map[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->map[direct].name);
-
filter->map[direct].name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->map[direct].map = route_map_lookup_by_name(name);
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Set override-flag and process peer route updates. */
+ SET_FLAG(peer->filter_override[afi][safi][direct],
+ PEER_FT_ROUTE_MAP);
peer_on_policy_change(peer, afi, safi,
(direct == RMAP_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Set configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_ROUTE_MAP))
+ continue;
+ /* Set configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->map[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->map[direct].name);
filter->map[direct].name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->map[direct].map = route_map_lookup_by_name(name);
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == RMAP_OUT) ? 1 : 0);
}
return 0;
/* Unset route-map from the peer. */
int peer_route_map_unset(struct peer *peer, afi_t afi, safi_t safi, int direct)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct bgp_filter *gfilter;
- struct peer_group *group;
struct listnode *node, *nnode;
if (direct != RMAP_IN && direct != RMAP_OUT)
return BGP_ERR_INVALID_VALUE;
- filter = &peer->filter[afi][safi];
+ /* Unset override-flag unconditionally. */
+ UNSET_FLAG(peer->filter_override[afi][safi][direct], PEER_FT_ROUTE_MAP);
- /* apply peer-group filter */
+ /* Inherit configuration from peer-group if peer is member. */
if (peer_group_active(peer)) {
- gfilter = &peer->group->conf->filter[afi][safi];
-
- if (gfilter->map[direct].name) {
- if (filter->map[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME,
- filter->map[direct].name);
- filter->map[direct].name =
- XSTRDUP(MTYPE_BGP_FILTER_NAME,
- gfilter->map[direct].name);
- filter->map[direct].map = gfilter->map[direct].map;
- peer_on_policy_change(peer, afi, safi,
- (direct == RMAP_OUT) ? 1 : 0);
- return 0;
- }
+ PEER_STR_ATTR_INHERIT(MTYPE_BGP_FILTER_NAME, peer,
+ filter[afi][safi].map[direct].name);
+ PEER_ATTR_INHERIT(peer, filter[afi][safi].map[direct].map);
+ } else {
+ /* Otherwise remove configuration from peer. */
+ filter = &peer->filter[afi][safi];
+ if (filter->map[direct].name)
+ XFREE(MTYPE_BGP_FILTER_NAME, filter->map[direct].name);
+ filter->map[direct].name = NULL;
+ filter->map[direct].map = NULL;
}
- if (filter->map[direct].name)
- XFREE(MTYPE_BGP_FILTER_NAME, filter->map[direct].name);
- filter->map[direct].name = NULL;
- filter->map[direct].map = NULL;
-
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Process peer route updates. */
peer_on_policy_change(peer, afi, safi,
(direct == RMAP_OUT) ? 1 : 0);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Remove configuration on all peer-group members, unless they are
+	 * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][direct],
+ PEER_FT_ROUTE_MAP))
+ continue;
+ /* Remove configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->map[direct].name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->map[direct].name);
filter->map[direct].name = NULL;
filter->map[direct].map = NULL;
- peer_on_policy_change(peer, afi, safi,
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi,
(direct == RMAP_OUT) ? 1 : 0);
}
+
return 0;
}
int peer_unsuppress_map_set(struct peer *peer, afi_t afi, safi_t safi,
const char *name)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct peer_group *group;
struct listnode *node, *nnode;
+ /* Set configuration on peer. */
filter = &peer->filter[afi][safi];
-
if (filter->usmap.name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name);
-
filter->usmap.name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->usmap.map = route_map_lookup_by_name(name);
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Set override-flag and process peer route updates. */
+ SET_FLAG(peer->filter_override[afi][safi][0],
+ PEER_FT_UNSUPPRESS_MAP);
peer_on_policy_change(peer, afi, safi, 1);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Set configuration on all peer-group members, unless they are
+ * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][0],
+ PEER_FT_UNSUPPRESS_MAP))
+ continue;
+ /* Set configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->usmap.name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name);
filter->usmap.name = XSTRDUP(MTYPE_BGP_FILTER_NAME, name);
filter->usmap.map = route_map_lookup_by_name(name);
- peer_on_policy_change(peer, afi, safi, 1);
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi, 1);
}
+
return 0;
}
/* Unset route-map from the peer. */
int peer_unsuppress_map_unset(struct peer *peer, afi_t afi, safi_t safi)
{
+ struct peer *member;
struct bgp_filter *filter;
- struct peer_group *group;
struct listnode *node, *nnode;
- filter = &peer->filter[afi][safi];
+ /* Unset override-flag unconditionally. */
+ UNSET_FLAG(peer->filter_override[afi][safi][0], PEER_FT_UNSUPPRESS_MAP);
- if (filter->usmap.name)
- XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name);
- filter->usmap.name = NULL;
- filter->usmap.map = NULL;
+ /* Inherit configuration from peer-group if peer is member. */
+ if (peer_group_active(peer)) {
+ PEER_STR_ATTR_INHERIT(MTYPE_BGP_FILTER_NAME, peer,
+ filter[afi][safi].usmap.name);
+ PEER_ATTR_INHERIT(peer, filter[afi][safi].usmap.map);
+ } else {
+ /* Otherwise remove configuration from peer. */
+ filter = &peer->filter[afi][safi];
+ if (filter->usmap.name)
+ XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name);
+ filter->usmap.name = NULL;
+ filter->usmap.map = NULL;
+ }
+ /* Check if handling a regular peer. */
if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Process peer route updates. */
peer_on_policy_change(peer, afi, safi, 1);
+
+ /* Skip peer-group mechanics for regular peers. */
return 0;
}
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- filter = &peer->filter[afi][safi];
+ /*
+ * Remove configuration on all peer-group members, unless they are
+ * explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->filter_override[afi][safi][0],
+ PEER_FT_UNSUPPRESS_MAP))
+ continue;
+ /* Remove configuration on peer-group member. */
+ filter = &member->filter[afi][safi];
if (filter->usmap.name)
XFREE(MTYPE_BGP_FILTER_NAME, filter->usmap.name);
filter->usmap.name = NULL;
filter->usmap.map = NULL;
- peer_on_policy_change(peer, afi, safi, 1);
+
+ /* Process peer route updates. */
+ peer_on_policy_change(member, afi, safi, 1);
}
+
return 0;
}
uint32_t max, uint8_t threshold, int warning,
uint16_t restart)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
- /* apply configuration and set flags */
- SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
+ /* Set flags and configuration on peer. */
+ peer_af_flag_set(peer, afi, safi, PEER_FLAG_MAX_PREFIX);
if (warning)
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
+ peer_af_flag_set(peer, afi, safi, PEER_FLAG_MAX_PREFIX_WARNING);
else
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
+ peer_af_flag_unset(peer, afi, safi,
+ PEER_FLAG_MAX_PREFIX_WARNING);
+
peer->pmax[afi][safi] = max;
peer->pmax_threshold[afi][safi] = threshold;
peer->pmax_restart[afi][safi] = restart;
- /* if handling a peer-group, apply to all children */
- if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- /*
- * If peer configuration is user-set, it overrides
- * peer-group config.
- */
- if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX)) {
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX);
- peer->pmax[afi][safi] = max;
- peer->pmax_threshold[afi][safi] = threshold;
- peer->pmax_restart[afi][safi] = restart;
- }
- if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING)) {
- if (warning)
- SET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- else
- UNSET_FLAG(
- peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- }
-
- if ((peer->status == Established)
- && (peer->afc[afi][safi]))
- bgp_maximum_prefix_overflow(peer, afi, safi, 1);
- }
- } else {
- /* if not handling a peer-group, set the override flags */
+ /* Check if handling a regular peer. */
+ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
+ /* Re-check if peer violates maximum-prefix. */
if ((peer->status == Established) && (peer->afc[afi][safi]))
bgp_maximum_prefix_overflow(peer, afi, safi, 1);
- SET_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX);
+ /* Skip peer-group mechanics for regular peers. */
+ return 0;
+ }
+
+ /*
+ * Set flags and configuration on all peer-group members, unless they
+ * are explicitly overriding peer-group configuration.
+ */
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX))
+ continue;
+ /* Set flag and configuration on peer-group member. */
+ member->pmax[afi][safi] = max;
+ member->pmax_threshold[afi][safi] = threshold;
+ member->pmax_restart[afi][safi] = restart;
if (warning)
- SET_FLAG(peer->af_flags_override[afi][safi],
+ SET_FLAG(member->af_flags[afi][safi],
PEER_FLAG_MAX_PREFIX_WARNING);
else
- UNSET_FLAG(peer->af_flags_override[afi][safi],
+ UNSET_FLAG(member->af_flags[afi][safi],
PEER_FLAG_MAX_PREFIX_WARNING);
+
+ /* Re-check if peer violates maximum-prefix. */
+ if ((member->status == Established) && (member->afc[afi][safi]))
+ bgp_maximum_prefix_overflow(member, afi, safi, 1);
}
return 0;
int peer_maximum_prefix_unset(struct peer *peer, afi_t afi, safi_t safi)
{
- struct peer_group *group;
+ struct peer *member;
struct listnode *node, *nnode;
- UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
- UNSET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_WARNING);
- peer->pmax[afi][safi] = 0;
- peer->pmax_threshold[afi][safi] = 0;
- peer->pmax_restart[afi][safi] = 0;
-
- /* if not handling a peer-group, unset override flags */
- if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) {
- UNSET_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX);
- UNSET_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- /* if peer is part of a peer-group, apply peer-group config */
- if (peer_group_active(peer)) {
- peer->pmax[afi][safi] =
- peer->group->conf->pmax[afi][safi];
- peer->pmax_threshold[afi][safi] =
- peer->group->conf->pmax_threshold[afi][safi];
- peer->pmax_restart[afi][safi] =
- peer->group->conf->pmax_restart[afi][safi];
- }
+ /* Inherit configuration from peer-group if peer is member. */
+ if (peer_group_active(peer)) {
+ peer_af_flag_inherit(peer, afi, safi, PEER_FLAG_MAX_PREFIX);
+ peer_af_flag_inherit(peer, afi, safi,
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ PEER_ATTR_INHERIT(peer, pmax[afi][safi]);
+ PEER_ATTR_INHERIT(peer, pmax_threshold[afi][safi]);
+ PEER_ATTR_INHERIT(peer, pmax_restart[afi][safi]);
return 0;
}
+ /* Remove flags and configuration from peer. */
+ peer_af_flag_unset(peer, afi, safi, PEER_FLAG_MAX_PREFIX);
+ peer_af_flag_unset(peer, afi, safi, PEER_FLAG_MAX_PREFIX_WARNING);
+ peer->pmax[afi][safi] = 0;
+ peer->pmax_threshold[afi][safi] = 0;
+ peer->pmax_restart[afi][safi] = 0;
+
/*
- * If this peer is a peer-group, set all peers in the group unless they
- * have overrides for our config.
+ * Remove flags and configuration from all peer-group members, unless
+ * they are explicitly overriding peer-group configuration.
*/
- group = peer->group;
- for (ALL_LIST_ELEMENTS(group->peer, node, nnode, peer)) {
- if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING))
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING);
- if (!CHECK_FLAG(peer->af_flags_override[afi][safi],
- PEER_FLAG_MAX_PREFIX)) {
- UNSET_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX);
- peer->pmax[afi][safi] = 0;
- peer->pmax_threshold[afi][safi] = 0;
- peer->pmax_restart[afi][safi] = 0;
- }
+ for (ALL_LIST_ELEMENTS(peer->group->peer, node, nnode, member)) {
+ /* Skip peers with overridden configuration. */
+ if (CHECK_FLAG(member->af_flags_override[afi][safi],
+ PEER_FLAG_MAX_PREFIX))
+ continue;
+
+ /* Remove flag and configuration on peer-group member. */
+ UNSET_FLAG(member->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX);
+ UNSET_FLAG(member->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX_WARNING);
+ member->pmax[afi][safi] = 0;
+ member->pmax_threshold[afi][safi] = 0;
+ member->pmax_restart[afi][safi] = 0;
}
+
return 0;
}
afi_t afi, safi_t safi)
{
struct bgp_filter *filter;
- struct bgp_filter *gfilter = NULL;
char *addr;
- int in = FILTER_IN;
- int out = FILTER_OUT;
addr = peer->host;
filter = &peer->filter[afi][safi];
- if (peer_group_active(peer))
- gfilter = &peer->group->conf->filter[afi][safi];
-
/* distribute-list. */
- if (filter->dlist[in].name)
- if (!gfilter || !gfilter->dlist[in].name
- || strcmp(filter->dlist[in].name, gfilter->dlist[in].name)
- != 0) {
- vty_out(vty, " neighbor %s distribute-list %s in\n",
- addr, filter->dlist[in].name);
- }
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_DISTRIBUTE_LIST,
+ FILTER_IN))
+ vty_out(vty, " neighbor %s distribute-list %s in\n", addr,
+ filter->dlist[FILTER_IN].name);
- if (filter->dlist[out].name && !gfilter) {
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_DISTRIBUTE_LIST,
+ FILTER_OUT))
vty_out(vty, " neighbor %s distribute-list %s out\n", addr,
- filter->dlist[out].name);
- }
+ filter->dlist[FILTER_OUT].name);
/* prefix-list. */
- if (filter->plist[in].name)
- if (!gfilter || !gfilter->plist[in].name
- || strcmp(filter->plist[in].name, gfilter->plist[in].name)
- != 0) {
- vty_out(vty, " neighbor %s prefix-list %s in\n", addr,
- filter->plist[in].name);
- }
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_PREFIX_LIST,
+ FILTER_IN))
+ vty_out(vty, " neighbor %s prefix-list %s in\n", addr,
+ filter->plist[FILTER_IN].name);
- if (filter->plist[out].name)
- if (!gfilter || !gfilter->plist[out].name
- || strcmp(filter->plist[out].name, gfilter->plist[out].name)
- != 0) {
- vty_out(vty, " neighbor %s prefix-list %s out\n", addr,
- filter->plist[out].name);
- }
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_PREFIX_LIST,
+ FILTER_OUT))
+ vty_out(vty, " neighbor %s prefix-list %s out\n", addr,
+ filter->plist[FILTER_OUT].name);
/* route-map. */
- if (filter->map[RMAP_IN].name)
- if (!gfilter || !gfilter->map[RMAP_IN].name
- || strcmp(filter->map[RMAP_IN].name,
- gfilter->map[RMAP_IN].name)
- != 0) {
- vty_out(vty, " neighbor %s route-map %s in\n", addr,
- filter->map[RMAP_IN].name);
- }
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_ROUTE_MAP, RMAP_IN))
+ vty_out(vty, " neighbor %s route-map %s in\n", addr,
+ filter->map[RMAP_IN].name);
- if (filter->map[RMAP_OUT].name)
- if (!gfilter || !gfilter->map[RMAP_OUT].name
- || strcmp(filter->map[RMAP_OUT].name,
- gfilter->map[RMAP_OUT].name)
- != 0) {
- vty_out(vty, " neighbor %s route-map %s out\n", addr,
- filter->map[RMAP_OUT].name);
- }
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_ROUTE_MAP,
+ RMAP_OUT))
+ vty_out(vty, " neighbor %s route-map %s out\n", addr,
+ filter->map[RMAP_OUT].name);
/* unsuppress-map */
- if (filter->usmap.name && !gfilter) {
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_UNSUPPRESS_MAP, 0))
vty_out(vty, " neighbor %s unsuppress-map %s\n", addr,
filter->usmap.name);
- }
/* filter-list. */
- if (filter->aslist[in].name)
- if (!gfilter || !gfilter->aslist[in].name
- || strcmp(filter->aslist[in].name, gfilter->aslist[in].name)
- != 0) {
- vty_out(vty, " neighbor %s filter-list %s in\n", addr,
- filter->aslist[in].name);
- }
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_FILTER_LIST,
+ FILTER_IN))
+ vty_out(vty, " neighbor %s filter-list %s in\n", addr,
+ filter->aslist[FILTER_IN].name);
- if (filter->aslist[out].name && !gfilter) {
+ if (peergroup_filter_check(peer, afi, safi, PEER_FT_FILTER_LIST,
+ FILTER_OUT))
vty_out(vty, " neighbor %s filter-list %s out\n", addr,
- filter->aslist[out].name);
- }
+ filter->aslist[FILTER_OUT].name);
}
/* BGP peer configuration display function. */
}
}
+ /* enforce-first-as */
+ if (CHECK_FLAG(peer->flags, PEER_FLAG_ENFORCE_FIRST_AS)) {
+ if (!peer_group_active(peer)
+ || !CHECK_FLAG(g_peer->flags, PEER_FLAG_ENFORCE_FIRST_AS)) {
+ vty_out(vty, " neighbor %s enforce-first-as\n", addr);
+ }
+ }
+
/* update-source */
if (peer->update_if) {
if (!peer_group_active(peer) || !g_peer->update_if
{
struct peer *g_peer = NULL;
char *addr;
+ bool flag_scomm, flag_secomm, flag_slcomm;
/* Skip dynamic neighbors. */
if (peer_dynamic_neighbor(peer))
}
/* send-community print. */
- if (bgp_option_check(BGP_OPT_CONFIG_CISCO)) {
- if (peergroup_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_COMMUNITY)
- && peergroup_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_EXT_COMMUNITY)
- && peergroup_af_flag_check(
- peer, afi, safi,
- PEER_FLAG_SEND_LARGE_COMMUNITY)) {
- vty_out(vty, " neighbor %s send-community all\n",
- addr);
- } else if (peergroup_af_flag_check(
- peer, afi, safi,
- PEER_FLAG_SEND_LARGE_COMMUNITY)) {
- vty_out(vty, " neighbor %s send-community large\n",
- addr);
- } else if (peergroup_af_flag_check(
- peer, afi, safi,
- PEER_FLAG_SEND_EXT_COMMUNITY)) {
- vty_out(vty, " neighbor %s send-community extended\n",
- addr);
- } else if (peergroup_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_COMMUNITY)) {
- vty_out(vty, " neighbor %s send-community\n", addr);
- }
- } else {
- if (!peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_COMMUNITY)
- && (!g_peer || peer_af_flag_check(g_peer, afi, safi,
- PEER_FLAG_SEND_COMMUNITY))
- && !peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_EXT_COMMUNITY)
- && (!g_peer
- || peer_af_flag_check(g_peer, afi, safi,
- PEER_FLAG_SEND_EXT_COMMUNITY))
- && !peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_LARGE_COMMUNITY)
- && (!g_peer || peer_af_flag_check(
- g_peer, afi, safi,
- PEER_FLAG_SEND_LARGE_COMMUNITY))) {
+ flag_scomm = peergroup_af_flag_check(peer, afi, safi,
+ PEER_FLAG_SEND_COMMUNITY);
+ flag_secomm = peergroup_af_flag_check(peer, afi, safi,
+ PEER_FLAG_SEND_EXT_COMMUNITY);
+ flag_slcomm = peergroup_af_flag_check(peer, afi, safi,
+ PEER_FLAG_SEND_LARGE_COMMUNITY);
+
+ if (!bgp_option_check(BGP_OPT_CONFIG_CISCO)) {
+ if (flag_scomm && flag_secomm && flag_slcomm) {
vty_out(vty, " no neighbor %s send-community all\n",
addr);
} else {
- if (!peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_LARGE_COMMUNITY)
- && (!g_peer
- || peer_af_flag_check(
- g_peer, afi, safi,
- PEER_FLAG_SEND_LARGE_COMMUNITY))) {
+ if (flag_scomm)
vty_out(vty,
- " no neighbor %s send-community large\n",
+ " no neighbor %s send-community\n",
addr);
- }
-
- if (!peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_EXT_COMMUNITY)
- && (!g_peer
- || peer_af_flag_check(
- g_peer, afi, safi,
- PEER_FLAG_SEND_EXT_COMMUNITY))) {
+ if (flag_secomm)
vty_out(vty,
" no neighbor %s send-community extended\n",
addr);
- }
- if (!peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_SEND_COMMUNITY)
- && (!g_peer || peer_af_flag_check(
- g_peer, afi, safi,
- PEER_FLAG_SEND_COMMUNITY))) {
+ if (flag_slcomm)
vty_out(vty,
- " no neighbor %s send-community\n",
+ " no neighbor %s send-community large\n",
+ addr);
+ }
+ } else {
+ if (flag_scomm && flag_secomm && flag_slcomm) {
+ vty_out(vty, " neighbor %s send-community all\n",
+ addr);
+ } else if (flag_scomm && flag_secomm) {
+ vty_out(vty, " neighbor %s send-community both\n",
+ addr);
+ } else {
+ if (flag_scomm)
+ vty_out(vty, " neighbor %s send-community\n",
+ addr);
+ if (flag_secomm)
+ vty_out(vty,
+ " neighbor %s send-community extended\n",
+ addr);
+ if (flag_slcomm)
+ vty_out(vty,
+ " neighbor %s send-community large\n",
addr);
- }
}
}
/* Default information */
if (peergroup_af_flag_check(peer, afi, safi,
- PEER_FLAG_DEFAULT_ORIGINATE)
- || (g_peer
- && ((peer->default_rmap[afi][safi].name
- && !g_peer->default_rmap[afi][safi].name)
- || (!peer->default_rmap[afi][safi].name
- && g_peer->default_rmap[afi][safi].name)
- || (peer->default_rmap[afi][safi].name
- && strcmp(peer->default_rmap[afi][safi].name,
- g_peer->default_rmap[afi][safi].name))))) {
+ PEER_FLAG_DEFAULT_ORIGINATE)) {
vty_out(vty, " neighbor %s default-originate", addr);
+
if (peer->default_rmap[afi][safi].name)
vty_out(vty, " route-map %s",
peer->default_rmap[afi][safi].name);
+
vty_out(vty, "\n");
}
}
/* maximum-prefix. */
- if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX))
- if (!peer_group_active(peer)
- || g_peer->pmax[afi][safi] != peer->pmax[afi][safi]
- || g_peer->pmax_threshold[afi][safi]
- != peer->pmax_threshold[afi][safi]
- || CHECK_FLAG(g_peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING)
- != CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_MAX_PREFIX_WARNING)) {
- vty_out(vty, " neighbor %s maximum-prefix %lu", addr,
- peer->pmax[afi][safi]);
- if (peer->pmax_threshold[afi][safi]
- != MAXIMUM_PREFIX_THRESHOLD_DEFAULT)
- vty_out(vty, " %u",
- peer->pmax_threshold[afi][safi]);
- if (CHECK_FLAG(peer->af_flags[afi][safi],
+ if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_MAX_PREFIX)) {
+ vty_out(vty, " neighbor %s maximum-prefix %lu", addr,
+ peer->pmax[afi][safi]);
+
+ if (peer->pmax_threshold[afi][safi]
+ != MAXIMUM_PREFIX_THRESHOLD_DEFAULT)
+ vty_out(vty, " %u", peer->pmax_threshold[afi][safi]);
+ if (peer_af_flag_check(peer, afi, safi,
PEER_FLAG_MAX_PREFIX_WARNING))
- vty_out(vty, " warning-only");
- if (peer->pmax_restart[afi][safi])
- vty_out(vty, " restart %u",
- peer->pmax_restart[afi][safi]);
- vty_out(vty, "\n");
- }
+ vty_out(vty, " warning-only");
+ if (peer->pmax_restart[afi][safi])
+ vty_out(vty, " restart %u",
+ peer->pmax_restart[afi][safi]);
+
+ vty_out(vty, "\n");
+ }
/* Route server client. */
if (peergroup_af_flag_check(peer, afi, safi,
}
/* allowas-in <1-10> */
- if (peer_af_flag_check(peer, afi, safi, PEER_FLAG_ALLOWAS_IN)) {
- if (!peer_group_active(peer)
- || !peer_af_flag_check(g_peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN)
- || peer->allowas_in[afi][safi]
- != g_peer->allowas_in[afi][safi]) {
- if (peer->allowas_in[afi][safi] == 3) {
- vty_out(vty, " neighbor %s allowas-in\n",
- addr);
- } else {
- vty_out(vty, " neighbor %s allowas-in %d\n",
- addr, peer->allowas_in[afi][safi]);
- }
- }
- }
-
- /* allowas-in origin */
- else if (peer_af_flag_check(peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
- if (!peer_group_active(peer)
- || !peer_af_flag_check(g_peer, afi, safi,
- PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
+ if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_ALLOWAS_IN)) {
+ if (peer_af_flag_check(peer, afi, safi,
+ PEER_FLAG_ALLOWAS_IN_ORIGIN)) {
vty_out(vty, " neighbor %s allowas-in origin\n", addr);
+ } else if (peer->allowas_in[afi][safi] == 3) {
+ vty_out(vty, " neighbor %s allowas-in\n", addr);
+ } else {
+ vty_out(vty, " neighbor %s allowas-in %d\n", addr,
+ peer->allowas_in[afi][safi]);
}
}
/* weight */
- if (peer_af_flag_check(peer, afi, safi, PEER_FLAG_WEIGHT))
- if (!peer_group_active(peer)
- || !peer_af_flag_check(g_peer, afi, safi, PEER_FLAG_WEIGHT)
- || peer->weight[afi][safi] != g_peer->weight[afi][safi]) {
- if (peer->weight[afi][safi]) {
- vty_out(vty, " neighbor %s weight %lu\n", addr,
- peer->weight[afi][safi]);
- }
- }
+ if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_WEIGHT))
+ vty_out(vty, " neighbor %s weight %lu\n", addr,
+ peer->weight[afi][safi]);
/* Filter. */
bgp_config_write_filter(vty, peer, afi, safi);
if (safi == SAFI_EVPN)
bgp_config_write_evpn_info(vty, bgp, afi, safi);
+ if (safi == SAFI_FLOWSPEC)
+ bgp_fs_config_write_pbr(vty, bgp, afi, safi);
+
if (safi == SAFI_UNICAST) {
bgp_vpn_policy_config_write_afi(vty, bgp, afi);
if (CHECK_FLAG(bgp->af_flags[afi][safi],
vty_endframe(vty, " exit-address-family\n");
}
+/* clang-format off */
+#if defined(VERSION_TYPE_DEV) && CONFDATE > 20180517
+CPP_NOTICE("bgpd: remove 'bgp enforce-first-as' config migration from bgp_config_write")
+#endif
+/* clang-format on */
+
int bgp_config_write(struct vty *vty)
{
int write = 0;
if (CHECK_FLAG(bgp->vrf_flags, BGP_VRF_AUTO))
continue;
+ /* Migrate deprecated 'bgp enforce-first-as'
+ * config to 'neighbor * enforce-first-as' configs
+ */
+ if (bgp_flag_check(bgp, BGP_FLAG_ENFORCE_FIRST_AS)) {
+ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer))
+ peer_flag_set(peer, PEER_FLAG_ENFORCE_FIRST_AS);
+ bgp_flag_unset(bgp, BGP_FLAG_ENFORCE_FIRST_AS);
+ }
+
/* Router bgp ASN */
vty_out(vty, "router bgp %u", bgp->as);
vty_out(vty, "\n");
}
- /* BGP enforce-first-as. */
- if (bgp_flag_check(bgp, BGP_FLAG_ENFORCE_FIRST_AS))
- vty_out(vty, " bgp enforce-first-as\n");
-
/* BGP deterministic-med. */
if (!!bgp_flag_check(bgp, BGP_FLAG_DETERMINISTIC_MED)
!= DFLT_BGP_DETERMINISTIC_MED)
struct update_subgroup;
struct bpacket;
+struct bgp_pbr_config;
/*
* Allow the neighbor XXXX remote-as to take internal or external
struct vpn_policy vpn_policy[AFI_MAX];
+ struct bgp_pbr_config *bgp_pbr_cfg;
+
+ /* local esi hash table */
+ struct hash *esihash;
+
QOBJ_FIELDS
};
DECLARE_QOBJ_TYPE(bgp)
#define PEER_FLAG_DISABLE_CONNECTED_CHECK (1 << 6) /* disable-connected-check */
#define PEER_FLAG_LOCAL_AS_NO_PREPEND (1 << 7) /* local-as no-prepend */
#define PEER_FLAG_LOCAL_AS_REPLACE_AS (1 << 8) /* local-as no-prepend replace-as */
-#define PEER_FLAG_DELETE (1 << 9) /* mark the peer for deleting */
-#define PEER_FLAG_CONFIG_NODE (1 << 10) /* the node to update configs on */
+#define PEER_FLAG_DELETE (1 << 9) /* mark the peer for deleting */
+#define PEER_FLAG_CONFIG_NODE (1 << 10) /* the node to update configs on */
#define PEER_FLAG_LONESOUL (1 << 11)
#define PEER_FLAG_DYNAMIC_NEIGHBOR (1 << 12) /* dynamic neighbor */
#define PEER_FLAG_CAPABILITY_ENHE (1 << 13) /* Extended next-hop (rfc 5549)*/
#define PEER_FLAG_IFPEER_V6ONLY (1 << 14) /* if-based peer is v6 only */
-#define PEER_FLAG_IS_RFAPI_HD (1 << 15) /* attached to rfapi HD */
+#define PEER_FLAG_IS_RFAPI_HD (1 << 15) /* attached to rfapi HD */
+#define PEER_FLAG_ENFORCE_FIRST_AS (1 << 16) /* enforce-first-as */
/* outgoing message sent in CEASE_ADMIN_SHUTDOWN notify */
char *tx_shutdown_message;
* *peer-specific*.
*/
uint32_t af_flags_override[AFI_MAX][SAFI_MAX];
+ /*
+ * Parallel array to af_flags that indicates whether each flag should
+ * be treated as regular (defaults to 0) or inverted (defaults to 1).
+ * If a flag is set to 1 by default, the same bit should be set here.
+ *
+ * Notes:
+ * - This does *not* contain the flag values, rather it contains
+ * whether the flag at the same position in af_flags is *regular* or
+ * *inverted*.
+ */
+ uint32_t af_flags_invert[AFI_MAX][SAFI_MAX];
/*
* Effective flags, computed by applying peer-group flags and then
* overriding with individual flags
/* Filter structure. */
struct bgp_filter filter[AFI_MAX][SAFI_MAX];
+ /*
+ * Parallel array to filter that indicates whether each filter
+ * originates from a peer-group or if it is config that is specific to
+ * this individual peer. If a filter is set independent of the
+ * peer-group the appropriate bit should be set here. If this peer is a
+ * peer-group, this memory region should be all zeros. The assumption
+ * is that the default state for all flags is unset. Due to filters
+ * having a direction (e.g. in/out/...), this array has a third
+ * dimension for storing the overrides independently per direction.
+ *
+ * Notes:
+ * - if a filter for an individual peer is unset, the corresponding
+ * override flag is unset and the peer is considered to be back in
+ * sync with the peer-group.
+ * - This does *not* contain the filter values, rather it contains
+ * whether the filter in filter (struct bgp_filter) is peer-specific.
+ */
+ uint8_t filter_override[AFI_MAX][SAFI_MAX][(FILTER_MAX > RMAP_MAX)
+ ? FILTER_MAX
+ : RMAP_MAX];
+#define PEER_FT_DISTRIBUTE_LIST (1 << 0) /* distribute-list */
+#define PEER_FT_FILTER_LIST (1 << 1) /* filter-list */
+#define PEER_FT_PREFIX_LIST (1 << 2) /* prefix-list */
+#define PEER_FT_ROUTE_MAP (1 << 3) /* route-map */
+#define PEER_FT_UNSUPPRESS_MAP (1 << 4) /* unsuppress-map */
+
/* ORF Prefix-list */
struct prefix_list *orf_plist[AFI_MAX][SAFI_MAX];
};
DECLARE_QOBJ_TYPE(peer)
+/* Inherit peer attribute from peer-group. */
+#define PEER_ATTR_INHERIT(peer, attr) ((peer)->attr = (peer)->group->conf->attr)
+#define PEER_STR_ATTR_INHERIT(mt, peer, attr) \
+ do { \
+ if ((peer)->attr) \
+ XFREE(mt, (peer)->attr); \
+ if ((peer)->group->conf->attr) \
+ (peer)->attr = XSTRDUP(mt, (peer)->group->conf->attr); \
+ else \
+ (peer)->attr = NULL; \
+ } while (0)
+
/* Check if suppress start/restart of sessions to peer. */
#define BGP_PEER_START_SUPPRESSED(P) \
(CHECK_FLAG((P)->flags, PEER_FLAG_SHUTDOWN) \
extern int peer_af_flag_set(struct peer *, afi_t, safi_t, uint32_t);
extern int peer_af_flag_unset(struct peer *, afi_t, safi_t, uint32_t);
extern int peer_af_flag_check(struct peer *, afi_t, safi_t, uint32_t);
+extern void peer_af_flag_inherit(struct peer *peer, afi_t afi, safi_t safi,
+ uint32_t flag);
extern int peer_ebgp_multihop_set(struct peer *, int);
extern int peer_ebgp_multihop_unset(struct peer *);
::
- apt-get install git autoconf automake libtool make gawk libreadline-dev \
- texinfo libpam0g-dev dejagnu libjson0-dev pkg-config libpam0g-dev \
- libjson0-dev flex python-pip libc-ares-dev python3-dev python3-sphinx
+ apt-get install \
+ git autoconf automake libtool make gawk libreadline-dev texinfo \
+ dejagnu pkg-config libpam0g-dev libjson0-dev flex python-pip \
+ libc-ares-dev python3-dev python3-sphinx install-info
Install newer bison from 14.04 package source (Ubuntu 12.04 package
source is too old)
::
- sudo groupadd -g 92 frr
+ sudo groupadd -r -g 92 frr
sudo groupadd -r -g 85 frrvty
sudo adduser --system --ingroup frr --home /var/run/frr/ \
--gecos "FRR suite" --shell /sbin/nologin frr
::
- apt-get install git autoconf automake libtool make gawk libreadline-dev \
- texinfo dejagnu pkg-config libpam0g-dev libjson-c-dev bison flex \
- python-pytest libc-ares-dev python3-dev python3-sphinx
+ apt-get install \
+ git autoconf automake libtool make gawk libreadline-dev texinfo dejagnu \
+ pkg-config libpam0g-dev libjson-c-dev bison flex python-pytest \
+ libc-ares-dev python3-dev python3-sphinx install-info
Get FRR, compile it and install it (from Git)
---------------------------------------------
::
- sudo groupadd -g 92 frr
+ sudo groupadd -r -g 92 frr
sudo groupadd -r -g 85 frrvty
sudo adduser --system --ingroup frr --home /var/run/frr/ \
--gecos "FRR suite" --shell /sbin/nologin frr
::
- apt-get install git autoconf automake libtool make gawk libreadline-dev \
- texinfo dejagnu pkg-config libpam0g-dev libjson-c-dev bison flex \
- python-pytest libc-ares-dev python3-dev libsystemd-dev python-ipaddr \
- python3-sphinx
+ apt-get install \
+ git autoconf automake libtool make gawk libreadline-dev texinfo dejagnu \
+ pkg-config libpam0g-dev libjson-c-dev bison flex python-pytest \
+ libc-ares-dev python3-dev libsystemd-dev python-ipaddr python3-sphinx \
+ install-info
Get FRR, compile it and install it (from Git)
---------------------------------------------
::
- sudo groupadd -g 92 frr
+ sudo groupadd -r -g 92 frr
sudo groupadd -r -g 85 frrvty
sudo adduser --system --ingroup frr --home /var/run/frr/ \
--gecos "FRR suite" --shell /sbin/nologin frr
-Ubuntu 18.04LTS
-===============================================
+Ubuntu 18.04 LTS
+================
Install dependencies
--------------------------
+--------------------
+
Required packages
^^^^^^^^^^^^^^^^^
::
- sudo apt-get install \
- git \
- autoconf \
- automake \
- libtool \
- make \
- gawk \
- libreadline-dev \
- texinfo \
- pkg-config \
- libpam0g-dev \
- libjson-c-dev \
- bison \
- flex \
- python-pytest \
- libc-ares-dev \
- python3-dev \
- libsystemd-dev \
- python-ipaddr \
- python3-sphinx
+ sudo apt-get install \
+ git autoconf automake libtool make gawk libreadline-dev texinfo \
+ pkg-config libpam0g-dev libjson-c-dev bison flex python-pytest \
+ libc-ares-dev python3-dev libsystemd-dev python-ipaddr python3-sphinx \
+ install-info
Optional packages
^^^^^^^^^^^^^^^^^
::
- sudo apt-get install \
- protobuf-c-compiler \
- libprotobuf-c-dev
+ sudo apt-get install \
+ protobuf-c-compiler \
+ libprotobuf-c-dev
ZeroMQ
~~~~~~
::
- sudo apt-get install \
- libzmq5 \
- libzmq3-dev
+ sudo apt-get install \
+ libzmq5 \
+ libzmq3-dev
Get FRR, compile it and install it (from Git)
---------------------------------------------
::
- sudo groupadd -g 92 frr
- sudo groupadd -r -g 85 frrvty
- sudo adduser --system --ingroup frr --home /var/run/frr/ \
- --gecos "FRR suite" --shell /sbin/nologin frr
- sudo usermod -a -G frrvty frr
+ sudo groupadd -r -g 92 frr
+ sudo groupadd -r -g 85 frrvty
+ sudo adduser --system --ingroup frr --home /var/run/frr/ \
+ --gecos "FRR suite" --shell /sbin/nologin frr
+ sudo usermod -a -G frrvty frr
Download source
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^
::
- git clone https://github.com/frrouting/frr.git frr
+ git clone https://github.com/frrouting/frr.git frr
Configure
^^^^^^^^^
.. seealso:: *Installation* section of user guide
-::
-
- cd frr
- ./bootstrap.sh
- ./configure \
- --prefix=/usr \
- --enable-exampledir=/usr/share/doc/frr/examples/ \
- --localstatedir=/var/run/frr \
- --sbindir=/usr/lib/frr \
- --sysconfdir=/etc/frr \
- --enable-pimd \
- --enable-watchfrr \
- --enable-ospfclient=yes \
- --enable-ospfapi=yes \
- --enable-multipath=64 \
- --enable-user=frr \
- --enable-group=frr \
- --enable-vty-group=frrvty \
- --enable-configfile-mask=0640 \
- --enable-logfile-mask=0640 \
- --enable-rtadv \
- --enable-fpm \
- --enable-systemd=yes \
- --with-pkg-git-version \
- --with-pkg-extra-version=-MyOwnFRRVersion
+.. code-block:: shell
+
+ cd frr
+ ./bootstrap.sh
+ ./configure \
+ --prefix=/usr \
+ --enable-exampledir=/usr/share/doc/frr/examples/ \
+ --localstatedir=/var/run/frr \
+ --sbindir=/usr/lib/frr \
+ --sysconfdir=/etc/frr \
+ --enable-pimd \
+ --enable-watchfrr \
+ --enable-ospfclient=yes \
+ --enable-ospfapi=yes \
+ --enable-multipath=64 \
+ --enable-user=frr \
+ --enable-group=frr \
+ --enable-vty-group=frrvty \
+ --enable-configfile-mask=0640 \
+ --enable-logfile-mask=0640 \
+ --enable-rtadv \
+ --enable-fpm \
+ --enable-systemd=yes \
+ --with-pkg-git-version \
+ --with-pkg-extra-version=-MyOwnFRRVersion
If optional packages were installed, the associated feature may now be
enabled.
::
- make
- make check
- sudo make install
+ make
+ make check
+ sudo make install
Create empty FRR configuration files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
are correct. If the files are not already present, FRR will create them.
It's also important to consider _which_ files to create. FRR supports writing
-configuration to a monolithic file, ``/etc/frr/frr.conf``, which is not
-recommended
+configuration to a monolithic file, :file:`/etc/frr/frr.conf`.
+
.. seealso:: *VTYSH* section of user guide
-The presence of ``/etc/frr/frr.conf`` on startup implicitly configures FRR to
-ignore daemon-specific configuration files.
+The presence of :file:`/etc/frr/frr.conf` on startup implicitly configures FRR
+to ignore daemon-specific configuration files.
Daemon-specific configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
- sudo install -m 755 -o frr -g frr -d /var/log/frr
- sudo install -m 775 -o frr -g frrvty -d /etc/frr
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/zebra.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/bgpd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ospfd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ospf6d.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/isisd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ripd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ripngd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/pimd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ldpd.conf
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/nhrpd.conf
+ sudo install -m 755 -o frr -g frr -d /var/log/frr
+ sudo install -m 775 -o frr -g frrvty -d /etc/frr
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/zebra.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/bgpd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ospfd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ospf6d.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/isisd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ripd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ripngd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/pimd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/ldpd.conf
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/nhrpd.conf
Monolithic configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~
::
- sudo install -m 755 -o frr -g frr -d /var/log/frr
- sudo install -m 775 -o frr -g frrvty -d /etc/frr
- sudo install -m 640 -o frr -g frr /dev/null /etc/frr/frr.conf
+ sudo install -m 755 -o frr -g frr -d /var/log/frr
+ sudo install -m 775 -o frr -g frrvty -d /etc/frr
+ sudo install -m 640 -o frr -g frr /dev/null /etc/frr/frr.conf
Enable IPv4 & IPv6 forwarding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Edit ``/etc/sysctl.conf`` and uncomment the following values (ignore the
-other settings)
+Edit :file:`/etc/sysctl.conf` and uncomment the following values (ignore the
+other settings):
::
- # Uncomment the next line to enable packet forwarding for IPv4
- net.ipv4.ip_forward=1
+ # Uncomment the next line to enable packet forwarding for IPv4
+ net.ipv4.ip_forward=1
- # Uncomment the next line to enable packet forwarding for IPv6
- # Enabling this option disables Stateless Address Autoconfiguration
- # based on Router Advertisements for this host
- net.ipv6.conf.all.forwarding=1
+ # Uncomment the next line to enable packet forwarding for IPv6
+ # Enabling this option disables Stateless Address Autoconfiguration
+ # based on Router Advertisements for this host
+ net.ipv6.conf.all.forwarding=1
Add MPLS kernel modules
^^^^^^^^^^^^^^^^^^^^^^^
-Ubuntu 18.04 ships with kernel 4.15. MPLS modules are present by default.
-To enable, add the following lines to ``/etc/modules-load.d/modules.conf``:
+Ubuntu 18.04 ships with kernel 4.15. MPLS modules are present by default. To
+enable, add the following lines to :file:`/etc/modules-load.d/modules.conf`:
::
- # Load MPLS Kernel Modules
- mpls_router
- mpls_iptunnel
+ # Load MPLS Kernel Modules
+ mpls_router
+ mpls_iptunnel
-**Reboot** or use ``sysctl -p`` to apply the same config to the running
-system
+Reboot or use ``sysctl -p`` to apply the same config to the running system.
Enable MPLS Forwarding
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
-Edit ``/etc/sysctl.conf`` and the following lines. Make sure to add a
-line equal to ``net.mpls.conf.eth0.input`` or each interface used with
-MPLS
+Edit :file:`/etc/sysctl.conf` and add the following lines. Make sure to add a
+line equal to ``net.mpls.conf.eth0.input`` for each interface used with MPLS.
::
- # Enable MPLS Label processing on all interfaces
- net.mpls.conf.eth0.input=1
- net.mpls.conf.eth1.input=1
- net.mpls.conf.eth2.input=1
- net.mpls.platform_labels=100000
+ # Enable MPLS Label processing on all interfaces
+ net.mpls.conf.eth0.input=1
+ net.mpls.conf.eth1.input=1
+ net.mpls.conf.eth2.input=1
+ net.mpls.platform_labels=100000
-Install the systemd service (if rebooted from last step, change directory back to frr directory)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install the systemd service
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
- sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service
- sudo install -m 644 tools/etc/default/frr /etc/default/frr
- sudo install -m 644 tools/etc/frr/daemons /etc/frr/daemons
- sudo install -m 644 tools/etc/frr/daemons.conf /etc/frr/daemons.conf
- sudo install -m 644 tools/etc/frr/frr.conf /etc/frr/frr.conf
- sudo install -m 644 -o frr -g frr tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf
+ sudo install -m 644 tools/frr.service /etc/systemd/system/frr.service
+ sudo install -m 644 tools/etc/default/frr /etc/default/frr
+ sudo install -m 644 tools/etc/frr/daemons /etc/frr/daemons
+ sudo install -m 644 tools/etc/frr/daemons.conf /etc/frr/daemons.conf
+ sudo install -m 644 tools/etc/frr/frr.conf /etc/frr/frr.conf
+ sudo install -m 644 -o frr -g frr tools/etc/frr/vtysh.conf /etc/frr/vtysh.conf
Enable daemons
^^^^^^^^^^^^^^
-| Edit ``/etc/frr/daemons`` and change the value from "no" to "yes" for
- those daemons you want to start by systemd.
-| For example.
+Edit ``/etc/frr/daemons`` and change the value from "no" to "yes" for those
+daemons you want to be started by systemd. For example:
::
- zebra=yes
- bgpd=yes
- ospfd=yes
- ospf6d=yes
- ripd=yes
- ripngd=yes
- isisd=yes
+ zebra=yes
+ bgpd=yes
+ ospfd=yes
+ ospf6d=yes
+ ripd=yes
+ ripngd=yes
+ isisd=yes
Enable the systemd service
^^^^^^^^^^^^^^^^^^^^^^^^^^
-- systemctl enable frr
+Enabling the systemd service causes FRR to be started upon boot. To enable it,
+use the following command:
+
+.. code-block:: shell
+
+ systemctl enable frr
Start the systemd service
^^^^^^^^^^^^^^^^^^^^^^^^^
-- systemctl start frr
-- use ``systemctl status frr`` to check its status.
+.. code-block:: shell
+
+ systemctl start frr
+
+After starting the service, you can use ``systemctl status frr`` to check its
+status.
::
- % update-autotools
% sh ./configure --enable-opaque-lsa
% make
.. clicmd:: router bgp ASN
Enable a BGP protocol process with the specified ASN. After
- this statement you can input any `BGP Commands`. You can not
- create different BGP process under different ASN without
- specifying `multiple-instance` (:ref:`multiple-instance`).
+ this statement you can input any `BGP Commands`.
.. index:: no router bgp ASN
.. clicmd:: no router bgp ASN
VRFNAME is matched against VRFs configured in the kernel. When no *vrf VRFNAME*
is specified, the BGP protocol process belongs to the default VRF.
+With VRF, you can isolate networking information. Having BGP VRF allows you to
+have several BGP instances on the same system process. This solution solves
+scalability issues where the network administrator previously had to run
+several separate BGP processes, one per namespace. Not only does BGP VRF solve
+this, but the method also applies to both kinds of VRF backends: the default
+VRF from the Linux kernel, and network namespaces. Also, having separate BGP
+instances does not imply that the AS number has to be different. For internal
+purposes, it is possible to do iBGP peering from two different network
+namespaces.
+
BGP routes may be leaked (i.e., copied) between a unicast VRF RIB and the VPN
safi RIB of the default VRF (leaking is also permitted between the unicast RIB
of the default VRF and VPN). A shortcut syntax is also available for
Multiple instance
-----------------
-To enable multiple view function of *bgpd*, you must turn on multiple instance
-feature beforehand.
+The multiple instances feature of *bgpd* is by default turned on. This command
+is deprecated and will be removed in a future version of FRR. If you do not
+want multiple instances, do not configure them from the cli. Please note
+that some commands auto-generate a second instance.
.. index:: bgp multiple-instance
.. clicmd:: bgp multiple-instance
- Enable BGP multiple instance feature. After this feature is enabled,
- you can make multiple BGP instances or multiple BGP views.
+ Enable BGP multiple instance feature. This is the default
+ configuration and this cli will not be displayed. This command
+ is deprecated and will be removed in the future.
.. index:: no bgp multiple-instance
.. clicmd:: no bgp multiple-instance
Disable BGP multiple instance feature. You can not disable this feature
- when BGP multiple instances or views exist.
+ when BGP multiple instances or views exist. This command
+ is deprecated and will be removed in the future.
When you want to make configuration more Cisco like one,
.. index:: bgp config-type cisco
.. clicmd:: bgp config-type cisco
- Cisco compatible BGP configuration output.
+ Cisco compatible BGP configuration output. This command is deprecated
+ and will be removed in a future version of FRR. Please transition
+ to using the appropriate bgp commands to affect your behavior.
When bgp config-type cisco is specified,
-``no synchronization`` is displayed.
+``no synchronization`` is displayed. This command does nothing and
+is for display purposes only.
``no auto-summary`` is displayed.
The ``network`` and ``aggregate-address`` arguments are displayed as::
.. index:: bgp config-type zebra
.. clicmd:: bgp config-type zebra
- FRR style BGP configuration. This is default.
+ FRR style BGP configuration. This is default. This command is deprecated
+ and will be removed in the future.
.. _bgp-instance-and-view:
- :rfc:`3137`
:t:`OSPF Stub Router Advertisement, A. Retana, L. Nguyen, R. White, A. Zinin,
D. McPherson. June 2001`
+- :rfc:`4447`
+ :t:`Pseudowire Setup and Maintenance Using the Label Distribution Protocol
+ (LDP), L. Martini, E. Rosen, N. El-Aawar, T. Smith, and G. Heron. April
+ 2006.`
+- :rfc:`4762`
+ :t:`Virtual Private LAN Service (VPLS) Using Label Distribution Protocol
+ (LDP) Signaling, M. Lasserre and V. Kompella. January 2007.`
+- :rfc:`5036`
+ :t:`LDP Specification, L. Andersson, I. Minei, and B. Thomas. October 2007.`
+- :rfc:`5561`
+ :t:`LDP Capabilities, B. Thomas, K. Raza, S. Aggarwal, R. Aggarwal, and
+ JL. Le Roux. July 2009.`
+- :rfc:`5918`
+ :t:`Label Distribution Protocol (LDP) 'Typed Wildcard' Forward Equivalence
+ Class (FEC), R. Asati, I. Minei, and B. Thomas. August 2010.`
+- :rfc:`5919`
+ :t:`Signaling LDP Label Advertisement Completion, R. Asati, P. Mohapatra,
+ E. Chen, and B. Thomas. August 2010.`
+- :rfc:`6667`
+ :t:`LDP 'Typed Wildcard' Forwarding Equivalence Class (FEC) for PWid and
+ Generalized PWid FEC Elements, K. Raza, S. Boutros, and C. Pignataro. July
+ 2012.`
+- :rfc:`6720`
+ :t:`The Generalized TTL Security Mechanism (GTSM) for the Label Distribution
+ Protocol (LDP), C. Pignataro and R. Asati. August 2012.`
+- :rfc:`7552`
+ :t:`Updates to LDP for IPv6, R. Asati, C. Pignataro, K. Raza, V. Manral,
+ and R. Papneja. June 2015.`
**When SNMP support is enabled, the following RFCs are also supported:**
Tell pim to receive IGMP reports and Query on this interface. The default
version is v3. This command is useful on the LHR.
+.. index:: ip igmp join
+.. clicmd:: ip igmp join
+
+ Join multicast source-group on an interface.
+
.. index:: ip igmp query-interval (1-1800)
.. clicmd:: ip igmp query-interval (1-1800)
'all' allows you to look at all vrfs for the command. Naming a vrf 'all' will
cause great confusion.
+.. index:: show ip igmp interface
+.. clicmd:: show ip igmp interface
+
+ Display IGMP interface information.
+
+.. index:: show ip igmp join
+.. clicmd:: show ip igmp join
+
+ Display IGMP static join information.
+
+.. index:: show ip igmp groups
+.. clicmd:: show ip igmp groups
+
+ Display IGMP groups information.
+
+.. index:: show ip igmp groups retransmissions
+.. clicmd:: show ip igmp groups retransmissions
+
+ Display IGMP group retransmission information.
+
+.. index:: show ip igmp sources
+.. clicmd:: show ip igmp sources
+
+ Display IGMP sources information.
+
+.. index:: show ip igmp sources retransmissions
+.. clicmd:: show ip igmp sources retransmissions
+
+ Display IGMP source retransmission information.
+
+.. index:: show ip igmp statistics
+.. clicmd:: show ip igmp statistics
+
+ Display IGMP statistics information.
+
.. index:: show ip multicast
.. clicmd:: show ip multicast
Display the multicast RIB created in zebra.
+.. index:: mtrace
+.. clicmd:: mtrace
+
+ Display multicast traceroute towards source.
+
PIM Debug Commands
==================
mode, the debug commands can be persistent across restarts of the FRR pimd if
the config was written out.
+.. index:: debug igmp
+.. clicmd:: debug igmp
+
+ This turns on debugging for IGMP protocol activity.
+
+.. index:: debug mtrace
+.. clicmd:: debug mtrace
+
+ This turns on debugging for mtrace protocol activity.
+
+.. index:: debug mroute
+.. clicmd:: debug mroute
+
+ This turns on debugging for PIM interaction with kernel MFC cache.
+
.. index:: debug pim events
.. clicmd:: debug pim events
.. clicmd:: debug pim zebra
This gathers data about events from zebra that come up through the ZAPI.
+
+PIM Clear Commands
+==================
+Clear commands reset various variables.
+
+.. index:: clear ip interfaces
+.. clicmd:: clear ip interfaces
+
+ Reset interfaces.
+
+.. index:: clear ip igmp interfaces
+.. clicmd:: clear ip igmp interfaces
+
+ Reset IGMP interfaces.
+
+.. index:: clear ip mroute
+.. clicmd:: clear ip mroute
+
+ Reset multicast routes.
+
+.. index:: clear ip pim interfaces
+.. clicmd:: clear ip pim interfaces
+
+ Reset PIM interfaces.
+
+.. index:: clear ip pim oil
+.. clicmd:: clear ip pim oil
+
+ Rescan PIM OIL (output interface list).
.. _interface-commands:
+Configuration Addresses behaviour
+=================================
+
+At startup, *Zebra* will first discover the underlying networking objects
+from the operating system. This includes interfaces, addresses of
+interfaces, static routes, etc. Then, it will read the configuration
+file, including its own interface addresses, static routes, etc. All this
+information comprises the operational context from *Zebra*. But
+configuration context from *Zebra* will remain the same as the one from
+:file:`zebra.conf` config file. As an example, executing the following
+:clicmd:`show running-config` will reflect what was in :file:`zebra.conf`.
+In a similar way, networking objects that are configured outside of the
+*Zebra* like *iproute2* will not impact the configuration context from
+*Zebra*. This behaviour permits you to continue saving your own config
+file, and decide what is really to be pushed on the config file, and what
+is dependent on the underlying system.
+Note that inversely, from *Zebra*, you will not be able to delete networking
+objects that were previously configured outside of *Zebra*.
+
+
Interface Commands
==================
This command is available on configuration mode. By default, above command
permits accessing the vrf configuration mode. This mode is available for
- both VRFs. It is to be noted that *Zebra* does not create *Linux VRF*.
- Provisioning this command is used to keep the configuration intact.
+ both VRFs. It is to be noted that *Zebra* does not create Linux VRF.
+ The network administrator can however decide to provision this command in
+ configuration file to provide more clarity about the intended configuration.
.. index:: netns NAMESPACE
.. clicmd:: netns NAMESPACE
when *Zebra* is run in :option:`-n` mode. This command reflects which *Linux
network namespace* is to be mapped with *Zebra* VRF. It is to be noted that
*Zebra* creates and detects added/suppressed VRFs from the Linux environment
- (in fact, those managed with iproute2). Provisioning this command is used to
- keep the configuration intact.
+ (in fact, those managed with iproute2). The network administrator can however
+ decide to provision this command in configuration file to provide more clarity
+ about the intended configuration.
.. index:: ip route NETWORK NETMASK GATEWAY NEXTHOPVRF
.. clicmd:: ip route NETWORK NETMASK GATEWAY NEXTHOPVRF
struct subtlv_header *tlvh = (struct subtlv_header *)subtlvs;
uint16_t sum = 0;
- for (; sum < subtlv_len; tlvh = SUBTLV_HDR_NEXT(tlvh)) {
+ for (; sum < subtlv_len;
+ tlvh = (struct subtlv_header *)(subtlvs + sum)) {
+ if (subtlv_len - sum < SUBTLV_SIZE(tlvh)) {
+ sbuf_push(buf, indent, "Available data %" PRIu8 " is less than TLV size %u!\n",
+ subtlv_len - sum, SUBTLV_SIZE(tlvh));
+ return;
+ }
+
switch (tlvh->type) {
case TE_SUBTLV_ADMIN_GRP:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Administrative Group!\n");
+ return;
+ }
sum += print_subtlv_admin_grp(buf, indent,
(struct te_subtlv_admin_grp *)tlvh);
break;
case TE_SUBTLV_LLRI:
+ if (tlvh->length != TE_SUBTLV_LLRI_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Link ID!\n");
+ return;
+ }
sum += print_subtlv_llri(buf, indent,
(struct te_subtlv_llri *)tlvh);
break;
case TE_SUBTLV_LOCAL_IPADDR:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Local IP address!\n");
+ return;
+ }
sum += print_subtlv_local_ipaddr(buf, indent,
(struct te_subtlv_local_ipaddr *)tlvh);
break;
case TE_SUBTLV_RMT_IPADDR:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Remote Interface address!\n");
+ return;
+ }
sum += print_subtlv_rmt_ipaddr(buf, indent,
(struct te_subtlv_rmt_ipaddr *)tlvh);
break;
case TE_SUBTLV_MAX_BW:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Maximum Bandwidth!\n");
+ return;
+ }
sum += print_subtlv_max_bw(buf, indent,
(struct te_subtlv_max_bw *)tlvh);
break;
case TE_SUBTLV_MAX_RSV_BW:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Maximum Reservable Bandwidth!\n");
+ return;
+ }
sum += print_subtlv_max_rsv_bw(buf, indent,
(struct te_subtlv_max_rsv_bw *)tlvh);
break;
case TE_SUBTLV_UNRSV_BW:
+ if (tlvh->length != TE_SUBTLV_UNRSV_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Unreserved Bandwidth!\n");
+ return;
+ }
sum += print_subtlv_unrsv_bw(buf, indent,
(struct te_subtlv_unrsv_bw *)tlvh);
break;
case TE_SUBTLV_TE_METRIC:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Traffic Engineering Metric!\n");
+ return;
+ }
sum += print_subtlv_te_metric(buf, indent,
(struct te_subtlv_te_metric *)tlvh);
break;
case TE_SUBTLV_RAS:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Remote AS number!\n");
+ return;
+ }
sum += print_subtlv_ras(buf, indent,
(struct te_subtlv_ras *)tlvh);
break;
case TE_SUBTLV_RIP:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Remote ASBR IP Address!\n");
+ return;
+ }
sum += print_subtlv_rip(buf, indent,
(struct te_subtlv_rip *)tlvh);
break;
case TE_SUBTLV_AV_DELAY:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Average Link Delay!\n");
+ return;
+ }
sum += print_subtlv_av_delay(buf, indent,
(struct te_subtlv_av_delay *)tlvh);
break;
case TE_SUBTLV_MM_DELAY:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Min/Max Link Delay!\n");
+ return;
+ }
sum += print_subtlv_mm_delay(buf, indent,
(struct te_subtlv_mm_delay *)tlvh);
break;
case TE_SUBTLV_DELAY_VAR:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Delay Variation!\n");
+ return;
+ }
sum += print_subtlv_delay_var(buf, indent,
(struct te_subtlv_delay_var *)tlvh);
break;
case TE_SUBTLV_PKT_LOSS:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Link Packet Loss!\n");
+ return;
+ }
sum += print_subtlv_pkt_loss(buf, indent,
(struct te_subtlv_pkt_loss *)tlvh);
break;
case TE_SUBTLV_RES_BW:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Unidirectional Residual Bandwidth!\n");
+ return;
+ }
sum += print_subtlv_res_bw(buf, indent,
(struct te_subtlv_res_bw *)tlvh);
break;
case TE_SUBTLV_AVA_BW:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Unidirectional Available Bandwidth!\n");
+ return;
+ }
sum += print_subtlv_ava_bw(buf, indent,
(struct te_subtlv_ava_bw *)tlvh);
break;
case TE_SUBTLV_USE_BW:
+ if (tlvh->length != SUBTLV_DEF_SIZE) {
+ sbuf_push(buf, indent, "TLV size does not match expected size for Unidirectional Utilized Bandwidth!\n");
+ return;
+ }
sum += print_subtlv_use_bw(buf, indent,
(struct te_subtlv_use_bw *)tlvh);
break;
copy_items(ISIS_CONTEXT_LSP, ISIS_TLV_MT_ROUTER_INFO,
&tlvs->mt_router_info, &rv->mt_router_info);
- tlvs->mt_router_info_empty = rv->mt_router_info_empty;
+ rv->mt_router_info_empty = tlvs->mt_router_info_empty;
copy_items(ISIS_CONTEXT_LSP, ISIS_TLV_OLDSTYLE_REACH,
&tlvs->oldstyle_reach, &rv->oldstyle_reach);
pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *_hashes;
-/* Allocate a new hash. */
struct hash *hash_create_size(unsigned int size,
unsigned int (*hash_key)(void *),
int (*hash_cmp)(const void *, const void *),
return hash;
}
-/* Allocate a new hash with default hash size. */
struct hash *hash_create(unsigned int (*hash_key)(void *),
int (*hash_cmp)(const void *, const void *),
const char *name)
return hash_create_size(HASH_INITIAL_SIZE, hash_key, hash_cmp, name);
}
-/* Utility function for hash_get(). When this function is specified
- as alloc_func, return arugment as it is. This function is used for
- intern already allocated value. */
void *hash_alloc_intern(void *arg)
{
return arg;
hash->index = new_index;
}
-/* Lookup and return hash backet in hash. If there is no
- corresponding hash backet and alloc_func is specified, create new
- hash backet. */
void *hash_get(struct hash *hash, void *data, void *(*alloc_func)(void *))
{
unsigned int key;
return NULL;
}
-/* Hash lookup. */
void *hash_lookup(struct hash *hash, void *data)
{
return hash_get(hash, data, NULL);
}
-/* Simple Bernstein hash which is simple and fast for common case */
unsigned int string_hash_make(const char *str)
{
unsigned int hash = 0;
return hash;
}
-/* This function release registered value from specified hash. When
- release is successfully finished, return the data pointer in the
- hash backet. */
void *hash_release(struct hash *hash, void *data)
{
void *ret;
return NULL;
}
-/* Iterator function for hash. */
void hash_iterate(struct hash *hash, void (*func)(struct hash_backet *, void *),
void *arg)
{
}
}
-/* Iterator function for hash. */
void hash_walk(struct hash *hash, int (*func)(struct hash_backet *, void *),
void *arg)
{
}
}
-/* Clean up hash. */
void hash_clean(struct hash *hash, void (*free_func)(void *))
{
unsigned int i;
hash->stats.empty = hash->size;
}
-/* Free hash memory. You may call hash_clean before call this
- function. */
+static void hash_to_list_iter(struct hash_backet *hb, void *arg)
+{
+ struct list *list = arg;
+
+ listnode_add(list, hb->data);
+}
+
+struct list *hash_to_list(struct hash *hash)
+{
+ struct list *list = list_new();
+
+ hash_iterate(hash, hash_to_list_iter, list);
+ return list;
+}
+
void hash_free(struct hash *hash)
{
pthread_mutex_lock(&_hashes_mtx);
#define HASHWALK_ABORT -1
struct hash_backet {
- /* if this backet is the head of the linked listed, len denotes the
- * number of
- * elements in the list */
+ /*
+ * if this backet is the head of the linked list, len denotes the
+ * number of elements in the list
+ */
int len;
/* Linked list. */
#define hashcount(X) ((X)->count)
-extern struct hash *hash_create(unsigned int (*)(void *),
- int (*)(const void *, const void *),
- const char *);
-extern struct hash *hash_create_size(unsigned int, unsigned int (*)(void *),
- int (*)(const void *, const void *),
- const char *);
+/*
+ * Create a hash table.
+ *
+ * The created hash table uses chaining and a user-provided comparator function
+ * to resolve collisions. For best performance use a perfect hash function.
+ * Worst case lookup time is O(N) when using a constant hash function. Best
+ * case lookup time is O(1) when using a perfect hash function.
+ *
+ * The initial size of the created hash table is HASH_INITIAL_SIZE.
+ *
+ * hash_key
+ * hash function to use; should return a unique unsigned integer when called
+ * with a data item. Collisions are acceptable.
+ *
+ * hash_cmp
+ * comparison function used for resolving collisions; when called with two
+ * data items, should return nonzero if the two items are equal and 0
+ * otherwise
+ *
+ * name
+ * optional name for the hashtable; this is used when displaying global
+ * hashtable statistics. If this parameter is NULL the hash's name will be
+ * set to NULL and the default name will be displayed when showing
+ * statistics.
+ *
+ * Returns:
+ * a new hash table
+ */
+extern struct hash *hash_create(unsigned int (*hash_key)(void *),
+ int (*hash_cmp)(const void *, const void *),
+ const char *name);
+
+/*
+ * Create a hash table.
+ *
+ * The created hash table uses chaining and a user-provided comparator function
+ * to resolve collisions. For best performance use a perfect hash function.
+ * Worst case lookup time is O(N) when using a constant hash function. Best
+ * case lookup time is O(1) when using a perfect hash function.
+ *
+ * size
+ * initial number of hash buckets to allocate; must be a power of 2 or the
+ * program will assert
+ *
+ * hash_key
+ * hash function to use; should return a unique unsigned integer when called
+ * with a data item. Collisions are acceptable.
+ *
+ * hash_cmp
+ * comparison function used for resolving collisions; when called with two
+ * data items, should return nonzero if the two items are equal and 0
+ * otherwise
+ *
+ * name
+ * optional name for the hashtable; this is used when displaying global
+ * hashtable statistics. If this parameter is NULL the hash's name will be
+ * set to NULL and the default name will be displayed when showing
+ * statistics.
+ *
+ * Returns:
+ * a new hash table
+ */
+extern struct hash *
+hash_create_size(unsigned int size, unsigned int (*hash_key)(void *),
+ int (*hash_cmp)(const void *, const void *), const char *name);
+
+/*
+ * Retrieve or insert data from / into a hash table.
+ *
+ * This function is somewhat counterintuitive in its usage. In order to look up
+ * an element from its key, you must provide the data item itself, with the
+ * portions used in the hash function set to the same values as the data item
+ * to retrieve. To insert a data element, either provide the key as just
+ * described and provide alloc_func as described below to allocate the full
+ * data element, or provide the full data element and pass 'hash_alloc_intern'
+ * to alloc_func.
+ *
+ * hash
+ * hash table to operate on
+ *
+ * data
+ * data to insert or retrieve
+ *
+ * alloc_func
+ * function to call if the item is not found in the hash table. This
+ * function is called with the value of 'data' and should create the data
+ * item to insert and return a pointer to it. If the data has already been
+ * completely created and provided in the 'data' parameter, passing
+ * 'hash_alloc_intern' to this parameter will cause 'data' to be inserted.
+ * If this parameter is NULL, then this call to hash_get is equivalent to
+ * hash_lookup.
+ *
+ * Returns:
+ * the data item found or inserted, or NULL if alloc_func is NULL and the
+ * data is not found
+ */
+extern void *hash_get(struct hash *hash, void *data,
+ void *(*alloc_func)(void *));
+
+/*
+ * Dummy element allocation function.
+ *
+ * See hash_get for details.
+ *
+ * data
+ * data to insert into the hash table
+ *
+ * Returns:
+ * data
+ */
+extern void *hash_alloc_intern(void *data);
+
+/*
+ * Retrieve an item from a hash table.
+ *
+ * This function is equivalent to calling hash_get with alloc_func set to NULL.
+ *
+ * hash
+ * hash table to operate on
+ *
+ * data
+ * data element with values used for key computation set
+ *
+ * Returns:
+ * the data element if found, or NULL if not found
+ */
+extern void *hash_lookup(struct hash *hash, void *data);
-extern void *hash_get(struct hash *, void *, void *(*)(void *));
-extern void *hash_alloc_intern(void *);
-extern void *hash_lookup(struct hash *, void *);
-extern void *hash_release(struct hash *, void *);
+/*
+ * Remove an element from a hash table.
+ *
+ * hash
+ * hash table to operate on
+ *
+ * data
+ * data element to remove with values used for key computation set
+ *
+ * Returns:
+ * the removed element if found, or NULL if not found
+ */
+extern void *hash_release(struct hash *hash, void *data);
-extern void hash_iterate(struct hash *, void (*)(struct hash_backet *, void *),
- void *);
+/*
+ * Iterate over the elements in a hash table.
+ *
+ * It is safe to delete items passed to the iteration function from the hash
+ * table during iteration.
+ *
+ * hash
+ * hash table to operate on
+ *
+ * func
+ * function to call with each data item
+ *
+ * arg
+ * arbitrary argument passed as the second parameter in each call to 'func'
+ */
+extern void hash_iterate(struct hash *hash,
+ void (*func)(struct hash_backet *, void *), void *arg);
-extern void hash_walk(struct hash *, int (*)(struct hash_backet *, void *),
- void *);
+/*
+ * Iterate over the elements in a hash table, stopping on condition.
+ *
+ * It is safe to delete items passed to the iteration function from the hash
+ * table during iteration.
+ *
+ * hash
+ * hash table to operate on
+ *
+ * func
+ * function to call with each data item. If this function returns
+ * HASHWALK_ABORT then the iteration stops.
+ *
+ * arg
+ * arbitrary argument passed as the second parameter in each call to 'func'
+ */
+extern void hash_walk(struct hash *hash,
+ int (*func)(struct hash_backet *, void *), void *arg);
-extern void hash_clean(struct hash *, void (*)(void *));
-extern void hash_free(struct hash *);
+/*
+ * Remove all elements from a hash table.
+ *
+ * hash
+ * hash table to operate on
+ *
+ * free_func
+ * function to call with each removed item; intended to free the data
+ */
+extern void hash_clean(struct hash *hash, void (*free_func)(void *));
+
+/*
+ * Delete a hash table.
+ *
+ * This function assumes the table is empty. Call hash_clean to delete the
+ * hashtable contents if necessary.
+ *
+ * hash
+ * hash table to delete
+ */
+extern void hash_free(struct hash *hash);
+/*
+ * Converts a hash table to an unsorted linked list.
+ * Does not modify the hash table in any way.
+ *
+ * hash
+ * hash table to convert
+ */
+extern struct list *hash_to_list(struct hash *hash);
+
+/*
+ * Hash a string using the modified Bernstein hash.
+ *
+ * This is not a perfect hash function.
+ *
+ * str
+ * string to hash
+ *
+ * Returns:
+ * modified Bernstein hash of the string
+ */
extern unsigned int string_hash_make(const char *);
+/*
+ * Install CLI commands for viewing global hash table statistics.
+ */
extern void hash_cmd_init(void);
#endif /* _ZEBRA_HASH_H */
*/
#include <zebra.h>
+#include <stdlib.h>
#include "linklist.h"
#include "memory.h"
DEFINE_MTYPE_STATIC(LIB, LINK_LIST, "Link List")
DEFINE_MTYPE_STATIC(LIB, LINK_NODE, "Link Node")
-/* Allocate new list. */
struct list *list_new(void)
{
return XCALLOC(MTYPE_LINK_LIST, sizeof(struct list));
XFREE(MTYPE_LINK_NODE, node);
}
-/* Add new data to the list. */
void listnode_add(struct list *list, void *val)
{
struct listnode *node;
list->count++;
}
-/*
- * Add a node to the list. If the list was sorted according to the
- * cmp function, insert a new node with the given val such that the
- * list remains sorted. The new node is always inserted; there is no
- * notion of omitting duplicates.
- */
void listnode_add_sort(struct list *list, void *val)
{
struct listnode *n;
return nn;
}
-/* Move given listnode to tail of the list */
void listnode_move_to_tail(struct list *l, struct listnode *n)
{
LISTNODE_DETACH(l, n);
LISTNODE_ATTACH(l, n);
}
-/* Delete specific date pointer from the list. */
void listnode_delete(struct list *list, void *val)
{
struct listnode *node;
}
}
-/* Return first node's data if it is there. */
void *listnode_head(struct list *list)
{
struct listnode *node;
return NULL;
}
-/* Delete all listnode from the list. */
void list_delete_all_node(struct list *list)
{
struct listnode *node;
list->count = 0;
}
-/* Delete all listnode then free list itself. */
void list_delete_and_null(struct list **list)
{
assert(*list);
list_delete_and_null(&list);
}
-/* Lookup the node which has given data. */
struct listnode *listnode_lookup(struct list *list, void *data)
{
struct listnode *node;
return NULL;
}
-/* Delete the node from list. For ospfd and ospf6d. */
void list_delete_node(struct list *list, struct listnode *node)
{
if (node->prev)
listnode_free(node);
}
-/* ospf_spf.c */
-void list_add_list(struct list *l, struct list *m)
+void list_add_list(struct list *list, struct list *add)
{
struct listnode *n;
- for (n = listhead(m); n; n = listnextnode(n))
- listnode_add(l, n->data);
+ for (n = listhead(add); n; n = listnextnode(n))
+ listnode_add(list, n->data);
+}
+
+/* Shallow-copy a list: the data pointers are shared with the original,
+ * and the cmp/del callbacks are copied over. Caller owns the new list.
+ */
+struct list *list_dup(struct list *list)
+{
+	struct list *new = list_new();
+	struct listnode *ln;
+	void *data;
+
+	new->cmp = list->cmp;
+	new->del = list->del;
+
+	for (ALL_LIST_ELEMENTS_RO(list, ln, data))
+		listnode_add(new, data);
+
+	return new;
+}
+
+/* Sort a list in place: drain all data pointers into a temporary array,
+ * qsort() it, then re-append in sorted order. The sort is not stable.
+ */
+void list_sort(struct list *list, int (*cmp)(const void **, const void **))
+{
+	struct listnode *ln, *nn;
+	/* NOTE(review): this 'i' is shadowed by the unsigned 'i' declared
+	 * in the rebuild loop below — harmless here, but confusing.
+	 */
+	int i = -1;
+	void *data;
+	size_t n = list->count;
+	void **items = XCALLOC(MTYPE_TMP, (sizeof(void *)) * n);
+	/* qsort hands the comparator pointers to the array elements, i.e.
+	 * void **, which is exactly what 'cmp' expects; the cast only
+	 * adjusts the declared signature to qsort's.
+	 */
+	int (*realcmp)(const void *, const void *) =
+		(int (*)(const void *, const void *))cmp;
+
+	/* Empty the list into items[], freeing each listnode. */
+	for (ALL_LIST_ELEMENTS(list, ln, nn, data)) {
+		items[++i] = data;
+		list_delete_node(list, ln);
+	}
+
+	qsort(items, n, sizeof(void *), realcmp);
+
+	/* Re-populate the (now empty) list in sorted order. */
+	for (unsigned int i = 0; i < n; ++i)
+		listnode_add(list, items[i]);
+
+	XFREE(MTYPE_TMP, items);
+}
/* return X->data only if X and X->data are not NULL */
#define listgetdata(X) (assert(X), assert((X)->data != NULL), (X)->data)
-/* Prototypes. */
-extern struct list *
-list_new(void); /* encouraged: set list.del callback on new lists */
-
-extern void listnode_add(struct list *, void *);
-extern void listnode_add_sort(struct list *, void *);
-extern struct listnode *listnode_add_after(struct list *, struct listnode *,
- void *);
-extern struct listnode *listnode_add_before(struct list *, struct listnode *,
- void *);
-extern void listnode_move_to_tail(struct list *, struct listnode *);
-extern void listnode_delete(struct list *, void *);
-extern struct listnode *listnode_lookup(struct list *, void *);
-extern void *listnode_head(struct list *);
+/*
+ * Create a new linked list.
+ *
+ * Returns:
+ * the created linked list
+ */
+extern struct list *list_new(void);
+
+/*
+ * Add a new element to the tail of a list.
+ *
+ * Runtime is O(1).
+ *
+ * list
+ * list to operate on
+ *
+ * data
+ * element to add
+ */
+extern void listnode_add(struct list *list, void *data);
+
+/*
+ * Insert a new element into a list with insertion sort.
+ *
+ * If list->cmp is set, this function is used to determine the position to
+ * insert the new element. If it is not set, this function is equivalent to
+ * listnode_add.
+ *
+ * Runtime is O(N).
+ *
+ * list
+ * list to operate on
+ *
+ * val
+ * element to add
+ */
+extern void listnode_add_sort(struct list *list, void *val);
+
+/*
+ * Insert a new element into a list after another element.
+ *
+ * Runtime is O(1).
+ *
+ * list
+ * list to operate on
+ *
+ * pp
+ * listnode to insert after
+ *
+ * data
+ * data to insert
+ *
+ * Returns:
+ * pointer to newly created listnode that contains the inserted data
+ */
+extern struct listnode *listnode_add_after(struct list *list,
+ struct listnode *pp, void *data);
+
+/*
+ * Insert a new element into a list before another element.
+ *
+ * Runtime is O(1).
+ *
+ * list
+ * list to operate on
+ *
+ * pp
+ * listnode to insert before
+ *
+ * data
+ * data to insert
+ *
+ * Returns:
+ * pointer to newly created listnode that contains the inserted data
+ */
+extern struct listnode *listnode_add_before(struct list *list,
+ struct listnode *pp, void *data);
+
+/*
+ * Move a node to the tail of a list.
+ *
+ * Runtime is O(1).
+ *
+ * list
+ * list to operate on
+ *
+ * node
+ * node to move to tail
+ */
+extern void listnode_move_to_tail(struct list *list, struct listnode *node);
+
+/*
+ * Delete an element from a list.
+ *
+ * Runtime is O(N).
+ *
+ * list
+ * list to operate on
+ *
+ * data
+ * data to insert into list
+ */
+extern void listnode_delete(struct list *list, void *data);
+
+/*
+ * Find the listnode corresponding to an element in a list.
+ *
+ * list
+ * list to operate on
+ *
+ * data
+ * data to search for
+ *
+ * Returns:
+ * pointer to listnode storing the given data if found, NULL otherwise
+ */
+extern struct listnode *listnode_lookup(struct list *list, void *data);
+
+/*
+ * Retrieve the element at the head of a list.
+ *
+ * list
+ * list to operate on
+ *
+ * Returns:
+ * data at head of list, or NULL if list is empty
+ */
+extern void *listnode_head(struct list *list);
+
+/*
+ * Duplicate a list.
+ *
+ * list
+ * list to duplicate
+ *
+ * Returns:
+ * copy of the list
+ */
+extern struct list *list_dup(struct list *l);
+
+/*
+ * Sort a list in place.
+ *
+ * The sorting algorithm used is quicksort. Runtimes are equivalent to those of
+ * quicksort plus N. The sort is not stable.
+ *
+ * For portability reasons, the comparison function takes a pointer to pointer
+ * to void. This pointer should be dereferenced to get the actual data pointer.
+ * It is always safe to do this.
+ *
+ * list
+ * list to sort
+ *
+ * cmp
+ * comparison function for quicksort. Should return less than, equal to or
+ * greater than zero if the first argument is less than, equal to or greater
+ * than the second argument.
+ */
+extern void list_sort(struct list *list,
+ int (*cmp)(const void **, const void **));
/*
* The usage of list_delete is being transitioned to pass in
#if defined(VERSION_TYPE_DEV) && CONFDATE > 20181001
CPP_NOTICE("list_delete without double pointer is deprecated, please fixup")
#endif
-extern void list_delete_and_null(struct list **);
-extern void list_delete_original(struct list *);
+
+/*
+ * Delete a list and NULL its pointer.
+ *
+ * If non-null, list->del is called with each data element.
+ *
+ * plist
+ * pointer to list pointer; this will be set to NULL after the list has been
+ * deleted
+ */
+extern void list_delete_and_null(struct list **plist);
+
+/*
+ * Delete a list.
+ *
+ * If non-null, list->del is called with each data element.
+ *
+ * plist
+ * pointer to list pointer
+ */
+extern void list_delete_original(struct list *list);
#define list_delete(X) \
list_delete_original((X)) \
CPP_WARN("Please transition to using list_delete_and_null")
list_delete_original((X)) \
CPP_WARN("Please transition tousing list_delete_and_null")
-extern void list_delete_all_node(struct list *);
+/*
+ * Delete all nodes from a list without deleting the list itself.
+ *
+ * If non-null, list->del is called with each data element.
+ *
+ * list
+ * list to operate on
+ */
+extern void list_delete_all_node(struct list *list);
-/* For ospfd and ospf6d. */
-extern void list_delete_node(struct list *, struct listnode *);
+/*
+ * Delete a node from a list.
+ *
+ * list->del is not called with the data associated with the node.
+ *
+ * Runtime is O(1).
+ *
+ * list
+ * list to operate on
+ *
+ * node
+ * the node to delete
+ */
+extern void list_delete_node(struct list *list, struct listnode *node);
-/* For ospf_spf.c */
-extern void list_add_list(struct list *, struct list *);
+/*
+ * Append a list to an existing list.
+ *
+ * Runtime is O(N) where N = listcount(add).
+ *
+ * list
+ * list to append to
+ *
+ * add
+ * list to append
+ */
+extern void list_add_list(struct list *list, struct list *add);
/* List iteration macro.
* Usage: for (ALL_LIST_ELEMENTS (...) { ... }
DESC_ENTRY(ZEBRA_ADVERTISE_ALL_VNI),
DESC_ENTRY(ZEBRA_ADVERTISE_DEFAULT_GW),
DESC_ENTRY(ZEBRA_ADVERTISE_SUBNET),
+ DESC_ENTRY(ZEBRA_LOCAL_ES_ADD),
+ DESC_ENTRY(ZEBRA_LOCAL_ES_DEL),
DESC_ENTRY(ZEBRA_VNI_ADD),
DESC_ENTRY(ZEBRA_VNI_DEL),
DESC_ENTRY(ZEBRA_L3VNI_ADD),
static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
{
if (__builtin_expect(ptr == NULL, 0)) {
- memory_oom(size, mt->name);
+ if (size) {
+ /* malloc(0) is allowed to return NULL */
+ memory_oom(size, mt->name);
+ }
return NULL;
}
mt_count_alloc(mt, size);
struct pbr_filter {
uint32_t filter_bm; /* not encoded by zapi
*/
-#define PBR_FILTER_SRC_IP (1 << 0)
-#define PBR_FILTER_DST_IP (1 << 1)
-#define PBR_FILTER_SRC_PORT (1 << 2)
-#define PBR_FILTER_DST_PORT (1 << 3)
-#define PBR_FILTER_FWMARK (1 << 4)
+#define PBR_FILTER_SRC_IP (1 << 0)
+#define PBR_FILTER_DST_IP (1 << 1)
+#define PBR_FILTER_SRC_PORT (1 << 2)
+#define PBR_FILTER_DST_PORT (1 << 3)
+#define PBR_FILTER_FWMARK (1 << 4)
+#define PBR_FILTER_PROTO (1 << 5)
+#define PBR_FILTER_SRC_PORT_RANGE (1 << 6)
+#define PBR_FILTER_DST_PORT_RANGE (1 << 7)
/* Source and Destination IP address with masks. */
struct prefix src_ip;
#define MASKBIT(offset) ((0xff << (PNBBY - (offset))) & 0xff)
+/* Debug helper: log a prefix in presentation form, then hexdump the
+ * raw bytes of the whole struct prefix.
+ */
+void prefix_hexdump(const struct prefix *p)
+{
+	char buf[PREFIX_STRLEN];
+
+	zlog_debug("prefix: %s",
+		   prefix2str(p, buf, sizeof(buf)));
+	zlog_hexdump(p, sizeof(struct prefix));
+}
+
int is_zero_mac(struct ethaddr *mac)
{
int i = 0;
static const char *prefixevpn_es2str(const struct prefix_evpn *p, char *str,
int size)
{
- snprintf(str, size, "Unsupported EVPN prefix");
+ char buf[ESI_STR_LEN];
+
+ snprintf(str, size, "[%d]:[%s]:[%s]/%d", p->prefix.route_type,
+ esi_to_str(&p->prefix.es_addr.esi, buf, sizeof(buf)),
+ inet_ntoa(p->prefix.es_addr.ip.ipaddr_v4),
+ p->prefixlen);
return str;
}
offsetof(struct prefix, u.prefix) + PSIZE(copy.prefixlen),
0x55aa5a5a);
}
+
+/* Parse a string into the internal ESI representation.
+ *
+ * Accepted format: aa:aa:aa:aa:aa:aa:aa:aa:aa:aa (ten 2-hex-digit bytes,
+ * colon-separated).
+ *
+ * If the 'esi' parameter is NULL, only validity checking is performed.
+ *
+ * Returns 1 on success, 0 on a NULL or malformed input string.
+ */
+int str_to_esi(const char *str, esi_t *esi)
+{
+	int i;
+	unsigned int a[ESI_BYTES];
+
+	if (!str)
+		return 0;
+
+	/* sscanf returns the number of successful conversions; anything
+	 * short of ESI_BYTES means the input was truncated or malformed.
+	 */
+	if (sscanf(str, "%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x:%2x",
+		   a + 0, a + 1, a + 2, a + 3,
+		   a + 4, a + 5, a + 6, a + 7,
+		   a + 8, a + 9)
+	    != ESI_BYTES) {
+		/* error in incoming str length */
+		return 0;
+	}
+
+	/* valid ESI */
+	if (!esi)
+		return 1;
+	for (i = 0; i < ESI_BYTES; ++i)
+		esi->val[i] = a[i] & 0xff;
+	return 1;
+}
+
+/* Render an ESI as ten colon-separated hex bytes.
+ *
+ * If 'buf' is NULL, a buffer of ESI_STR_LEN bytes is allocated from
+ * MTYPE_TMP and the caller must free it; otherwise 'buf' is used and
+ * must be at least ESI_STR_LEN bytes (asserted).
+ *
+ * Returns the output buffer, or NULL if 'esi' is NULL.
+ */
+char *esi_to_str(const esi_t *esi, char *buf, int size)
+{
+	char *ptr;
+
+	if (!esi)
+		return NULL;
+	if (!buf)
+		ptr = (char *)XMALLOC(MTYPE_TMP,
+				      ESI_STR_LEN * sizeof(char));
+	else {
+		assert(size >= ESI_STR_LEN);
+		ptr = buf;
+	}
+
+	snprintf(ptr, ESI_STR_LEN,
+		 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+		 esi->val[0], esi->val[1], esi->val[2],
+		 esi->val[3], esi->val[4], esi->val[5],
+		 esi->val[6], esi->val[7], esi->val[8],
+		 esi->val[9]);
+	return ptr;
+}
#define ETH_ALEN 6
#endif
+#define ESI_BYTES 10
+#define ESI_STR_LEN (3 * ESI_BYTES)
+
#define ETHER_ADDR_STRLEN (3*ETH_ALEN)
/*
* there isn't a portable ethernet address type. We define our
return IS_IPADDR_NONE(&(evp)->prefix.macip_addr.ip);
if (evp->prefix.route_type == 3)
return IS_IPADDR_NONE(&(evp)->prefix.imet_addr.ip);
+ if (evp->prefix.route_type == 4)
+ return IS_IPADDR_NONE(&(evp)->prefix.es_addr.ip);
if (evp->prefix.route_type == 5)
return IS_IPADDR_NONE(&(evp)->prefix.prefix_addr.ip);
return 0;
return IS_IPADDR_V4(&(evp)->prefix.macip_addr.ip);
if (evp->prefix.route_type == 3)
return IS_IPADDR_V4(&(evp)->prefix.imet_addr.ip);
+ if (evp->prefix.route_type == 4)
+ return IS_IPADDR_V4(&(evp)->prefix.es_addr.ip);
if (evp->prefix.route_type == 5)
return IS_IPADDR_V4(&(evp)->prefix.prefix_addr.ip);
return 0;
return IS_IPADDR_V6(&(evp)->prefix.macip_addr.ip);
if (evp->prefix.route_type == 3)
return IS_IPADDR_V6(&(evp)->prefix.imet_addr.ip);
+ if (evp->prefix.route_type == 4)
+ return IS_IPADDR_V6(&(evp)->prefix.es_addr.ip);
if (evp->prefix.route_type == 5)
return IS_IPADDR_V6(&(evp)->prefix.prefix_addr.ip);
return 0;
extern unsigned prefix_hash_key(void *pp);
+extern int str_to_esi(const char *str, esi_t *esi);
+extern char *esi_to_str(const esi_t *esi, char *buf, int size);
+extern void prefix_hexdump(const struct prefix *p);
+extern void prefix_evpn_hexdump(const struct prefix_evpn *p);
+
static inline int ipv6_martian(struct in6_addr *addr)
{
struct in6_addr localhost_addr;
return (p->prefixlen == IPV6_MAX_BITLEN);
return 0;
}
-
#endif /* _ZEBRA_PREFIX_H */
lib_LTLIBRARIES += lib/libfrrsnmp.la
endif
-lib_libfrrsnmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS)
+lib_libfrrsnmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
lib_libfrrsnmp_la_LDFLAGS = -version-info 0:0:0
lib_libfrrsnmp_la_LIBADD = lib/libfrr.la $(SNMP_LIBS)
lib_libfrrsnmp_la_SOURCES = \
if (vrf_is_mapped_on_netns(vrf_id))
return fd;
#ifdef SO_BINDTODEVICE
- ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name));
+ ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name)+1);
if (ret < 0)
zlog_debug("bind to interface %s failed, errno=%d", name,
errno);
if (zclient->fec_update)
(*zclient->fec_update)(command, zclient, length);
break;
+ case ZEBRA_LOCAL_ES_ADD:
+ if (zclient->local_es_add)
+ (*zclient->local_es_add)(command, zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_LOCAL_ES_DEL:
+ if (zclient->local_es_del)
+ (*zclient->local_es_del)(command, zclient, length,
+ vrf_id);
+ break;
case ZEBRA_VNI_ADD:
if (zclient->local_vni_add)
(*zclient->local_vni_add)(command, zclient, length,
ZEBRA_ADVERTISE_DEFAULT_GW,
ZEBRA_ADVERTISE_SUBNET,
ZEBRA_ADVERTISE_ALL_VNI,
+ ZEBRA_LOCAL_ES_ADD,
+ ZEBRA_LOCAL_ES_DEL,
ZEBRA_VNI_ADD,
ZEBRA_VNI_DEL,
ZEBRA_L3VNI_ADD,
int (*redistribute_route_del)(int, struct zclient *, uint16_t,
vrf_id_t);
int (*fec_update)(int, struct zclient *, uint16_t);
+ int (*local_es_add)(int command, struct zclient *zclient,
+ uint16_t length, vrf_id_t vrf_id);
+ int (*local_es_del)(int command, struct zclient *zclient,
+ uint16_t length, vrf_id_t vrf_id);
int (*local_vni_add)(int, struct zclient *, uint16_t, vrf_id_t);
int (*local_vni_del)(int, struct zclient *, uint16_t, vrf_id_t);
int (*local_l3vni_add)(int, struct zclient *, uint16_t, vrf_id_t);
ZAPI_RULE_FAIL_INSTALL,
ZAPI_RULE_INSTALLED,
ZAPI_RULE_REMOVED,
+ ZAPI_RULE_FAIL_REMOVE,
};
enum ipset_type {
ZAPI_IPSET_FAIL_INSTALL,
ZAPI_IPSET_INSTALLED,
ZAPI_IPSET_REMOVED,
+ ZAPI_IPSET_FAIL_REMOVE,
};
enum zapi_ipset_entry_notify_owner {
ZAPI_IPSET_ENTRY_FAIL_INSTALL,
ZAPI_IPSET_ENTRY_INSTALLED,
ZAPI_IPSET_ENTRY_REMOVED,
+ ZAPI_IPSET_ENTRY_FAIL_REMOVE,
};
enum zapi_iptable_notify_owner {
ZAPI_IPTABLE_FAIL_INSTALL,
ZAPI_IPTABLE_INSTALLED,
ZAPI_IPTABLE_REMOVED,
+ ZAPI_IPTABLE_FAIL_REMOVE,
};
/* Zebra MAC types */
summary_table = area->summary_router;
} else {
if (IS_OSPF6_DEBUG_ABR
- || IS_OSPF6_DEBUG_ORIGINATE(INTER_PREFIX)) {
+ || IS_OSPF6_DEBUG_ORIGINATE(INTER_PREFIX))
is_debug++;
+
+ if (route->type == OSPF6_DEST_TYPE_NETWORK &&
+ route->path.origin.type ==
+ htons(OSPF6_LSTYPE_INTER_PREFIX)) {
+ if (!CHECK_FLAG(route->flag, OSPF6_ROUTE_BEST)) {
+ if (is_debug) {
+ inet_ntop(AF_INET,
+ &(ADV_ROUTER_IN_PREFIX(
+ &route->prefix)), buf,
+ sizeof(buf));
+ zlog_debug(
+ "%s: route %s with cost %u is not best, ignore."
+ , __PRETTY_FUNCTION__, buf,
+ route->path.cost);
+ }
+ return 0;
+ }
+ }
+
+ if (is_debug) {
prefix2str(&route->prefix, buf, sizeof(buf));
- zlog_debug("Originating summary in area %s for %s",
- area->name, buf);
+ zlog_debug("Originating summary in area %s for %s cost %u",
+ area->name, buf, route->path.cost);
}
summary_table = area->summary_prefix;
}
ospf6_route_delete(def);
}
+/* Replace a stale ECMP path on old_route whose origin matches route's
+ * but whose cost changed: drop that path's nexthops from old_route's
+ * nh_list, remove the path, rebuild nh_list from the surviving paths,
+ * re-run the table's hook_add, and if the removed path supplied
+ * old_route's path.origin, re-seed the origin from the head path.
+ */
+void ospf6_abr_old_path_update(struct ospf6_route *old_route,
+			       struct ospf6_route *route,
+			       struct ospf6_route_table *table)
+{
+	struct ospf6_path *o_path = NULL;
+	struct listnode *anode, *anext;
+	struct listnode *nnode, *rnode, *rnext;
+	struct ospf6_nexthop *nh, *rnh;
+
+	for (ALL_LIST_ELEMENTS(old_route->paths, anode, anext, o_path)) {
+		/* Only consider the path with the same area and origin. */
+		if (o_path->area_id != route->path.area_id ||
+		    (memcmp(&(o_path)->origin, &(route)->path.origin,
+			    sizeof(struct ospf6_ls_origin)) != 0))
+			continue;
+
+		/* Costs unchanged: nothing to update for this path. */
+		if ((o_path->cost == route->path.cost) &&
+		    (o_path->u.cost_e2 == route->path.u.cost_e2))
+			continue;
+
+		/* Remove this path's nexthops from the route's nh_list. */
+		for (ALL_LIST_ELEMENTS_RO(o_path->nh_list, nnode, nh)) {
+			for (ALL_LIST_ELEMENTS(old_route->nh_list, rnode,
+					       rnext, rnh)) {
+				if (!ospf6_nexthop_is_same(rnh, nh))
+					continue;
+				listnode_delete(old_route->nh_list, rnh);
+				ospf6_nexthop_delete(rnh);
+			}
+
+		}
+
+		listnode_delete(old_route->paths, o_path);
+		ospf6_path_free(o_path);
+
+		/* Rebuild nh_list from the remaining paths.
+		 * NOTE(review): this inner loop reuses anode/anext from the
+		 * outer loop, so the outer iteration effectively ends after
+		 * the first matching path is processed — confirm this is
+		 * intended (at most one path should match a given origin).
+		 */
+		for (ALL_LIST_ELEMENTS(old_route->paths, anode,
+				       anext, o_path)) {
+			ospf6_merge_nexthops(old_route->nh_list,
+					     o_path->nh_list);
+		}
+
+		if (IS_OSPF6_DEBUG_ABR || IS_OSPF6_DEBUG_EXAMIN(INTER_PREFIX))
+			zlog_debug("%s: paths %u nh %u", __PRETTY_FUNCTION__,
+				   old_route->paths ?
+				   listcount(old_route->paths) : 0,
+				   old_route->nh_list ?
+				   listcount(old_route->nh_list) : 0);
+
+		/* Push the updated nexthop set to RIB/FIB (or trigger the
+		 * summary update for inter-router routes).
+		 */
+		if (table->hook_add)
+			(*table->hook_add)(old_route);
+
+		/* If the removed path was the route's representative origin,
+		 * take the origin from the (sorted) head of the path list.
+		 */
+		if (old_route->path.origin.id == route->path.origin.id &&
+		    old_route->path.origin.adv_router ==
+		    route->path.origin.adv_router) {
+			struct ospf6_path *h_path;
+
+			h_path = (struct ospf6_path *)
+				listgetdata(listhead(old_route->paths));
+			old_route->path.origin.type = h_path->origin.type;
+			old_route->path.origin.id = h_path->origin.id;
+			old_route->path.origin.adv_router =
+				h_path->origin.adv_router;
+		}
+	}
+}
+
+/* Withdraw the contribution of one LSA from an existing route. With a
+ * single path the whole route is removed; with ECMP paths, only the
+ * path(s) matching the LSA's origin are dropped (along with their
+ * nexthops), and the route is either re-announced with the remaining
+ * paths or removed when none are left.
+ */
+void ospf6_abr_old_route_remove(struct ospf6_lsa *lsa,
+				struct ospf6_route *old,
+				struct ospf6_route_table *table)
+{
+	if (listcount(old->paths) > 1) {
+		struct listnode *anode, *anext, *nnode, *rnode, *rnext;
+		struct ospf6_path *o_path;
+		struct ospf6_nexthop *nh, *rnh;
+		bool nh_updated = false;
+		char buf[PREFIX2STR_BUFFER];
+
+		/* Drop every path originated by this LSA and purge its
+		 * nexthops from the route's nh_list.
+		 * NOTE(review): the skip condition uses '&&', so a path is
+		 * processed when EITHER adv_router OR id matches the LSA;
+		 * matching both usually requires '||' here — confirm intent.
+		 */
+		for (ALL_LIST_ELEMENTS(old->paths, anode, anext, o_path)) {
+			if (o_path->origin.adv_router != lsa->header->adv_router
+			    && o_path->origin.id != lsa->header->id)
+				continue;
+			for (ALL_LIST_ELEMENTS_RO(o_path->nh_list, nnode, nh)) {
+				for (ALL_LIST_ELEMENTS(old->nh_list,
+						       rnode, rnext, rnh)) {
+					if (!ospf6_nexthop_is_same(rnh, nh))
+						continue;
+					listnode_delete(old->nh_list, rnh);
+					ospf6_nexthop_delete(rnh);
+				}
+			}
+			listnode_delete(old->paths, o_path);
+			ospf6_path_free(o_path);
+			nh_updated = true;
+		}
+
+		if (nh_updated) {
+			if (listcount(old->paths)) {
+				if (IS_OSPF6_DEBUG_ABR ||
+				    IS_OSPF6_DEBUG_EXAMIN(INTER_PREFIX)) {
+					prefix2str(&old->prefix, buf,
+						   sizeof(buf));
+					zlog_debug("%s: old %s updated nh %u",
+						   __PRETTY_FUNCTION__, buf,
+						   old->nh_list ?
+						   listcount(old->nh_list) : 0);
+				}
+
+				/* Re-announce with the surviving nexthops. */
+				if (table->hook_add)
+					(*table->hook_add)(old);
+
+				/* If the removed LSA supplied the route's
+				 * origin, re-seed it from the head path.
+				 */
+				if ((old->path.origin.id == lsa->header->id) &&
+				    (old->path.origin.adv_router
+				     == lsa->header->adv_router)) {
+					struct ospf6_path *h_path;
+
+					h_path = (struct ospf6_path *)
+						listgetdata(
+							listhead(old->paths));
+					old->path.origin.type =
+						h_path->origin.type;
+					old->path.origin.id = h_path->origin.id;
+					old->path.origin.adv_router =
+						h_path->origin.adv_router;
+				}
+			} else
+				ospf6_route_remove(old, table);
+		}
+	} else
+		ospf6_route_remove(old, table);
+
+}
+
/* RFC 2328 16.2. Calculating the inter-area routes */
void ospf6_abr_examin_summary(struct ospf6_lsa *lsa, struct ospf6_area *oa)
{
struct ospf6_inter_prefix_lsa *prefix_lsa = NULL;
struct ospf6_inter_router_lsa *router_lsa = NULL;
bool old_entry_updated = false;
+ struct ospf6_path *path, *o_path, *ecmp_path;
+ struct listnode *anode;
+ char adv_router[16];
memset(&prefix, 0, sizeof(prefix));
while (route && ospf6_route_is_prefix(&prefix, route)) {
if (route->path.area_id == oa->area_id
&& route->path.origin.type == lsa->header->type
- && route->path.origin.id == lsa->header->id
- && route->path.origin.adv_router == lsa->header->adv_router
- && !CHECK_FLAG(route->flag, OSPF6_ROUTE_WAS_REMOVED))
- old = route;
+ && !CHECK_FLAG(route->flag, OSPF6_ROUTE_WAS_REMOVED)) {
+ /* LSA adv. router could be part of route's
+ * paths list. Find the existing path and set
+ * old as the route.
+ */
+ if (listcount(route->paths) > 1) {
+ struct listnode *anode;
+ struct ospf6_path *o_path;
+
+ for (ALL_LIST_ELEMENTS_RO(route->paths, anode,
+ o_path)) {
+ inet_ntop(AF_INET,
+ &o_path->origin.adv_router,
+ adv_router,
+ sizeof(adv_router));
+ if (o_path->origin.id == lsa->header->id
+ && o_path->origin.adv_router ==
+ lsa->header->adv_router) {
+ old = route;
+
+ if (is_debug)
+ zlog_debug("%s: old entry found in paths, adv_router %s",
+ __PRETTY_FUNCTION__,
+ adv_router);
+
+ break;
+ }
+ }
+ } else if (route->path.origin.id == lsa->header->id &&
+ route->path.origin.adv_router ==
+ lsa->header->adv_router)
+ old = route;
+ }
route = ospf6_route_next(route);
}
if (route)
if (is_debug)
zlog_debug("cost is LS_INFINITY, ignore");
if (old)
- ospf6_route_remove(old, table);
+ ospf6_abr_old_route_remove(lsa, old, table);
return;
}
if (OSPF6_LSA_IS_MAXAGE(lsa)) {
zlog_debug("%s: LSA %s is MaxAge, ignore",
__PRETTY_FUNCTION__, lsa->name);
if (old)
- ospf6_route_remove(old, table);
+ ospf6_abr_old_route_remove(lsa, old, table);
return;
}
/* (2) if the LSA is self-originated, ignore */
if (lsa->header->adv_router == oa->ospf6->router_id) {
if (is_debug)
- zlog_debug("LSA is self-originated, ignore");
+ zlog_debug("LSA %s is self-originated, ignore",
+ lsa->name);
if (old)
ospf6_route_remove(old, table);
return;
}
/* Check input prefix-list */
- if (PREFIX_LIST_IN(oa))
+ if (PREFIX_LIST_IN(oa)) {
if (prefix_list_apply(PREFIX_LIST_IN(oa), &prefix)
!= PREFIX_PERMIT) {
if (is_debug)
ospf6_route_remove(old, table);
return;
}
+ }
/* (5),(6): the path preference is handled by the sorting
in the routing table. Always install the path by substituting
old route (if any). */
- if (old)
- route = ospf6_route_copy(old);
- else
- route = ospf6_route_create();
+ route = ospf6_route_create();
route->type = type;
route->prefix = prefix;
route->path.type = OSPF6_PATH_TYPE_INTER;
route->path.cost = abr_entry->path.cost + cost;
- /* Inter abr_entry is same as brouter.
- * Avoid duplicate nexthops to brouter and its
- * learnt route. i.e. use merge nexthops.
- */
- ospf6_route_merge_nexthops(route, abr_entry);
+ /* copy brouter rechable nexthops into the route. */
+ ospf6_route_copy_nexthops(route, abr_entry);
/* (7) If the routes are identical, copy the next hops over to existing
route. ospf6's route table implementation will otherwise string both
old_route->path.cost,
route->path.cost);
}
+
+ /* Check new route's adv. router is same in one of
+ * the paths with differed cost, if so remove the
+ * old path as later new route will be added.
+ */
+ if (listcount(old_route->paths) > 1)
+ ospf6_abr_old_path_update(old_route, route,
+ table);
continue;
}
+ ospf6_route_merge_nexthops(old_route, route);
old_entry_updated = true;
- ospf6_route_merge_nexthops(old, route);
+
+ for (ALL_LIST_ELEMENTS_RO(old_route->paths, anode,
+ o_path)) {
+ if (o_path->area_id == route->path.area_id &&
+ (memcmp(&(o_path)->origin, &(route)->path.origin,
+ sizeof(struct ospf6_ls_origin)) == 0))
+ break;
+ }
+
+ /* New adv. router for a existing path add to paths list */
+ if (o_path == NULL) {
+ ecmp_path = ospf6_path_dup(&route->path);
+
+ /* Add a nh_list to new ecmp path */
+ ospf6_copy_nexthops(ecmp_path->nh_list, route->nh_list);
+
+ /* Add the new path to route's path list */
+ listnode_add_sort(old_route->paths, ecmp_path);
+
+ if (is_debug) {
+ prefix2str(&route->prefix, buf, sizeof(buf));
+ inet_ntop(AF_INET,
+ &ecmp_path->origin.adv_router,
+ adv_router, sizeof(adv_router));
+ zlog_debug("%s: route %s cost %u another path %s added with nh %u, effective paths %u nh %u",
+ __PRETTY_FUNCTION__, buf,
+ old_route->path.cost,
+ adv_router,
+ listcount(ecmp_path->nh_list),
+ old_route->paths ?
+ listcount(old_route->paths) : 0,
+ listcount(old_route->nh_list));
+ }
+ } else {
+ /* adv. router exists in the list, update the nhs */
+ list_delete_all_node(o_path->nh_list);
+ ospf6_copy_nexthops(o_path->nh_list, route->nh_list);
+ }
+
if (is_debug)
- zlog_debug("%s: Update route: %s old cost %u new cost %u nh %u",
- __PRETTY_FUNCTION__,
- buf, old->path.cost, route->path.cost,
+ zlog_debug("%s: Update route: %s %p old cost %u new cost %u nh %u",
+ __PRETTY_FUNCTION__, buf, (void *)old_route,
+ old_route->path.cost, route->path.cost,
listcount(route->nh_list));
- /* Update RIB/FIB */
+ /* For Inter-Prefix route: Update RIB/FIB,
+ * For Inter-Router trigger summary update
+ */
if (table->hook_add)
- (*table->hook_add)(old);
+ (*table->hook_add)(old_route);
/* Delete new route */
ospf6_route_delete(route);
}
if (old_entry_updated == false) {
- if (is_debug)
- zlog_debug("%s: Install route: %s cost %u nh %u",
+ if (is_debug) {
+ inet_ntop(AF_INET, &route->path.origin.adv_router,
+ adv_router, sizeof(adv_router));
+ zlog_debug("%s: Install route: %s cost %u nh %u adv_router %s ",
__PRETTY_FUNCTION__, buf, route->path.cost,
- listcount(route->nh_list));
+ listcount(route->nh_list), adv_router);
+ }
+
+ path = ospf6_path_dup(&route->path);
+ ospf6_copy_nexthops(path->nh_list, abr_entry->nh_list);
+ listnode_add_sort(route->paths, path);
+
/* ospf6_ia_add_nw_route (table, &prefix, route); */
ospf6_route_add(route, table);
}
extern int config_write_ospf6_debug_abr(struct vty *vty);
extern void install_element_ospf6_debug_abr(void);
extern int ospf6_abr_config_write(struct vty *vty);
-
+extern void ospf6_abr_old_route_remove(struct ospf6_lsa *lsa,
+ struct ospf6_route *old,
+ struct ospf6_route_table *table);
+extern void ospf6_abr_old_path_update(struct ospf6_route *old_route,
+ struct ospf6_route *route,
+ struct ospf6_route_table *table);
extern void ospf6_abr_init(void);
#endif /*OSPF6_ABR_H*/
old_route->path.origin.adv_router =
h_path->origin.adv_router;
}
- break;
} else {
if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL)) {
prefix2str(&old_route->prefix, buf,
}
ospf6_route_remove(old_route,
ospf6->route_table);
- break;
}
}
if (route_updated)
# end
ospf6d_ospf6d_snmp_la_SOURCES = ospf6d/ospf6_snmp.c
-ospf6d_ospf6d_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS)
+ospf6d_ospf6d_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
ospf6d_ospf6d_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
ospf6d_ospf6d_snmp_la_LIBADD = lib/libfrrsnmp.la
/* Do some internal house keeping that is needed here */
SET_FLAG(new->flags, OSPF_LSA_RECEIVED);
- ospf_lsa_is_self_originated(ospf, new); /* Let it set the flag */
+ (void)ospf_lsa_is_self_originated(ospf, new); /* Let it set the flag */
/* Install the new LSA in the link state database
(replacing the current database copy). This may cause the
zlog_warn("ospf_mpls_te_lsa_refresh: Invalid parameter?");
lsa->data->ls_age =
htons(OSPF_LSA_MAXAGE); /* Flush it anyway. */
+ ospf_opaque_lsa_flush_schedule(lsa);
+ return NULL;
}
/* Check if lp was not disable in the interval */
/* If the lsa's age reached to MaxAge, start flushing procedure. */
if (IS_LSA_MAXAGE(lsa)) {
- if (lp)
- UNSET_FLAG(lp->flags, LPFLG_LSA_ENGAGED);
+ UNSET_FLAG(lp->flags, LPFLG_LSA_ENGAGED);
ospf_opaque_lsa_flush_schedule(lsa);
return NULL;
}
ospfd_ospfd_SOURCES = ospfd/ospf_main.c
ospfd_ospfd_snmp_la_SOURCES = ospfd/ospf_snmp.c
-ospfd_ospfd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS)
+ospfd_ospfd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
ospfd_ospfd_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
ospfd_ospfd_snmp_la_LIBADD = lib/libfrrsnmp.la
DEBUGD(&pbr_dbg_zebra, "%s: Recived RULE_INSTALLED",
__PRETTY_FUNCTION__);
break;
+ case ZAPI_RULE_FAIL_REMOVE:
case ZAPI_RULE_REMOVED:
pbrms->installed &= ~installed;
DEBUGD(&pbr_dbg_zebra, "%s: Received RULE REMOVED",
/* insert into misc tables for easy access */
sa = hash_get(pim->msdp.sa_hash, sa, hash_alloc_intern);
- if (!sa) {
- zlog_err("%s: PIM hash get failure", __PRETTY_FUNCTION__);
- pim_msdp_sa_free(sa);
- return NULL;
- }
listnode_add_sort(pim->msdp.sa_list, sa);
if (PIM_DEBUG_MSDP_EVENTS) {
nexthop_tab[i].route_metric,
nexthop_tab[i].protocol_distance);
}
- /* update nextop data */
+ /* update nexthop data */
nexthop->interface = ifp;
nexthop->mrib_nexthop_addr =
nexthop_tab[i].nexthop_addr;
"%s: NHT Local Nexthop not found for RP %s ",
__PRETTY_FUNCTION__, buf);
}
- if (!pim_nexthop_lookup(
+ if (pim_nexthop_lookup(
pim, &rp_info->rp.source_nexthop,
- rp_info->rp.rpf_addr.u.prefix4, 1))
+ rp_info->rp.rpf_addr.u.prefix4, 1) < 0)
if (PIM_DEBUG_PIM_NHT_RP)
zlog_debug(
"Unable to lookup nexthop for rp specified");
pim_addr_dump("<nexthop?>", &nexthop->mrib_nexthop_addr,
nexthop_str, sizeof(nexthop_str));
zlog_debug(
- "%s: Using last lookup for %s at %lld, %" PRId64 " addr%s",
+ "%s: Using last lookup for %s at %lld, %" PRId64 " addr %s",
__PRETTY_FUNCTION__, addr_str,
nexthop->last_lookup_time,
pim->last_route_change_time, nexthop_str);
nexthop_tab[i].route_metric,
nexthop_tab[i].protocol_distance);
}
- /* update nextop data */
+ /* update nexthop data */
nexthop->interface = ifp;
nexthop->mrib_nexthop_addr = nexthop_tab[i].nexthop_addr;
nexthop->mrib_metric_preference =
# When using "vtysh" such a config file is also needed. It should be owned by
# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too.
#
-watchfrr_enable=no
+watchfrr_enable=yes
watchfrr_options=("-b_" "-r/usr/lib/frr/frr_restart_%s" "-s/usr/lib/frr/frr_start_%s" "-k/usr/lib/frr/frr_stop_%s")
#
zebra=no
%if "%{initsystem}" == "systemd"
--enable-systemd=yes \
%endif
- --enable-poll=yes
%if %{with_rpki}
- --enable-rpki
+ --enable-rpki \
%endif
+ --enable-poll=yes
make %{?_smp_mflags} MAKEINFO="makeinfo --no-split" SPHINXBUILD=%{sphinx}
# Config files won't get replaced by default, so we do this ugly hack to fix it
%__sed -i 's|/etc/init.d/|%{_sbindir}/|g' %{_sysconfdir}/daemons 2> /dev/null || true
+# With systemd, watchfrr is mandatory. Fix config to make sure it's enabled if
+# we install or upgrade to a frr built with systemd
+%if "%{initsystem}" == "systemd"
+ %__sed -i 's|watchfrr_enable=no|watchfrr_enable=yes|g' %{_sysconfdir}/daemons 2> /dev/null || true
+%endif
+
/sbin/install-info %{_infodir}/frr.info.gz %{_infodir}/dir
# Create dummy files if they don't exist so basic functions can be used.
%if %{with_fpm}
%attr(755,root,root) %{_libdir}/frr/modules/zebra_fpm.so
%endif
+%if %{with_rpki}
+%attr(755,root,root) %{_libdir}/frr/modules/bgpd_rpki.so
+%endif
%attr(755,root,root) %{_libdir}/frr/modules/zebra_irdp.so
%{_bindir}/*
%config(noreplace) /etc/frr/[!v]*.conf*
%endif
%changelog
-* Sun Mar 4 2018 Martin Winter <mwinter@opensourcerouting.org> - %{version}
+* Sun May 20 2018 Martin Winter <mwinter@opensourcerouting.org> - %{version}
+- Fixed RPKI RPM build
+
+* Sun Mar 4 2018 Martin Winter <mwinter@opensourcerouting.org>
- Add option to build with RPKI (default: disabled)
* Tue Feb 20 2018 Martin Winter <mwinter@opensourcerouting.org>
}
packet->command = RIP_RESPONSE;
- rip_send_packet((uint8_t *)packet, size, from, ifc);
+ (void)rip_send_packet((uint8_t *)packet, size, from, ifc);
}
rip_global_queries++;
}
# end
ripd_ripd_snmp_la_SOURCES = ripd/rip_snmp.c
-ripd_ripd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS)
+ripd_ripd_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
ripd_ripd_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
ripd_ripd_snmp_la_LIBADD = lib/libfrrsnmp.la
.arch-inventory
.arch-ids
__pycache__
+.pytest_cache
/bgpd/test_aspath
/bgpd/test_capability
/bgpd/test_ecommunity
/bgpd/test_mp_attr
/bgpd/test_mpath
/bgpd/test_packet
+/bgpd/test_peer_attr
/isisd/test_fuzz_isis_tlv
/isisd/test_fuzz_isis_tlv_tests.h
/isisd/test_isis_vertex_queue
bgpd/test_aspath \
bgpd/test_capability \
bgpd/test_packet \
+ bgpd/test_peer_attr \
bgpd/test_ecommunity \
bgpd/test_mp_attr \
bgpd/test_mpath
bgpd_test_aspath_SOURCES = bgpd/test_aspath.c
bgpd_test_capability_SOURCES = bgpd/test_capability.c
bgpd_test_packet_SOURCES = bgpd/test_packet.c
+bgpd_test_peer_attr_SOURCES = bgpd/test_peer_attr.c
bgpd_test_ecommunity_SOURCES = bgpd/test_ecommunity.c
bgpd_test_mp_attr_SOURCES = bgpd/test_mp_attr.c
bgpd_test_mpath_SOURCES = bgpd/test_mpath.c
bgpd_test_aspath_LDADD = $(BGP_TEST_LDADD)
bgpd_test_capability_LDADD = $(BGP_TEST_LDADD)
bgpd_test_packet_LDADD = $(BGP_TEST_LDADD)
+bgpd_test_peer_attr_LDADD = $(BGP_TEST_LDADD)
bgpd_test_ecommunity_LDADD = $(BGP_TEST_LDADD)
bgpd_test_mp_attr_LDADD = $(BGP_TEST_LDADD)
bgpd_test_mpath_LDADD = $(BGP_TEST_LDADD)
bgpd/test_ecommunity.py \
bgpd/test_mp_attr.py \
bgpd/test_mpath.py \
+ bgpd/test_peer_attr.py \
helpers/python/frrsix.py \
helpers/python/frrtest.py \
isisd/test_fuzz_isis_tlv.py \
#include "bgpd/bgp_attr.h"
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_mpath.h"
+#include "bgpd/bgp_evpn.h"
#define VT100_RESET "\x1b[0m"
#define VT100_RED "\x1b[31m"
bgp->group = list_new();
// bgp->group->cmp = (int (*)(void *, void *)) peer_group_cmp;
+ bgp_evpn_init(bgp);
for (afi = AFI_IP; afi < AFI_MAX; afi++)
for (safi = SAFI_UNICAST; safi < SAFI_MAX; safi++) {
bgp->route[afi][safi] = bgp_table_init(bgp, afi, safi);
--- /dev/null
+/*
+ * BGP Peer Attribute Unit Tests
+ * Copyright (C) 2018 Pascal Mathis
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <zebra.h>
+
+#include "memory.h"
+#include "plist.h"
+#include "bgpd/bgpd.h"
+#include "bgpd/bgp_attr.h"
+#include "bgpd/bgp_regex.h"
+#include "bgpd/bgp_clist.h"
+#include "bgpd/bgp_dump.h"
+#include "bgpd/bgp_filter.h"
+#include "bgpd/bgp_route.h"
+#include "bgpd/bgp_vty.h"
+#include "bgpd/bgp_zebra.h"
+
+#ifdef ENABLE_BGP_VNC
+#include "bgpd/rfapi/rfapi_backend.h"
+#endif
+
+/* Required variables to link in libbgp */
+struct zebra_privs_t bgpd_privs = {0};
+struct thread_master *master;
+
+/* Result states a single test instance can end up in. */
+enum test_state {
+	TEST_SUCCESS,
+	TEST_COMMAND_ERROR,
+	TEST_CONFIG_ERROR,
+	TEST_ASSERT_ERROR,
+	TEST_INTERNAL_ERROR,
+};
+
+/* One test run: result state, human-readable log, and handles into the
+ * BGP environment under test.
+ */
+struct test {
+	enum test_state state;
+	char *desc;
+	char *error;
+	struct list *log;
+
+	struct vty *vty;
+	struct bgp *bgp;
+	struct peer *peer;
+	struct peer_group *group;
+};
+
+/* Fixed parameters used to build every test BGP environment. */
+struct test_config {
+	int local_asn;
+	int peer_asn;
+	const char *peer_address;
+	const char *peer_group;
+};
+
+/* A single address-family/sub-address-family pair. */
+struct test_peer_family {
+	afi_t afi;
+	safi_t safi;
+};
+
+/* Definition of one peer attribute to exercise: the CLI command(s) that
+ * toggle it, its flag/filter encoding, option bits, and the address
+ * families it is tested under.
+ */
+struct test_peer_attr {
+	const char *cmd;
+	const char *peer_cmd;
+	const char *group_cmd;
+
+	enum { PEER_AT_AF_FLAG = 0,
+	       PEER_AT_AF_FILTER = 1,
+	       PEER_AT_GLOBAL_FLAG = 2 } type;
+	union {
+		uint32_t flag;
+		struct {
+			uint32_t flag;
+			size_t direct;
+		} filter;
+	} u;
+	struct {
+		bool invert;
+		bool use_ibgp;
+	} o;
+
+	afi_t afi;
+	safi_t safi;
+	struct test_peer_family families[AFI_MAX * SAFI_MAX];
+};
+
+/* UTF-8 symbols used in the human-readable test output. */
+#define OUT_SYMBOL_INFO "\u25ba"
+#define OUT_SYMBOL_OK "\u2714"
+#define OUT_SYMBOL_NOK "\u2716"
+
+/* Assert A == B; on mismatch record an assertion error (with source
+ * location) on the test instance.  No-op once the test has already
+ * failed, so only the first failure is reported.
+ */
+#define TEST_ASSERT_EQ(T, A, B) \
+	do { \
+		if ((T)->state != TEST_SUCCESS || ((A) == (B))) \
+			break; \
+		(T)->state = TEST_ASSERT_ERROR; \
+		(T)->error = str_printf( \
+			"assertion failed: %s[%d] == [%d]%s (%s:%d)", (#A), \
+			(A), (B), (#B), __FILE__, __LINE__); \
+	} while (0)
+
+/* Shared configuration for every test environment. */
+static struct test_config cfg = {
+	.local_asn = 100,
+	.peer_asn = 200,
+	.peer_address = "1.1.1.1",
+	.peer_group = "PG-TEST",
+};
+
+/* Address families an attribute is tested under when none are given. */
+static struct test_peer_family test_default_families[] = {
+	{.afi = AFI_IP, .safi = SAFI_UNICAST},
+	{.afi = AFI_IP, .safi = SAFI_MULTICAST},
+	{.afi = AFI_IP6, .safi = SAFI_UNICAST},
+	{.afi = AFI_IP6, .safi = SAFI_MULTICAST},
+};
+
+/* clang-format off */
+static struct test_peer_attr test_peer_attrs[] = {
+ {
+ .cmd = "addpath-tx-all-paths",
+ .u.flag = PEER_FLAG_ADDPATH_TX_ALL_PATHS,
+ },
+ {
+ .cmd = "addpath-tx-bestpath-per-AS",
+ .u.flag = PEER_FLAG_ADDPATH_TX_BESTPATH_PER_AS,
+ },
+ {
+ .cmd = "allowas-in",
+ .peer_cmd = "allowas-in 1",
+ .group_cmd = "allowas-in 2",
+ .u.flag = PEER_FLAG_ALLOWAS_IN,
+ },
+ {
+ .cmd = "allowas-in origin",
+ .u.flag = PEER_FLAG_ALLOWAS_IN_ORIGIN,
+ },
+ {
+ .cmd = "as-override",
+ .u.flag = PEER_FLAG_AS_OVERRIDE,
+ },
+ {
+ .cmd = "attribute-unchanged as-path",
+ .u.flag = PEER_FLAG_AS_PATH_UNCHANGED,
+ },
+ {
+ .cmd = "attribute-unchanged next-hop",
+ .u.flag = PEER_FLAG_NEXTHOP_UNCHANGED,
+ },
+ {
+ .cmd = "attribute-unchanged med",
+ .u.flag = PEER_FLAG_MED_UNCHANGED,
+ },
+ {
+ .cmd = "attribute-unchanged as-path next-hop",
+ .u.flag = PEER_FLAG_AS_PATH_UNCHANGED
+ | PEER_FLAG_NEXTHOP_UNCHANGED,
+ },
+ {
+ .cmd = "attribute-unchanged as-path med",
+ .u.flag = PEER_FLAG_AS_PATH_UNCHANGED
+ | PEER_FLAG_MED_UNCHANGED,
+ },
+ {
+ .cmd = "attribute-unchanged as-path next-hop med",
+ .u.flag = PEER_FLAG_AS_PATH_UNCHANGED
+ | PEER_FLAG_NEXTHOP_UNCHANGED
+ | PEER_FLAG_MED_UNCHANGED,
+ },
+ {
+ .cmd = "capability orf prefix-list send",
+ .u.flag = PEER_FLAG_ORF_PREFIX_SM,
+ },
+ {
+ .cmd = "capability orf prefix-list receive",
+ .u.flag = PEER_FLAG_ORF_PREFIX_RM,
+ },
+ {
+ .cmd = "capability orf prefix-list both",
+ .u.flag = PEER_FLAG_ORF_PREFIX_SM | PEER_FLAG_ORF_PREFIX_RM,
+ },
+ {
+ .cmd = "default-originate",
+ .u.flag = PEER_FLAG_DEFAULT_ORIGINATE,
+ },
+ {
+ .cmd = "default-originate route-map",
+ .peer_cmd = "default-originate route-map RM-PEER",
+ .group_cmd = "default-originate route-map RM-GROUP",
+ .u.flag = PEER_FLAG_DEFAULT_ORIGINATE,
+ },
+ {
+ .cmd = "filter-list",
+ .peer_cmd = "filter-list FL-PEER in",
+ .group_cmd = "filter-list FL-GROUP in",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_FILTER_LIST,
+ .u.filter.direct = FILTER_IN,
+ },
+ {
+ .cmd = "filter-list",
+ .peer_cmd = "filter-list FL-PEER out",
+ .group_cmd = "filter-list FL-GROUP out",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_FILTER_LIST,
+ .u.filter.direct = FILTER_OUT,
+ },
+ {
+ .cmd = "maximum-prefix",
+ .peer_cmd = "maximum-prefix 10",
+ .group_cmd = "maximum-prefix 20",
+ .u.flag = PEER_FLAG_MAX_PREFIX,
+ },
+ {
+ .cmd = "maximum-prefix",
+ .peer_cmd = "maximum-prefix 10 restart 100",
+ .group_cmd = "maximum-prefix 20 restart 200",
+ .u.flag = PEER_FLAG_MAX_PREFIX,
+ },
+ {
+ .cmd = "maximum-prefix",
+ .peer_cmd = "maximum-prefix 10 1 restart 100",
+ .group_cmd = "maximum-prefix 20 2 restart 200",
+ .u.flag = PEER_FLAG_MAX_PREFIX,
+ },
+ {
+ .cmd = "maximum-prefix",
+ .peer_cmd = "maximum-prefix 10 warning-only",
+ .group_cmd = "maximum-prefix 20 warning-only",
+ .u.flag = PEER_FLAG_MAX_PREFIX | PEER_FLAG_MAX_PREFIX_WARNING,
+ },
+ {
+ .cmd = "maximum-prefix",
+ .peer_cmd = "maximum-prefix 10 1 warning-only",
+ .group_cmd = "maximum-prefix 20 2 warning-only",
+ .u.flag = PEER_FLAG_MAX_PREFIX | PEER_FLAG_MAX_PREFIX_WARNING,
+ },
+ {
+ .cmd = "next-hop-self",
+ .u.flag = PEER_FLAG_NEXTHOP_SELF,
+ },
+ {
+ .cmd = "next-hop-self force",
+ .u.flag = PEER_FLAG_FORCE_NEXTHOP_SELF,
+ },
+ {
+ .cmd = "prefix-list",
+ .peer_cmd = "prefix-list PL-PEER in",
+ .group_cmd = "prefix-list PL-GROUP in",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_PREFIX_LIST,
+ .u.filter.direct = FILTER_IN,
+ },
+ {
+ .cmd = "prefix-list",
+ .peer_cmd = "prefix-list PL-PEER out",
+ .group_cmd = "prefix-list PL-GROUP out",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_PREFIX_LIST,
+ .u.filter.direct = FILTER_OUT,
+ },
+ {
+ .cmd = "remove-private-AS",
+ .u.flag = PEER_FLAG_REMOVE_PRIVATE_AS,
+ },
+ {
+ .cmd = "remove-private-AS all",
+ .u.flag = PEER_FLAG_REMOVE_PRIVATE_AS
+ | PEER_FLAG_REMOVE_PRIVATE_AS_ALL,
+ },
+ {
+ .cmd = "remove-private-AS replace-AS",
+ .u.flag = PEER_FLAG_REMOVE_PRIVATE_AS
+ | PEER_FLAG_REMOVE_PRIVATE_AS_REPLACE,
+ },
+ {
+ .cmd = "remove-private-AS all replace-AS",
+ .u.flag = PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE,
+ },
+ {
+ .cmd = "route-map",
+ .peer_cmd = "route-map RM-PEER in",
+ .group_cmd = "route-map RM-GROUP in",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_ROUTE_MAP,
+ .u.filter.direct = FILTER_IN,
+ },
+ {
+ .cmd = "route-map",
+ .peer_cmd = "route-map RM-PEER out",
+ .group_cmd = "route-map RM-GROUP out",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_ROUTE_MAP,
+ .u.filter.direct = FILTER_OUT,
+ },
+ {
+ .cmd = "route-reflector-client",
+ .u.flag = PEER_FLAG_REFLECTOR_CLIENT,
+ .o.use_ibgp = true,
+ },
+ {
+ .cmd = "route-server-client",
+ .u.flag = PEER_FLAG_RSERVER_CLIENT,
+ },
+ {
+ .cmd = "send-community",
+ .u.flag = PEER_FLAG_SEND_COMMUNITY,
+ .o.invert = true,
+ },
+ {
+ .cmd = "send-community extended",
+ .u.flag = PEER_FLAG_SEND_EXT_COMMUNITY,
+ .o.invert = true,
+ },
+ {
+ .cmd = "send-community large",
+ .u.flag = PEER_FLAG_SEND_LARGE_COMMUNITY,
+ .o.invert = true,
+ },
+ {
+ .cmd = "soft-reconfiguration inbound",
+ .u.flag = PEER_FLAG_SOFT_RECONFIG,
+ },
+ {
+ .cmd = "unsuppress-map",
+ .peer_cmd = "unsuppress-map UM-PEER",
+ .group_cmd = "unsuppress-map UM-GROUP",
+ .type = PEER_AT_AF_FILTER,
+ .u.filter.flag = PEER_FT_UNSUPPRESS_MAP,
+ .u.filter.direct = 0,
+ },
+ {
+ .cmd = "weight",
+ .peer_cmd = "weight 100",
+ .group_cmd = "weight 200",
+ .u.flag = PEER_FLAG_WEIGHT,
+ },
+ {NULL}
+};
+/* clang-format on */
+
+/*
+ * Format FMT with the argument list AP into a newly allocated (MTYPE_TMP)
+ * buffer.  Returns the buffer (caller frees) or NULL on a vsnprintf
+ * encoding error; every caller already checks for NULL.
+ */
+static char *str_vprintf(const char *fmt, va_list ap)
+{
+	int ret;
+	int buf_size = 0;
+	char *buf = NULL;
+	va_list apc;
+
+	while (1) {
+		va_copy(apc, ap);
+		ret = vsnprintf(buf, buf_size, fmt, apc);
+		va_end(apc);
+
+		/* Output fit into the current buffer: done. */
+		if (ret >= 0 && ret < buf_size)
+			break;
+
+		if (ret < 0) {
+			/* Encoding error.  The previous code doubled
+			 * buf_size here, which loops forever while
+			 * buf_size is still 0 (first iteration) and grows
+			 * without bound on a persistent error.  Report
+			 * failure to the caller instead.
+			 */
+			XFREE(MTYPE_TMP, buf);
+			return NULL;
+		}
+
+		/* Truncated: vsnprintf told us the required length. */
+		buf_size = ret + 1;
+		buf = XREALLOC(MTYPE_TMP, buf, buf_size);
+	}
+
+	return buf;
+}
+
+/* Allocate and return a formatted string; thin variadic wrapper around
+ * str_vprintf().  Caller frees the result (MTYPE_TMP).
+ */
+static char *str_printf(const char *fmt, ...)
+{
+	va_list args;
+	char *result;
+
+	va_start(args, fmt);
+	result = str_vprintf(fmt, args);
+	va_end(args);
+
+	return result;
+}
+
+/* Map an AFI to the token used in "address-family" CLI commands. */
+static const char *str_from_afi(afi_t afi)
+{
+	if (afi == AFI_IP)
+		return "ipv4";
+	if (afi == AFI_IP6)
+		return "ipv6";
+
+	return "<unknown AFI>";
+}
+
+/* Map a SAFI to the token used in "address-family" CLI commands. */
+static const char *str_from_safi(safi_t safi)
+{
+	if (safi == SAFI_UNICAST)
+		return "unicast";
+	if (safi == SAFI_MULTICAST)
+		return "multicast";
+
+	return "<unknown SAFI>";
+}
+
+/*
+ * Format and run a single CLI command against the test's vty.  Any
+ * failure (formatting, tokenizing, or command execution) marks the test
+ * instance as failed with a descriptive error; once failed, subsequent
+ * calls are no-ops so only the first error is reported.
+ */
+static void test_execute(struct test *test, const char *fmt, ...)
+{
+	int ret;
+	char *cmd;
+	va_list ap;
+	vector vline;
+
+	/* Skip execution if test instance has previously failed. */
+	if (test->state != TEST_SUCCESS)
+		return;
+
+	/* Format command string with variadic arguments. */
+	va_start(ap, fmt);
+	cmd = str_vprintf(fmt, ap);
+	va_end(ap);
+	if (!cmd) {
+		test->state = TEST_INTERNAL_ERROR;
+		test->error =
+			str_printf("could not format command string [%s]", fmt);
+		return;
+	}
+
+	/* Tokenize formatted command string. */
+	vline = cmd_make_strvec(cmd);
+	if (vline == NULL) {
+		test->state = TEST_INTERNAL_ERROR;
+		test->error = str_printf(
+			"tokenizing command string [%s] returned empty result",
+			cmd);
+		XFREE(MTYPE_TMP, cmd);
+
+		return;
+	}
+
+	/* Execute command (non-strict). */
+	ret = cmd_execute_command(vline, test->vty, NULL, 0);
+	if (ret != CMD_SUCCESS) {
+		test->state = TEST_COMMAND_ERROR;
+		test->error = str_printf(
+			"execution of command [%s] has failed with code [%d]",
+			cmd, ret);
+	}
+
+	/* Free memory. */
+	cmd_free_strvec(vline);
+	XFREE(MTYPE_TMP, cmd);
+}
+
+/*
+ * Render the current BGP configuration into the vty output buffer and
+ * check whether the formatted matcher string is present (invert == false)
+ * or absent (invert == true).  A mismatch puts the test instance into
+ * TEST_CONFIG_ERROR.  No-op once the test has already failed.
+ */
+static void test_config(struct test *test, const char *fmt, bool invert,
+			va_list ap)
+{
+	char *matcher;
+	char *config;
+	bool matched;
+	va_list apc;
+
+	/* Skip execution if test instance has previously failed. */
+	if (test->state != TEST_SUCCESS)
+		return;
+
+	/* Format matcher string with variadic arguments. */
+	va_copy(apc, ap);
+	matcher = str_vprintf(fmt, apc);
+	va_end(apc);
+	if (!matcher) {
+		test->state = TEST_INTERNAL_ERROR;
+		test->error =
+			str_printf("could not format matcher string [%s]", fmt);
+		return;
+	}
+
+	/* Fetch BGP configuration into buffer. */
+	bgp_config_write(test->vty);
+	config = buffer_getstr(test->vty->obuf);
+	buffer_reset(test->vty->obuf);
+
+	/* Match config against matcher (substring search). */
+	matched = !!strstr(config, matcher);
+	if (!matched && !invert) {
+		test->state = TEST_CONFIG_ERROR;
+		test->error = str_printf("expected config [%s] to be present",
+					 matcher);
+	} else if (matched && invert) {
+		test->state = TEST_CONFIG_ERROR;
+		test->error = str_printf("expected config [%s] to be absent",
+					 matcher);
+	}
+
+	/* Free memory and return. */
+	XFREE(MTYPE_TMP, matcher);
+	XFREE(MTYPE_TMP, config);
+}
+
+/* Assert that the formatted matcher appears in the running BGP config. */
+static void test_config_present(struct test *test, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	test_config(test, fmt, false, args);
+	va_end(args);
+}
+
+/* Assert that the formatted matcher does NOT appear in the BGP config. */
+static void test_config_absent(struct test *test, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	test_config(test, fmt, true, args);
+	va_end(args);
+}
+
+/*
+ * Allocate a test instance and bring up a fresh BGP environment: a
+ * default instance with AS cfg.local_asn, a peer-group, and a single
+ * neighbor (iBGP when use_ibgp is set, eBGP otherwise).  On any setup
+ * failure the instance is returned in a failed state with test->error
+ * describing the problem; the caller still runs test_finish() on it.
+ */
+static struct test *test_new(const char *desc, bool use_ibgp)
+{
+	struct test *test;
+	union sockunion su;
+
+	test = XCALLOC(MTYPE_TMP, sizeof(struct test));
+	test->state = TEST_SUCCESS;
+	test->desc = XSTRDUP(MTYPE_TMP, desc);
+	test->log = list_new();
+
+	test->vty = vty_new();
+	test->vty->type = VTY_TERM;
+	test->vty->node = CONFIG_NODE;
+
+	/* Attempt gracefully to purge previous BGP configuration. */
+	test_execute(test, "no router bgp");
+	test->state = TEST_SUCCESS;
+
+	/* Initialize BGP test environment. */
+	test_execute(test, "router bgp %d", cfg.local_asn);
+	test_execute(test, "no bgp default ipv4-unicast");
+	test_execute(test, "neighbor %s peer-group", cfg.peer_group);
+	test_execute(test, "neighbor %s remote-as %d", cfg.peer_address,
+		     use_ibgp ? cfg.local_asn : cfg.peer_asn);
+	if (test->state != TEST_SUCCESS)
+		return test;
+
+	/* Fetch default BGP instance. */
+	test->bgp = bgp_get_default();
+	if (!test->bgp) {
+		test->state = TEST_INTERNAL_ERROR;
+		test->error =
+			str_printf("could not retrieve default bgp instance");
+		return test;
+	}
+
+	/* Fetch peer instance. */
+	str2sockunion(cfg.peer_address, &su);
+	test->peer = peer_lookup(test->bgp, &su);
+	if (!test->peer) {
+		test->state = TEST_INTERNAL_ERROR;
+		test->error = str_printf(
+			"could not retrieve instance of bgp peer [%s]",
+			cfg.peer_address);
+		return test;
+	}
+
+	/* Fetch peer-group instance. */
+	test->group = peer_group_lookup(test->bgp, cfg.peer_group);
+	if (!test->group) {
+		test->state = TEST_INTERNAL_ERROR;
+		test->error = str_printf(
+			"could not retrieve instance of bgp peer-group [%s]",
+			cfg.peer_group);
+		return test;
+	}
+
+	return test;
+}
+
+/* Queue a formatted message on the test's log list for later printing.
+ * Does nothing once the test instance has failed.
+ */
+static void test_log(struct test *test, const char *fmt, ...)
+{
+	va_list args;
+
+	if (test->state != TEST_SUCCESS)
+		return;
+
+	va_start(args, fmt);
+	listnode_add(test->log, str_vprintf(fmt, args));
+	va_end(args);
+}
+
+/*
+ * Print the test's result header, queued log messages, and (on failure)
+ * its error message, followed by a machine-readable "OK"/"failed" line.
+ * Frees the test instance and everything it owns.
+ */
+static void test_finish(struct test *test)
+{
+	char *msg;
+	struct listnode *node, *nnode;
+
+	/* Print test output header. */
+	printf("%s [test] %s\n",
+	       (test->state == TEST_SUCCESS) ? OUT_SYMBOL_OK : OUT_SYMBOL_NOK,
+	       test->desc);
+
+	/* Print test log messages. */
+	for (ALL_LIST_ELEMENTS(test->log, node, nnode, msg)) {
+		printf("%s %s\n", OUT_SYMBOL_INFO, msg);
+		XFREE(MTYPE_TMP, msg);
+	}
+
+	/* Print test error message if available. */
+	if (test->state != TEST_SUCCESS && test->error)
+		printf("%s error: %s\n", OUT_SYMBOL_INFO, test->error);
+
+	/* Print machine-readable result of test. */
+	printf("%s\n", test->state == TEST_SUCCESS ? "OK" : "failed");
+
+	/* Cleanup allocated memory. */
+	if (test->vty) {
+		vty_close(test->vty);
+		test->vty = NULL;
+	}
+	if (test->log)
+		list_delete_and_null(&test->log);
+	if (test->desc)
+		XFREE(MTYPE_TMP, test->desc);
+	if (test->error)
+		XFREE(MTYPE_TMP, test->error);
+	XFREE(MTYPE_TMP, test);
+}
+
+/*
+ * Verify the per-address-family flag state of PEER for ATTR: the flag
+ * value itself, its override marker, and its invert marker.  For flags
+ * declared inverted (o.invert), the expected value is flipped first.
+ */
+static void test_af_flags(struct test *test, struct peer *peer,
+			  struct test_peer_attr *attr, bool exp_val,
+			  bool exp_ovrd)
+{
+	uint32_t flag = attr->u.flag;
+	afi_t afi = attr->afi;
+	safi_t safi = attr->safi;
+	bool expect_invert = attr->o.invert;
+	bool val, ovrd, inv;
+
+	/* Inverted flags flip the meaning of "set". */
+	if (expect_invert)
+		exp_val = !exp_val;
+
+	/* Read the current value/override/invert state from the peer. */
+	val = !!CHECK_FLAG(peer->af_flags[afi][safi], flag);
+	ovrd = !!CHECK_FLAG(peer->af_flags_override[afi][safi], flag);
+	inv = !!CHECK_FLAG(peer->af_flags_invert[afi][safi], flag);
+
+	/* Compare against the expectations. */
+	TEST_ASSERT_EQ(test, val, exp_val);
+	TEST_ASSERT_EQ(test, ovrd, exp_ovrd);
+	TEST_ASSERT_EQ(test, inv, expect_invert);
+}
+
+/*
+ * Verify the per-address-family filter state of PEER for ATTR: the
+ * override marker for the filter slot, and whether the corresponding
+ * map/list name is set (exp_state) or unset.
+ */
+static void test_af_filter(struct test *test, struct peer *peer,
+			   struct test_peer_attr *attr, bool exp_state,
+			   bool exp_ovrd)
+{
+	bool cur_ovrd;
+	struct bgp_filter *filter;
+
+	/* Fetch and assert current state of override flag. */
+	cur_ovrd = !!CHECK_FLAG(peer->filter_override[attr->afi][attr->safi]
+					       [attr->u.filter.direct],
+				attr->u.filter.flag);
+
+	TEST_ASSERT_EQ(test, cur_ovrd, exp_ovrd);
+
+	/* Assert that map/list matches expected state (set/unset). */
+	filter = &peer->filter[attr->afi][attr->safi];
+
+	switch (attr->u.filter.flag) {
+	case PEER_FT_DISTRIBUTE_LIST:
+		TEST_ASSERT_EQ(test,
+			       !!(filter->dlist[attr->u.filter.direct].name),
+			       exp_state);
+		break;
+	case PEER_FT_FILTER_LIST:
+		TEST_ASSERT_EQ(test,
+			       !!(filter->aslist[attr->u.filter.direct].name),
+			       exp_state);
+		break;
+	case PEER_FT_PREFIX_LIST:
+		TEST_ASSERT_EQ(test,
+			       !!(filter->plist[attr->u.filter.direct].name),
+			       exp_state);
+		break;
+	case PEER_FT_ROUTE_MAP:
+		TEST_ASSERT_EQ(test,
+			       !!(filter->map[attr->u.filter.direct].name),
+			       exp_state);
+		break;
+	case PEER_FT_UNSUPPRESS_MAP:
+		/* unsuppress-map has no direction; single slot. */
+		TEST_ASSERT_EQ(test, !!(filter->usmap.name), exp_state);
+		break;
+	}
+}
+
+/*
+ * Run the full peer/peer-group interaction matrix for one attribute:
+ * setting and unsetting it on the peer and on the group, in both orders,
+ * with and without group membership, asserting after each step that the
+ * flag/filter state and the rendered configuration match expectations.
+ * The test-case order is significant — each case builds on the state
+ * left behind by the previous one.
+ */
+static void test_peer_attr(struct test *test, struct test_peer_attr *pa)
+{
+	int tc = 1;
+	const char *type;
+	/* "enable"/"disable" command prefixes; swapped for inverted flags. */
+	const char *ec = pa->o.invert ? "no " : "";
+	const char *dc = pa->o.invert ? "" : "no ";
+	const char *peer_cmd = pa->peer_cmd ?: pa->cmd;
+	const char *group_cmd = pa->group_cmd ?: pa->cmd;
+	struct peer *p = test->peer;
+	struct peer_group *g = test->group;
+
+	if (pa->type == PEER_AT_AF_FLAG)
+		type = "af-flag";
+	else /* if (pa->type == PEER_AT_AF_FILTER) */
+		type = "af-filter";
+
+	/* Test Case: Switch active address-family. */
+	if (pa->type == PEER_AT_AF_FLAG || pa->type == PEER_AT_AF_FILTER) {
+		test_log(test, "prepare: switch address-family to [%s]",
+			 afi_safi_print(pa->afi, pa->safi));
+		test_execute(test, "address-family %s %s",
+			     str_from_afi(pa->afi), str_from_safi(pa->safi));
+	}
+
+	/* Test Case: Set flag on BGP peer. */
+	test_log(test, "case %02d: set %s [%s] on [%s]", tc++, type, peer_cmd,
+		 p->host);
+	test_execute(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_absent(test, "neighbor %s %s", g->name, pa->cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, false, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, false, false);
+	}
+
+	/* Test Case: Add BGP peer to peer-group. */
+	test_log(test, "case %02d: add peer [%s] to group [%s]", tc++, p->host,
+		 g->name);
+	test_execute(test, "neighbor %s peer-group %s", p->host, g->name);
+	test_config_present(test, "neighbor %s peer-group %s", p->host,
+			    g->name);
+	/* Peer-level setting must survive joining the group. */
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_absent(test, "neighbor %s %s", g->name, pa->cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, false, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, false, false);
+	}
+
+	/* Test Case: Re-add BGP peer to peer-group. */
+	test_log(test, "case %02d: re-add peer [%s] to group [%s]", tc++,
+		 p->host, g->name);
+	test_execute(test, "neighbor %s peer-group %s", p->host, g->name);
+	test_config_present(test, "neighbor %s peer-group %s", p->host,
+			    g->name);
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_absent(test, "neighbor %s %s", g->name, pa->cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, false, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, false, false);
+	}
+
+	/* Test Case: Set flag on BGP peer-group. */
+	test_log(test, "case %02d: set %s [%s] on [%s]", tc++, type, group_cmd,
+		 g->name);
+	test_execute(test, "%sneighbor %s %s", ec, g->name, group_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, g->name, group_cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, true, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, true, false);
+	}
+
+	/* Test Case: Unset flag on BGP peer-group. */
+	test_log(test, "case %02d: unset %s [%s] on [%s]", tc++, type,
+		 group_cmd, g->name);
+	test_execute(test, "%sneighbor %s %s", dc, g->name, group_cmd);
+	/* The peer override must keep the peer-level value alive. */
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_absent(test, "neighbor %s %s", g->name, pa->cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, false, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, false, false);
+	}
+
+	/* Test Case: Set flag on BGP peer-group. */
+	test_log(test, "case %02d: set %s [%s] on [%s]", tc++, type, group_cmd,
+		 g->name);
+	test_execute(test, "%sneighbor %s %s", ec, g->name, group_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, g->name, group_cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, true, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, true, false);
+	}
+
+	/* Test Case: Re-set flag on BGP peer. */
+	test_log(test, "case %02d: re-set %s [%s] on [%s]", tc++, type,
+		 peer_cmd, p->host);
+	test_execute(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, g->name, group_cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, true, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, true, false);
+	}
+
+	/* Test Case: Unset flag on BGP peer. */
+	test_log(test, "case %02d: unset %s [%s] on [%s]", tc++, type, peer_cmd,
+		 p->host);
+	test_execute(test, "%sneighbor %s %s", dc, p->host, peer_cmd);
+	/* Unsetting on the peer falls back to the group's value. */
+	test_config_absent(test, "neighbor %s %s", p->host, pa->cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, g->name, group_cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, false);
+		test_af_flags(test, g->conf, pa, true, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, false);
+		test_af_filter(test, g->conf, pa, true, false);
+	}
+
+	/* Test Case: Unset flag on BGP peer-group. */
+	test_log(test, "case %02d: unset %s [%s] on [%s]", tc++, type,
+		 group_cmd, g->name);
+	test_execute(test, "%sneighbor %s %s", dc, g->name, group_cmd);
+	test_config_absent(test, "neighbor %s %s", p->host, pa->cmd);
+	test_config_absent(test, "neighbor %s %s", g->name, pa->cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, false, false);
+		test_af_flags(test, g->conf, pa, false, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, false, false);
+		test_af_filter(test, g->conf, pa, false, false);
+	}
+
+	/* Test Case: Set flag on BGP peer. */
+	test_log(test, "case %02d: set %s [%s] on [%s]", tc++, type, peer_cmd,
+		 p->host);
+	test_execute(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_present(test, "%sneighbor %s %s", ec, p->host, peer_cmd);
+	test_config_absent(test, "neighbor %s %s", g->name, pa->cmd);
+	if (pa->type == PEER_AT_AF_FLAG) {
+		test_af_flags(test, p, pa, true, true);
+		test_af_flags(test, g->conf, pa, false, false);
+	} else if (pa->type == PEER_AT_AF_FILTER) {
+		test_af_filter(test, p, pa, true, true);
+		test_af_filter(test, g->conf, pa, false, false);
+	}
+}
+
+/*
+ * Bring up the minimum BGP daemon infrastructure needed to run CLI
+ * commands in-process: command/log/privilege subsystems, the thread
+ * master, and BGP proper (listening disabled).  Order matters: the
+ * thread master must exist before bgp_master_init()/bgp_init().
+ */
+static void bgp_startup(void)
+{
+	cmd_init(1);
+	openzlog("testbgpd", "NONE", 0, LOG_CONS | LOG_NDELAY | LOG_PID,
+		 LOG_DAEMON);
+	zprivs_preinit(&bgpd_privs);
+	zprivs_init(&bgpd_privs);
+
+	master = thread_master_create(NULL);
+	bgp_master_init(master);
+	/* Do not open listen sockets inside the unit test. */
+	bgp_option_set(BGP_OPT_NO_LISTEN);
+	vrf_init(NULL, NULL, NULL, NULL);
+	bgp_init();
+	bgp_pthreads_run();
+}
+
+/*
+ * Tear down everything bgp_startup() and the tests created, in the
+ * reverse dependency order of initialization, so memory-leak checks on
+ * test exit come out clean.  The teardown sequence is order-sensitive;
+ * do not reorder the calls.
+ */
+static void bgp_shutdown(void)
+{
+	struct bgp *bgp;
+	struct listnode *node, *nnode;
+
+	bgp_terminate();
+	bgp_close();
+	for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp))
+		bgp_delete(bgp);
+	bgp_dump_finish();
+	bgp_route_finish();
+	bgp_route_map_terminate();
+	bgp_attr_finish();
+	bgp_pthreads_finish();
+	/* Detach hooks before resetting the filter/list subsystems. */
+	access_list_add_hook(NULL);
+	access_list_delete_hook(NULL);
+	access_list_reset();
+	as_list_add_hook(NULL);
+	as_list_delete_hook(NULL);
+	bgp_filter_reset();
+	prefix_list_add_hook(NULL);
+	prefix_list_delete_hook(NULL);
+	prefix_list_reset();
+	community_list_terminate(bgp_clist);
+	vrf_terminate();
+#ifdef ENABLE_BGP_VNC
+	vnc_zebra_destroy();
+#endif
+	bgp_zebra_destroy();
+
+	bf_free(bm->rd_idspace);
+	list_delete_and_null(&bm->bgp);
+	memset(bm, 0, sizeof(*bm));
+
+	vty_terminate();
+	cmd_terminate();
+	zprivs_terminate(&bgpd_privs);
+	thread_master_free(master);
+	master = NULL;
+	closezlog();
+}
+
+/*
+ * Test driver: expand the test_peer_attr[] declarations into one entry
+ * per address family (global flags are copied as-is), then execute the
+ * peer-attribute test suite once per expanded entry.
+ */
+int main(void)
+{
+	int i, ii;
+	struct list *pa_list;
+	struct test_peer_attr *pa, *pac;
+	struct listnode *node, *nnode;
+
+	bgp_startup();
+
+	pa_list = list_new();
+	i = 0;
+	while (test_peer_attrs[i].cmd) {
+		pa = &test_peer_attrs[i++];
+
+		/* Just copy the peer attribute structure for global flags. */
+		if (pa->type == PEER_AT_GLOBAL_FLAG) {
+			pac = XMALLOC(MTYPE_TMP, sizeof(struct test_peer_attr));
+			memcpy(pac, pa, sizeof(struct test_peer_attr));
+			listnode_add(pa_list, pac);
+			continue;
+		}
+
+		/* Fallback to default families if not specified. */
+		if (!pa->families[0].afi && !pa->families[0].safi)
+			memcpy(&pa->families, test_default_families,
+			       sizeof(test_default_families));
+
+		/* Add peer attribute definition for each address family. */
+		ii = 0;
+		while (pa->families[ii].afi && pa->families[ii].safi) {
+			pac = XMALLOC(MTYPE_TMP, sizeof(struct test_peer_attr));
+			memcpy(pac, pa, sizeof(struct test_peer_attr));
+
+			pac->afi = pa->families[ii].afi;
+			pac->safi = pa->families[ii].safi;
+			listnode_add(pa_list, pac);
+
+			ii++;
+		}
+	}
+
+	for (ALL_LIST_ELEMENTS(pa_list, node, nnode, pa)) {
+		char *desc;
+		struct test *test;
+
+		/* Build test description string. */
+		if (pa->afi && pa->safi)
+			desc = str_printf("peer\\%s-%s\\%s",
+					  str_from_afi(pa->afi),
+					  str_from_safi(pa->safi), pa->cmd);
+		else
+			desc = str_printf("peer\\%s", pa->cmd);
+
+		/* Initialize new test instance. */
+		test = test_new(desc, pa->o.use_ibgp);
+		XFREE(MTYPE_TMP, desc);
+
+		/* Execute tests and finish test instance. */
+		test_peer_attr(test, pa);
+		test_finish(test);
+
+		/* Print empty line as spacer. */
+		printf("\n");
+
+		/* Free memory used for peer-attr declaration. */
+		XFREE(MTYPE_TMP, pa);
+	}
+
+	list_delete_and_null(&pa_list);
+	bgp_shutdown();
+
+	return 0;
+}
--- /dev/null
+import frrtest
+
+class TestFlag(frrtest.TestMultiOut):
+    # Binary under test; TestMultiOut presumably matches each okfail()
+    # expectation registered below against one "[test]" output line of
+    # this program -- see frrtest for the exact matching semantics.
+    program = './test_peer_attr'
+
+# List of tests can be generated by executing:
+# $> ./test_peer_attr 2>&1 | sed -n 's/\\/\\\\/g; s/\S\+ \[test\] \(.\+\)/TestFlag.okfail(\x27\1\x27)/pg'
+#
+TestFlag.okfail('peer\\ipv4-unicast\\addpath-tx-all-paths')
+TestFlag.okfail('peer\\ipv4-multicast\\addpath-tx-all-paths')
+TestFlag.okfail('peer\\ipv6-unicast\\addpath-tx-all-paths')
+TestFlag.okfail('peer\\ipv6-multicast\\addpath-tx-all-paths')
+TestFlag.okfail('peer\\ipv4-unicast\\addpath-tx-bestpath-per-AS')
+TestFlag.okfail('peer\\ipv4-multicast\\addpath-tx-bestpath-per-AS')
+TestFlag.okfail('peer\\ipv6-unicast\\addpath-tx-bestpath-per-AS')
+TestFlag.okfail('peer\\ipv6-multicast\\addpath-tx-bestpath-per-AS')
+TestFlag.okfail('peer\\ipv4-unicast\\allowas-in')
+TestFlag.okfail('peer\\ipv4-multicast\\allowas-in')
+TestFlag.okfail('peer\\ipv6-unicast\\allowas-in')
+TestFlag.okfail('peer\\ipv6-multicast\\allowas-in')
+TestFlag.okfail('peer\\ipv4-unicast\\allowas-in origin')
+TestFlag.okfail('peer\\ipv4-multicast\\allowas-in origin')
+TestFlag.okfail('peer\\ipv6-unicast\\allowas-in origin')
+TestFlag.okfail('peer\\ipv6-multicast\\allowas-in origin')
+TestFlag.okfail('peer\\ipv4-unicast\\as-override')
+TestFlag.okfail('peer\\ipv4-multicast\\as-override')
+TestFlag.okfail('peer\\ipv6-unicast\\as-override')
+TestFlag.okfail('peer\\ipv6-multicast\\as-override')
+TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path')
+TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path')
+TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path')
+TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path')
+TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged next-hop')
+TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged next-hop')
+TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged next-hop')
+TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged next-hop')
+TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged med')
+TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged med')
+TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged med')
+TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged med')
+TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path next-hop')
+TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path next-hop')
+TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path next-hop')
+TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path next-hop')
+TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path med')
+TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path med')
+TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path med')
+TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path med')
+TestFlag.okfail('peer\\ipv4-unicast\\attribute-unchanged as-path next-hop med')
+TestFlag.okfail('peer\\ipv4-multicast\\attribute-unchanged as-path next-hop med')
+TestFlag.okfail('peer\\ipv6-unicast\\attribute-unchanged as-path next-hop med')
+TestFlag.okfail('peer\\ipv6-multicast\\attribute-unchanged as-path next-hop med')
+TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list send')
+TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list send')
+TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list send')
+TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list send')
+TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list receive')
+TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list receive')
+TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list receive')
+TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list receive')
+TestFlag.okfail('peer\\ipv4-unicast\\capability orf prefix-list both')
+TestFlag.okfail('peer\\ipv4-multicast\\capability orf prefix-list both')
+TestFlag.okfail('peer\\ipv6-unicast\\capability orf prefix-list both')
+TestFlag.okfail('peer\\ipv6-multicast\\capability orf prefix-list both')
+TestFlag.okfail('peer\\ipv4-unicast\\default-originate')
+TestFlag.okfail('peer\\ipv4-multicast\\default-originate')
+TestFlag.okfail('peer\\ipv6-unicast\\default-originate')
+TestFlag.okfail('peer\\ipv6-multicast\\default-originate')
+TestFlag.okfail('peer\\ipv4-unicast\\default-originate route-map')
+TestFlag.okfail('peer\\ipv4-multicast\\default-originate route-map')
+TestFlag.okfail('peer\\ipv6-unicast\\default-originate route-map')
+TestFlag.okfail('peer\\ipv6-multicast\\default-originate route-map')
+TestFlag.okfail('peer\\ipv4-unicast\\filter-list')
+TestFlag.okfail('peer\\ipv4-multicast\\filter-list')
+TestFlag.okfail('peer\\ipv6-unicast\\filter-list')
+TestFlag.okfail('peer\\ipv6-multicast\\filter-list')
+TestFlag.okfail('peer\\ipv4-unicast\\filter-list')
+TestFlag.okfail('peer\\ipv4-multicast\\filter-list')
+TestFlag.okfail('peer\\ipv6-unicast\\filter-list')
+TestFlag.okfail('peer\\ipv6-multicast\\filter-list')
+TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-unicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv6-multicast\\maximum-prefix')
+TestFlag.okfail('peer\\ipv4-unicast\\next-hop-self')
+TestFlag.okfail('peer\\ipv4-multicast\\next-hop-self')
+TestFlag.okfail('peer\\ipv6-unicast\\next-hop-self')
+TestFlag.okfail('peer\\ipv6-multicast\\next-hop-self')
+TestFlag.okfail('peer\\ipv4-unicast\\next-hop-self force')
+TestFlag.okfail('peer\\ipv4-multicast\\next-hop-self force')
+TestFlag.okfail('peer\\ipv6-unicast\\next-hop-self force')
+TestFlag.okfail('peer\\ipv6-multicast\\next-hop-self force')
+TestFlag.okfail('peer\\ipv4-unicast\\prefix-list')
+TestFlag.okfail('peer\\ipv4-multicast\\prefix-list')
+TestFlag.okfail('peer\\ipv6-unicast\\prefix-list')
+TestFlag.okfail('peer\\ipv6-multicast\\prefix-list')
+TestFlag.okfail('peer\\ipv4-unicast\\prefix-list')
+TestFlag.okfail('peer\\ipv4-multicast\\prefix-list')
+TestFlag.okfail('peer\\ipv6-unicast\\prefix-list')
+TestFlag.okfail('peer\\ipv6-multicast\\prefix-list')
+TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS')
+TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS')
+TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS')
+TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS')
+TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS all')
+TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS all')
+TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS all')
+TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS all')
+TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS replace-AS')
+TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS replace-AS')
+TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS replace-AS')
+TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS replace-AS')
+TestFlag.okfail('peer\\ipv4-unicast\\remove-private-AS all replace-AS')
+TestFlag.okfail('peer\\ipv4-multicast\\remove-private-AS all replace-AS')
+TestFlag.okfail('peer\\ipv6-unicast\\remove-private-AS all replace-AS')
+TestFlag.okfail('peer\\ipv6-multicast\\remove-private-AS all replace-AS')
+TestFlag.okfail('peer\\ipv4-unicast\\route-map')
+TestFlag.okfail('peer\\ipv4-multicast\\route-map')
+TestFlag.okfail('peer\\ipv6-unicast\\route-map')
+TestFlag.okfail('peer\\ipv6-multicast\\route-map')
+TestFlag.okfail('peer\\ipv4-unicast\\route-map')
+TestFlag.okfail('peer\\ipv4-multicast\\route-map')
+TestFlag.okfail('peer\\ipv6-unicast\\route-map')
+TestFlag.okfail('peer\\ipv6-multicast\\route-map')
+TestFlag.okfail('peer\\ipv4-unicast\\route-reflector-client')
+TestFlag.okfail('peer\\ipv4-multicast\\route-reflector-client')
+TestFlag.okfail('peer\\ipv6-unicast\\route-reflector-client')
+TestFlag.okfail('peer\\ipv6-multicast\\route-reflector-client')
+TestFlag.okfail('peer\\ipv4-unicast\\route-server-client')
+TestFlag.okfail('peer\\ipv4-multicast\\route-server-client')
+TestFlag.okfail('peer\\ipv6-unicast\\route-server-client')
+TestFlag.okfail('peer\\ipv6-multicast\\route-server-client')
+TestFlag.okfail('peer\\ipv4-unicast\\send-community')
+TestFlag.okfail('peer\\ipv4-multicast\\send-community')
+TestFlag.okfail('peer\\ipv6-unicast\\send-community')
+TestFlag.okfail('peer\\ipv6-multicast\\send-community')
+TestFlag.okfail('peer\\ipv4-unicast\\send-community extended')
+TestFlag.okfail('peer\\ipv4-multicast\\send-community extended')
+TestFlag.okfail('peer\\ipv6-unicast\\send-community extended')
+TestFlag.okfail('peer\\ipv6-multicast\\send-community extended')
+TestFlag.okfail('peer\\ipv4-unicast\\send-community large')
+TestFlag.okfail('peer\\ipv4-multicast\\send-community large')
+TestFlag.okfail('peer\\ipv6-unicast\\send-community large')
+TestFlag.okfail('peer\\ipv6-multicast\\send-community large')
+TestFlag.okfail('peer\\ipv4-unicast\\soft-reconfiguration inbound')
+TestFlag.okfail('peer\\ipv4-multicast\\soft-reconfiguration inbound')
+TestFlag.okfail('peer\\ipv6-unicast\\soft-reconfiguration inbound')
+TestFlag.okfail('peer\\ipv6-multicast\\soft-reconfiguration inbound')
+TestFlag.okfail('peer\\ipv4-unicast\\unsuppress-map')
+TestFlag.okfail('peer\\ipv4-multicast\\unsuppress-map')
+TestFlag.okfail('peer\\ipv6-unicast\\unsuppress-map')
+TestFlag.okfail('peer\\ipv6-multicast\\unsuppress-map')
+TestFlag.okfail('peer\\ipv4-unicast\\weight')
+TestFlag.okfail('peer\\ipv4-multicast\\weight')
+TestFlag.okfail('peer\\ipv6-unicast\\weight')
+TestFlag.okfail('peer\\ipv6-multicast\\weight')
193 ldp
194 sharp
195 pbr
+196 static
ip route flush proto 193
ip route flush proto 194
ip route flush proto 195
+ ip route flush proto 196
else
[ -n "$dmn" ] && eval "${dmn/-/_}=0"
start_watchfrr
+++ /dev/null
-#! /bin/sh
-#
-# When local system does not have the latest autoconf/automake
-# -- Kunihiro Ishiguro <kunihiro@zebra.org>
-#
-
-rm -f config.cache Makefile.in aclocal.m4 config.h.in configure
-rm -rf config.guess config.sub ltmain.sh
-rm -rf autom4te.cache
-
-echo "This $0 script is deprecated, and will be removed at some stage."
-echo "Please use the 'autoreconf' command included with autoconf."
-
-echo "TOOLS VERIONS:"
-for tool in autoheader autoconf libtool libtoolize aclocal automake; do
- $tool --version | head -1
-done
-
-echo "ACLOCAL:"
-aclocal -I m4
-echo "AUTOHEADER:"
-autoheader
-echo "AUTOCONF:"
-autoconf
-echo "LIBTOOLIZE:"
-libtoolize -c
-echo "AUTOMAKE"
-automake --gnu --add-missing --copy
enum vtysh_write_integrated vtysh_write_integrated =
WRITE_INTEGRATED_UNSPECIFIED;
+static int vtysh_reconnect(struct vtysh_client *vclient);
+
static void vclient_close(struct vtysh_client *vclient)
{
if (vclient->fd >= 0) {
"Warning: closing connection to %s because of an I/O error!\n",
vclient->name);
close(vclient->fd);
- vclient->fd = -1;
+ /* indicate as candidate for reconnect */
+ vclient->fd = VTYSH_WAS_ACTIVE;
}
}
char *bufvalid, *end = NULL;
char terminator[3] = {0, 0, 0};
+	/* vclient was previously active, try to reconnect */
+ if (vclient->fd == VTYSH_WAS_ACTIVE) {
+ ret = vtysh_reconnect(vclient);
+ if (ret < 0)
+ goto out_err;
+ }
+
if (vclient->fd < 0)
return CMD_SUCCESS;
ret = write(vclient->fd, line, strlen(line) + 1);
- if (ret <= 0)
- goto out_err;
+ if (ret <= 0) {
+ /* close connection and try to reconnect */
+ vclient_close(vclient);
+ ret = vtysh_reconnect(vclient);
+ if (ret < 0)
+ goto out_err;
+ /* retry line */
+ ret = write(vclient->fd, line, strlen(line) + 1);
+ if (ret <= 0)
+ goto out_err;
+ }
bufvalid = buf;
do {
ssize_t nread =
- read(vclient->fd, bufvalid, buf + bufsz - bufvalid);
+ read(vclient->fd, bufvalid, buf + bufsz - bufvalid - 1);
if (nread < 0 && (errno == EINTR || errno == EAGAIN))
continue;
bufvalid += nread;
+ /* Null terminate so we may pass this to *printf later. */
+ bufvalid[0] = '\0';
+
/*
* We expect string output from daemons, so instead of looking
* for the full 3 null bytes of the terminator, we check for
else if (end)
/* no nl, end of input, but some text left */
eol = end;
- else if (bufvalid == buf + bufsz) {
+ else if (bufvalid == buf + bufsz - 1) {
/*
* no nl, no end of input, no buffer space;
* realloc
saved_ret = ret = cmd_execute_command(vline, vty, &cmd, 1);
saved_node = vty->node;
- /* If command doesn't succeeded in current node, try to walk up in node
- * tree.
- * Changing vty->node is enough to try it just out without actual walkup
- * in
- * the vtysh. */
+ /*
+ * If command doesn't succeeded in current node, try to walk up in node
+ * tree. Changing vty->node is enough to try it just out without actual
+ * walkup in the vtysh.
+ */
while (ret != CMD_SUCCESS && ret != CMD_SUCCESS_DAEMON
&& ret != CMD_WARNING && ret != CMD_WARNING_CONFIG_FAILED
&& vty->node > CONFIG_NODE) {
vty->node = saved_node;
- /* If command succeeded in any other node than current (tried > 0) we
- * have
- * to move into node in the vtysh where it succeeded. */
+ /*
+ * If command succeeded in any other node than current (tried > 0) we
+ * have to move into node in the vtysh where it succeeded.
+ */
if (ret == CMD_SUCCESS || ret == CMD_SUCCESS_DAEMON
|| ret == CMD_WARNING) {
if ((saved_node == BGP_VPNV4_NODE
vtysh_execute("configure terminal");
}
}
- /* If command didn't succeed in any node, continue with return value
- * from
- * first try. */
+ /*
+ * If command didn't succeed in any node, continue with return value
+ * from first try.
+ */
else if (tried) {
ret = saved_ret;
}
struct vtysh_client *vc;
for (i = 0; i < array_size(vtysh_client); i++) {
if (cmd->daemon & vtysh_client[i].flag) {
+ if (vtysh_client[i].fd < 0
+ && (cmd->daemon == vtysh_client[i].flag)) {
+ for (vc = &vtysh_client[i]; vc;
+ vc = vc->next)
+ if (vc->fd < 0)
+ vtysh_reconnect(vc);
+ }
if (vtysh_client[i].fd < 0
&& (cmd->daemon == vtysh_client[i].flag)) {
bool any_inst = false;
continue;
}
- /* Ignore the "end" lines, we will generate these where
- * appropriate */
+ /*
+ * Ignore the "end" lines, we will generate these where
+ * appropriate
+ */
if (strlen(vty_buf_trimmed) == 3
&& strncmp("end", vty_buf_trimmed, 3) == 0) {
cmd_free_strvec(vline);
prev_node = vty->node;
saved_ret = ret = cmd_execute_command_strict(vline, vty, &cmd);
- /* If command doesn't succeeded in current node, try to walk up
- * in node tree.
- * Changing vty->node is enough to try it just out without
- * actual walkup in
- * the vtysh. */
+ /*
+ * If command doesn't succeeded in current node, try to walk up
+ * in node tree. Changing vty->node is enough to try it just
+ * out without actual walkup in the vtysh.
+ */
while (ret != CMD_SUCCESS && ret != CMD_SUCCESS_DAEMON
&& ret != CMD_WARNING && ret != CMD_WARNING_CONFIG_FAILED
&& vty->node > CONFIG_NODE) {
tried++;
}
- /* If command succeeded in any other node than current (tried >
- * 0) we have
- * to move into node in the vtysh where it succeeded. */
+ /*
+ * If command succeeded in any other node than current (tried >
+ * 0) we have to move into node in the vtysh where it
+ * succeeded.
+ */
if (ret == CMD_SUCCESS || ret == CMD_SUCCESS_DAEMON
|| ret == CMD_WARNING) {
if ((prev_node == BGP_VPNV4_NODE
fprintf(outputfile, "end\n");
}
}
- /* If command didn't succeed in any node, continue with return
- * value from
- * first try. */
+ /*
+ * If command didn't succeed in any node, continue with return
+ * value from first try.
+ */
else if (tried) {
ret = saved_ret;
vty->node = prev_node;
int ret;
const struct cmd_element *cmd;
int lineno = 0;
+ /* once we have an error, we remember & return that */
int retcode = CMD_SUCCESS;
while (fgets(vty->buf, VTY_BUFSIZ, fp)) {
if (vty->type == VTY_FILE)
fprintf(stderr, "line %d: Warning[%d]...: %s\n",
lineno, vty->node, vty->buf);
- retcode = ret; /* once we have an error, we remember &
- return that */
+ retcode = ret;
+
break;
case CMD_ERR_AMBIGUOUS:
fprintf(stderr,
"line %d: %% Ambiguous command[%d]: %s\n",
lineno, vty->node, vty->buf);
- retcode = CMD_ERR_AMBIGUOUS; /* once we have an error,
- we remember & return
- that */
+ retcode = CMD_ERR_AMBIGUOUS;
break;
case CMD_ERR_NO_MATCH:
fprintf(stderr, "line %d: %% Unknown command[%d]: %s",
lineno, vty->node, vty->buf);
- retcode = CMD_ERR_NO_MATCH; /* once we have an error, we
- remember & return that */
+ retcode = CMD_ERR_NO_MATCH;
break;
case CMD_ERR_INCOMPLETE:
fprintf(stderr,
"line %d: %% Command incomplete[%d]: %s\n",
lineno, vty->node, vty->buf);
- retcode = CMD_ERR_INCOMPLETE; /* once we have an error,
- we remember & return
- that */
+ retcode = CMD_ERR_INCOMPLETE;
break;
case CMD_SUCCESS_DAEMON: {
unsigned int i;
outputfile);
/*
* CMD_WARNING - Can mean that the
- * command was
- * parsed successfully but it was
- * already entered
- * in a few spots. As such if we
- * receive a
+ * command was parsed successfully but
+ * it was already entered in a few
+ * spots. As such if we receive a
* CMD_WARNING from a daemon we
- * shouldn't stop
- * talking to the other daemons for the
- * particular
- * command.
+ * shouldn't stop talking to the other
+ * daemons for the particular command.
*/
if (cmd_stat != CMD_SUCCESS
&& cmd_stat != CMD_WARNING) {
}
if (matched && matched[index])
- /* this is free()'d by readline, but we leak 1 count of
- * MTYPE_COMPLETION */
+ /*
+ * this is free()'d by readline, but we leak 1 count of
+ * MTYPE_COMPLETION
+ */
return matched[index++];
XFREE(MTYPE_TMP, matched);
return vtysh_exit_nexthop_group(self, vty, argc, argv);
}
-/* TODO Implement interface description commands in ripngd, ospf6d
- * and isisd. */
+/*
+ * TODO Implement interface description commands in ripngd, ospf6d
+ * and isisd.
+ */
DEFSH(VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_OSPFD | VTYSH_EIGRPD,
vtysh_interface_desc_cmd, "description LINE...",
"Interface specific description\n"
vtysh_config_dump(fp);
- if (vtysh_pager_name && fp && fp != outputfile) {
+ if (vtysh_pager_name && fp != outputfile) {
fflush(fp);
if (pclose(fp) == -1) {
perror("pclose");
return 0;
}
+/*
+ * (Re-)establish the connection to a single daemon and replay the
+ * "enable" / "end" commands so the daemon's CLI state matches vtysh's.
+ * Returns < 0 if connecting or replaying the state commands fails.
+ */
+static int vtysh_reconnect(struct vtysh_client *vclient)
+{
+	int ret;
+
+	fprintf(stderr, "Warning: connecting to %s...", vclient->name);
+	ret = vtysh_connect(vclient);
+	if (ret < 0) {
+		fprintf(stderr, "failed!\n");
+		return ret;
+	}
+	fprintf(stderr, "success!\n");
+	if (vtysh_client_execute(vclient, "enable", NULL) < 0)
+		return -1;
+	return vtysh_execute_no_pager("end");
+}
+
/* Return true if str ends with suffix, else return false */
static int ends_with(const char *str, const char *suffix)
{
#define VTYSH_SHARPD 0x2000
#define VTYSH_PBRD 0x4000
+#define VTYSH_WAS_ACTIVE (-2)
+
/* commands in REALLYALL are crucial to correct vtysh operation */
#define VTYSH_REALLYALL ~0U
/* watchfrr is not in ALL since library CLI functions should not be
log_it(cmd->line);
/*
- * Parsing logic for regular commands will be different than
- * for those commands requiring further processing, such as
- * cli instructions terminating with question-mark character.
+ * Parsing logic for regular commands will be different
+ * than for those commands requiring further
+ * processing, such as cli instructions terminating
+ * with question-mark character.
*/
if (!vtysh_execute_command_questionmark(cmd->line))
ret = CMD_SUCCESS;
unsigned long zebra_debug_vxlan;
unsigned long zebra_debug_pw;
+DEFINE_HOOK(zebra_debug_show_debugging, (struct vty *vty), (vty));
+
DEFUN_NOSH (show_debugging_zebra,
show_debugging_zebra_cmd,
"show debugging [zebra]",
if (IS_ZEBRA_DEBUG_PW)
vty_out(vty, " Zebra pseudowire debugging is on\n");
+ hook_call(zebra_debug_show_debugging, vty);
return CMD_SUCCESS;
}
extern void zebra_debug_init(void);
+DECLARE_HOOK(zebra_debug_show_debugging, (struct vty *vty), (vty));
+
#endif /* _ZEBRA_DEBUG_H */
return 0;
}
-/* Called from interface_lookup_netlink(). This function is only used
- during bootstrap. */
-static int netlink_interface(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+/*
+ * Called from interface_lookup_netlink(). This function is only used
+ * during bootstrap.
+ */
+static int netlink_interface(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
struct ifinfomsg *ifi;
memset(linkinfo, 0, sizeof linkinfo);
netlink_parse_rtattr(tb, IFLA_MAX, IFLA_RTA(ifi), len);
-#ifdef IFLA_WIRELESS
/* check for wireless messages to ignore */
if ((tb[IFLA_WIRELESS] != NULL) && (ifi->ifi_change == 0)) {
if (IS_ZEBRA_DEBUG_KERNEL)
__func__);
return 0;
}
-#endif /* IFLA_WIRELESS */
if (tb[IFLA_IFNAME] == NULL)
return -1;
return netlink_address(RTM_DELADDR, AF_INET6, ifp, ifc);
}
-int netlink_interface_addr(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
struct ifaddrmsg *ifa;
*/
}
-int netlink_link_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
struct ifinfomsg *ifi;
memset(linkinfo, 0, sizeof linkinfo);
netlink_parse_rtattr(tb, IFLA_MAX, IFLA_RTA(ifi), len);
-#ifdef IFLA_WIRELESS
/* check for wireless messages to ignore */
if ((tb[IFLA_WIRELESS] != NULL) && (ifi->ifi_change == 0)) {
if (IS_ZEBRA_DEBUG_KERNEL)
__func__);
return 0;
}
-#endif /* IFLA_WIRELESS */
if (tb[IFLA_IFNAME] == NULL)
return -1;
#ifdef HAVE_NETLINK
-extern int netlink_interface_addr(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup);
-extern int netlink_link_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup);
+extern int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id,
+ int startup);
+extern int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup);
extern int interface_lookup_netlink(struct zebra_ns *zns);
#endif /* HAVE_NETLINK */
{RTPROT_ISIS, "IS-IS"},
{RTPROT_RIP, "RIP"},
{RTPROT_RIPNG, "RIPNG"},
+ {RTPROT_ZSTATIC, "static"},
{0}};
static const struct message family_str[] = {{AF_INET, "ipv4"},
extern struct zebra_privs_t zserv_privs;
-int netlink_talk_filter(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+int netlink_talk_filter(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
- zlog_warn("netlink_talk: ignoring message type 0x%04x NS %u",
- h->nlmsg_type, ns_id);
+ /*
+ * This is an error condition that must be handled during
+ * development.
+ *
+ * The netlink_talk_filter function is used for communication
+ * down the netlink_cmd pipe and we are expecting
+ * an ack being received. So if we get here
+ * then we did not receive the ack and instead
+ * received some other message in an unexpected
+ * way.
+ */
+ zlog_err("%s: ignoring message type 0x%04x(%s) NS %u",
+ __PRETTY_FUNCTION__, h->nlmsg_type,
+ nl_msg_type_to_str(h->nlmsg_type), ns_id);
return 0;
}
return ret;
}
-static int netlink_information_fetch(struct sockaddr_nl *snl,
- struct nlmsghdr *h, ns_id_t ns_id,
+static int netlink_information_fetch(struct nlmsghdr *h, ns_id_t ns_id,
int startup)
{
- /* JF: Ignore messages that aren't from the kernel */
- if (snl->nl_pid != 0) {
- zlog_err("Ignoring message from pid %u", snl->nl_pid);
- return 0;
- }
-
+ /*
+ * When we handle new message types here
+ * because we are starting to install them
+ * then lets check the netlink_install_filter
+ * and see if we should add the corresponding
+ * allow through entry there.
+ * Probably not needed to do but please
+ * think about it.
+ */
switch (h->nlmsg_type) {
case RTM_NEWROUTE:
- return netlink_route_change(snl, h, ns_id, startup);
+ return netlink_route_change(h, ns_id, startup);
case RTM_DELROUTE:
- return netlink_route_change(snl, h, ns_id, startup);
+ return netlink_route_change(h, ns_id, startup);
case RTM_NEWLINK:
- return netlink_link_change(snl, h, ns_id, startup);
+ return netlink_link_change(h, ns_id, startup);
case RTM_DELLINK:
- return netlink_link_change(snl, h, ns_id, startup);
+ return netlink_link_change(h, ns_id, startup);
case RTM_NEWADDR:
- return netlink_interface_addr(snl, h, ns_id, startup);
+ return netlink_interface_addr(h, ns_id, startup);
case RTM_DELADDR:
- return netlink_interface_addr(snl, h, ns_id, startup);
+ return netlink_interface_addr(h, ns_id, startup);
case RTM_NEWNEIGH:
- return netlink_neigh_change(snl, h, ns_id);
+ return netlink_neigh_change(h, ns_id);
case RTM_DELNEIGH:
- return netlink_neigh_change(snl, h, ns_id);
+ return netlink_neigh_change(h, ns_id);
case RTM_NEWRULE:
- return netlink_rule_change(snl, h, ns_id, startup);
+ return netlink_rule_change(h, ns_id, startup);
case RTM_DELRULE:
- return netlink_rule_change(snl, h, ns_id, startup);
+ return netlink_rule_change(h, ns_id, startup);
default:
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug("Unknown netlink nlmsg_type %d vrf %u\n",
- h->nlmsg_type, ns_id);
+ /*
+ * If we have received this message then
+ * we have made a mistake during development
+ * and we need to write some code to handle
+ * this message type or not ask for
+ * it to be sent up to us
+ */
+ zlog_err("Unknown netlink nlmsg_type %s(%d) vrf %u\n",
+ nl_msg_type_to_str(h->nlmsg_type), h->nlmsg_type,
+ ns_id);
break;
}
return 0;
return 0;
}
-/* Filter out messages from self that occur on listener socket,
+/*
+ * Filter out messages from self that occur on listener socket,
* caused by our actions on the command socket
+ *
+ * When we add new Netlink message types we probably
+ * do not need to add them here, as we are filtering
+ * on the routes we actually care to receive (which is rarer
+ * than the normal course of operations). We are intentionally
+ * allowing some messages from ourselves through
+ * ( I'm looking at you Interface based netlink messages )
+ * so that we only had to write one way to handle incoming
+ * address add/delete changes.
*/
static void netlink_install_filter(int sock, __u32 pid)
{
+ /*
+ * BPF_JUMP instructions and where you jump to are based upon
+ * 0 as being the next statement. So count from 0. Writing
+ * this down because every time I look at this I have to
+ * re-remember it.
+ */
struct sock_filter filter[] = {
- /* 0: ldh [4] */
- BPF_STMT(BPF_LD | BPF_ABS | BPF_H,
- offsetof(struct nlmsghdr, nlmsg_type)),
- /* 1: jeq 0x18 jt 5 jf next */
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_NEWROUTE), 3, 0),
- /* 2: jeq 0x19 jt 5 jf next */
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_DELROUTE), 2, 0),
- /* 3: jeq 0x19 jt 5 jf next */
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_NEWNEIGH), 1, 0),
- /* 4: jeq 0x19 jt 5 jf 8 */
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_DELNEIGH), 0, 3),
- /* 5: ldw [12] */
+ /*
+ * Logic:
+ * if (nlmsg_pid == pid) {
+ * if (the incoming nlmsg_type ==
+ * RTM_NEWADDR | RTM_DELADDR)
+ * keep this message
+ * else
+ * skip this message
+ * } else
+ * keep this netlink message
+ */
+ /*
+ * 0: Load the nlmsg_pid into the BPF register
+ */
BPF_STMT(BPF_LD | BPF_ABS | BPF_W,
offsetof(struct nlmsghdr, nlmsg_pid)),
- /* 6: jeq XX jt 7 jf 8 */
- BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htonl(pid), 0, 1),
- /* 7: ret 0 (skip) */
+ /*
+ * 1: Compare to pid
+ */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htonl(pid), 0, 4),
+ /*
+ * 2: Load the nlmsg_type into BPF register
+ */
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H,
+ offsetof(struct nlmsghdr, nlmsg_type)),
+ /*
+ * 3: Compare to RTM_NEWADDR
+ */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_NEWADDR), 2, 0),
+ /*
+ * 4: Compare to RTM_DELADDR
+ */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, htons(RTM_DELADDR), 1, 0),
+		/*
+		 * 5: This is the end state where we want to skip
+		 * the message
+		 */
BPF_STMT(BPF_RET | BPF_K, 0),
- /* 8: ret 0xffff (keep) */
+		/* 6: This is the end state where we want to keep
+		 * the message
+		 */
BPF_STMT(BPF_RET | BPF_K, 0xffff),
};
* startup -> Are we reading in under startup conditions? passed to
* the filter.
*/
-int netlink_parse_info(int (*filter)(struct sockaddr_nl *, struct nlmsghdr *,
- ns_id_t, int),
+int netlink_parse_info(int (*filter)(struct nlmsghdr *, ns_id_t, int),
struct nlsock *nl, struct zebra_ns *zns, int count,
int startup)
{
h->nlmsg_type, h->nlmsg_len,
h->nlmsg_seq, h->nlmsg_pid);
- /* skip unsolicited messages originating from command
- * socket
- * linux sets the originators port-id for {NEW|DEL}ADDR
- * messages,
- * so this has to be checked here. */
- if (nl != &zns->netlink_cmd
- && h->nlmsg_pid == zns->netlink_cmd.snl.nl_pid
- && (h->nlmsg_type != RTM_NEWADDR
- && h->nlmsg_type != RTM_DELADDR)) {
- if (IS_ZEBRA_DEBUG_KERNEL)
- zlog_debug(
- "netlink_parse_info: %s packet comes from %s",
- zns->netlink_cmd.name,
- nl->name);
+
+ /*
+ * Ignore messages that maybe sent from
+ * other actors besides the kernel
+ */
+ if (snl.nl_pid != 0) {
+ zlog_err("Ignoring message from pid %u",
+ snl.nl_pid);
continue;
}
- error = (*filter)(&snl, h, zns->ns_id, startup);
+ error = (*filter)(h, zns->ns_id, startup);
if (error < 0) {
zlog_err("%s filter function error", nl->name);
ret = error;
* startup -> Are we reading in under startup conditions
* This is passed through eventually to filter.
*/
-int netlink_talk(int (*filter)(struct sockaddr_nl *, struct nlmsghdr *, ns_id_t,
- int startup),
+int netlink_talk(int (*filter)(struct nlmsghdr *, ns_id_t, int startup),
struct nlmsghdr *n, struct nlsock *nl, struct zebra_ns *zns,
int startup)
{
{
unsigned long groups;
- /* Initialize netlink sockets */
- groups = RTMGRP_LINK | RTMGRP_IPV4_ROUTE | RTMGRP_IPV4_IFADDR
- | RTMGRP_IPV6_ROUTE | RTMGRP_IPV6_IFADDR | RTMGRP_IPV4_MROUTE
- | RTMGRP_NEIGH
- | RTNLGRP_IPV4_RULE | RTNLGRP_IPV6_RULE;
+ /*
+ * Initialize netlink sockets
+ *
+ * If RTMGRP_XXX exists use that, but at some point
+ * I think the kernel developers realized that
+ * keeping track of all the different values would
+ * lead to confusion, so we need to convert the
+ * RTNLGRP_XXX to a bit position for ourself
+ */
+ groups = RTMGRP_LINK |
+ RTMGRP_IPV4_ROUTE |
+ RTMGRP_IPV4_IFADDR |
+ RTMGRP_IPV6_ROUTE |
+ RTMGRP_IPV6_IFADDR |
+ RTMGRP_IPV4_MROUTE |
+ RTMGRP_NEIGH |
+ (1 << (RTNLGRP_IPV4_RULE - 1)) |
+ (1 << (RTNLGRP_IPV6_RULE - 1));
snprintf(zns->netlink.name, sizeof(zns->netlink.name),
"netlink-listen (NS %u)", zns->ns_id);
extern const char *nl_family_to_str(uint8_t family);
extern const char *nl_rttype_to_str(uint8_t rttype);
-extern int netlink_parse_info(int (*filter)(struct sockaddr_nl *,
- struct nlmsghdr *, ns_id_t, int),
+extern int netlink_parse_info(int (*filter)(struct nlmsghdr *, ns_id_t, int),
struct nlsock *nl, struct zebra_ns *zns,
int count, int startup);
-extern int netlink_talk_filter(struct sockaddr_nl *, struct nlmsghdr *, ns_id_t,
- int startup);
-extern int netlink_talk(int (*filter)(struct sockaddr_nl *, struct nlmsghdr *,
- ns_id_t, int startup),
+extern int netlink_talk_filter(struct nlmsghdr *h, ns_id_t ns, int startup);
+extern int netlink_talk(int (*filter)(struct nlmsghdr *, ns_id_t, int startup),
struct nlmsghdr *n, struct nlsock *nl,
struct zebra_ns *zns, int startup);
extern int netlink_request(struct nlsock *nl, struct nlmsghdr *n);
#include "zebra/label_manager.h"
#include "zebra/zebra_netns_notify.h"
#include "zebra/zebra_rnh.h"
+#include "zebra/zebra_pbr.h"
#define ZEBRA_PTM_SUPPORT
zebra_mpls_init();
zebra_mpls_vty_init();
zebra_pw_vty_init();
+ zebra_pbr_init();
/* For debug purpose. */
/* SET_FLAG (zebra_debug_event, ZEBRA_DEBUG_EVENT); */
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug(
- "%s: client %s %s(%d) checking: selected=%d, type=%d, distance=%d, metric=%d zebra_check_addr=%d",
+ "%s: client %s %s(%u) checking: selected=%d, type=%d, distance=%d, metric=%d zebra_check_addr=%d",
__func__,
zebra_route_string(client->proto),
prefix2str(dst_p, buf, sizeof(buf)),
if (send_redistribute) {
if (IS_ZEBRA_DEBUG_EVENT) {
zlog_debug(
- "%s: client %s %s(%d), type=%d, distance=%d, metric=%d",
+ "%s: client %s %s(%u), type=%d, distance=%d, metric=%d",
__func__,
zebra_route_string(client->proto),
prefix2str(p, buf, sizeof(buf)),
if (IS_ZEBRA_DEBUG_EVENT)
zlog_debug(
- "%s: client proto %s afi=%d, wants %s, vrf %d, instance=%d",
+ "%s: client proto %s afi=%d, wants %s, vrf %u, instance=%d",
__func__, zebra_route_string(client->proto), afi,
zebra_route_string(type), zvrf_id(zvrf), instance);
if (!vrf_bitmap_check(client->redist[afi][type],
zvrf_id(zvrf))) {
if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("%s: setting vrf %d redist bitmap",
+ zlog_debug("%s: setting vrf %u redist bitmap",
__func__, zvrf_id(zvrf));
vrf_bitmap_set(client->redist[afi][type],
zvrf_id(zvrf));
struct zserv *client;
if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_UP %s", ifp->name);
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_UP %s(%u)",
+ ifp->name, ifp->vrf_id);
if (ifp->ptm_status || !ifp->ptm_enable) {
for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
struct zserv *client;
if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_DOWN %s", ifp->name);
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_DOWN %s(%u)",
+ ifp->name, ifp->vrf_id);
for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
zsend_interface_update(ZEBRA_INTERFACE_DOWN, client, ifp);
struct zserv *client;
if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_ADD %s[%d]", ifp->name,
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_ADD %s(%u)", ifp->name,
ifp->vrf_id);
for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
struct zserv *client;
if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_DELETE %s", ifp->name);
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_DELETE %s(%u)",
+ ifp->name, ifp->vrf_id);
for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client)) {
client->ifdel_cnt++;
char buf[PREFIX_STRLEN];
p = ifc->address;
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_ADDRESS_ADD %s on %s",
- prefix2str(p, buf, sizeof(buf)), ifc->ifp->name);
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_ADDRESS_ADD %s on %s(%u)",
+ prefix2str(p, buf, sizeof(buf)), ifp->name,
+ ifp->vrf_id);
}
if (!CHECK_FLAG(ifc->conf, ZEBRA_IFC_REAL))
char buf[PREFIX_STRLEN];
p = ifc->address;
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_ADDRESS_DELETE %s on %s",
- prefix2str(p, buf, sizeof(buf)), ifc->ifp->name);
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_ADDRESS_DELETE %s on %s(%u)",
+ prefix2str(p, buf, sizeof(buf)),
+ ifp->name, ifp->vrf_id);
}
zebra_vxlan_add_del_gw_macip(ifp, ifc->address, 0);
struct zserv *client;
if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("MESSAGE: ZEBRA_INTERFACE_LINK_PARAMS %s",
- ifp->name);
+ zlog_debug("MESSAGE: ZEBRA_INTERFACE_LINK_PARAMS %s(%u)",
+ ifp->name, ifp->vrf_id);
for (ALL_LIST_ELEMENTS(zebrad.client_list, node, nnode, client))
if (client->ifinfo)
static inline int is_selfroute(int proto)
{
if ((proto == RTPROT_BGP) || (proto == RTPROT_OSPF)
- || (proto == RTPROT_STATIC) || (proto == RTPROT_ZEBRA)
+ || (proto == RTPROT_ZSTATIC) || (proto == RTPROT_ZEBRA)
|| (proto == RTPROT_ISIS) || (proto == RTPROT_RIPNG)
|| (proto == RTPROT_NHRP) || (proto == RTPROT_EIGRP)
|| (proto == RTPROT_LDP) || (proto == RTPROT_BABEL)
proto = RTPROT_OSPF;
break;
case ZEBRA_ROUTE_STATIC:
- proto = RTPROT_STATIC;
+ proto = RTPROT_ZSTATIC;
break;
case ZEBRA_ROUTE_ISIS:
proto = RTPROT_ISIS;
proto = ZEBRA_ROUTE_LDP;
break;
case RTPROT_STATIC:
+ case RTPROT_ZSTATIC:
proto = ZEBRA_ROUTE_STATIC;
break;
case RTPROT_SHARP:
}
/* Looking up routing table by netlink interface. */
-static int netlink_route_change_read_unicast(struct sockaddr_nl *snl,
- struct nlmsghdr *h, ns_id_t ns_id,
+static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id,
int startup)
{
int len;
return 0;
if (!startup && is_selfroute(rtm->rtm_protocol)
- && h->nlmsg_type == RTM_NEWROUTE)
+ && h->nlmsg_type == RTM_NEWROUTE) {
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("Route type: %d Received that we think we have originated, ignoring",
+ rtm->rtm_protocol);
return 0;
+ }
/* We don't care about change notifications for the MPLS table. */
/* TODO: Revisit this. */
static struct mcast_route_data *mroute = NULL;
-static int netlink_route_change_read_multicast(struct sockaddr_nl *snl,
- struct nlmsghdr *h,
+static int netlink_route_change_read_multicast(struct nlmsghdr *h,
ns_id_t ns_id, int startup)
{
int len;
return 0;
}
-int netlink_route_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
struct rtmsg *rtm;
return -1;
if (rtm->rtm_type == RTN_MULTICAST)
- netlink_route_change_read_multicast(snl, h, ns_id, startup);
+ netlink_route_change_read_multicast(h, ns_id, startup);
else
- netlink_route_change_read_unicast(snl, h, ns_id, startup);
+ netlink_route_change_read_unicast(h, ns_id, startup);
return 0;
}
((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
#endif
-static int netlink_macfdb_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- int len, ns_id_t ns_id)
+static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
{
struct ndmsg *ndm;
struct interface *ifp;
return zebra_vxlan_local_mac_del(ifp, br_if, &mac, vid);
}
-static int netlink_macfdb_table(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+static int netlink_macfdb_table(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
struct ndmsg *ndm;
if (ndm->ndm_family != AF_BRIDGE)
return 0;
- return netlink_macfdb_change(snl, h, len, ns_id);
+ return netlink_macfdb_change(h, len, ns_id);
}
/* Request for MAC FDB information from the kernel */
(NUD_PERMANENT | NUD_NOARP | NUD_REACHABLE | NUD_PROBE | NUD_STALE \
| NUD_DELAY)
-static int netlink_ipneigh_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- int len, ns_id_t ns_id)
+static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
{
struct ndmsg *ndm;
struct interface *ifp;
return zebra_vxlan_handle_kernel_neigh_del(ifp, link_if, &ip);
}
-static int netlink_neigh_table(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+static int netlink_neigh_table(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
int len;
struct ndmsg *ndm;
if (ndm->ndm_family != AF_INET && ndm->ndm_family != AF_INET6)
return 0;
- return netlink_neigh_change(snl, h, len);
+ return netlink_neigh_change(h, len);
}
/* Request for IP neighbor information from the kernel */
return ret;
}
-int netlink_neigh_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id)
+int netlink_neigh_change(struct nlmsghdr *h, ns_id_t ns_id)
{
int len;
struct ndmsg *ndm;
/* Is this a notification for the MAC FDB or IP neighbor table? */
ndm = NLMSG_DATA(h);
if (ndm->ndm_family == AF_BRIDGE)
- return netlink_macfdb_change(snl, h, len, ns_id);
+ return netlink_macfdb_change(h, len, ns_id);
if (ndm->ndm_type != RTN_UNICAST)
return 0;
if (ndm->ndm_family == AF_INET || ndm->ndm_family == AF_INET6)
- return netlink_ipneigh_change(snl, h, len, ns_id);
+ return netlink_ipneigh_change(h, len, ns_id);
return 0;
}
#define RTPROT_LDP 193
#define RTPROT_SHARP 194
#define RTPROT_PBR 195
+#define RTPROT_ZSTATIC 196
void rt_netlink_init(void);
extern int netlink_mpls_multipath(int cmd, zebra_lsp_t *lsp);
-extern int netlink_route_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup);
+extern int netlink_route_change(struct nlmsghdr *h, ns_id_t ns_id, int startup);
extern int netlink_route_read(struct zebra_ns *zns);
-extern int netlink_neigh_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id);
+extern int netlink_neigh_change(struct nlmsghdr *h, ns_id_t ns_id);
extern int netlink_macfdb_read(struct zebra_ns *zns);
extern int netlink_macfdb_read_for_bridge(struct zebra_ns *zns,
struct interface *ifp,
&rule->rule.filter.dst_ip.u.prefix, bytelen);
}
+ /* fwmark, if specified */
+ if (IS_RULE_FILTERING_ON_FWMARK(rule)) {
+ addattr32(&req.n, sizeof(req), FRA_FWMARK,
+ rule->rule.filter.fwmark);
+ }
+
/* Route table to use to forward, if filter criteria matches. */
if (rule->rule.action.table < 256)
req.frh.table = rule->rule.action.table;
* notification of interest. The expectation is that if this corresponds
* to a PBR rule added by FRR, it will be readded.
*/
-int netlink_rule_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup)
+int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
{
struct zebra_ns *zns;
struct fib_rule_hdr *frh;
/*
* Handle netlink notification informing a rule add or delete.
*/
-extern int netlink_rule_change(struct sockaddr_nl *snl, struct nlmsghdr *h,
- ns_id_t ns_id, int startup);
+extern int netlink_rule_change(struct nlmsghdr *h, ns_id_t ns_id, int startup);
/*
* Get to know existing PBR rules in the kernel - typically called at startup.
zebra_zebra_irdp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
zebra_zebra_snmp_la_SOURCES = zebra/zebra_snmp.c
-zebra_zebra_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS)
+zebra_zebra_snmp_la_CFLAGS = $(WERROR) $(SNMP_CFLAGS) -std=gnu99
zebra_zebra_snmp_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
zebra_zebra_snmp_la_LIBADD = lib/libfrrsnmp.la
memset(&zpi, 0, sizeof(zpi));
zpi.sock = client->sock;
+ zpi.vrf_id = zvrf->vrf->vrf_id;
STREAM_GETL(s, zpi.unique);
STREAM_GETL(s, zpi.type);
STREAM_GET(&zpi.ipset_name, s, ZEBRA_IPSET_NAME_SIZE);
STREAM_GETC(s, zpi.dst.prefixlen);
STREAM_GET(&zpi.dst.u.prefix, s, prefix_blen(&zpi.dst));
+ STREAM_GETW(s, zpi.src_port_min);
+ STREAM_GETW(s, zpi.src_port_max);
+ STREAM_GETW(s, zpi.dst_port_min);
+ STREAM_GETW(s, zpi.dst_port_max);
+ STREAM_GETC(s, zpi.proto);
if (!is_default_prefix(&zpi.src))
zpi.filter_bm |= PBR_FILTER_SRC_IP;
if (!is_default_prefix(&zpi.dst))
zpi.filter_bm |= PBR_FILTER_DST_IP;
+ if (zpi.dst_port_min != 0)
+ zpi.filter_bm |= PBR_FILTER_DST_PORT;
+ if (zpi.src_port_min != 0)
+ zpi.filter_bm |= PBR_FILTER_SRC_PORT;
+ if (zpi.dst_port_max != 0)
+ zpi.filter_bm |= PBR_FILTER_DST_PORT_RANGE;
+ if (zpi.src_port_max != 0)
+ zpi.filter_bm |= PBR_FILTER_SRC_PORT_RANGE;
+ if (zpi.proto != 0)
+ zpi.filter_bm |= PBR_FILTER_PROTO;
/* calculate backpointer */
zpi.backpointer = zebra_pbr_lookup_ipset_pername(
memset(&zpi, 0, sizeof(zpi));
+ zpi.interface_name_list = list_new();
zpi.sock = client->sock;
+ zpi.vrf_id = zvrf->vrf->vrf_id;
STREAM_GETL(s, zpi.unique);
STREAM_GETL(s, zpi.type);
STREAM_GETL(s, zpi.filter_bm);
STREAM_GETL(s, zpi.action);
STREAM_GETL(s, zpi.fwmark);
STREAM_GET(&zpi.ipset_name, s, ZEBRA_IPSET_NAME_SIZE);
+ STREAM_GETL(s, zpi.nb_interface);
+ zebra_pbr_iptable_update_interfacelist(s, &zpi);
if (hdr->command == ZEBRA_IPTABLE_ADD)
zebra_pbr_add_iptable(zvrf->zns, &zpi);
static inline int zebra_ns_table_entry_compare(const struct zebra_ns_table *e1,
const struct zebra_ns_table *e2)
{
- if (e1->tableid == e2->tableid)
- return (e1->afi - e2->afi);
-
- return e1->tableid - e2->tableid;
+ if (e1->tableid < e2->tableid)
+ return -1;
+ if (e1->tableid > e2->tableid)
+ return 1;
+ if (e1->ns_id < e2->ns_id)
+ return -1;
+ if (e1->ns_id > e2->ns_id)
+ return 1;
+ return (e1->afi - e2->afi);
}
static int logicalrouter_config_write(struct vty *vty);
memset(&finder, 0, sizeof(finder));
finder.afi = afi;
finder.tableid = tableid;
+ finder.ns_id = zns->ns_id;
znst = RB_FIND(zebra_ns_table_head, &zns->ns_tables, &finder);
if (znst)
zns = zebra_ns_lookup(NS_DEFAULT);
- RB_FOREACH (znst, zebra_ns_table_head, &zns->ns_tables)
+ RB_FOREACH (znst, zebra_ns_table_head, &zns->ns_tables) {
+ if (znst->ns_id != NS_DEFAULT)
+ continue;
cnt += rib_score_proto_table(proto, instance, znst->table);
-
+ }
return cnt;
}
zns = zebra_ns_lookup(NS_DEFAULT);
- RB_FOREACH (znst, zebra_ns_table_head, &zns->ns_tables)
+ RB_FOREACH (znst, zebra_ns_table_head, &zns->ns_tables) {
+ if (znst->ns_id != NS_DEFAULT)
+ continue;
rib_sweep_table(znst->table);
+ }
}
struct route_table *zebra_ns_get_table(struct zebra_ns *zns,
memset(&finder, 0, sizeof(finder));
finder.afi = afi;
finder.tableid = tableid;
+ finder.ns_id = zns->ns_id;
znst = RB_FIND(zebra_ns_table_head, &zns->ns_tables, &finder);
if (znst)
znst = XCALLOC(MTYPE_ZEBRA_NS, sizeof(*znst));
znst->tableid = tableid;
znst->afi = afi;
+ znst->ns_id = zns->ns_id;
znst->table =
(afi == AFI_IP6) ? srcdest_table_init() : route_table_init();
int zebra_ns_disable(ns_id_t ns_id, void **info)
{
- struct zebra_ns_table *znst;
+ struct zebra_ns_table *znst, *tmp;
struct zebra_ns *zns = (struct zebra_ns *)(*info);
hash_clean(zns->rules_hash, zebra_pbr_rules_free);
hash_free(zns->rules_hash);
- hash_clean(zns->ipset_hash, zebra_pbr_ipset_free);
- hash_free(zns->ipset_hash);
hash_clean(zns->ipset_entry_hash,
zebra_pbr_ipset_entry_free),
+ hash_clean(zns->ipset_hash, zebra_pbr_ipset_free);
+ hash_free(zns->ipset_hash);
hash_free(zns->ipset_entry_hash);
hash_clean(zns->iptable_hash,
zebra_pbr_iptable_free);
hash_free(zns->iptable_hash);
- while (!RB_EMPTY(zebra_ns_table_head, &zns->ns_tables)) {
- znst = RB_ROOT(zebra_ns_table_head, &zns->ns_tables);
-
+ RB_FOREACH_SAFE (znst, zebra_ns_table_head, &zns->ns_tables, tmp) {
+ if (znst->ns_id != ns_id)
+ continue;
RB_REMOVE(zebra_ns_table_head, &zns->ns_tables, znst);
zebra_ns_free_table(znst);
}
uint32_t tableid;
afi_t afi;
+ ns_id_t ns_id;
struct route_table *table;
};
#include <jhash.h>
#include <hash.h>
+#include <memory.h>
+#include <hook.h>
#include "zebra/zebra_pbr.h"
#include "zebra/rt.h"
#include "zebra/zapi_msg.h"
+#include "zebra/zebra_memory.h"
/* definitions */
+DEFINE_MTYPE_STATIC(ZEBRA, PBR_IPTABLE_IFNAME, "PBR interface list")
+
+/* definitions */
+static const struct message ipset_type_msg[] = {
+ {IPSET_NET_PORT_NET, "net,port,net"},
+ {IPSET_NET_PORT, "net,port"},
+ {IPSET_NET_NET, "net,net"},
+ {IPSET_NET, "net"},
+ {0}
+};
/* static function declarations */
+DEFINE_HOOK(zebra_pbr_ipset_entry_wrap_script_get_stat, (struct zebra_ns *zns,
+ struct zebra_pbr_ipset_entry *ipset,
+ uint64_t *pkts, uint64_t *bytes),
+ (zns, ipset, pkts, bytes))
+
+DEFINE_HOOK(zebra_pbr_iptable_wrap_script_get_stat, (struct zebra_ns *zns,
+ struct zebra_pbr_iptable *iptable,
+ uint64_t *pkts, uint64_t *bytes),
+ (zns, iptable, pkts, bytes))
+
+DEFINE_HOOK(zebra_pbr_iptable_wrap_script_update, (struct zebra_ns *zns,
+ int cmd,
+ struct zebra_pbr_iptable *iptable),
+ (zns, cmd, iptable));
+
+DEFINE_HOOK(zebra_pbr_ipset_entry_wrap_script_update, (struct zebra_ns *zns,
+ int cmd,
+ struct zebra_pbr_ipset_entry *ipset),
+ (zns, cmd, ipset));
+
+DEFINE_HOOK(zebra_pbr_ipset_wrap_script_update, (struct zebra_ns *zns,
+ int cmd,
+ struct zebra_pbr_ipset *ipset),
+ (zns, cmd, ipset));
/* Private functions */
void zebra_pbr_ipset_free(void *arg)
{
struct zebra_pbr_ipset *ipset;
+ struct zebra_ns *zns;
ipset = (struct zebra_pbr_ipset *)arg;
-
+ if (vrf_is_backend_netns())
+ zns = zebra_ns_lookup(ipset->vrf_id);
+ else
+ zns = zebra_ns_lookup(NS_DEFAULT);
+ hook_call(zebra_pbr_ipset_wrap_script_update,
+ zns, 0, ipset);
XFREE(MTYPE_TMP, ipset);
}
void zebra_pbr_ipset_entry_free(void *arg)
{
struct zebra_pbr_ipset_entry *ipset;
+ struct zebra_ns *zns;
ipset = (struct zebra_pbr_ipset_entry *)arg;
+ if (ipset->backpointer && vrf_is_backend_netns()) {
+ struct zebra_pbr_ipset *ips = ipset->backpointer;
+
+ zns = zebra_ns_lookup((ns_id_t)ips->vrf_id);
+ } else
+ zns = zebra_ns_lookup(NS_DEFAULT);
+ hook_call(zebra_pbr_ipset_entry_wrap_script_update,
+ zns, 0, ipset);
XFREE(MTYPE_TMP, ipset);
}
key = prefix_hash_key(&ipset->src);
key = jhash_1word(ipset->unique, key);
key = jhash_1word(prefix_hash_key(&ipset->dst), key);
+ key = jhash(&ipset->dst_port_min, 2, key);
+ key = jhash(&ipset->dst_port_max, 2, key);
+ key = jhash(&ipset->src_port_min, 2, key);
+ key = jhash(&ipset->src_port_max, 2, key);
+ key = jhash(&ipset->proto, 1, key);
return key;
}
if (!prefix_same(&r1->dst, &r2->dst))
return 0;
+ if (r1->src_port_min != r2->src_port_min)
+ return 0;
+
+ if (r1->src_port_max != r2->src_port_max)
+ return 0;
+
+ if (r1->dst_port_min != r2->dst_port_min)
+ return 0;
+
+ if (r1->dst_port_max != r2->dst_port_max)
+ return 0;
+
+ if (r1->proto != r2->proto)
+ return 0;
return 1;
}
void zebra_pbr_iptable_free(void *arg)
{
struct zebra_pbr_iptable *iptable;
+ struct listnode *node, *nnode;
+ char *name;
+ struct zebra_ns *zns;
iptable = (struct zebra_pbr_iptable *)arg;
-
+ if (vrf_is_backend_netns())
+ zns = zebra_ns_lookup((ns_id_t)iptable->vrf_id);
+ else
+ zns = zebra_ns_lookup(NS_DEFAULT);
+ hook_call(zebra_pbr_iptable_wrap_script_update,
+ zns, 0, iptable);
+
+ for (ALL_LIST_ELEMENTS(iptable->interface_name_list,
+ node, nnode, name)) {
+ XFREE(MTYPE_PBR_IPTABLE_IFNAME, name);
+ list_delete_node(iptable->interface_name_list,
+ node);
+ }
XFREE(MTYPE_TMP, iptable);
}
(void)hash_get(zns->rules_hash, rule, pbr_rule_alloc_intern);
kernel_add_pbr_rule(rule);
-
/*
* Rule Replace semantics, if we have an old, install the
* new rule, look above, and then delete the old
}
}
-void zebra_pbr_client_close_cleanup(int sock)
+/*
+ * Hash-iterate callback: uninstall an ipset owned by a closing client.
+ *
+ * b    - hash backet whose data is a struct zebra_pbr_ipset
+ * data - pointer to the socket fd of the client being closed
+ *
+ * When the ipset's owning socket matches, the wrap-script hook is
+ * called with cmd 0 (uninstall) and the entry is released from the
+ * hash.  NOTE(review): always operates on NS_DEFAULT - confirm this is
+ * correct when the netns VRF backend is in use.
+ */
+static void zebra_pbr_cleanup_ipset(struct hash_backet *b, void *data)
+{
+ struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+ struct zebra_pbr_ipset *ipset = b->data;
+ int *sock = data;
+
+ if (ipset->sock == *sock) {
+ /* cmd 0 = remove from kernel via registered wrap script */
+ hook_call(zebra_pbr_ipset_wrap_script_update,
+ zns, 0, ipset);
+ hash_release(zns->ipset_hash, ipset);
+ }
+}
+
+/*
+ * Hash-iterate callback: uninstall an ipset entry owned by a closing
+ * client (matched on socket fd), mirroring zebra_pbr_cleanup_ipset().
+ */
+static void zebra_pbr_cleanup_ipset_entry(struct hash_backet *b, void *data)
{
struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+ struct zebra_pbr_ipset_entry *ipset = b->data;
+ int *sock = data;
+ if (ipset->sock == *sock) {
+ /* cmd 0 = uninstall */
+ hook_call(zebra_pbr_ipset_entry_wrap_script_update,
+ zns, 0, ipset);
+ hash_release(zns->ipset_entry_hash, ipset);
+ }
+}
+
+/*
+ * Hash-iterate callback: uninstall an iptable rule owned by a closing
+ * client (matched on socket fd) and release it from the hash.
+ */
+static void zebra_pbr_cleanup_iptable(struct hash_backet *b, void *data)
+{
+ struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+ struct zebra_pbr_iptable *iptable = b->data;
+ int *sock = data;
+
+ if (iptable->sock == *sock) {
+ /* cmd 0 = uninstall via the registered wrap script */
+ hook_call(zebra_pbr_iptable_wrap_script_update,
+ zns, 0, iptable);
+ hash_release(zns->iptable_hash, iptable);
+ }
+}
+
+/*
+ * zapi_client_close hook: tear down all PBR state (rules, iptables,
+ * ipset entries, ipsets) installed by the disconnecting client.
+ *
+ * Ordering matters: iptables are removed before the ipset entries and
+ * ipsets they reference.  Returns 1 when cleanup ran, 0 when the
+ * client had no valid socket.
+ */
+static int zebra_pbr_client_close_cleanup(struct zserv *client)
+{
+ int sock = client->sock;
+ struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+
+ if (!sock)
+ return 0;
hash_iterate(zns->rules_hash, zebra_pbr_cleanup_rules, &sock);
+ hash_iterate(zns->iptable_hash,
+ zebra_pbr_cleanup_iptable, &sock);
+ hash_iterate(zns->ipset_entry_hash,
+ zebra_pbr_cleanup_ipset_entry, &sock);
+ hash_iterate(zns->ipset_hash,
+ zebra_pbr_cleanup_ipset, &sock);
+ return 1;
+}
+
+/* Register the PBR cleanup handler to run when a zapi client closes. */
+void zebra_pbr_init(void)
+{
+ hook_register(zapi_client_close, zebra_pbr_client_close_cleanup);
}
static void *pbr_ipset_alloc_intern(void *arg)
void zebra_pbr_create_ipset(struct zebra_ns *zns,
struct zebra_pbr_ipset *ipset)
{
+ int ret;
+
(void)hash_get(zns->ipset_hash, ipset, pbr_ipset_alloc_intern);
- /* TODO:
- * - Netlink call
- */
+ ret = hook_call(zebra_pbr_ipset_wrap_script_update,
+ zns, 1, ipset);
+ kernel_pbr_ipset_add_del_status(ipset,
+ ret ? SOUTHBOUND_INSTALL_SUCCESS
+ : SOUTHBOUND_INSTALL_FAILURE);
}
void zebra_pbr_destroy_ipset(struct zebra_ns *zns,
struct zebra_pbr_ipset *lookup;
lookup = hash_lookup(zns->ipset_hash, ipset);
- /* TODO:
- * - Netlink destroy from kernel
- * - ?? destroy ipset entries before
- */
- if (lookup)
+ hook_call(zebra_pbr_ipset_wrap_script_update,
+ zns, 0, ipset);
+ if (lookup) {
+ hash_release(zns->ipset_hash, lookup);
XFREE(MTYPE_TMP, lookup);
- else
+ } else
zlog_warn("%s: IPSet Entry being deleted we know nothing about",
__PRETTY_FUNCTION__);
}
char ipset_name[ZEBRA_IPSET_NAME_SIZE];
};
+static const char *zebra_pbr_ipset_type2str(uint32_t type)
+{
+ return lookup_msg(ipset_type_msg, type,
+ "Unrecognized IPset Type");
+}
+
static int zebra_pbr_ipset_pername_walkcb(struct hash_backet *backet, void *arg)
{
struct pbr_ipset_name_lookup *pinl =
void zebra_pbr_add_ipset_entry(struct zebra_ns *zns,
struct zebra_pbr_ipset_entry *ipset)
{
+ int ret;
+
(void)hash_get(zns->ipset_entry_hash, ipset,
pbr_ipset_entry_alloc_intern);
- /* TODO:
- * - attach to ipset list
- * - Netlink add to kernel
- */
+ ret = hook_call(zebra_pbr_ipset_entry_wrap_script_update,
+ zns, 1, ipset);
+ kernel_pbr_ipset_entry_add_del_status(ipset,
+ ret ? SOUTHBOUND_INSTALL_SUCCESS
+ : SOUTHBOUND_INSTALL_FAILURE);
}
void zebra_pbr_del_ipset_entry(struct zebra_ns *zns,
struct zebra_pbr_ipset_entry *lookup;
lookup = hash_lookup(zns->ipset_entry_hash, ipset);
- /* TODO:
- * - Netlink destroy
- * - detach from ipset list
- * - ?? if no more entres, delete ipset
- */
- if (lookup)
+ hook_call(zebra_pbr_ipset_entry_wrap_script_update,
+ zns, 0, ipset);
+ if (lookup) {
+ hash_release(zns->ipset_entry_hash, lookup);
XFREE(MTYPE_TMP, lookup);
- else
+ } else
zlog_warn("%s: IPSet being deleted we know nothing about",
__PRETTY_FUNCTION__);
}
void zebra_pbr_add_iptable(struct zebra_ns *zns,
struct zebra_pbr_iptable *iptable)
{
+ int ret;
+
(void)hash_get(zns->iptable_hash, iptable,
pbr_iptable_alloc_intern);
- /* TODO call netlink layer */
+ ret = hook_call(zebra_pbr_iptable_wrap_script_update, zns, 1, iptable);
+ kernel_pbr_iptable_add_del_status(iptable,
+ ret ? SOUTHBOUND_INSTALL_SUCCESS
+ : SOUTHBOUND_INSTALL_FAILURE);
}
void zebra_pbr_del_iptable(struct zebra_ns *zns,
struct zebra_pbr_iptable *iptable)
{
- struct zebra_pbr_ipset_entry *lookup;
+ struct zebra_pbr_iptable *lookup;
lookup = hash_lookup(zns->iptable_hash, iptable);
- /* TODO:
- * - call netlink layer
- * - detach from iptable list
- */
- if (lookup)
+ hook_call(zebra_pbr_iptable_wrap_script_update, zns, 0, iptable);
+ if (lookup) {
+ struct listnode *node, *nnode;
+ char *name;
+
+ hash_release(zns->iptable_hash, lookup);
+ for (ALL_LIST_ELEMENTS(iptable->interface_name_list,
+ node, nnode, name)) {
+ XFREE(MTYPE_PBR_IPTABLE_IFNAME, name);
+ list_delete_node(iptable->interface_name_list,
+ node);
+ }
XFREE(MTYPE_TMP, lookup);
- else
+ } else
zlog_warn("%s: IPTable being deleted we know nothing about",
__PRETTY_FUNCTION__);
}
zsend_rule_notify_owner(rule, ZAPI_RULE_REMOVED);
break;
case SOUTHBOUND_DELETE_FAILURE:
- zsend_rule_notify_owner(rule, ZAPI_RULE_REMOVED);
+ zsend_rule_notify_owner(rule, ZAPI_RULE_FAIL_REMOVE);
break;
}
}
zsend_ipset_notify_owner(ipset, ZAPI_IPSET_FAIL_INSTALL);
break;
case SOUTHBOUND_DELETE_SUCCESS:
+ zsend_ipset_notify_owner(ipset, ZAPI_IPSET_REMOVED);
+ break;
case SOUTHBOUND_DELETE_FAILURE:
- /* TODO : handling of delete event */
+ zsend_ipset_notify_owner(ipset, ZAPI_IPSET_FAIL_REMOVE);
break;
}
}
ZAPI_IPSET_ENTRY_FAIL_INSTALL);
break;
case SOUTHBOUND_DELETE_SUCCESS:
+ zsend_ipset_entry_notify_owner(ipset,
+ ZAPI_IPSET_ENTRY_REMOVED);
+ break;
case SOUTHBOUND_DELETE_FAILURE:
- /* TODO : handling of delete event */
+ zsend_ipset_entry_notify_owner(ipset,
+ ZAPI_IPSET_ENTRY_FAIL_REMOVE);
break;
}
}
zsend_iptable_notify_owner(iptable, ZAPI_IPTABLE_FAIL_INSTALL);
break;
case SOUTHBOUND_DELETE_SUCCESS:
+ zsend_iptable_notify_owner(iptable,
+ ZAPI_IPTABLE_REMOVED);
+ break;
case SOUTHBOUND_DELETE_FAILURE:
- /* TODO : handling of delete event */
+ zsend_iptable_notify_owner(iptable,
+ ZAPI_IPTABLE_FAIL_REMOVE);
break;
}
}
{
return 0;
}
+
+/* Walk context: show only entries belonging to one ipset (zpi) */
+struct zebra_pbr_ipset_entry_unique_display {
+ struct zebra_pbr_ipset *zpi;
+ struct vty *vty;
+ struct zebra_ns *zns;
+};
+
+/* Generic (zns, vty) pair passed to hash-walk display callbacks */
+struct zebra_pbr_env_display {
+ struct zebra_ns *zns;
+ struct vty *vty;
+};
+
+/*
+ * Like prefix2str(), but renders an IPv4 host prefix (/32) as a bare
+ * address without the "/32" suffix for cleaner ipset display.
+ */
+static const char *zebra_pbr_prefix2str(union prefixconstptr pu,
+ char *str, int size)
+{
+ const struct prefix *p = pu.p;
+ char buf[PREFIX2STR_BUFFER];
+
+ if (p->family == AF_INET && p->prefixlen == IPV4_MAX_PREFIXLEN) {
+ snprintf(str, size, "%s", inet_ntop(p->family, &p->u.prefix,
+ buf, PREFIX2STR_BUFFER));
+ return str;
+ }
+ return prefix2str(pu, str, size);
+}
+
+/*
+ * Print a port (or port range when port_max != 0) to the vty.  When
+ * PBR_FILTER_PROTO is not set in filter_bm the match applies to both
+ * udp and tcp; otherwise the numeric protocol is shown.
+ */
+static void zebra_pbr_display_port(struct vty *vty, uint32_t filter_bm,
+ uint16_t port_min, uint16_t port_max,
+ uint8_t proto)
+{
+ if (!(filter_bm & PBR_FILTER_PROTO)) {
+ if (port_max)
+ vty_out(vty, ":udp/tcp:%d-%d",
+ port_min, port_max);
+ else
+ vty_out(vty, ":udp/tcp:%d",
+ port_min);
+ } else {
+ if (port_max)
+ vty_out(vty, ":proto %d:%d-%d",
+ proto, port_min, port_max);
+ else
+ vty_out(vty, ":proto %d:%d",
+ proto, port_min);
+ }
+}
+
+/*
+ * Hash-walk callback: display one ipset entry when it belongs to the
+ * ipset selected in the walk context (arg).  Output format depends on
+ * the ipset type: net,net / net,port,net show both src and dst
+ * unconditionally; net / net,port show whichever of src/dst is set in
+ * the filter bitmap.  Per-entry packet/byte statistics are appended
+ * when the get_stat hook reports them.
+ */
+static int zebra_pbr_show_ipset_entry_walkcb(struct hash_backet *backet,
+ void *arg)
+{
+ struct zebra_pbr_ipset_entry_unique_display *unique =
+ (struct zebra_pbr_ipset_entry_unique_display *)arg;
+ struct zebra_pbr_ipset *zpi = unique->zpi;
+ struct vty *vty = unique->vty;
+ struct zebra_pbr_ipset_entry *zpie =
+ (struct zebra_pbr_ipset_entry *)backet->data;
+ uint64_t pkts = 0, bytes = 0;
+ struct zebra_ns *zns = unique->zns;
+ int ret = 0;
+
+ /* only show entries of the ipset we were asked about */
+ if (zpie->backpointer != zpi)
+ return HASHWALK_CONTINUE;
+
+ if ((zpi->type == IPSET_NET_NET) ||
+ (zpi->type == IPSET_NET_PORT_NET)) {
+ char buf[PREFIX_STRLEN];
+
+ zebra_pbr_prefix2str(&(zpie->src), buf, sizeof(buf));
+ vty_out(vty, "\tfrom %s", buf);
+ if (zpie->filter_bm & PBR_FILTER_SRC_PORT)
+ zebra_pbr_display_port(vty, zpie->filter_bm,
+ zpie->src_port_min,
+ zpie->src_port_max,
+ zpie->proto);
+ vty_out(vty, " to ");
+ zebra_pbr_prefix2str(&(zpie->dst), buf, sizeof(buf));
+ vty_out(vty, "%s", buf);
+ if (zpie->filter_bm & PBR_FILTER_DST_PORT)
+ zebra_pbr_display_port(vty, zpie->filter_bm,
+ zpie->dst_port_min,
+ zpie->dst_port_max,
+ zpie->proto);
+ } else if ((zpi->type == IPSET_NET) ||
+ (zpi->type == IPSET_NET_PORT)) {
+ char buf[PREFIX_STRLEN];
+
+ if (zpie->filter_bm & PBR_FILTER_SRC_IP) {
+ zebra_pbr_prefix2str(&(zpie->src), buf, sizeof(buf));
+ vty_out(vty, "\tfrom %s", buf);
+ }
+ if (zpie->filter_bm & PBR_FILTER_SRC_PORT)
+ zebra_pbr_display_port(vty, zpie->filter_bm,
+ zpie->src_port_min,
+ zpie->src_port_max,
+ zpie->proto);
+ if (zpie->filter_bm & PBR_FILTER_DST_IP) {
+ zebra_pbr_prefix2str(&(zpie->dst), buf, sizeof(buf));
+ vty_out(vty, "\tto %s", buf);
+ }
+ if (zpie->filter_bm & PBR_FILTER_DST_PORT)
+ zebra_pbr_display_port(vty, zpie->filter_bm,
+ zpie->dst_port_min,
+ zpie->dst_port_max,
+ zpie->proto);
+ }
+ vty_out(vty, " (%u)\n", zpie->unique);
+
+ ret = hook_call(zebra_pbr_ipset_entry_wrap_script_get_stat,
+ zns, zpie, &pkts, &bytes);
+ if (ret && pkts > 0)
+ vty_out(vty, "\t pkts %" PRIu64 ", bytes %" PRIu64"\n",
+ pkts, bytes);
+ return HASHWALK_CONTINUE;
+}
+
+/*
+ * Hash-walk callback over all ipsets: print the ipset header (name and
+ * type), then walk the entry hash to print entries belonging to it.
+ */
+static int zebra_pbr_show_ipset_walkcb(struct hash_backet *backet, void *arg)
+{
+ struct zebra_pbr_env_display *uniqueipset =
+ (struct zebra_pbr_env_display *)arg;
+ struct zebra_pbr_ipset *zpi = (struct zebra_pbr_ipset *)backet->data;
+ struct zebra_pbr_ipset_entry_unique_display unique;
+ struct vty *vty = uniqueipset->vty;
+ struct zebra_ns *zns = uniqueipset->zns;
+
+ vty_out(vty, "IPset %s type %s\n", zpi->ipset_name,
+ zebra_pbr_ipset_type2str(zpi->type));
+ unique.vty = vty;
+ unique.zpi = zpi;
+ unique.zns = zns;
+ hash_walk(zns->ipset_entry_hash, zebra_pbr_show_ipset_entry_walkcb,
+ &unique);
+ vty_out(vty, "\n");
+ return HASHWALK_CONTINUE;
+}
+
+/*
+ * Show configured ipsets: a single ipset (and its entries) when
+ * ipsetname is given, otherwise all ipsets.
+ */
+/* vty display entry point; ipsetname may be NULL to show all ipsets */
+void zebra_pbr_show_ipset_list(struct vty *vty, char *ipsetname)
+{
+ struct zebra_pbr_ipset *zpi;
+ struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+ struct zebra_pbr_ipset_entry_unique_display unique;
+ struct zebra_pbr_env_display uniqueipset;
+
+ if (ipsetname) {
+ zpi = zebra_pbr_lookup_ipset_pername(zns, ipsetname);
+ if (!zpi) {
+ vty_out(vty, "No IPset %s found\n", ipsetname);
+ return;
+ }
+ vty_out(vty, "IPset %s type %s\n", ipsetname,
+ zebra_pbr_ipset_type2str(zpi->type));
+
+ /* show only the entries attached to this ipset */
+ unique.vty = vty;
+ unique.zpi = zpi;
+ unique.zns = zns;
+ hash_walk(zns->ipset_entry_hash,
+ zebra_pbr_show_ipset_entry_walkcb,
+ &unique);
+ return;
+ }
+ uniqueipset.zns = zns;
+ uniqueipset.vty = vty;
+ hash_walk(zns->ipset_hash, zebra_pbr_show_ipset_walkcb,
+ &uniqueipset);
+}
+
+/* Walk context: find the rule matching a given fwmark (result in ptr) */
+struct pbr_rule_fwmark_lookup {
+ struct zebra_pbr_rule *ptr;
+ uint32_t fwmark;
+};
+
+/*
+ * Hash-walk callback: abort the walk as soon as a rule whose filter
+ * fwmark equals the lookup fwmark is found, storing it in iprule->ptr.
+ */
+static int zebra_pbr_rule_lookup_fwmark_walkcb(struct hash_backet *backet,
+ void *arg)
+{
+ struct pbr_rule_fwmark_lookup *iprule =
+ (struct pbr_rule_fwmark_lookup *)arg;
+ struct zebra_pbr_rule *zpr = (struct zebra_pbr_rule *)backet->data;
+
+ if (iprule->fwmark == zpr->rule.filter.fwmark) {
+ iprule->ptr = zpr;
+ return HASHWALK_ABORT;
+ }
+ return HASHWALK_CONTINUE;
+}
+
+/*
+ * Hash-walk callback: display one iptable entry (name, action,
+ * unique id), its packet/byte counters when the get_stat hook reports
+ * them, and - for redirect entries - the table/fwmark of the PBR rule
+ * matching its fwmark, found by walking the rules hash.
+ */
+static int zebra_pbr_show_iptable_walkcb(struct hash_backet *backet, void *arg)
+{
+ struct zebra_pbr_iptable *iptable =
+ (struct zebra_pbr_iptable *)backet->data;
+ struct zebra_pbr_env_display *env = (struct zebra_pbr_env_display *)arg;
+ struct vty *vty = env->vty;
+ struct zebra_ns *zns = env->zns;
+ int ret;
+ uint64_t pkts = 0, bytes = 0;
+
+ vty_out(vty, "IPtable %s action %s (%u)\n", iptable->ipset_name,
+ iptable->action == ZEBRA_IPTABLES_DROP ? "drop" : "redirect",
+ iptable->unique);
+
+ ret = hook_call(zebra_pbr_iptable_wrap_script_get_stat,
+ zns, iptable, &pkts, &bytes);
+ if (ret && pkts > 0)
+ vty_out(vty, "\t pkts %" PRIu64 ", bytes %" PRIu64"\n",
+ pkts, bytes);
+ if (iptable->action != ZEBRA_IPTABLES_DROP) {
+ struct pbr_rule_fwmark_lookup prfl;
+
+ /* map this iptable's fwmark back to the installed rule */
+ prfl.fwmark = iptable->fwmark;
+ prfl.ptr = NULL;
+ hash_walk(zns->rules_hash,
+ &zebra_pbr_rule_lookup_fwmark_walkcb, &prfl);
+ if (prfl.ptr) {
+ struct zebra_pbr_rule *zpr = prfl.ptr;
+
+ vty_out(vty, "\t table %u, fwmark %u\n",
+ zpr->rule.action.table,
+ prfl.fwmark);
+ }
+ }
+ return HASHWALK_CONTINUE;
+}
+
+/* vty display entry point: walk and print all configured iptables */
+void zebra_pbr_show_iptable(struct vty *vty)
+{
+ struct zebra_ns *zns = zebra_ns_lookup(NS_DEFAULT);
+ struct zebra_pbr_env_display env;
+
+ env.vty = vty;
+ env.zns = zns;
+
+ hash_walk(zns->iptable_hash, zebra_pbr_show_iptable_walkcb,
+ &env);
+}
+
+/*
+ * Decode zpi->nb_interface ifindexes from the zapi stream and append
+ * the corresponding interface names (duplicated strings, freed by the
+ * iptable free/del paths) to zpi->interface_name_list.  Indexes that
+ * do not resolve to an interface in zpi->vrf_id are silently skipped;
+ * a short read jumps to stream_failure and returns early.
+ */
+void zebra_pbr_iptable_update_interfacelist(struct stream *s,
+ struct zebra_pbr_iptable *zpi)
+{
+ uint32_t i = 0, index;
+ struct interface *ifp;
+ char *name;
+
+ for (i = 0; i < zpi->nb_interface; i++) {
+ STREAM_GETL(s, index);
+ ifp = if_lookup_by_index(index, zpi->vrf_id);
+ if (!ifp)
+ continue;
+ name = XSTRDUP(MTYPE_PBR_IPTABLE_IFNAME, ifp->name);
+ listnode_add(zpi->interface_name_list, name);
+ }
+stream_failure:
+ return;
+}
(r->rule.filter.filter_bm & PBR_FILTER_SRC_PORT)
#define IS_RULE_FILTERING_ON_DST_PORT(r) \
(r->rule.filter.filter_bm & PBR_FILTER_DST_PORT)
+#define IS_RULE_FILTERING_ON_FWMARK(r) \
+ (r->rule.filter.filter_bm & PBR_FILTER_FWMARK)
/*
* An IPSet Entry Filter
*/
int sock;
+ vrf_id_t vrf_id;
+
uint32_t unique;
/* type is encoded as uint32_t
struct prefix src;
struct prefix dst;
+ uint16_t src_port_min;
+ uint16_t src_port_max;
+ uint16_t dst_port_min;
+ uint16_t dst_port_max;
+
+ uint8_t proto;
+
uint32_t filter_bm;
struct zebra_pbr_ipset *backpointer;
*/
int sock;
+ vrf_id_t vrf_id;
+
uint32_t unique;
/* include ipset type
uint32_t action;
+ uint32_t nb_interface;
+
+ struct list *interface_name_list;
+
char ipset_name[ZEBRA_IPSET_NAME_SIZE];
};
*/
extern int kernel_pbr_rule_del(struct zebra_pbr_rule *rule);
-extern void zebra_pbr_client_close_cleanup(int sock);
-
extern void zebra_pbr_rules_free(void *arg);
extern uint32_t zebra_pbr_rules_hash_key(void *arg);
extern int zebra_pbr_rules_hash_equal(const void *arg1, const void *arg2);
extern uint32_t zebra_pbr_iptable_hash_key(void *arg);
extern int zebra_pbr_iptable_hash_equal(const void *arg1, const void *arg2);
+extern void zebra_pbr_init(void);
+extern void zebra_pbr_show_ipset_list(struct vty *vty, char *ipsetname);
+extern void zebra_pbr_show_iptable(struct vty *vty);
+extern void zebra_pbr_iptable_update_interfacelist(struct stream *s,
+ struct zebra_pbr_iptable *zpi);
+
+DECLARE_HOOK(zebra_pbr_ipset_entry_wrap_script_get_stat, (struct zebra_ns *zns,
+ struct zebra_pbr_ipset_entry *ipset,
+ uint64_t *pkts, uint64_t *bytes),
+ (zns, ipset, pkts, bytes))
+DECLARE_HOOK(zebra_pbr_iptable_wrap_script_get_stat, (struct zebra_ns *zns,
+ struct zebra_pbr_iptable *iptable,
+ uint64_t *pkts, uint64_t *bytes),
+ (zns, iptable, pkts, bytes))
+DECLARE_HOOK(zebra_pbr_iptable_wrap_script_update, (struct zebra_ns *zns,
+ int cmd,
+ struct zebra_pbr_iptable *iptable),
+ (zns, cmd, iptable));
+
+DECLARE_HOOK(zebra_pbr_ipset_entry_wrap_script_update, (struct zebra_ns *zns,
+ int cmd,
+ struct zebra_pbr_ipset_entry *ipset),
+ (zns, cmd, ipset));
+DECLARE_HOOK(zebra_pbr_ipset_wrap_script_update, (struct zebra_ns *zns,
+ int cmd,
+ struct zebra_pbr_ipset *ipset),
+ (zns, cmd, ipset));
+
#endif /* _ZEBRA_PBR_H */
struct vrf *nh_vrf;
/* Lookup table. */
- table = zebra_vrf_table(afi, safi, si->vrf_id);
+ table = zebra_vrf_table_with_table_id(afi, safi,
+ si->vrf_id,
+ si->table_id);
if (!table)
return;
re->metric = 0;
re->mtu = 0;
re->vrf_id = si->vrf_id;
- re->table =
- (si->vrf_id != VRF_DEFAULT)
+	if (!vrf_is_backend_netns()) {
+		re->table =
+			(si->vrf_id != VRF_DEFAULT)
			? (zebra_vrf_lookup_by_id(si->vrf_id))->table_id
			: zebrad.rtm_table_default;
+	} else {
+		struct zebra_vrf *zvrf = zebra_vrf_lookup_by_id(si->vrf_id);
+
+		/* Use the VRF's table only when it is a custom one, i.e.
+		 * neither RT_TABLE_MAIN nor the default; must be '&&'
+		 * (the '||' form is always true, making the else branch
+		 * dead code — cf. the mirrored check in
+		 * zebra_vrf_table_with_table_id()).
+		 */
+		if (zvrf->table_id != RT_TABLE_MAIN &&
+		    zvrf->table_id != zebrad.rtm_table_default)
+			re->table = zvrf->table_id;
+		else
+			re->table = zebrad.rtm_table_default;
+	}
re->nexthop_num = 0;
re->tag = si->tag;
struct prefix nh_p;
/* Lookup table. */
- table = zebra_vrf_table(afi, safi, si->vrf_id);
+ table = zebra_vrf_table_with_table_id(afi, safi,
+ si->vrf_id,
+ si->table_id);
if (!table)
return;
const char *ifname, enum static_blackhole_type bh_type,
route_tag_t tag, uint8_t distance, struct zebra_vrf *zvrf,
struct zebra_vrf *nh_zvrf,
- struct static_nh_label *snh_label)
+ struct static_nh_label *snh_label,
+ uint32_t table_id)
{
struct route_node *rn;
struct static_route *si;
if (update)
static_delete_route(afi, safi, type, p, src_p, gate, ifname,
update->tag, update->distance, zvrf,
- &update->snh_label);
+ &update->snh_label, table_id);
/* Make new static route structure. */
si = XCALLOC(MTYPE_STATIC_ROUTE, sizeof(struct static_route));
si->vrf_id = zvrf_id(zvrf);
si->nh_vrf_id = zvrf_id(nh_zvrf);
strcpy(si->nh_vrfname, nh_zvrf->vrf->name);
+ si->table_id = table_id;
if (ifname)
strlcpy(si->ifname, ifname, sizeof(si->ifname));
struct prefix_ipv6 *src_p, union g_addr *gate,
const char *ifname, route_tag_t tag, uint8_t distance,
struct zebra_vrf *zvrf,
- struct static_nh_label *snh_label)
+ struct static_nh_label *snh_label,
+ uint32_t table_id)
{
struct route_node *rn;
struct static_route *si;
&& IPV6_ADDR_SAME(gate, &si->addr.ipv6))))
&& (!strcmp(ifname ? ifname : "", si->ifname))
&& (!tag || (tag == si->tag))
+ && (table_id == si->table_id)
&& (!snh_label->num_labels
|| !memcmp(&si->snh_label, snh_label,
sizeof(struct static_nh_label))))
/* Label information */
struct static_nh_label snh_label;
+
+ /* Table Information */
+ uint32_t table_id;
};
extern void static_install_route(afi_t afi, safi_t safi, struct prefix *p,
enum static_blackhole_type bh_type, route_tag_t tag,
uint8_t distance, struct zebra_vrf *zvrf,
struct zebra_vrf *nh_zvrf,
- struct static_nh_label *snh_label);
+ struct static_nh_label *snh_label,
+ uint32_t table_id);
extern int static_delete_route(afi_t, safi_t safi, uint8_t type,
struct prefix *p, struct prefix_ipv6 *src_p,
union g_addr *gate, const char *ifname,
route_tag_t tag, uint8_t distance,
struct zebra_vrf *zvrf,
- struct static_nh_label *snh_label);
+ struct static_nh_label *snh_label,
+ uint32_t table_id);
extern void static_ifindex_update(struct interface *ifp, bool up);
}
/* Lookup the routing table in a VRF based on both VRF-Id and table-id.
- * NOTE: Table-id is relevant only in the Default VRF.
+ * NOTE: Table-id is relevant on two modes:
+ * - case VRF backend is default : on default VRF only
+ * - case VRF backend is netns : on all VRFs
*/
struct route_table *zebra_vrf_table_with_table_id(afi_t afi, safi_t safi,
vrf_id_t vrf_id,
else
table = zebra_vrf_other_route_table(afi, table_id,
vrf_id);
+ } else if (vrf_is_backend_netns()) {
+ if (table_id == RT_TABLE_MAIN
+ || table_id == zebrad.rtm_table_default)
+ table = zebra_vrf_table(afi, safi, vrf_id);
+ else
+ table = zebra_vrf_other_route_table(afi, table_id,
+ vrf_id);
} else
table = zebra_vrf_table(afi, safi, vrf_id);
zebra_vxlan_init_tables(zvrf);
zebra_mpls_init_tables(zvrf);
zebra_pw_init(zvrf);
-
+ zvrf->table_id = RT_TABLE_MAIN;
+ /* by default table ID is default one */
return zvrf;
}
if (afi >= AFI_MAX)
return NULL;
- if ((vrf_id == VRF_DEFAULT) && (table_id != RT_TABLE_MAIN)
+ if ((table_id != RT_TABLE_MAIN)
&& (table_id != zebrad.rtm_table_default)) {
- return zebra_ns_get_table(zns, zvrf, table_id, afi);
+ if (zvrf->table_id == RT_TABLE_MAIN ||
+ zvrf->table_id == zebrad.rtm_table_default) {
+ /* this VRF use default table
+ * so in all cases, it does not use specific table
+ * so it is possible to configure tables in this VRF
+ */
+ return zebra_ns_get_table(zns, zvrf, table_id, afi);
+ }
}
return zvrf->table[afi][SAFI_UNICAST];
#include "zebra/router-id.h"
#include "zebra/ipforward.h"
#include "zebra/zebra_vxlan_private.h"
+#include "zebra/zebra_pbr.h"
extern int allow_delete;
char *tag_str;
char *distance_str;
char *label_str;
+ char *table_str;
/* processed & masked destination, used for config display */
struct prefix dest;
if (ret)
return ret;
+ ret = static_list_compare_helper(shr1->table_str,
+ shr2->table_str);
+ if (ret)
+ return ret;
+
return static_list_compare_helper(shr1->label_str, shr2->label_str);
}
safi_t safi, const char *negate, struct prefix *dest,
const char *dest_str, const char *mask_str, const char *src_str,
const char *gate_str, const char *ifname, const char *flag_str,
- const char *tag_str, const char *distance_str, const char *label_str)
+ const char *tag_str, const char *distance_str, const char *label_str,
+ const char *table_str)
{
struct static_hold_route *shr, *lookup;
struct listnode *node;
shr->distance_str = XSTRDUP(MTYPE_STATIC_ROUTE, distance_str);
if (label_str)
shr->label_str = XSTRDUP(MTYPE_STATIC_ROUTE, label_str);
+ if (table_str)
+ shr->table_str = XSTRDUP(MTYPE_STATIC_ROUTE, table_str);
for (ALL_LIST_ELEMENTS_RO(static_list, node, lookup)) {
if (static_list_compare(shr, lookup) == 0)
afi_t afi, safi_t safi, const char *negate, const char *dest_str,
const char *mask_str, const char *src_str, const char *gate_str,
const char *ifname, const char *flag_str, const char *tag_str,
- const char *distance_str, const char *label_str)
+ const char *distance_str, const char *label_str, const char *table_str)
{
int ret;
uint8_t distance;
route_tag_t tag = 0;
uint8_t type;
struct static_nh_label snh_label;
+ uint32_t table_id = 0;
ret = str2prefix(dest_str, &p);
if (ret <= 0) {
return zebra_static_route_holdem(
zvrf, nh_zvrf, afi, safi, negate, &p, dest_str,
mask_str, src_str, gate_str, ifname, flag_str, tag_str,
- distance_str, label_str);
+ distance_str, label_str, table_str);
+ }
+ if (table_str) {
+ /* table configured. check consistent with vrf config
+ */
+ if (zvrf->table_id != RT_TABLE_MAIN &&
+ zvrf->table_id != zebrad.rtm_table_default) {
+ if (vty)
+ vty_out(vty,
+ "%% Table %s overlaps vrf table %u\n",
+ table_str, zvrf->table_id);
+ else
+ zlog_warn(
+ "%s: Table %s overlaps vrf table %u",
+ __PRETTY_FUNCTION__,
+ table_str, zvrf->table_id);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
}
/* Administrative distance. */
return CMD_WARNING_CONFIG_FAILED;
}
}
+ /* TableID */
+ if (table_str)
+ table_id = atol(table_str);
/* Null0 static route. */
if (ifname != NULL) {
if (!negate) {
static_add_route(afi, safi, type, &p, src_p, gatep, ifname,
bh_type, tag, distance, zvrf, nh_zvrf,
- &snh_label);
+ &snh_label, table_id);
/* Mark as having FRR configuration */
vrf_set_user_cfged(zvrf->vrf);
} else {
static_delete_route(afi, safi, type, &p, src_p, gatep, ifname,
- tag, distance, zvrf, &snh_label);
+ tag, distance, zvrf, &snh_label, table_id);
/* If no other FRR config for this VRF, mark accordingly. */
if (!zebra_vrf_has_config(zvrf))
vrf_reset_user_cfged(zvrf->vrf);
const char *gate_str, const char *ifname,
const char *flag_str, const char *tag_str,
const char *distance_str, const char *vrf_name,
- const char *label_str)
+ const char *label_str, const char *table_str)
{
struct zebra_vrf *zvrf;
}
return zebra_static_route_leak(
vty, zvrf, zvrf, afi, safi, negate, dest_str, mask_str, src_str,
- gate_str, ifname, flag_str, tag_str, distance_str, label_str);
+ gate_str, ifname, flag_str, tag_str, distance_str, label_str,
+ table_str);
}
void static_config_install_delayed_routes(struct zebra_vrf *zvrf)
NULL, ozvrf, nh_zvrf, shr->afi, shr->safi, NULL,
shr->dest_str, shr->mask_str, shr->src_str,
shr->gate_str, shr->ifname, shr->flag_str, shr->tag_str,
- shr->distance_str, shr->label_str);
+ shr->distance_str, shr->label_str, shr->table_str);
if (installed != CMD_SUCCESS)
zlog_debug(
{
return zebra_static_route(vty, AFI_IP, SAFI_MULTICAST, no, prefix_str,
NULL, NULL, gate_str, ifname, NULL, NULL,
- distance_str, NULL, NULL);
+ distance_str, NULL, NULL, NULL);
}
DEFUN (ip_multicast_mode,
|(1-255)$distance \
|vrf NAME \
|label WORD \
+ |table (1-4294967295) \
}]",
NO_STR IP_STR
"Establish static routes\n"
"Tag value\n"
"Distance value for this route\n"
VRF_CMD_HELP_STR
- MPLS_LABEL_HELPSTR)
+ MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n")
{
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
return zebra_static_route(vty, AFI_IP, SAFI_UNICAST, no, prefix,
mask_str, NULL, NULL, NULL, flag, tag_str,
- distance_str, vrf, label);
+ distance_str, vrf, label, table_str);
}
DEFPY(ip_route_blackhole_vrf,
tag (1-4294967295) \
|(1-255)$distance \
|label WORD \
+ |table (1-4294967295) \
}]",
NO_STR IP_STR
"Establish static routes\n"
"Set tag for this route\n"
"Tag value\n"
"Distance value for this route\n"
- MPLS_LABEL_HELPSTR)
+ MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n")
{
VTY_DECLVAR_CONTEXT(vrf, vrf);
struct zebra_vrf *zvrf = vrf->info;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
/*
* Coverity is complaining that prefix could
* be dereferenced, but we know that prefix will
assert(prefix);
return zebra_static_route_leak(vty, zvrf, zvrf, AFI_IP, SAFI_UNICAST,
no, prefix, mask_str, NULL, NULL, NULL,
- flag, tag_str, distance_str, label);
+ flag, tag_str, distance_str, label, table_str);
}
DEFPY(ip_route_address_interface,
|(1-255)$distance \
|vrf NAME \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR IP_STR
"Distance value for this route\n"
VRF_CMD_HELP_STR
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
struct zebra_vrf *zvrf;
return CMD_WARNING_CONFIG_FAILED;
}
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (nexthop_vrf)
nh_zvrf = zebra_vty_get_unknown_vrf(vty, nexthop_vrf);
else
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP, SAFI_UNICAST, no, prefix, mask_str,
- NULL, gate_str, ifname, flag, tag_str, distance_str, label);
+ NULL, gate_str, ifname, flag, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ip_route_address_interface_vrf,
tag (1-4294967295) \
|(1-255)$distance \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR IP_STR
"Tag value\n"
"Distance value for this route\n"
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
VTY_DECLVAR_CONTEXT(vrf, vrf);
struct zebra_vrf *zvrf = vrf->info;
struct zebra_vrf *nh_zvrf;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (ifname && !strncasecmp(ifname, "Null0", 5)) {
flag = "Null0";
ifname = NULL;
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP, SAFI_UNICAST, no, prefix, mask_str,
- NULL, gate_str, ifname, flag, tag_str, distance_str, label);
+ NULL, gate_str, ifname, flag, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ip_route,
|(1-255)$distance \
|vrf NAME \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR IP_STR
"Distance value for this route\n"
VRF_CMD_HELP_STR
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
struct zebra_vrf *zvrf;
struct zebra_vrf *nh_zvrf;
const char *flag = NULL;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (ifname && !strncasecmp(ifname, "Null0", 5)) {
flag = "Null0";
ifname = NULL;
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP, SAFI_UNICAST, no, prefix, mask_str,
- NULL, gate_str, ifname, flag, tag_str, distance_str, label);
+ NULL, gate_str, ifname, flag, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ip_route_vrf,
tag (1-4294967295) \
|(1-255)$distance \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR IP_STR
"Tag value\n"
"Distance value for this route\n"
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
VTY_DECLVAR_CONTEXT(vrf, vrf);
struct zebra_vrf *zvrf = vrf->info;
struct zebra_vrf *nh_zvrf;
-
const char *flag = NULL;
+
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (ifname && !strncasecmp(ifname, "Null0", 5)) {
flag = "Null0";
ifname = NULL;
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP, SAFI_UNICAST, no, prefix, mask_str,
- NULL, gate_str, ifname, flag, tag_str, distance_str, label);
+ NULL, gate_str, ifname, flag, tag_str, distance_str, label,
+ NULL);
}
/* New RIB. Detailed information for IPv4 route. */
return CMD_SUCCESS;
}
+/* "show <ip|ipv6> route table N vrf NAME": display one specific routing
+ * table of a (netns-backed) VRF.  Only installed when the VRF backend is
+ * netns (see zebra_vty_init()).
+ */
+DEFPY (show_route_table_vrf,
+       show_route_table_vrf_cmd,
+       "show <ip$ipv4|ipv6$ipv6> route table (1-4294967295)$table vrf NAME$vrf_name [json$json]",
+       SHOW_STR
+       IP_STR
+       IP6_STR
+       "IP routing table\n"
+       "Table to display\n"
+       "The table number to display, if available\n"
+       VRF_CMD_HELP_STR
+       JSON_STR)
+{
+	afi_t afi = ipv4 ? AFI_IP : AFI_IP6;
+	struct zebra_vrf *zvrf;
+	struct route_table *t;
+	vrf_id_t vrf_id = VRF_DEFAULT;
+
+	if (vrf_name)
+		VRF_GET_ID(vrf_id, vrf_name);
+	zvrf = zebra_vrf_lookup_by_id(vrf_id);
+	/* Guard: a valid VRF id may still lack zebra state; without this
+	 * check zvrf->zns below would dereference NULL.
+	 */
+	if (!zvrf)
+		return CMD_SUCCESS;
+
+	t = zebra_ns_find_table(zvrf->zns, table, afi);
+	if (t)
+		do_show_route_helper(vty, zvrf, t, afi, false, 0, false, false,
+				     0, 0, !!json);
+
+	return CMD_SUCCESS;
+}
+
DEFUN (show_ip_nht,
show_ip_nht_cmd,
"show ip nht [vrf NAME]",
vty_out(vty, "%s ", shr->distance_str);
if (shr->label_str)
vty_out(vty, "label %s ", shr->label_str);
+ if (shr->table_str)
+ vty_out(vty, "table %s ", shr->table_str);
if (strcmp(shr->vrf_name, shr->nhvrf_name) != 0)
vty_out(vty, "nexthop-vrf %s", shr->nhvrf_name);
vty_out(vty, "\n");
vty_out(vty, " nexthop-vrf %s", si->nh_vrfname);
}
+ /* table ID from VRF overrides configured
+ */
+ if (si->table_id && zvrf->table_id == RT_TABLE_MAIN)
+ vty_out(vty, " table %u", si->table_id);
+
vty_out(vty, "\n");
write = 1;
|(1-255)$distance \
|vrf NAME \
|label WORD \
+ |table (1-4294967295) \
}]",
NO_STR
IPV6_STR
"Tag value\n"
"Distance value for this prefix\n"
VRF_CMD_HELP_STR
- MPLS_LABEL_HELPSTR)
+ MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n")
{
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
return zebra_static_route(vty, AFI_IP6, SAFI_UNICAST, no, prefix_str,
NULL, from_str, NULL, NULL, flag, tag_str,
- distance_str, vrf, label);
+ distance_str, vrf, label, table_str);
}
DEFPY(ipv6_route_blackhole_vrf,
tag (1-4294967295) \
|(1-255)$distance \
|label WORD \
+ |table (1-4294967295) \
}]",
NO_STR
IPV6_STR
"Set tag for this route\n"
"Tag value\n"
"Distance value for this prefix\n"
- MPLS_LABEL_HELPSTR)
+ MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n")
{
VTY_DECLVAR_CONTEXT(vrf, vrf);
struct zebra_vrf *zvrf = vrf->info;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
/*
* Coverity is complaining that prefix could
* be dereferenced, but we know that prefix will
assert(prefix);
return zebra_static_route_leak(
vty, zvrf, zvrf, AFI_IP6, SAFI_UNICAST, no, prefix_str, NULL,
- from_str, NULL, NULL, flag, tag_str, distance_str, label);
+ from_str, NULL, NULL, flag, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ipv6_route_address_interface,
|(1-255)$distance \
|vrf NAME \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR
"Distance value for this prefix\n"
VRF_CMD_HELP_STR
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
struct zebra_vrf *zvrf;
struct zebra_vrf *nh_zvrf;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
zvrf = zebra_vty_get_unknown_vrf(vty, vrf);
if (!zvrf) {
vty_out(vty, "%% vrf %s is not defined\n", vrf);
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP6, SAFI_UNICAST, no, prefix_str, NULL,
- from_str, gate_str, ifname, NULL, tag_str, distance_str, label);
+ from_str, gate_str, ifname, NULL, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ipv6_route_address_interface_vrf,
tag (1-4294967295) \
|(1-255)$distance \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR
"Tag value\n"
"Distance value for this prefix\n"
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
VTY_DECLVAR_CONTEXT(vrf, vrf);
struct zebra_vrf *zvrf = vrf->info;
struct zebra_vrf *nh_zvrf;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (nexthop_vrf)
nh_zvrf = zebra_vty_get_unknown_vrf(vty, nexthop_vrf);
else
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP6, SAFI_UNICAST, no, prefix_str, NULL,
- from_str, gate_str, ifname, NULL, tag_str, distance_str, label);
+ from_str, gate_str, ifname, NULL, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ipv6_route,
|(1-255)$distance \
|vrf NAME \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR
"Distance value for this prefix\n"
VRF_CMD_HELP_STR
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
struct zebra_vrf *zvrf;
struct zebra_vrf *nh_zvrf;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
zvrf = zebra_vty_get_unknown_vrf(vty, vrf);
if (!zvrf) {
vty_out(vty, "%% vrf %s is not defined\n", vrf);
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP6, SAFI_UNICAST, no, prefix_str, NULL,
- from_str, gate_str, ifname, NULL, tag_str, distance_str, label);
+ from_str, gate_str, ifname, NULL, tag_str, distance_str, label,
+ table_str);
}
DEFPY(ipv6_route_vrf,
tag (1-4294967295) \
|(1-255)$distance \
|label WORD \
+ |table (1-4294967295) \
|nexthop-vrf NAME \
}]",
NO_STR
"Tag value\n"
"Distance value for this prefix\n"
MPLS_LABEL_HELPSTR
+ "Table to configure\n"
+ "The table number to configure\n"
VRF_CMD_HELP_STR)
{
VTY_DECLVAR_CONTEXT(vrf, vrf);
struct zebra_vrf *zvrf = vrf->info;
struct zebra_vrf *nh_zvrf;
+ if (table_str && !vrf_is_backend_netns()) {
+ vty_out(vty,
+ "%% table param only available when running on netns-based vrfs\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (nexthop_vrf)
nh_zvrf = zebra_vty_get_unknown_vrf(vty, nexthop_vrf);
else
return zebra_static_route_leak(
vty, zvrf, nh_zvrf, AFI_IP6, SAFI_UNICAST, no, prefix_str, NULL,
- from_str, gate_str, ifname, NULL, tag_str, distance_str, label);
+ from_str, gate_str, ifname, NULL, tag_str, distance_str, label,
+ table_str);
}
/*
struct vrf *vrf;
struct zebra_vrf *zvrf;
+ if (vrf_is_backend_netns())
+ vty_out(vty, "netns-based vrfs\n");
+
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
if (!(zvrf = vrf->info))
continue;
return CMD_SUCCESS;
}
+/* policy routing contexts */
+DEFUN (show_pbr_ipset,
+ show_pbr_ipset_cmd,
+ "show pbr ipset [WORD]",
+ SHOW_STR
+ "Policy-Based Routing\n"
+ "IPset Context information\n"
+ "IPset Name information\n")
+{
+ int idx = 0;
+ int found = 0;
+ found = argv_find(argv, argc, "WORD", &idx);
+ if (!found)
+ zebra_pbr_show_ipset_list(vty, NULL);
+ else
+ zebra_pbr_show_ipset_list(vty, argv[idx]->arg);
+ return CMD_SUCCESS;
+}
+
+/* policy routing contexts */
+DEFUN (show_pbr_iptable,
+ show_pbr_iptable_cmd,
+ "show pbr iptable",
+ SHOW_STR
+ "Policy-Based Routing\n"
+ "IPtable Context information\n")
+{
+ zebra_pbr_show_iptable(vty);
+ return CMD_SUCCESS;
+}
+
/* Static ip route configuration write function. */
static int zebra_ip_config(struct vty *vty)
{
install_element(CONFIG_NODE, &ip_multicast_mode_cmd);
install_element(CONFIG_NODE, &no_ip_multicast_mode_cmd);
install_element(CONFIG_NODE, &ip_route_blackhole_cmd);
+ install_element(CONFIG_NODE,
+ &ip_route_address_interface_cmd);
+ install_element(CONFIG_NODE, &ip_route_cmd);
install_element(VRF_NODE, &ip_route_blackhole_vrf_cmd);
- install_element(CONFIG_NODE, &ip_route_address_interface_cmd);
install_element(VRF_NODE, &ip_route_address_interface_vrf_cmd);
- install_element(CONFIG_NODE, &ip_route_cmd);
install_element(VRF_NODE, &ip_route_vrf_cmd);
+
install_element(CONFIG_NODE, &ip_zebra_import_table_distance_cmd);
install_element(CONFIG_NODE, &no_ip_zebra_import_table_cmd);
install_element(CONFIG_NODE, &zebra_workqueue_timer_cmd);
install_element(VIEW_NODE, &show_vrf_vni_cmd);
install_element(VIEW_NODE, &show_route_cmd);
install_element(VIEW_NODE, &show_route_table_cmd);
+ if (vrf_is_backend_netns())
+ install_element(VIEW_NODE, &show_route_table_vrf_cmd);
install_element(VIEW_NODE, &show_route_detail_cmd);
install_element(VIEW_NODE, &show_route_summary_cmd);
install_element(VIEW_NODE, &show_ip_nht_cmd);
install_element(VIEW_NODE, &show_ip_rpf_cmd);
install_element(VIEW_NODE, &show_ip_rpf_addr_cmd);
- install_element(CONFIG_NODE, &ipv6_route_blackhole_cmd);
- install_element(VRF_NODE, &ipv6_route_blackhole_vrf_cmd);
- install_element(CONFIG_NODE, &ipv6_route_address_interface_cmd);
- install_element(VRF_NODE, &ipv6_route_address_interface_vrf_cmd);
+ install_element(CONFIG_NODE,
+ &ipv6_route_blackhole_cmd);
+ install_element(CONFIG_NODE,
+ &ipv6_route_address_interface_cmd);
install_element(CONFIG_NODE, &ipv6_route_cmd);
+ install_element(VRF_NODE, &ipv6_route_blackhole_vrf_cmd);
+ install_element(VRF_NODE,
+ &ipv6_route_address_interface_vrf_cmd);
install_element(VRF_NODE, &ipv6_route_vrf_cmd);
install_element(CONFIG_NODE, &ip_nht_default_route_cmd);
install_element(CONFIG_NODE, &no_ip_nht_default_route_cmd);
install_element(VIEW_NODE, &show_evpn_neigh_vni_neigh_cmd);
install_element(VIEW_NODE, &show_evpn_neigh_vni_vtep_cmd);
+ install_element(VIEW_NODE, &show_pbr_ipset_cmd);
+ install_element(VIEW_NODE, &show_pbr_iptable_cmd);
+
install_element(CONFIG_NODE, &default_vrf_vni_mapping_cmd);
install_element(CONFIG_NODE, &no_default_vrf_vni_mapping_cmd);
install_element(VRF_NODE, &vrf_vni_mapping_cmd);
memcpy(&lookup.p, host, sizeof(*host));
hle = RB_FIND(host_rb_tree_entry, hrbe, &lookup);
- if (hle)
+ if (hle) {
RB_REMOVE(host_rb_tree_entry, hrbe, hle);
+ XFREE(MTYPE_HOST_PREFIX, hle);
+ }
return;
}