/libtool.orig
/changelog-auto
/test-driver
+/test-suite.log
/Makefile
/Makefile.in
*.pb.cc
*_clippy.c
*.bc
+*.ll
*.cg.json
*.cg.dot
*.cg.svg
BFD_SESS_FLAG_PASSIVE = 1 << 10, /* Passive mode */
};
-/* BFD session hash keys */
+/*
+ * BFD session hash key.
+ *
+ * This structure must not have any padding bytes because their value is
+ * unspecified after the struct assignment. Even when all fields of two keys
+ * are the same, if the padding bytes are different, then the calculated hash
+ * value is different, and the hash lookup will fail.
+ *
+ * Currently, the structure fields are correctly aligned, and the "packed"
+ * attribute is added as a precaution. "family" and "mhop" fields are two-bytes
+ * to eliminate unaligned memory access to "peer" and "local".
+ */
struct bfd_key {
uint16_t family;
- uint8_t mhop;
+ uint16_t mhop;
struct in6_addr peer;
struct in6_addr local;
char ifname[MAXNAMELEN];
char vrfname[MAXNAMELEN];
-};
+} __attribute__((packed));
struct bfd_session_stats {
uint64_t rx_ctrl_pkt;
MIX(transit_hash_key_make(bgp_attr_get_transit(attr)));
if (attr->encap_subtlvs)
MIX(encap_hash_key_make(attr->encap_subtlvs));
+ if (attr->srv6_l3vpn)
+ MIX(srv6_l3vpn_hash_key_make(attr->srv6_l3vpn));
+ if (attr->srv6_vpn)
+ MIX(srv6_vpn_hash_key_make(attr->srv6_vpn));
#ifdef ENABLE_BGP_VNC
struct bgp_attr_encap_subtlv *vnc_subtlvs =
bgp_attr_get_vnc_subtlvs(attr);
if (new->lcommunity != old->lcommunity)
lcommunity_free(&new->lcommunity);
+
+ if (new->srv6_l3vpn != old->srv6_l3vpn) {
+ srv6_l3vpn_free(new->srv6_l3vpn);
+ new->srv6_l3vpn = NULL;
+ }
+
+ if (new->srv6_vpn != old->srv6_vpn) {
+ srv6_vpn_free(new->srv6_vpn);
+ new->srv6_vpn = NULL;
+ }
}
/* Free bgp attribute and aspath. */
encap_free(attr->encap_subtlvs);
attr->encap_subtlvs = NULL;
}
+ if (attr->srv6_l3vpn && !attr->srv6_l3vpn->refcnt) {
+ srv6_l3vpn_free(attr->srv6_l3vpn);
+ attr->srv6_l3vpn = NULL;
+ }
+ if (attr->srv6_vpn && !attr->srv6_vpn->refcnt) {
+ srv6_vpn_free(attr->srv6_vpn);
+ attr->srv6_vpn = NULL;
+ }
#ifdef ENABLE_BGP_VNC
struct bgp_attr_encap_subtlv *vnc_subtlvs =
bgp_attr_get_vnc_subtlvs(attr);
sizeof(struct bgp_attr_srv6_vpn));
attr->srv6_vpn->sid_flags = sid_flags;
sid_copy(&attr->srv6_vpn->sid, &ipv6_sid);
+ attr->srv6_vpn = srv6_vpn_intern(attr->srv6_vpn);
}
/* Placeholder code for the SRv6 L3 Service type */
attr->srv6_l3vpn->sid_flags = sid_flags;
attr->srv6_l3vpn->endpoint_behavior = endpoint_behavior;
sid_copy(&attr->srv6_l3vpn->sid, &ipv6_sid);
+ attr->srv6_l3vpn = srv6_l3vpn_intern(attr->srv6_l3vpn);
}
/* Placeholder code for Unsupported TLV */
}
/* SRv6 Service Information Attribute. */
- if (afi == AFI_IP && safi == SAFI_MPLS_VPN) {
+ if ((afi == AFI_IP || afi == AFI_IP6) && safi == SAFI_MPLS_VPN) {
if (attr->srv6_l3vpn) {
stream_putc(s, BGP_ATTR_FLAG_OPTIONAL
| BGP_ATTR_FLAG_TRANS);
bgp_instance_up(bgp);
vpn_leak_zebra_vrf_label_update(bgp, AFI_IP);
vpn_leak_zebra_vrf_label_update(bgp, AFI_IP6);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP);
+ vpn_leak_zebra_vrf_sid_update(bgp, AFI_IP6);
vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP,
bgp_get_default(), bgp);
vpn_leak_postchange(BGP_VPN_POLICY_DIR_FROMVPN, AFI_IP,
DEFINE_MTYPE(BGPD, BGP_SRV6_L3VPN, "BGP prefix-sid srv6 l3vpn servcie");
DEFINE_MTYPE(BGPD, BGP_SRV6_VPN, "BGP prefix-sid srv6 vpn service");
+DEFINE_MTYPE(BGPD, BGP_SRV6_SID, "BGP srv6 segment-id");
+DEFINE_MTYPE(BGPD, BGP_SRV6_FUNCTION, "BGP srv6 function");
DECLARE_MTYPE(BGP_SRV6_L3VPN);
DECLARE_MTYPE(BGP_SRV6_VPN);
+DECLARE_MTYPE(BGP_SRV6_SID);
+DECLARE_MTYPE(BGP_SRV6_FUNCTION);
#endif /* _QUAGGA_BGP_MEMORY_H */
#include "bgpd/bgp_nexthop.h"
#include "bgpd/bgp_nht.h"
#include "bgpd/bgp_evpn.h"
+#include "bgpd/bgp_memory.h"
#ifdef ENABLE_BGP_VNC
#include "bgpd/rfapi/rfapi_backend.h"
bgp->vpn_policy[afi].tovpn_zebra_vrf_label_last_sent = label;
}
+/*
+ * This function informs zebra of the srv6-function this vrf sets on routes
+ * leaked to VPN. Zebra should install this srv6-function in the kernel with
+ * an action of "End.DT4/6's IP FIB to route the PDU."
+ */
+void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi)
+{
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+	enum seg6local_action_t act;
+	struct seg6local_context ctx = {};
+	struct in6_addr *tovpn_sid = NULL;
+	struct in6_addr *tovpn_sid_ls = NULL;
+	struct vrf *vrf;
+	char buf[256] = {0};
+
+	if (bgp->vrf_id == VRF_UNKNOWN) {
+		if (debug)
+			zlog_debug("%s: vrf %s: afi %s: vrf_id not set, can't set zebra vrf sid",
+				   __func__, bgp->name_pretty, afi2str(afi));
+		return;
+	}
+
+	tovpn_sid = bgp->vpn_policy[afi].tovpn_sid;
+	if (!tovpn_sid) {
+		if (debug)
+			zlog_debug("%s: vrf %s: afi %s: sid not set", __func__,
+				   bgp->name_pretty, afi2str(afi));
+		return;
+	}
+
+	if (debug) {
+		inet_ntop(AF_INET6, tovpn_sid, buf, sizeof(buf));
+		zlog_debug("%s: vrf %s: afi %s: setting sid %s for vrf id %d",
+			   __func__, bgp->name_pretty, afi2str(afi), buf,
+			   bgp->vrf_id);
+	}
+
+	vrf = vrf_lookup_by_id(bgp->vrf_id);
+	if (!vrf)
+		return;
+
+	/* End.DT4/DT6 decapsulates and routes using this vrf's table */
+	ctx.table = vrf->data.l.table_id;
+	act = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4
+			    : ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
+	zclient_send_localsid(zclient, tovpn_sid, bgp->vrf_id, act, &ctx);
+
+	/*
+	 * Free any previously recorded last-sent SID before overwriting it,
+	 * otherwise repeated updates leak one in6_addr per call. XFREE() is
+	 * a no-op on NULL and NULLs the pointer it is given.
+	 */
+	XFREE(MTYPE_BGP_SRV6_SID,
+	      bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent);
+	tovpn_sid_ls = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
+	*tovpn_sid_ls = *tovpn_sid;
+	bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent = tovpn_sid_ls;
+}
+
+/*
+ * If zebra tells us vrf has become unconfigured, tell zebra not to
+ * use this srv6-function to forward to the vrf anymore
+ */
+void vpn_leak_zebra_vrf_sid_withdraw(struct bgp *bgp, afi_t afi)
+{
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL);
+
+	if (bgp->vrf_id == VRF_UNKNOWN) {
+		if (debug)
+			zlog_debug("%s: vrf %s: afi %s: vrf_id not set, can't withdraw zebra vrf sid",
+				   __func__, bgp->name_pretty, afi2str(afi));
+		return;
+	}
+
+	/* Nothing to withdraw if no SID was ever announced to zebra */
+	if (!bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent)
+		return;
+
+	if (debug)
+		zlog_debug("%s: deleting sid for vrf %s afi (id=%d)", __func__,
+			   bgp->name_pretty, bgp->vrf_id);
+
+	zclient_send_localsid(zclient,
+		bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent,
+		bgp->vrf_id, ZEBRA_SEG6_LOCAL_ACTION_UNSPEC, NULL);
+	/* XFREE() NULLs the pointer, so a second withdraw is harmless */
+	XFREE(MTYPE_BGP_SRV6_SID,
+	      bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent);
+}
+
int vpn_leak_label_callback(
mpls_label_t label,
void *labelid,
return 0;
}
+/*
+ * Record that "sid" (carved out of locator "locator_name") is in use, so
+ * that later calls to sid_exist()/alloc_new_sid() will not hand it out
+ * again. Ownership of the allocated entry stays with bgp->srv6_functions.
+ */
+static void sid_register(struct bgp *bgp, const struct in6_addr *sid,
+			 const char *locator_name)
+{
+	struct bgp_srv6_function *func;
+	func = XCALLOC(MTYPE_BGP_SRV6_FUNCTION,
+		       sizeof(struct bgp_srv6_function));
+	func->sid = *sid;
+	snprintf(func->locator_name, sizeof(func->locator_name),
+		 "%s", locator_name);
+	listnode_add(bgp->srv6_functions, func);
+}
+
+/* Return true when "sid" is already registered in bgp->srv6_functions. */
+static bool sid_exist(struct bgp *bgp, const struct in6_addr *sid)
+{
+	struct bgp_srv6_function *func;
+	struct listnode *node;
+
+	/* linear scan; the function list is expected to stay small */
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func)) {
+		if (sid_same(&func->sid, sid))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Allocate an unused SID from the locator chunks owned by "bgp".
+ *
+ * if index != 0: try to allocate as index-mode
+ * else: try to allocate as auto-mode
+ *
+ * On success the SID is registered (so it cannot be handed out twice),
+ * written to *sid, and true is returned.
+ */
+static bool alloc_new_sid(struct bgp *bgp, uint32_t index,
+		    struct in6_addr *sid)
+{
+	struct listnode *node;
+	struct prefix_ipv6 *chunk;
+	struct in6_addr sid_buf;
+	bool alloced = false;
+
+	if (!bgp || !sid)
+		return false;
+
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
+		sid_buf = chunk->prefix;
+		if (index != 0) {
+			/* index-mode: the function value is the caller's
+			 * index placed in the last byte.
+			 * NOTE(review): a collision aborts immediately rather
+			 * than trying the remaining chunks — confirm intended.
+			 */
+			sid_buf.s6_addr[15] = index;
+			if (sid_exist(bgp, &sid_buf))
+				return false;
+			alloced = true;
+			break;
+		}
+
+		/* auto-mode: probe function values 1..254 for a free SID.
+		 * NOTE(review): since i < 255, (i & 0xff00) >> 8 is always 0,
+		 * so s6_addr[15] is cleared and only s6_addr[14] varies —
+		 * verify this byte layout (vs. index-mode's s6_addr[15]) is
+		 * the intended encoding.
+		 */
+		for (size_t i = 1; i < 255; i++) {
+			sid_buf.s6_addr[15] = (i & 0xff00) >> 8;
+			sid_buf.s6_addr[14] = (i & 0x00ff);
+
+			if (sid_exist(bgp, &sid_buf))
+				continue;
+			alloced = true;
+			break;
+		}
+	}
+
+	if (!alloced)
+		return false;
+
+	/* remember the allocation before publishing it to the caller */
+	sid_register(bgp, &sid_buf, bgp->srv6_locator_name);
+	*sid = sid_buf;
+	return true;
+}
+
+/*
+ * Make sure the vrf instance "bgp_vrf" has a tovpn SID allocated for "afi",
+ * allocating one from the vpn instance's locator chunks when needed.
+ * No-op when a SID already exists or when SID export is not configured.
+ */
+void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi)
+{
+	int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF);
+	bool alloced = false;
+	char buf[256];
+	struct in6_addr *sid;
+	uint32_t tovpn_sid_index = 0;
+	bool tovpn_sid_auto = false;
+
+	if (debug)
+		zlog_debug("%s: try to allocate new SID for vrf %s: afi %s",
+			   __func__, bgp_vrf->name_pretty, afi2str(afi));
+
+	/* skip when tovpn sid is already allocated on vrf instance */
+	if (bgp_vrf->vpn_policy[afi].tovpn_sid)
+		return;
+
+	/*
+	 * skip when bgp vpn instance isn't allocated
+	 * or srv6 locator chunk isn't allocated
+	 */
+	if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks || !bgp_vrf)
+		return;
+
+	tovpn_sid_index = bgp_vrf->vpn_policy[afi].tovpn_sid_index;
+	tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags,
+				    BGP_VPN_POLICY_TOVPN_SID_AUTO);
+
+	/* skip when VPN isn't configured on vrf-instance */
+	if (tovpn_sid_index == 0 && !tovpn_sid_auto)
+		return;
+
+	/* check invalid case both configured index and auto */
+	if (tovpn_sid_index != 0 && tovpn_sid_auto) {
+		zlog_err("%s: index-mode and auto-mode both selected. ignored.",
+			 __func__);
+		return;
+	}
+
+	sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr));
+	alloced = alloc_new_sid(bgp_vpn, tovpn_sid_index, sid);
+	if (!alloced) {
+		zlog_debug("%s: not allocated new sid for vrf %s: afi %s",
+			   __func__, bgp_vrf->name_pretty, afi2str(afi));
+		/* don't leak the buffer when allocation fails */
+		XFREE(MTYPE_BGP_SRV6_SID, sid);
+		return;
+	}
+
+	if (debug) {
+		inet_ntop(AF_INET6, sid, buf, sizeof(buf));
+		zlog_debug("%s: new sid %s allocated for vrf %s: afi %s",
+			   __func__, buf, bgp_vrf->name_pretty,
+			   afi2str(afi));
+	}
+	bgp_vrf->vpn_policy[afi].tovpn_sid = sid;
+}
+
static bool ecom_intersect(struct ecommunity *e1, struct ecommunity *e2)
{
uint32_t i, j;
extra->num_labels = num_labels;
}
+/*
+ * make encoded route SIDs match specified encoded sid set
+ *
+ * Copies "num_sids" SIDs (at most BGP_MAX_SIDS) into the path's extra
+ * info, allocating extra info on demand. num_sids == 0 only clears the
+ * recorded count (and allocates nothing when extra is absent).
+ */
+static void setsids(struct bgp_path_info *bpi,
+		    struct in6_addr *sid,
+		    uint32_t num_sids)
+{
+	uint32_t i;
+	struct bgp_path_info_extra *extra;
+
+	if (num_sids)
+		assert(sid);
+	assert(num_sids <= BGP_MAX_SIDS);
+
+	if (!num_sids) {
+		if (bpi->extra)
+			bpi->extra->num_sids = 0;
+		return;
+	}
+
+	extra = bgp_path_info_extra_get(bpi);
+	for (i = 0; i < num_sids; i++)
+		memcpy(&extra->sid[i], &sid[i], sizeof(struct in6_addr));
+	extra->num_sids = num_sids;
+}
+
/*
* returns pointer to new bgp_path_info upon success
*/
struct bgp_path_info *bpi;
struct bgp_path_info *bpi_ultimate;
struct bgp_path_info *new;
+ uint32_t num_sids = 0;
+
+ if (new_attr->srv6_l3vpn || new_attr->srv6_vpn)
+ num_sids = 1;
if (debug)
zlog_debug(
if (!labelssame)
setlabels(bpi, label, num_labels);
+ /*
+ * rewrite sid
+ */
+ if (num_sids) {
+ if (new_attr->srv6_l3vpn)
+ setsids(bpi, &new_attr->srv6_l3vpn->sid,
+ num_sids);
+ else if (new_attr->srv6_vpn)
+ setsids(bpi, &new_attr->srv6_vpn->sid,
+ num_sids);
+ }
+
if (nexthop_self_flag)
bgp_path_info_set_flag(bn, bpi, BGP_PATH_ANNC_NH_SELF);
bgp_path_info_extra_get(new);
+ /*
+ * rewrite sid
+ */
+ if (num_sids) {
+ if (new_attr->srv6_l3vpn)
+ setsids(new, &new_attr->srv6_l3vpn->sid, num_sids);
+ else if (new_attr->srv6_vpn)
+ setsids(new, &new_attr->srv6_vpn->sid, num_sids);
+ }
+
if (num_labels)
setlabels(new, label, num_labels);
SET_FLAG(static_attr.flag, ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID));
static_attr.originator_id = bgp_vpn->router_id;
+ /* Set SID for SRv6 VPN */
+ if (bgp_vrf->vpn_policy[afi].tovpn_sid) {
+ static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN,
+ sizeof(struct bgp_attr_srv6_l3vpn));
+ static_attr.srv6_l3vpn->sid_flags = 0x00;
+ static_attr.srv6_l3vpn->endpoint_behavior = 0xffff;
+ memcpy(&static_attr.srv6_l3vpn->sid,
+ bgp_vrf->vpn_policy[afi].tovpn_sid,
+ sizeof(static_attr.srv6_l3vpn->sid));
+ }
+
new_attr = bgp_attr_intern(
&static_attr); /* hashed refcounted everything */
extern void vpn_leak_zebra_vrf_label_update(struct bgp *bgp, afi_t afi);
extern void vpn_leak_zebra_vrf_label_withdraw(struct bgp *bgp, afi_t afi);
+extern void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi);
+extern void vpn_leak_zebra_vrf_sid_withdraw(struct bgp *bgp, afi_t afi);
extern int vpn_leak_label_callback(mpls_label_t label, void *lblid, bool alloc);
+extern void ensure_vrf_tovpn_sid(struct bgp *vpn, struct bgp *vrf, afi_t afi);
extern void vrf_import_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
afi_t afi, safi_t safi);
void vrf_unimport_from_vrf(struct bgp *to_bgp, struct bgp *from_bgp,
vpn_leak_zebra_vrf_label_update(bgp_vrf, afi);
}
+ if (!bgp_vrf->vpn_policy[afi].tovpn_sid)
+ ensure_vrf_tovpn_sid(bgp_vpn, bgp_vrf, afi);
+
+ if (sid_diff(bgp_vrf->vpn_policy[afi].tovpn_sid,
+ bgp_vrf->vpn_policy[afi]
+ .tovpn_zebra_vrf_sid_last_sent)) {
+ vpn_leak_zebra_vrf_sid_update(bgp_vrf, afi);
+ }
+
vpn_leak_from_vrf_update_all(bgp_vpn, bgp_vrf, afi);
}
}
int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_import_vpn_modify(
struct nb_cb_modify_args *args)
{
+ bool is_enable = false;
+ struct bgp *bgp;
+
switch (args->event) {
case NB_EV_VALIDATE:
+ bgp = nb_running_get_entry(args->dnode, NULL, false);
+ if (!bgp)
+ return NB_OK;
+
+ if (bgp->inst_type != BGP_INSTANCE_TYPE_VRF
+ && bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "import|export vpn valid only for bgp vrf or default instance");
+ return NB_ERR_VALIDATION;
+ }
+
+ break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ return NB_OK;
case NB_EV_APPLY:
- /* TODO: implement me. */
+ if (yang_dnode_get_bool(args->dnode, NULL))
+ is_enable = true;
+
+ return bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
+ args, "import", is_enable);
break;
}
int bgp_global_afi_safis_afi_safi_ipv6_unicast_vpn_config_export_vpn_modify(
struct nb_cb_modify_args *args)
{
+ bool is_enable = false;
+ struct bgp *bgp;
+
switch (args->event) {
case NB_EV_VALIDATE:
+ bgp = nb_running_get_entry(args->dnode, NULL, false);
+ if (!bgp)
+ return NB_OK;
+
+ if (bgp->inst_type != BGP_INSTANCE_TYPE_VRF
+ && bgp->inst_type != BGP_INSTANCE_TYPE_DEFAULT) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "import|export vpn valid only for bgp vrf or default instance");
+ return NB_ERR_VALIDATION;
+ }
+ break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ return NB_OK;
case NB_EV_APPLY:
- /* TODO: implement me. */
+ if (yang_dnode_get_bool(args->dnode, NULL))
+ is_enable = true;
+
+ return bgp_global_afi_safi_ip_unicast_vpn_config_import_export_vpn_modify(
+ args, "export", is_enable);
break;
}
frr_with_privs(&bgpd_privs) {
sock = vrf_socket(ainfo->ai_family,
ainfo->ai_socktype,
- ainfo->ai_protocol, bgp->vrf_id,
+ ainfo->ai_protocol,
+ (bgp->inst_type
+ != BGP_INSTANCE_TYPE_VIEW
+ ? bgp->vrf_id : VRF_DEFAULT),
(bgp->inst_type
== BGP_INSTANCE_TYPE_VRF
? bgp->name : NULL));
static int bgp_isvalid_labeled_nexthop(struct bgp_nexthop_cache *bnc)
{
+	/*
+	 * In the case of MPLS-VPN, the label is learned from LDP or other
+	 * protocols, and nexthop tracking is enabled for the label.
+	 * The value is recorded as BGP_NEXTHOP_LABELED_VALID.
+	 * In the case of SRv6-VPN, we need to track the reachability to the
+	 * SID (in other words, IPv6 address). As in MPLS, we need to record
+	 * the value as BGP_NEXTHOP_SID_VALID. However, this function is
+	 * currently not implemented, and this function assumes that all
+	 * Transit routes for SRv6-VPN are valid.
+	 *
+	 * NOTE(review): while srv6_enabled is set, any resolvable nexthop
+	 * (nexthop_num > 0) passes, even for plain MPLS routes in the same
+	 * instance — confirm this blanket bypass is acceptable.
+	 */
	return (bgp_zebra_num_connects() == 0
-	        || (bnc && CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
-	            && bnc->nexthop_num > 0));
+	        || (bnc && bnc->nexthop_num > 0
+	            && (CHECK_FLAG(bnc->flags, BGP_NEXTHOP_LABELED_VALID)
+	                || bnc->bgp->srv6_enabled)));
}
static void bgp_unlink_nexthop_check(struct bgp_nexthop_cache *bnc)
NB_OP_MODIFY, "false");
}
- ret = nb_cli_apply_changes(vty, base_xpath);
+ ret = nb_cli_apply_changes_clear_pending(vty, base_xpath);
if (ret == CMD_SUCCESS) {
VTY_PUSH_XPATH(BGP_NODE, base_xpath);
* For backward compatibility with old commands we still
* need to use the qobj infrastructure.
*/
- nb_cli_pending_commit_check(vty);
bgp = bgp_lookup(as, name);
if (bgp)
VTY_PUSH_CONTEXT(BGP_NODE, bgp);
nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, base_xpath);
+ /* We want to finish any classic config after a no router */
+ return nb_cli_apply_changes_clear_pending(vty, base_xpath);
}
void cli_show_router_bgp(struct vty *vty, struct lyd_node *dnode,
nb_cli_enqueue_change(vty, base_xpath, NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, NULL);
+ /*
+ * Need to commit any pending so this command doesn't merge with a
+ * create into a modify, which BGP can't handle
+ */
+ return nb_cli_apply_changes_clear_pending(vty, NULL);
}
DEFUN_YANG(no_neighbor_interface_config,
nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, base_xpath);
+ /*
+ * Need to commit any pending so this command doesn't merge with a
+ * create into a modify, which BGP can't handle
+ */
+ return nb_cli_apply_changes_clear_pending(vty, base_xpath);
}
DEFUN_YANG(no_neighbor_peer_group,
nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, base_xpath);
+ /*
+ * Need to commit any pending so this command doesn't merge with a
+ * create into a modify, which BGP can't handle
+ */
+ return nb_cli_apply_changes_clear_pending(vty, base_xpath);
}
DEFUN_YANG(no_neighbor_interface_peer_group_remote_as,
nb_cli_enqueue_change(vty, base_xpath, NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, NULL);
+ /*
+ * Need to commit any pending so this command doesn't merge with a
+ * create into a modify, which BGP can't handle
+ */
+ return nb_cli_apply_changes_clear_pending(vty, NULL);
}
DEFUN_YANG(neighbor_local_as,
nb_cli_enqueue_change(vty, "./peer-group", NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, base_xpath);
+ /*
+ * Need to commit any pending so this command doesn't merge with a
+ * create into a modify, which BGP can't handle
+ */
+ return nb_cli_apply_changes_clear_pending(vty, base_xpath);
}
ALIAS_HIDDEN(no_neighbor_set_peer_group, no_neighbor_set_peer_group_hidden_cmd,
return CMD_SUCCESS;
}
+/*
+ * Configure the SID index (or auto allocation) used when leaking routes of
+ * the current address-family into the VPN table. Negation is not yet
+ * implemented; switching between index-mode and auto-mode requires negating
+ * the configuration first.
+ */
+DEFPY (af_sid_vpn_export,
+       af_sid_vpn_export_cmd,
+       "[no] sid vpn export <(1-255)$sid_idx|auto$sid_auto>",
+       NO_STR
+       "sid value for VRF\n"
+       "Between current address-family and vpn\n"
+       "For routes leaked from current address-family to vpn\n"
+       "Sid allocation index\n"
+       "Automatically assign a label\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	afi_t afi;
+	int debug = 0;
+	int idx = 0;
+	bool yes = true;
+
+	if (argv_find(argv, argc, "no", &idx))
+		yes = false;
+	debug = (BGP_DEBUG(vpn, VPN_LEAK_TO_VRF) |
+		 BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF));
+
+	afi = vpn_policy_getafi(vty, bgp, false);
+	if (afi == AFI_MAX)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	if (!yes) {
+		/* implement me */
+		vty_out(vty, "It's not implemented\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	/* skip when it's already configured */
+	if ((sid_idx != 0 && bgp->vpn_policy[afi].tovpn_sid_index != 0)
+	    || (sid_auto && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+				       BGP_VPN_POLICY_TOVPN_SID_AUTO)))
+		return CMD_SUCCESS;
+
+	/*
+	 * mode change between sid_idx and sid_auto isn't supported.
+	 * user must negate sid vpn export when they want to change the mode
+	 */
+	if ((sid_auto && bgp->vpn_policy[afi].tovpn_sid_index != 0)
+	    || (sid_idx != 0 && CHECK_FLAG(bgp->vpn_policy[afi].flags,
+					   BGP_VPN_POLICY_TOVPN_SID_AUTO))) {
+		vty_out(vty, "it's already configured as %s.\n",
+			sid_auto ? "auto-mode" : "idx-mode");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	/* pre-change */
+	vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+			   bgp_get_default(), bgp);
+
+	if (sid_auto) {
+		/* SID allocation auto-mode */
+		if (debug)
+			zlog_debug("%s: auto sid alloc.", __func__);
+		SET_FLAG(bgp->vpn_policy[afi].flags,
+			 BGP_VPN_POLICY_TOVPN_SID_AUTO);
+	} else {
+		/* SID allocation index-mode */
+		if (debug)
+			zlog_debug("%s: idx %ld sid alloc.", __func__, sid_idx);
+		bgp->vpn_policy[afi].tovpn_sid_index = sid_idx;
+	}
+
+	/* post-change */
+	vpn_leak_postchange(BGP_VPN_POLICY_DIR_TOVPN, afi,
+			    bgp_get_default(), bgp);
+	return CMD_SUCCESS;
+}
+
ALIAS (af_label_vpn_export,
af_no_label_vpn_export_cmd,
"no label vpn export",
return CMD_SUCCESS;
}
+/* Enable the SRv6 backend for this instance and enter its config node. */
+DEFUN_NOSH (bgp_segment_routing_srv6,
+            bgp_segment_routing_srv6_cmd,
+            "segment-routing srv6",
+            "Segment-Routing configuration\n"
+            "Segment-Routing SRv6 configuration\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	bgp->srv6_enabled = true;
+	vty->node = BGP_SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/*
+ * Select the SRv6 locator for this instance and ask zebra's SRv6 manager
+ * for a locator chunk. Changing to a different locator name is rejected
+ * until the current one is removed.
+ */
+DEFPY (bgp_srv6_locator,
+       bgp_srv6_locator_cmd,
+       "locator NAME$name",
+       "Specify SRv6 locator\n"
+       "Specify SRv6 locator\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+	int ret;
+
+	if (strlen(bgp->srv6_locator_name) > 0
+	    && strcmp(name, bgp->srv6_locator_name) != 0) {
+		vty_out(vty, "srv6 locator is already configured\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+
+	snprintf(bgp->srv6_locator_name,
+		 sizeof(bgp->srv6_locator_name), "%s", name);
+
+	/* the chunk arrives asynchronously via the zapi callback */
+	ret = bgp_zebra_srv6_manager_get_locator_chunk(name);
+	if (ret < 0)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * Display the SRv6 state held by bgpd: the configured locator and its
+ * chunks (from the default instance), the allocated SRv6 functions, and
+ * each instance's per-AFI tovpn SID.
+ */
+DEFPY (show_bgp_srv6,
+       show_bgp_srv6_cmd,
+       "show bgp segment-routing srv6",
+       SHOW_STR
+       BGP_STR
+       "BGP Segment Routing\n"
+       "BGP Segment Routing SRv6\n")
+{
+	struct bgp *bgp;
+	struct listnode *node;
+	struct prefix_ipv6 *chunk;
+	struct bgp_srv6_function *func;
+	struct in6_addr *tovpn4_sid;
+	struct in6_addr *tovpn6_sid;
+	char buf[256];
+	char buf_tovpn4_sid[256];
+	char buf_tovpn6_sid[256];
+
+	/* locator/chunk/function state lives on the default instance only */
+	bgp = bgp_get_default();
+	if (!bgp)
+		return CMD_SUCCESS;
+
+	vty_out(vty, "locator_name: %s\n", bgp->srv6_locator_name);
+	vty_out(vty, "locator_chunks:\n");
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) {
+		prefix2str(chunk, buf, sizeof(buf));
+		vty_out(vty, "- %s\n", buf);
+	}
+
+	vty_out(vty, "functions:\n");
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_functions, node, func)) {
+		inet_ntop(AF_INET6, &func->sid, buf, sizeof(buf));
+		vty_out(vty, "- sid: %s\n", buf);
+		vty_out(vty, "  locator: %s\n", func->locator_name);
+	}
+
+	vty_out(vty, "bgps:\n");
+	for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
+		vty_out(vty, "- name: %s\n",
+			bgp->name ? bgp->name : "default");
+
+		tovpn4_sid = bgp->vpn_policy[AFI_IP].tovpn_sid;
+		tovpn6_sid = bgp->vpn_policy[AFI_IP6].tovpn_sid;
+		if (tovpn4_sid)
+			inet_ntop(AF_INET6, tovpn4_sid, buf_tovpn4_sid,
+				  sizeof(buf_tovpn4_sid));
+		if (tovpn6_sid)
+			inet_ntop(AF_INET6, tovpn6_sid, buf_tovpn6_sid,
+				  sizeof(buf_tovpn6_sid));
+
+		vty_out(vty, "  vpn_policy[AFI_IP].tovpn_sid: %s\n",
+			tovpn4_sid ? buf_tovpn4_sid : "none");
+		vty_out(vty, "  vpn_policy[AFI_IP6].tovpn_sid: %s\n",
+			tovpn6_sid ? buf_tovpn6_sid : "none");
+	}
+
+	return CMD_SUCCESS;
+}
+
DEFUN_NOSH (exit_address_family,
exit_address_family_cmd,
"exit-address-family",
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SHUTDOWN))
vty_out(vty, " bgp shutdown\n");
+ if (bgp->srv6_enabled) {
+ vty_frame(vty, " !\n segment-routing srv6\n");
+ if (strlen(bgp->srv6_locator_name))
+ vty_out(vty, " locator %s\n",
+ bgp->srv6_locator_name);
+ }
+
+
/* IPv4 unicast configuration. */
bgp_config_write_family(vty, bgp, AFI_IP, SAFI_UNICAST);
.prompt = "%s(config-router-af-vpnv6)# ",
};
+/* CLI node for "segment-routing srv6", nested under "router bgp" */
+static struct cmd_node bgp_srv6_node = {
+	.name = "bgp srv6",
+	.node = BGP_SRV6_NODE,
+	.parent_node = BGP_NODE,
+	.prompt = "%s(config-router-srv6)# ",
+};
+
static void community_list_vty(void);
static void bgp_ac_neighbor(vector comps, struct cmd_token *token)
install_node(&bgp_evpn_vni_node);
install_node(&bgp_flowspecv4_node);
install_node(&bgp_flowspecv6_node);
+ install_node(&bgp_srv6_node);
/* Install default VTY commands to new nodes. */
install_default(BGP_NODE);
install_default(BGP_FLOWSPECV6_NODE);
install_default(BGP_EVPN_NODE);
install_default(BGP_EVPN_VNI_NODE);
+ install_default(BGP_SRV6_NODE);
/* "bgp local-mac" hidden commands. */
install_element(CONFIG_NODE, &bgp_local_mac_cmd);
/* tcp-mss command */
install_element(BGP_NODE, &neighbor_tcp_mss_cmd);
install_element(BGP_NODE, &no_neighbor_tcp_mss_cmd);
+
+ /* srv6 commands */
+ install_element(VIEW_NODE, &show_bgp_srv6_cmd);
+ install_element(BGP_NODE, &bgp_segment_routing_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &bgp_srv6_locator_cmd);
+ install_element(BGP_IPV4_NODE, &af_sid_vpn_export_cmd);
+ install_element(BGP_IPV6_NODE, &af_sid_vpn_export_cmd);
}
#include "memory.h"
unsigned int valid_nh_count = 0;
int has_valid_label = 0;
bool allow_recursion = false;
+ int has_valid_sid = 0;
uint8_t distance;
struct peer *peer;
struct bgp_path_info *mpinfo;
sizeof(struct ethaddr));
api_nh->weight = nh_weight;
+ if (mpinfo->extra
+ && !sid_zero(&mpinfo->extra->sid[0])
+ && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ has_valid_sid = 1;
+ memcpy(&api_nh->seg6_segs, &mpinfo->extra->sid[0],
+ sizeof(api_nh->seg6_segs));
+ }
+
valid_nh_count++;
}
+ if (has_valid_sid && !(CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)))
+ SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
+
is_add = (valid_nh_count || nhg_id) ? true : false;
if (is_add && CHECK_FLAG(bm->flags, BM_FLAG_SEND_EXTRA_DATA_TO_ZEBRA)) {
char eth_buf[ETHER_ADDR_STRLEN + 7] = {'\0'};
char buf1[ETHER_ADDR_STRLEN];
char label_buf[20];
+ char sid_buf[20];
+ char segs_buf[256];
int i;
zlog_debug(
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
snprintf(label_buf, sizeof(label_buf),
"label %u", api_nh->labels[0]);
+ if (has_valid_sid
+ && !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)) {
+ inet_ntop(AF_INET6, &api_nh->seg6_segs,
+ sid_buf, sizeof(sid_buf));
+ snprintf(segs_buf, sizeof(segs_buf), "segs %s",
+ sid_buf);
+ }
if (CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE)
&& !is_zero_mac(&api_nh->rmac))
snprintf(eth_buf, sizeof(eth_buf), " RMAC %s",
prefix_mac2str(&api_nh->rmac,
buf1, sizeof(buf1)));
- zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s",
+ zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s %s",
i + 1, nh_buf, api_nh->ifindex,
api_nh->vrf_id, api_nh->weight,
- label_buf, eth_buf);
+ label_buf, segs_buf, eth_buf);
}
int recursion_flag = 0;
return 0;
}
+/*
+ * zapi callback: zebra's SRv6 manager delivered a locator chunk. Record it
+ * on the default instance (ignoring duplicates and chunks for a locator we
+ * did not request) and re-run VPN leaking so pending SIDs get allocated.
+ */
+static void bgp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
+{
+	struct stream *s = NULL;
+	struct bgp *bgp = bgp_get_default();
+	struct listnode *node;
+	struct prefix_ipv6 *c;
+	struct srv6_locator_chunk s6c = {};
+	struct prefix_ipv6 *chunk = NULL;
+
+	/* SRv6 locator state is kept on the default instance only; guard
+	 * against the callback firing before it exists */
+	if (!bgp)
+		return;
+
+	s = zclient->ibuf;
+	zapi_srv6_locator_chunk_decode(s, &s6c);
+
+	if (strcmp(bgp->srv6_locator_name, s6c.locator_name) != 0) {
+		zlog_err("%s: Locator name unmatch %s:%s", __func__,
+			 bgp->srv6_locator_name, s6c.locator_name);
+		return;
+	}
+
+	/* ignore chunks we already hold */
+	for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, c)) {
+		if (!prefix_cmp(c, &s6c.prefix))
+			return;
+	}
+
+	chunk = prefix_ipv6_new();
+	*chunk = s6c.prefix;
+	listnode_add(bgp->srv6_locator_chunks, chunk);
+	vpn_leak_postchange_all();
+}
+
void bgp_zebra_init(struct thread_master *master, unsigned short instance)
{
zclient_num_connects = 0;
zclient->iptable_notify_owner = iptable_notify_owner;
zclient->route_notify_owner = bgp_zebra_route_notify_owner;
zclient->instance = instance;
+ zclient->process_srv6_locator_chunk =
+ bgp_zebra_process_srv6_locator_chunk;
}
void bgp_zebra_destroy(void)
zlog_debug("send capabilty success");
return BGP_GR_SUCCESS;
}
+
+/*
+ * Request a chunk of locator "name" from zebra's SRv6 manager. The reply
+ * arrives asynchronously via bgp_zebra_process_srv6_locator_chunk().
+ * Returns the zclient request result (negative on failure).
+ */
+int bgp_zebra_srv6_manager_get_locator_chunk(const char *name)
+{
+	return srv6_manager_get_locator_chunk(zclient, name);
+}
extern int bgp_zebra_send_capabilities(struct bgp *bgp, bool disable);
extern int bgp_zebra_update(afi_t afi, safi_t safi, vrf_id_t vrf_id, int type);
extern int bgp_zebra_stale_timer_update(struct bgp *bgp);
+extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name);
#endif /* _QUAGGA_BGP_ZEBRA_H */
return BGP_GR_SUCCESS;
}
+/* Initialize per-instance SRv6 state; the backend starts disabled and the
+ * lists are created up front so other code can iterate them freely. */
+static void bgp_srv6_init(struct bgp *bgp)
+{
+	bgp->srv6_enabled = false;
+	memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name));
+	bgp->srv6_functions = list_new();
+	bgp->srv6_locator_chunks = list_new();
+}
+
+/* Tear down the SRv6 state created by bgp_srv6_init(); list_delete()
+ * NULLs the pointer it is handed, so repeated cleanup is safe. */
+static void bgp_srv6_cleanup(struct bgp *bgp)
+{
+	if (bgp->srv6_functions)
+		list_delete(&bgp->srv6_functions);
+	if (bgp->srv6_locator_chunks)
+		list_delete(&bgp->srv6_locator_chunks);
+}
+
/* Allocate new peer object, implicitely locked. */
struct peer *peer_new(struct bgp *bgp)
{
bgp_evpn_init(bgp);
bgp_evpn_vrf_es_init(bgp);
bgp_pbr_init(bgp);
+ bgp_srv6_init(bgp);
/*initilize global GR FSM */
bgp_global_gr_init(bgp);
bgp_evpn_cleanup(bgp);
bgp_pbr_cleanup(bgp);
+ bgp_srv6_cleanup(bgp);
XFREE(MTYPE_BGP_EVPN_INFO, bgp->evpn_info);
for (afi = AFI_IP; afi < AFI_MAX; afi++) {
#include "lib/json.h"
#include "vrf.h"
#include "vty.h"
+#include "srv6.h"
#include "iana_afi.h"
/* For union sockunion. */
#define BGP_VPN_POLICY_TOVPN_LABEL_AUTO (1 << 0)
#define BGP_VPN_POLICY_TOVPN_RD_SET (1 << 1)
#define BGP_VPN_POLICY_TOVPN_NEXTHOP_SET (1 << 2)
+#define BGP_VPN_POLICY_TOVPN_SID_AUTO (1 << 3)
/*
* If we are importing another vrf into us keep a list of
* vrf names that we are being exported to.
*/
struct list *export_vrf;
+
+ /*
+ * Segment-Routing SRv6 Mode
+ */
+ uint32_t tovpn_sid_index; /* unset => set to 0 */
+ struct in6_addr *tovpn_sid;
+ struct in6_addr *tovpn_zebra_vrf_sid_last_sent;
};
/*
uint32_t routes_deleted;
};
+/* An SRv6 function (SID) allocated by bgpd, tagged with the name of the
+ * locator it was carved from. */
+struct bgp_srv6_function {
+	struct in6_addr sid;
+	char locator_name[SRV6_LOCNAME_SIZE];
+};
+
/* BGP instance structure. */
struct bgp {
/* AS number of this BGP instance. */
/* BGP route flap dampening configuration */
struct bgp_damp_config damp[AFI_MAX][SAFI_MAX];
+ /* BGP VPN SRv6 backend */
+ bool srv6_enabled;
+ char srv6_locator_name[SRV6_LOCNAME_SIZE];
+ struct list *srv6_locator_chunks;
+ struct list *srv6_functions;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp);
Requirements:
- Directory name for a new topotest must not contain hyphen (``-``) characters.
- To separate words, use underscores (``_``). For example, `tests/topotests/bgp_new_example`.
+ To separate words, use underscores (``_``). For example, ``tests/topotests/bgp_new_example``.
- Test code should always be declared inside functions that begin with the
``test_`` prefix. Functions beginning with different prefixes will not be run
by pytest.
# Use neigh information on selection of nexthop for multipath hops
net.ipv4.fib_multipath_use_neigh=1
-
-# Allows Apps to Work with VRF
-net.ipv4.tcp_l3mdev_accept=1
```
Show all configured BFD peers information and current status.
-.. clicmd:: show bfd [vrf NAME$vrf_name] peer <WORD$label|<A.B.C.D|X:X::X:X>$peer [{multihop|local-address <A.B.C.D|X:X::X:X>$local|interface IFNAME$ifname}]> [json]
+.. clicmd:: show bfd [vrf NAME] peer <WORD|<A.B.C.D|X:X::X:X> [{multihop|local-address <A.B.C.D|X:X::X:X>|interface IFNAME}]> [json]
Show status for a specific BFD peer.
The CLI will disallow attempts to configure incompatible leaking
modes.
+.. _bgp-l3vpn-srv6:
+
+L3VPN SRv6
+----------
+
+.. clicmd:: segment-routing srv6
+
+ Use SRv6 backend with BGP L3VPN, and go to its configuration node.
+
+.. clicmd:: locator NAME
+
+ Specify the SRv6 locator to be used for SRv6 L3VPN. The locator name must
+ also be configured in zebra, but the two may be configured in any order.
.. _bgp-evpn:
Display Information about update-group events in FRR.
+Segment-Routing IPv6
+--------------------
+
+.. clicmd:: show bgp segment-routing srv6
+
+ This command displays information about SRv6 L3VPN in bgpd. Specifically,
+ what kind of Locator is being used, and its Locator chunk information.
+ And the SID of the SRv6 Function that is actually managed on bgpd.
+ In the following example, bgpd is using a Locator named loc1, and two SRv6
+ Functions are managed to perform VPNv6 VRF redirect for vrf10 and vrf20.
+
+::
+
+ router# show bgp segment-routing srv6
+ locator_name: loc1
+ locator_chunks:
+ - 2001:db8:1:1::/64
+ functions:
+ - sid: 2001:db8:1:1::100
+ locator: loc1
+ - sid: 2001:db8:1:1::200
+ locator: loc1
+ bgps:
+ - name: default
+ vpn_policy[AFI_IP].tovpn_sid: none
+ vpn_policy[AFI_IP6].tovpn_sid: none
+ - name: vrf10
+ vpn_policy[AFI_IP].tovpn_sid: none
+ vpn_policy[AFI_IP6].tovpn_sid: 2001:db8:1:1::100
+ - name: vrf20
+ vpn_policy[AFI_IP].tovpn_sid: none
+ vpn_policy[AFI_IP6].tovpn_sid: 2001:db8:1:1::200
+
+
.. _bgp-route-reflector:
Route Reflector
:makevar:`VRF forwarding`
General information on Linux VRF support can be found in
- https://www.kernel.org/doc/Documentation/networking/vrf.txt. Kernel
- support for VRFs was introduced in 4.3 and improved upon through
- 4.13, which is the version most used in FRR testing (as of June
- 2018). Additional background on using Linux VRFs and kernel specific
- features can be found in
- http://schd.ws/hosted_files/ossna2017/fe/vrf-tutorial-oss.pdf.
-
- A separate BGP TCP socket is opened per VRF.
-
- **Important note** as of June 2018, Kernel versions 4.14-4.18 have a
- known bug where VRF-specific TCP sockets are not properly handled. When
- running these kernel versions, if unable to establish any VRF BGP
- adjacencies, downgrade to 4.13. The issue was fixed in 4.14.57, 4.17.9
- and more recent kernel versions.
+ https://www.kernel.org/doc/Documentation/networking/vrf.txt.
+
+ Kernel support for VRFs was introduced in 4.3, but there are known issues
+ in versions up to 4.15 (for IPv4) and 5.0 (for IPv6). The FRR CI system
+ doesn't perform VRF tests on older kernel versions, and VRFs may not work
+ on them. If you experience issues with VRF support, you should upgrade your
+ kernel version.
+
+ .. seealso:: :ref:`zebra-vrf`
Building
^^^^^^^^
Send opaque ZAPI messages with subtype ``type``. Sharpd will send
a stream of messages if the count is greater than one.
-.. clicmd:: sharp send opaque unicast type (1-255) $proto_str [{instance (0-1000) | session (1-1000)}] (1-1000)
+.. clicmd:: sharp send opaque unicast type (1-255) PROTOCOL [{instance (0-1000) | session (1-1000)}] (1-1000)
Send unicast opaque ZAPI messages with subtype ``type``. The
protocol, instance, and session_id identify a single target zapi
client. Sharpd will send a stream of messages if the count is
greater than one.
-.. clicmd:: sharp send opaque <reg | unreg> $proto_str [{instance (0-1000) | session (1-1000)}] type (1-1000)
+.. clicmd:: sharp send opaque <reg | unreg> PROTOCOL [{instance (0-1000) | session (1-1000)}] type (1-1000)
Send opaque ZAPI registration and unregistration messages for a
single subtype. The messages must specify a protocol daemon by
Show imported Traffic Engineering Data Base
+.. clicmd:: sharp install seg6-routes [vrf NAME] <A.B.C.D|X:X::X:X> nexthop-seg6 X:X::X:X encap X:X::X:X (1-1000000)
+
+ This command installs a route for SRv6 Transit behavior (on Linux it is
+ known as seg6 route). The count, destination, vrf, etc. have the same
+ meaning as in the ``sharp install routes`` command. With this command,
+ sharpd will request zebra to configure seg6 route via ZEBRA_ROUTE_ADD
+ ZAPI. As in the following example.
+
+::
+
+ router# sharp install seg6-routes 1::A nexthop-seg6 2001::2 encap A:: 1
+ router# sharp install seg6-routes 1::B nexthop-seg6 2001::2 encap B:: 1
+
+ router# show ipv6 route
+ D>* 1::A/128 [150/0] via 2001::2, dum0, seg6 a::, weight 1, 00:00:01
+ D>* 1::B/128 [150/0] via 2001::2, dum0, seg6 b::, weight 1, 00:00:01
+
+ bash# ip -6 route list
+ 1::A encap seg6 mode encap segs 1 [ a:: ] via 2001::2 dev dum0 proto 194 metric 20 pref medium
+ 1::B encap seg6 mode encap segs 1 [ b:: ] via 2001::2 dev dum0 proto 194 metric 20 pref medium
+
+.. clicmd:: sharp install seg6local-routes [vrf NAME] X:X::X:X nexthop-seg6local NAME ACTION ARGS.. (1-1000000)
+
+ This command installs a route for SRv6 Endpoint behavior (on Linux it is
+ known as seg6local route). The count, destination, vrf, etc. have the same
+ meaning as in the ``sharp install routes`` command. With this command,
+ sharpd will request zebra to configure seg6local route via ZEBRA_ROUTE_ADD
+ ZAPI. As in the following example.
+
+ There are many End Functions defined in SRv6, which have been standardized
+ in RFC 8986. The current implementation supports End, End.X, End.T, End.DX4,
+ and End.DT6, which can be configured as follows.
+
+::
+
+ router# sharp install seg6local-routes 1::1 nexthop-seg6local dum0 End 1
+ router# sharp install seg6local-routes 1::2 nexthop-seg6local dum0 End_X 2001::1 1
+ router# sharp install seg6local-routes 1::3 nexthop-seg6local dum0 End_T 10 1
+ router# sharp install seg6local-routes 1::4 nexthop-seg6local dum0 End_DX4 10.0.0.1 1
+ router# sharp install seg6local-routes 1::5 nexthop-seg6local dum0 End_DT6 10 1
+
+ router# show ipv6 route
+ D>* 1::1/128 [150/0] is directly connected, dum0, seg6local End USP, weight 1, 00:00:05
+ D>* 1::2/128 [150/0] is directly connected, dum0, seg6local End.X nh6 2001::1, weight 1, 00:00:05
+ D>* 1::3/128 [150/0] is directly connected, dum0, seg6local End.T table 10, weight 1, 00:00:05
+ D>* 1::4/128 [150/0] is directly connected, dum0, seg6local End.DX4 nh4 10.0.0.1, weight 1, 00:00:05
+ D>* 1::5/128 [150/0] is directly connected, dum0, seg6local End.DT6 table 10, weight 1, 00:00:05
+
+ bash# ip -6 route
+ 1::1 encap seg6local action End dev dum0 proto 194 metric 20 pref medium
+ 1::2 encap seg6local action End.X nh6 2001::1 dev dum0 proto 194 metric 20 pref medium
+ 1::3 encap seg6local action End.T table 10 dev dum0 proto 194 metric 20 pref medium
+ 1::4 encap seg6local action End.DX4 nh4 10.0.0.1 dev dum0 proto 194 metric 20 pref medium
+ 1::5 encap seg6local action End.DT6 table 10 dev dum0 proto 194 metric 20 pref medium
+
+.. clicmd:: show sharp segment-routing srv6
+
+ This command shows us what SRv6 locator chunk, sharp is holding as zclient.
+ An SRv6 locator is defined for each SRv6 router, and a single locator may
+ be shared by multiple protocols.
+
+ In the FRRouting implementation, the locator chunk get request is issued
+ by a routing protocol daemon such as sharpd or bgpd. Zebra then
+ allocates a locator chunk, which is a subset of the locator prefix, and
+ notifies the requesting protocol daemon of this information.
+
+ This command example shows how the locator chunk of sharpd itself is
+ allocated.
+
+::
+
+ router# show segment-routing srv6 locator
+ Locator:
+ Name ID 2 2001:db8:2:2::/64 Up
+
+ router# show sharp segment-routing srv6
+ Locator loc1 has 1 prefix chunks
+ 2001:db8:1:1::/64
+
+.. clicmd:: sharp srv6-manager get-locator-chunk
+
+ This command requests the SRv6 locator to allocate a locator chunk via ZAPI.
+ This chunk can be owned by the protocol daemon, and the chunk obtained by
+ sharpd will not be used by the SRv6 mechanism of another routing protocol.
+
+ Since this request is made asynchronously, it can be issued before the SRv6
+ locator is configured on the zebra side, and as soon as it is ready on the
+ zebra side, sharpd can check the allocated locator chunk via zapi.
+
+::
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: system
+
+ router# show sharp segment-routing srv6
+ (nothing)
+
+ router# sharp srv6-manager get-locator-chunk loc1
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: sharp
+
+ router# show sharp segment-routing srv6
+ Locator loc1 has 1 prefix chunks
+ 2001:db8:1:1::/64
+
+.. clicmd:: sharp srv6-manager release-locator-chunk
+
+ This command releases a locator chunk that has already been allocated by
+ ZAPI. The freed chunk will have its owner returned to the system and will
+ be available to another protocol daemon.
+
+::
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: sharp
+
+ router# show sharp segment-routing srv6
+ Locator loc1 has 1 prefix chunks
+ 2001:db8:1:1::/64
+
+ router# sharp srv6-manager release-locator-chunk loc1
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: system
+
+ router# show sharp segment-routing srv6
+ (nothing)
this interface. MPLS-TE must be enable at the OSPF
(:ref:`ospf-traffic-engineering`) or ISIS
(:ref:`isis-traffic-engineering`) router level in complement to
- this. Disable link parameters for this interface.
+ this.
Under link parameter statement, the following commands set the different TE values:
-.. clicmd:: link-params [enable]
+.. clicmd:: enable
Enable link parameters for this interface.
-.. clicmd:: link-params [metric (0-4294967295)]
+.. clicmd:: metric (0-4294967295)
-.. clicmd:: link-params max-bw BANDWIDTH
+.. clicmd:: max-bw BANDWIDTH
-.. clicmd:: link-params max-rsv-bw BANDWIDTH
+.. clicmd:: max-rsv-bw BANDWIDTH
-.. clicmd:: link-params unrsv-bw (0-7) BANDWIDTH
+.. clicmd:: unrsv-bw (0-7) BANDWIDTH
-.. clicmd:: link-params admin-grp BANDWIDTH
+.. clicmd:: admin-grp BANDWIDTH
These commands specifies the Traffic Engineering parameters of the interface
in conformity to RFC3630 (OSPF) or RFC5305 (ISIS). There are respectively
Note that BANDIWDTH is specified in IEEE floating point format and express
in Bytes/second.
-.. clicmd:: link-param delay (0-16777215) [min (0-16777215) | max (0-16777215)]
+.. clicmd:: delay (0-16777215) [min (0-16777215) | max (0-16777215)]
-.. clicmd:: link-param delay-variation (0-16777215)
+.. clicmd:: delay-variation (0-16777215)
-.. clicmd:: link-param packet-loss PERCENTAGE
+.. clicmd:: packet-loss PERCENTAGE
-.. clicmd:: link-param res-bw BANDWIDTH
+.. clicmd:: res-bw BANDWIDTH
-.. clicmd:: link-param ava-bw BANDWIDTH
+.. clicmd:: ava-bw BANDWIDTH
-.. clicmd:: link-param use-bw BANDWIDTH
+.. clicmd:: use-bw BANDWIDTH
These command specifies additional Traffic Engineering parameters of the
interface in conformity to draft-ietf-ospf-te-metrics-extension-05.txt and
(µs). Loss is specified in PERCENTAGE ranging from 0 to 50.331642% by step
of 0.000003.
-.. clicmd:: link-param neighbor <A.B.C.D> as (0-65535)
-
-.. clicmd:: link-param no neighbor
+.. clicmd:: neighbor <A.B.C.D> as (0-65535)
Specifies the remote ASBR IP address and Autonomous System (AS) number
for InterASv2 link in OSPF (RFC5392). Note that this option is not yet
supported for ISIS (RFC5316).
+Nexthop Tracking
+================
+
+Nexthop tracking doesn't resolve nexthops via the default route by default.
+Allowing this might be useful when e.g. you want to allow BGP to peer across
+the default route.
+
.. clicmd:: ip nht resolve-via-default
- Allows nexthop tracking to resolve via the default route. This is useful
- when e.g. you want to allow BGP to peer across the default route.
+ Allow IPv4 nexthop tracking to resolve via the default route. This parameter
+ is configured per-VRF, so the command is also available in the VRF subnode.
-.. _zebra-vrf:
+.. clicmd:: ipv6 nht resolve-via-default
+
+ Allow IPv6 nexthop tracking to resolve via the default route. This parameter
+ is configured per-VRF, so the command is also available in the VRF subnode.
Administrative Distance
=======================
Currently FRR only supports Route Replace semantics using the Linux
Kernel.
+.. _zebra-vrf:
+
Virtual Routing and Forwarding
==============================
21 Static 10.125.0.2 IPv4 Explicit Null
+.. _zebra-srv6:
+
+Segment-Routing IPv6
+====================
+
+Segment-Routing is a source routing paradigm that allows
+network operators to encode network intent into packets.
+SRv6 is an implementation of Segment-Routing
+based on IPv6 and the Segment Routing Header (SRH).
+
+All routing daemons can use the Segment-Routing base
+framework implemented in zebra to use the SRv6 routing mechanism.
+In that case, the user must apply the initial SRv6 configuration
+via FRR's CLI, frr.conf, or zebra.conf. This section shows how
+to configure SRv6 on FRR. SRv6 can also be used standalone,
+and this section covers that case as well.
+
+.. index:: show segment-routing srv6 locator [json]
+.. clicmd:: show segment-routing srv6 locator [json]
+
+ This command dumps the SRv6 locators configured on zebra. An SRv6 locator
+ is used to route to the node before performing the SRv6 function, and it
+ works as an aggregation of SRv6 function IDs. The following console log
+ shows two SRv6 locators, loc1 and loc2. Each locator is identified by a
+ unique IPv6 prefix. The information is printed as a JSON string when the
+ ``json`` keyword is given at the end of the command.
+
+::
+
+ router# sh segment-routing srv6 locator
+ Locator:
+ Name ID Prefix Status
+ -------------------- ------- ------------------------ -------
+ loc1 1 2001:db8:1:1::/64 Up
+ loc2 2 2001:db8:2:2::/64 Up
+
+.. index:: show segment-routing srv6 locator NAME detail [json]
+.. clicmd:: show segment-routing srv6 locator NAME detail [json]
+
+ As shown in the example, by specifying the name of the locator, you
+ can see the detailed information for each locator. Locator can be
+ represented by a single IPv6 prefix, but SRv6 is designed to share this
+ Locator among multiple Routing Protocols. For this purpose, zebra divides
+ the IPv6 prefix block that makes the Locator unique into multiple chunks,
+ and manages the ownership of each chunk.
+
+ For example, loc1 is owned by system, which means that it has not yet
+ been claimed by any routing protocol. loc2, on the other hand, is owned
+ by sharp. This means that sharpd (used for function development) holds
+ the chunk of this locator, and no other routing protocol will use this
+ area.
+
+::
+
+ router# show segment-routing srv6 locator loc1 detail
+ Name: loc1
+ Prefix: 2001:db8:1:1::/64
+ Chunks:
+ - prefix: 2001:db8:1:1::/64, owner: system
+
+ router# show segment-routing srv6 locator loc2 detail
+ Name: loc2
+ Prefix: 2001:db8:2:2::/64
+ Chunks:
+ - prefix: 2001:db8:2:2::/64, owner: sharp
+
+.. index:: segment-routing
+.. clicmd:: segment-routing
+
+ Move from configure mode to segment-routing node.
+
+.. index:: srv6
+.. clicmd:: srv6
+
+ Move from segment-routing node to srv6 node.
+
+.. index:: locators
+.. clicmd:: locators
+
+ Move from srv6 node to locator node. In this locator node, user can
+ configure detailed settings such as the actual srv6 locator.
+
+.. index:: locator NAME
+.. clicmd:: locator NAME
+
+ Create a new locator. If the name of an existing locator is specified,
+ move to that locator's configuration node to change its settings.
+
+.. index:: prefix X:X::X:X/M [function-bits-length 32]
+.. clicmd:: prefix X:X::X:X/M [function-bits-length 32]
+
+ Set the ipv6 prefix block of the locator. SRv6 locator is defined by
+ RFC8986. The actual routing protocol specifies the locator and allocates a
+ SID to be used by each routing protocol. This SID is included in the locator
+ as an IPv6 prefix.
+
+ The following example console log shows a typical configuration of the
+ SRv6 data-plane. After a new SRv6 locator named loc1 is created, loc1's
+ prefix is configured as ``2001:db8:1:1::/64``. If the user or a routing
+ daemon allocates a new SID on this locator, the new SID will be allocated
+ within the range of this prefix. For example, if a routing daemon creates
+ new SIDs on the locator (``2001:db8:1:1::/64``), the new SIDs will be
+ ``2001:db8:1:1:7::/80``, ``2001:db8:1:1:8::/80``, and so on. Each locator
+ has a default SID that is the SRv6 local function "End". The default SID
+ is usually allocated as ``PREFIX:1::``, where ``PREFIX`` is the locator's
+ prefix. For example, if the user configures the locator's prefix as
+ ``2001:db8:1:1::/64``, the default SID will be ``2001:db8:1:1:1::``.
+
+ The function bits range is 16bits by default. If operator want to change
+ function bits range, they can configure with ``function-bits-length``
+ option.
+
+::
+
+ router# configure terminal
+ router(config)# segment-routing
+ router(config-sr)# srv6
+ router(config-srv6)# locators
+ router(config-srv6-locs)# locator loc1
+ router(config-srv6-loc)# prefix 2001:db8:1:1::/64
+
+ router(config-srv6-loc)# show run
+ ...
+ segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:1:1::/64
+ !
+ ...
+
.. _multicast-rib-commands:
Multicast RIB Commands
as_str, vrf ? vrf : VRF_DEFAULT_NAME);
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, NULL);
+ return nb_cli_apply_changes_clear_pending(vty, NULL);
}
void eigrp_cli_show_header(struct vty *vty, struct lyd_node *dnode,
bfd_sess_set_ipv6_addrs(adj->bfd_session, &src_ip.ipv6,
&dst_ip.ipv6);
bfd_sess_set_interface(adj->bfd_session, adj->circuit->interface->name);
+ bfd_sess_set_vrf(adj->bfd_session, adj->circuit->interface->vrf_id);
bfd_sess_set_profile(adj->bfd_session, circuit->bfd_config.profile);
bfd_sess_install(adj->bfd_session);
return;
circuit->circ_type = CIRCUIT_T_BROADCAST;
} else if (if_is_pointopoint(ifp)) {
circuit->circ_type = CIRCUIT_T_P2P;
- } else if (if_is_loopback(ifp)) {
+ } else if (if_is_loopback_or_vrf(ifp)) {
circuit->circ_type = CIRCUIT_T_LOOPBACK;
circuit->is_passive = 1;
} else {
if (circuit->is_passive == passive)
return ferr_ok();
- if (if_is_loopback(circuit->interface) && !passive)
+ if (if_is_loopback_or_vrf(circuit->interface) && !passive)
return ferr_cfg_invalid("loopback is always passive");
if (circuit->state != C_STATE_UP) {
nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(
+ return nb_cli_apply_changes_clear_pending(
vty, "/frr-isisd:isis/instance[area-tag='%s'][vrf='%s']", tag,
vrf_name);
}
/* check if the interface is a loopback and if so set it as passive */
ifp = nb_running_get_entry(NULL, VTY_CURR_XPATH, false);
- if (ifp && if_is_loopback(ifp))
+ if (ifp && if_is_loopback_or_vrf(ifp))
nb_cli_enqueue_change(vty, "./frr-isisd:isis/passive",
NB_OP_MODIFY, "true");
/* check if the interface is a loopback and if so set it as passive */
ifp = nb_running_get_entry(NULL, VTY_CURR_XPATH, false);
- if (ifp && if_is_loopback(ifp))
+ if (ifp && if_is_loopback_or_vrf(ifp))
nb_cli_enqueue_change(vty, "./frr-isisd:isis/passive",
NB_OP_MODIFY, "true");
ifp = circuit->interface;
if (!ifp)
return NB_OK;
- if (if_is_loopback(ifp)) {
+ if (if_is_loopback_or_vrf(ifp)) {
snprintf(args->errmsg, args->errmsg_len,
"Loopback is always passive");
return NB_ERR_VALIDATION;
case PCEP_PCC_NODE:
ret = PCEP_NODE;
break;
+ case SRV6_NODE:
+ ret = SEGMENT_ROUTING_NODE;
+ break;
+ case SRV6_LOCS_NODE:
+ ret = SRV6_NODE;
+ break;
+ case SRV6_LOC_NODE:
+ ret = SRV6_LOCS_NODE;
+ break;
default:
ret = CONFIG_NODE;
break;
* non-YANG command.
*/
if (matched_element->attr != CMD_ATTR_YANG)
- nb_cli_pending_commit_check(vty);
+ (void)nb_cli_pending_commit_check(vty);
}
ret = matched_element->func(matched_element, vty, argc, argv);
BGP_VNC_L2_GROUP_NODE, /* BGP VNC L2 group */
RFP_DEFAULTS_NODE, /* RFP defaults node */
BGP_EVPN_NODE, /* BGP EVPN node. */
+ BGP_SRV6_NODE, /* BGP SRv6 node. */
OSPF_NODE, /* OSPF protocol mode */
OSPF6_NODE, /* OSPF protocol for IPv6 mode */
LDP_NODE, /* LDP protocol mode */
PCEP_PCE_CONFIG_NODE, /* PCE shared configuration node */
PCEP_PCE_NODE, /* PCE configuration node */
PCEP_PCC_NODE, /* PCC configuration node */
+ SRV6_NODE, /* SRv6 node */
+ SRV6_LOCS_NODE, /* SRv6 locators node */
+ SRV6_LOC_NODE, /* SRv6 locator node */
VTY_NODE, /* Vty node. */
FPM_NODE, /* Dataplane FPM node. */
LINK_PARAMS_NODE, /* Link-parameters node */
if (read)
frrzmq_check_events(cbp, &cb->write, ZMQ_POLLOUT);
- _thread_add_read_write(t->xref, t->master, frrzmq_read_msg, cbp,
- cb->fd, &cb->read.thread);
+ thread_add_read(t->master, frrzmq_read_msg, cbp,
+ cb->fd, &cb->read.thread);
return 0;
out_err:
if (events & ZMQ_POLLIN) {
thread_cancel(&cb->read.thread);
- _thread_add_event(xref, master, frrzmq_read_msg, cbp, fd,
+ thread_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
} else
- _thread_add_read_write(xref, master, frrzmq_read_msg, cbp, fd,
- &cb->read.thread);
+ thread_add_read(master, frrzmq_read_msg, cbp, fd,
+ &cb->read.thread);
return 0;
}
if (written)
frrzmq_check_events(cbp, &cb->read, ZMQ_POLLIN);
- _thread_add_read_write(t->xref, t->master, frrzmq_write_msg, cbp,
- cb->fd, &cb->write.thread);
+ thread_add_write(t->master, frrzmq_write_msg, cbp,
+ cb->fd, &cb->write.thread);
return 0;
out_err:
_thread_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
} else
- _thread_add_read_write(xref, master, frrzmq_write_msg, cbp, fd,
- &cb->write.thread);
+ thread_add_write(master, frrzmq_write_msg, cbp, fd,
+ &cb->write.thread);
return 0;
}
thread_cancel(&core->thread);
if ((*cb)->read.cancelled && !(*cb)->read.thread
- && (*cb)->write.cancelled && (*cb)->write.thread)
+ && (*cb)->write.cancelled && !(*cb)->write.thread)
XFREE(MTYPE_ZEROMQ_CB, *cb);
}
len = sizeof(events);
if (zmq_getsockopt(cb->zmqsock, ZMQ_EVENTS, &events, &len))
return;
- if (events & event && core->thread && !core->cancelled) {
+ if ((events & event) && core->thread && !core->cancelled) {
struct thread_master *tm = core->thread->master;
+
thread_cancel(&core->thread);
- thread_add_event(tm, (event == ZMQ_POLLIN ? frrzmq_read_msg
- : frrzmq_write_msg),
- cbp, cb->fd, &core->thread);
+ if (event == ZMQ_POLLIN)
+ thread_add_event(tm, frrzmq_read_msg,
+ cbp, cb->fd, &core->thread);
+ else
+ thread_add_event(tm, frrzmq_write_msg,
+ cbp, cb->fd, &core->thread);
}
}
p = connected->destination;
if (p) {
- strncat(logbuf, inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
- BUFSIZ - strlen(logbuf));
+ strlcat(logbuf, inet_ntop(p->family, &p->u.prefix, buf, BUFSIZ),
+ BUFSIZ);
}
zlog_info("%s", logbuf);
}
vrf_name);
nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL);
- ret = nb_cli_apply_changes(vty, xpath_list);
+ ret = nb_cli_apply_changes_clear_pending(vty, xpath_list);
if (ret == CMD_SUCCESS) {
VTY_PUSH_XPATH(INTERFACE_NODE, xpath_list);
* all interface-level commands are converted to the new
* northbound model.
*/
- nb_cli_pending_commit_check(vty);
ifp = if_lookup_by_name(ifname, vrf_id);
if (ifp)
VTY_PUSH_CONTEXT(INTERFACE_NODE, ifp);
#include "module.h"
#include "defaults.h"
#include "lib_vty.h"
+#include "northbound_cli.h"
/* Looking up memory status from vty interface. */
#include "vector.h"
{
callback.readin_time = monotime(NULL);
+ vty->pending_allowed = 1;
+
if (callback.start_config)
(*callback.start_config)();
{
time_t readin_time;
char readin_time_str[MONOTIME_STRLEN];
+ int ret;
readin_time = monotime(NULL);
readin_time -= callback.readin_time;
frrtime_to_interval(readin_time, readin_time_str,
sizeof(readin_time_str));
+ vty->pending_allowed = 0;
+ ret = nb_cli_pending_commit_check(vty);
+
zlog_info("Configuration Read in Took: %s", readin_time_str);
if (callback.end_config)
(*callback.end_config)();
- return CMD_SUCCESS;
+ return ret;
}
void cmd_init_config_callbacks(void (*start_config_cb)(void),
DEFINE_MTYPE_STATIC(LIB, NEXTHOP, "Nexthop");
DEFINE_MTYPE_STATIC(LIB, NH_LABEL, "Nexthop label");
+DEFINE_MTYPE_STATIC(LIB, NH_SRV6, "Nexthop srv6");
static int _nexthop_labels_cmp(const struct nexthop *nh1,
const struct nexthop *nh2)
(nhl1->num_labels * sizeof(mpls_label_t)));
}
+/*
+ * Compare the SRv6 state of two nexthops. Returns 0 when both carry the
+ * same (or no) SRv6 info, and a consistent non-zero ordering otherwise:
+ * a nexthop with SRv6 info sorts after one without; then the seg6local
+ * action, the seg6local context and finally the segment are compared.
+ */
+static int _nexthop_srv6_cmp(const struct nexthop *nh1,
+ const struct nexthop *nh2)
+{
+ int ret = 0;
+
+ if (!nh1->nh_srv6 && !nh2->nh_srv6)
+ return 0;
+
+ if (nh1->nh_srv6 && !nh2->nh_srv6)
+ return 1;
+
+ if (!nh1->nh_srv6 && nh2->nh_srv6)
+ return -1;
+
+ if (nh1->nh_srv6->seg6local_action > nh2->nh_srv6->seg6local_action)
+ return 1;
+
+ /* Fixed: the original tested "nh2->...action < nh1->...action",
+ * which duplicates the check above, so this branch was unreachable
+ * and -1 was never returned for a smaller action.
+ */
+ if (nh1->nh_srv6->seg6local_action < nh2->nh_srv6->seg6local_action)
+ return -1;
+
+ ret = memcmp(&nh1->nh_srv6->seg6local_ctx,
+ &nh2->nh_srv6->seg6local_ctx,
+ sizeof(struct seg6local_context));
+ if (ret != 0)
+ return ret;
+
+ ret = memcmp(&nh1->nh_srv6->seg6_segs,
+ &nh2->nh_srv6->seg6_segs,
+ sizeof(struct in6_addr));
+
+ return ret;
+}
+
int nexthop_g_addr_cmp(enum nexthop_types_t type, const union g_addr *addr1,
const union g_addr *addr2)
{
return ret;
ret = _nexthop_labels_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = _nexthop_srv6_cmp(next1, next2);
return ret;
}
void nexthop_free(struct nexthop *nexthop)
{
nexthop_del_labels(nexthop);
+ nexthop_del_srv6_seg6local(nexthop);
+ nexthop_del_srv6_seg6(nexthop);
if (nexthop->resolved)
nexthops_free(nexthop->resolved);
XFREE(MTYPE_NEXTHOP, nexthop);
nexthop->nh_label_type = ZEBRA_LSP_NONE;
}
+/* Attach a seg6local action and its context to the nexthop, allocating
+ * the nh_srv6 container on first use. A no-op when the action is
+ * ZEBRA_SEG6_LOCAL_ACTION_UNSPEC.
+ */
+void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
+ const struct seg6local_context *ctx)
+{
+ if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ return;
+
+ if (!nexthop->nh_srv6)
+ nexthop->nh_srv6 = XCALLOC(MTYPE_NH_SRV6,
+ sizeof(struct nexthop_srv6));
+
+ nexthop->nh_srv6->seg6local_action = action;
+ nexthop->nh_srv6->seg6local_ctx = *ctx;
+}
+
+/* Clear the nexthop's seg6local action; the nh_srv6 container is freed
+ * only when its seg6 segment is also zero (i.e. nothing else uses it).
+ */
+void nexthop_del_srv6_seg6local(struct nexthop *nexthop)
+{
+ if (!nexthop->nh_srv6)
+ return;
+
+ nexthop->nh_srv6->seg6local_action = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+
+ if (sid_zero(&nexthop->nh_srv6->seg6_segs))
+ XFREE(MTYPE_NH_SRV6, nexthop->nh_srv6);
+}
+
+/* Attach a seg6 segment (SRv6 transit encap) to the nexthop, allocating
+ * the nh_srv6 container on first use. A no-op for a NULL segment.
+ */
+void nexthop_add_srv6_seg6(struct nexthop *nexthop,
+ const struct in6_addr *segs)
+{
+ if (!segs)
+ return;
+
+ if (!nexthop->nh_srv6)
+ nexthop->nh_srv6 = XCALLOC(MTYPE_NH_SRV6,
+ sizeof(struct nexthop_srv6));
+
+ nexthop->nh_srv6->seg6_segs = *segs;
+}
+
+/* Zero the nexthop's seg6 segment; the nh_srv6 container is freed only
+ * when no seg6local action remains in it either.
+ */
+void nexthop_del_srv6_seg6(struct nexthop *nexthop)
+{
+ if (!nexthop->nh_srv6)
+ return;
+
+ memset(&nexthop->nh_srv6->seg6_segs, 0,
+ sizeof(nexthop->nh_srv6->seg6_segs));
+
+ if (nexthop->nh_srv6->seg6local_action ==
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ XFREE(MTYPE_NH_SRV6, nexthop->nh_srv6);
+}
+
const char *nexthop2str(const struct nexthop *nexthop, char *str, int size)
{
switch (nexthop->type) {
key = jhash_1word(nexthop->backup_idx[i], key);
}
+ if (nexthop->nh_srv6) {
+ key = jhash_1word(nexthop->nh_srv6->seg6local_action, key);
+ key = jhash(&nexthop->nh_srv6->seg6local_ctx,
+ sizeof(nexthop->nh_srv6->seg6local_ctx), key);
+ key = jhash(&nexthop->nh_srv6->seg6_segs,
+ sizeof(nexthop->nh_srv6->seg6_segs), key);
+ }
+
return key;
}
nexthop_add_labels(copy, nexthop->nh_label_type,
nexthop->nh_label->num_labels,
&nexthop->nh_label->label[0]);
+
+ if (nexthop->nh_srv6) {
+ if (nexthop->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(copy,
+ nexthop->nh_srv6->seg6local_action,
+ &nexthop->nh_srv6->seg6local_ctx);
+ if (!sid_zero(&nexthop->nh_srv6->seg6_segs))
+ nexthop_add_srv6_seg6(copy,
+ &nexthop->nh_srv6->seg6_segs);
+ }
}
void nexthop_copy(struct nexthop *copy, const struct nexthop *nexthop,
#include "prefix.h"
#include "mpls.h"
#include "vxlan.h"
+#include "srv6.h"
#ifdef __cplusplus
extern "C" {
/* SR-TE color used for matching SR-TE policies */
uint32_t srte_color;
+
+ /* SRv6 information */
+ struct nexthop_srv6 *nh_srv6;
};
/* Utility to append one nexthop to another. */
void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype,
uint8_t num_labels, const mpls_label_t *labels);
void nexthop_del_labels(struct nexthop *);
+void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action,
+ const struct seg6local_context *ctx);
+void nexthop_del_srv6_seg6local(struct nexthop *nexthop);
+void nexthop_add_srv6_seg6(struct nexthop *nexthop,
+ const struct in6_addr *segs);
+void nexthop_del_srv6_seg6(struct nexthop *nexthop);
/*
* Allocate a new nexthop object and initialize it from various args.
return 'n';
}
+#if 0 /* Used below in nb_config_diff inside normally disabled code */
static inline void nb_config_diff_dnode_log_path(const char *context,
const char *path,
const struct lyd_node *dnode)
nb_config_diff_dnode_log_path(context, path, dnode);
free(path);
}
+#endif
/* Calculate the delta between two different configurations. */
static void nb_config_diff(const struct nb_config *config1,
LY_ERR err;
char *path;
-#if 0 /* Useful (but noisy) when debugging diff code, and for improving later \
- */
+#if 0 /* Useful (noisy) when debugging diff code, and for improving later */
if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
LY_LIST_FOR(config1->dnode, root) {
LYD_TREE_DFS_BEGIN(root, dnode) {
LYD_DIFF_DEFAULTS, &diff);
assert(!err);
- if (diff && DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
- nb_config_diff_dnode_log("iterating diff", diff);
+ if (diff && DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
+ char *s;
+
+ if (!lyd_print_mem(&s, diff, LYD_JSON,
+ LYD_PRINT_WITHSIBLINGS | LYD_PRINT_WD_ALL)) {
+ zlog_debug("%s: %s", __func__, s);
+ free(s);
+ }
+ }
uint32_t seq = 0;
+
LY_LIST_FOR (diff, root) {
LYD_TREE_DFS_BEGIN (root, dnode) {
op = nb_lyd_diff_get_op(dnode);
path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
-#if 0 /* Useful (but noisy) when debugging diff code, and for improving later \
- */
+#if 0 /* Useful (noisy) when debugging diff code, and for improving later */
if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
char context[80];
snprintf(context, sizeof(context),
}
}
- lyd_free_tree(diff);
+ lyd_free_all(diff);
}
int nb_candidate_edit(struct nb_config *candidate,
xpath_edit, err);
return NB_ERR;
} else if (dnode) {
+ /* Create default nodes */
+ LY_ERR err = lyd_new_implicit_tree(
+ dnode, LYD_IMPLICIT_NO_STATE, NULL);
+ if (err) {
+ flog_warn(EC_LIB_LIBYANG,
+ "%s: lyd_new_implicit_all failed: %d",
+ __func__, err);
+ }
/*
* create dependency
*
ly_native_ctx, dep_xpath,
NULL, LYD_NEW_PATH_UPDATE,
&dep_dnode);
+ /* Create default nodes */
+ if (!err)
+ err = lyd_new_implicit_tree(
+ dep_dnode,
+ LYD_IMPLICIT_NO_STATE, NULL);
if (err) {
flog_warn(
EC_LIB_LIBYANG,
extern struct debug nb_dbg_cbs_rpc;
extern struct debug nb_dbg_notif;
extern struct debug nb_dbg_events;
+extern struct debug nb_dbg_libyang;
/* Global running configuration. */
extern struct nb_config *running_config;
default:
vty_out(vty, "%% Configuration failed.\n\n");
vty_show_nb_errors(vty, ret, errmsg);
- if (vty->t_pending_commit)
+ if (vty->pending_commit)
vty_out(vty,
"The following commands were dynamically grouped into the same transaction and rejected:\n%s",
vty->pending_cmds_buf);
static void nb_cli_pending_commit_clear(struct vty *vty)
{
- THREAD_OFF(vty->t_pending_commit);
- vty->backoff_cmd_count = 0;
+ vty->pending_commit = 0;
XFREE(MTYPE_TMP, vty->pending_cmds_buf);
vty->pending_cmds_buflen = 0;
vty->pending_cmds_bufpos = 0;
}
-static int nb_cli_pending_commit_cb(struct thread *thread)
+int nb_cli_pending_commit_check(struct vty *vty)
{
- struct vty *vty = THREAD_ARG(thread);
-
- (void)nb_cli_classic_commit(vty);
- nb_cli_pending_commit_clear(vty);
+ int ret = CMD_SUCCESS;
- return 0;
-}
-
-void nb_cli_pending_commit_check(struct vty *vty)
-{
- if (vty->t_pending_commit) {
- (void)nb_cli_classic_commit(vty);
+ if (vty->pending_commit) {
+ ret = nb_cli_classic_commit(vty);
nb_cli_pending_commit_clear(vty);
}
-}
-
-static bool nb_cli_backoff_start(struct vty *vty)
-{
- struct timeval now, delta;
-
- /*
- * Start the configuration backoff timer only if 100 YANG-modeled
- * commands or more were entered within the last second.
- */
- monotime(&now);
- if (monotime_since(&vty->backoff_start, &delta) >= 1000000) {
- vty->backoff_start = now;
- vty->backoff_cmd_count = 1;
- return false;
- }
- if (++vty->backoff_cmd_count < 100)
- return false;
- return true;
+ return ret;
}
static int nb_cli_schedule_command(struct vty *vty)
vty->pending_cmds_buflen);
/* Schedule the commit operation. */
- THREAD_OFF(vty->t_pending_commit);
- thread_add_timer_msec(master, nb_cli_pending_commit_cb, vty, 100,
- &vty->t_pending_commit);
+ vty->pending_commit = 1;
return CMD_SUCCESS;
}
change->value = value;
}
-int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
+static int nb_cli_apply_changes_internal(struct vty *vty,
+ const char *xpath_base,
+ bool clear_pending)
{
- char xpath_base[XPATH_MAXLEN] = {};
bool error = false;
- VTY_CHECK_XPATH;
-
- /* Parse the base XPath format string. */
- if (xpath_base_fmt) {
- va_list ap;
+ if (xpath_base == NULL)
+ xpath_base = "";
- va_start(ap, xpath_base_fmt);
- vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
- va_end(ap);
- }
+ VTY_CHECK_XPATH;
/* Edit candidate configuration. */
for (size_t i = 0; i < vty->num_cfg_changes; i++) {
/* Handle relative XPaths. */
memset(xpath, 0, sizeof(xpath));
if (vty->xpath_index > 0
- && ((xpath_base_fmt && xpath_base[0] == '.')
- || change->xpath[0] == '.'))
+ && (xpath_base[0] == '.' || change->xpath[0] == '.'))
strlcpy(xpath, VTY_CURR_XPATH, sizeof(xpath));
- if (xpath_base_fmt) {
+ if (xpath_base[0]) {
if (xpath_base[0] == '.')
strlcat(xpath, xpath_base + 1, sizeof(xpath));
else
}
/*
- * Do an implicit commit when using the classic CLI mode.
+ * Maybe do an implicit commit when using the classic CLI mode.
*
* NOTE: the implicit commit might be scheduled to run later when
* too many commands are being sent at the same time. This is a
* faster.
*/
if (frr_get_cli_mode() == FRR_CLI_CLASSIC) {
- if (vty->t_pending_commit || nb_cli_backoff_start(vty))
+ if (clear_pending) {
+ if (vty->pending_commit)
+ return nb_cli_pending_commit_check(vty);
+ } else if (vty->pending_allowed)
return nb_cli_schedule_command(vty);
+ assert(!vty->pending_commit);
return nb_cli_classic_commit(vty);
}
return CMD_SUCCESS;
}
+/*
+ * Apply the changes enqueued on the vty to the candidate configuration.
+ *
+ * Formats the optional base XPath and delegates to
+ * nb_cli_apply_changes_internal() with clear_pending=false, i.e. the
+ * commit may be batched with other pending commands.
+ */
+int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
+{
+	char xpath_base[XPATH_MAXLEN] = {};
+
+	/* Parse the base XPath format string. */
+	if (xpath_base_fmt) {
+		va_list ap;
+
+		va_start(ap, xpath_base_fmt);
+		vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
+		va_end(ap);
+	}
+	return nb_cli_apply_changes_internal(vty, xpath_base, false);
+}
+
+/*
+ * Same as nb_cli_apply_changes(), but passes clear_pending=true so any
+ * previously batched (pending) commit is flushed together with the
+ * currently enqueued changes instead of being deferred further.
+ */
+int nb_cli_apply_changes_clear_pending(struct vty *vty,
+				       const char *xpath_base_fmt, ...)
+{
+	char xpath_base[XPATH_MAXLEN] = {};
+
+	/* Parse the base XPath format string. */
+	if (xpath_base_fmt) {
+		va_list ap;
+
+		va_start(ap, xpath_base_fmt);
+		vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
+		va_end(ap);
+	}
+	return nb_cli_apply_changes_internal(vty, xpath_base, true);
+}
+
int nb_cli_rpc(struct vty *vty, const char *xpath, struct list *input,
struct list *output)
{
const char *value);
/*
- * Apply enqueued changes to the candidate configuration.
+ * Apply enqueued changes to the candidate configuration, do not batch,
+ * and apply any pending commits along with the currently enqueued.
+ *
+ * vty
+ * The vty context.
+ *
+ * xpath_base_fmt
+ * Prepend the given XPath (absolute or relative) to all enqueued
+ * configuration changes. This is an optional parameter.
+ *
+ * Returns:
+ * CMD_SUCCESS on success, CMD_WARNING_CONFIG_FAILED otherwise.
+ */
+extern int nb_cli_apply_changes_clear_pending(struct vty *vty,
+ const char *xpath_base_fmt, ...);
+
+/*
+ * Apply enqueued changes to the candidate configuration, this function
+ * may not immediately apply the changes, instead adding them to a pending
+ * queue.
*
* vty
* The vty context.
*
* vty
* The vty context.
+ *
+ * Returns
+ * CMD_SUCCESS on success (or no pending), CMD_WARNING_CONFIG_FAILED
+ * otherwise.
*/
-extern void nb_cli_pending_commit_check(struct vty *vty);
+extern int nb_cli_pending_commit_check(struct vty *vty);
/* Prototypes of internal functions. */
extern void nb_cli_show_config_prepare(struct nb_config *config,
#ifndef __cplusplus
#define prefixtype(uname, typename, fieldname) \
typename *fieldname;
+#define TRANSPARENT_UNION __attribute__((transparent_union))
#else
#define prefixtype(uname, typename, fieldname) \
typename *fieldname; \
uname(typename *x) { this->fieldname = x; }
+#define TRANSPARENT_UNION
#endif
union prefixptr {
prefixtype(prefixptr, struct prefix_evpn, evp)
prefixtype(prefixptr, struct prefix_fs, fs)
prefixtype(prefixptr, struct prefix_rd, rd)
-} __attribute__((transparent_union));
+} TRANSPARENT_UNION;
union prefixconstptr {
prefixtype(prefixconstptr, const struct prefix, p)
prefixtype(prefixconstptr, const struct prefix_evpn, evp)
prefixtype(prefixconstptr, const struct prefix_fs, fs)
prefixtype(prefixconstptr, const struct prefix_rd, rd)
-} __attribute__((transparent_union));
+} TRANSPARENT_UNION;
+
+#undef prefixtype
+#undef TRANSPARENT_UNION
#ifndef INET_ADDRSTRLEN
#define INET_ADDRSTRLEN 16
#include "srv6.h"
#include "log.h"
+DEFINE_QOBJ_TYPE(srv6_locator);
+DEFINE_MTYPE_STATIC(LIB, SRV6_LOCATOR, "SRV6 locator");
+DEFINE_MTYPE_STATIC(LIB, SRV6_LOCATOR_CHUNK, "SRV6 locator chunk");
+
const char *seg6local_action2str(uint32_t action)
{
switch (action) {
}
const char *seg6local_context2str(char *str, size_t size,
- struct seg6local_context *ctx, uint32_t action)
+ const struct seg6local_context *ctx,
+ uint32_t action)
{
char b0[128];
return str;
}
}
+
+/*
+ * Allocate a new, zero-initialized SRv6 locator.
+ *
+ * name is copied (and silently truncated if longer than the fixed-size
+ * buffer) into locator->name.  The locator owns a freshly created empty
+ * chunk list and is registered with the QOBJ framework.
+ */
+struct srv6_locator *srv6_locator_alloc(const char *name)
+{
+	struct srv6_locator *locator = NULL;
+
+	locator = XCALLOC(MTYPE_SRV6_LOCATOR, sizeof(struct srv6_locator));
+	strlcpy(locator->name, name, sizeof(locator->name));
+	locator->chunks = list_new();
+	QOBJ_REG(locator, srv6_locator);
+	return locator;
+}
+
+/* Allocate a new, zero-initialized SRv6 locator chunk. */
+struct srv6_locator_chunk *srv6_locator_chunk_alloc(void)
+{
+	struct srv6_locator_chunk *chunk = NULL;
+
+	chunk = XCALLOC(MTYPE_SRV6_LOCATOR_CHUNK,
+			sizeof(struct srv6_locator_chunk));
+	return chunk;
+}
+
+/*
+ * Free an SRv6 locator structure.
+ *
+ * NOTE(review): only the struct itself is freed here — the chunks list
+ * created by srv6_locator_alloc() is not list_delete()'d and the QOBJ
+ * registration is not undone; confirm callers release those first.
+ */
+void srv6_locator_free(struct srv6_locator *locator)
+{
+	XFREE(MTYPE_SRV6_LOCATOR, locator);
+}
+
+/* Free an SRv6 locator chunk structure. */
+void srv6_locator_chunk_free(struct srv6_locator_chunk *chunk)
+{
+	XFREE(MTYPE_SRV6_LOCATOR_CHUNK, chunk);
+}
+
+/*
+ * Render one locator chunk as a JSON object:
+ * { "prefix": "<ipv6/len>", "proto": "<owning daemon name>" }.
+ *
+ * The caller owns the returned json_object and must put/free it.
+ */
+json_object *srv6_locator_chunk_json(const struct srv6_locator_chunk *chunk)
+{
+	char str[256];
+	json_object *jo_root = NULL;
+
+	jo_root = json_object_new_object();
+	prefix2str(&chunk->prefix, str, sizeof(str));
+	json_object_string_add(jo_root, "prefix", str);
+	json_object_string_add(jo_root, "proto",
+			       zebra_route_string(chunk->proto));
+
+	return jo_root;
+}
+
+/*
+ * Render a locator as a JSON object with keys "name", "prefix",
+ * "functionBitsLength", "statusUp" and a "chunks" array (one entry per
+ * chunk, built by srv6_locator_chunk_json()).
+ *
+ * The caller owns the returned json_object and must put/free it.
+ */
+json_object *srv6_locator_json(const struct srv6_locator *loc)
+{
+	char str[256];
+	struct listnode *node;
+	struct srv6_locator_chunk *chunk;
+	json_object *jo_root = NULL;
+	json_object *jo_chunk = NULL;
+	json_object *jo_chunks = NULL;
+
+	jo_root = json_object_new_object();
+
+	/* set name */
+	json_object_string_add(jo_root, "name", loc->name);
+
+	/* set prefix */
+	prefix2str(&loc->prefix, str, sizeof(str));
+	json_object_string_add(jo_root, "prefix", str);
+
+	/* set function_bits_length */
+	json_object_int_add(jo_root, "functionBitsLength",
+			    loc->function_bits_length);
+
+	/* set status_up */
+	json_object_boolean_add(jo_root, "statusUp",
+				loc->status_up);
+
+	/* set chunks */
+	jo_chunks = json_object_new_array();
+	json_object_object_add(jo_root, "chunks", jo_chunks);
+	/* cast drops const: ALL_LIST_ELEMENTS_RO takes a non-const list */
+	for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
+		jo_chunk = srv6_locator_chunk_json(chunk);
+		json_object_array_add(jo_chunks, jo_chunk);
+	}
+
+	return jo_root;
+}
#define _FRR_SRV6_H
#include <zebra.h>
+#include "prefix.h"
+#include "json.h"
+
#include <arpa/inet.h>
#include <netinet/in.h>
#define SRV6_MAX_SIDS 16
+#define SRV6_LOCNAME_SIZE 256
#ifdef __cplusplus
extern "C" {
uint32_t table;
};
+struct srv6_locator {
+ char name[SRV6_LOCNAME_SIZE];
+ struct prefix_ipv6 prefix;
+
+ /*
+ * Bit length of SRv6 locator described in
+ * draft-ietf-bess-srv6-services-05#section-3.2.1
+ */
+ uint8_t block_bits_length;
+ uint8_t node_bits_length;
+ uint8_t function_bits_length;
+ uint8_t argument_bits_length;
+
+ int algonum;
+ uint64_t current;
+ bool status_up;
+ struct list *chunks;
+
+ QOBJ_FIELDS;
+};
+DECLARE_QOBJ_TYPE(srv6_locator);
+
+struct srv6_locator_chunk {
+ char locator_name[SRV6_LOCNAME_SIZE];
+ struct prefix_ipv6 prefix;
+
+ /*
+ * Bit length of SRv6 locator described in
+ * draft-ietf-bess-srv6-services-05#section-3.2.1
+ */
+ uint8_t block_bits_length;
+ uint8_t node_bits_length;
+ uint8_t function_bits_length;
+ uint8_t argument_bits_length;
+
+ /*
+ * For Zclient communication values
+ */
+ uint8_t keep;
+ uint8_t proto;
+ uint16_t instance;
+ uint32_t session_id;
+};
+
+struct nexthop_srv6 {
+ /* SRv6 localsid info for Endpoint-behaviour */
+ enum seg6local_action_t seg6local_action;
+ struct seg6local_context seg6local_ctx;
+
+ /* SRv6 Headend-behaviour */
+ struct in6_addr seg6_segs;
+};
+
static inline const char *seg6_mode2str(enum seg6_mode_t mode)
{
switch (mode) {
const char *
seg6local_action2str(uint32_t action);
-const char *
-seg6local_context2str(char *str, size_t size,
- struct seg6local_context *ctx, uint32_t action);
+const char *seg6local_context2str(char *str, size_t size,
+ const struct seg6local_context *ctx,
+ uint32_t action);
int snprintf_seg6_segs(char *str,
size_t size, const struct seg6_segs *segs);
+extern struct srv6_locator *srv6_locator_alloc(const char *name);
+extern struct srv6_locator_chunk *srv6_locator_chunk_alloc(void);
+extern void srv6_locator_free(struct srv6_locator *locator);
+extern void srv6_locator_chunk_free(struct srv6_locator_chunk *chunk);
+json_object *srv6_locator_chunk_json(const struct srv6_locator_chunk *chunk);
+json_object *srv6_locator_json(const struct srv6_locator *loc);
+
#ifdef __cplusplus
}
#endif
vrfname);
nb_cli_enqueue_change(vty, xpath_list, NB_OP_CREATE, NULL);
- ret = nb_cli_apply_changes(vty, xpath_list);
+ ret = nb_cli_apply_changes_clear_pending(vty, xpath_list);
if (ret == CMD_SUCCESS) {
VTY_PUSH_XPATH(VRF_NODE, xpath_list);
- nb_cli_pending_commit_check(vty);
vrfp = vrf_lookup_by_name(vrfname);
if (vrfp)
VTY_PUSH_CONTEXT(VRF_NODE, vrfp);
return vrf_default_name;
}
-int vrf_bind(vrf_id_t vrf_id, int fd, const char *name)
+int vrf_bind(vrf_id_t vrf_id, int fd, const char *ifname)
{
int ret = 0;
struct interface *ifp;
+ struct vrf *vrf;
+
+ if (fd < 0)
+ return -1;
+
+ if (vrf_id == VRF_UNKNOWN)
+ return -1;
+
+ /* can't bind to a VRF that doesn't exist */
+ vrf = vrf_lookup_by_id(vrf_id);
+ if (!vrf_is_enabled(vrf))
+ return -1;
+
+ if (ifname && strcmp(ifname, vrf->name)) {
+ /* binding to a regular interface */
+
+ /* can't bind to an interface that doesn't exist */
+ ifp = if_lookup_by_name(ifname, vrf_id);
+ if (!ifp)
+ return -1;
+ } else {
+ /* binding to a VRF device */
+
+ /* nothing to do for netns */
+ if (vrf_is_backend_netns())
+ return 0;
+
+ /* nothing to do for default vrf */
+ if (vrf_id == VRF_DEFAULT)
+ return 0;
+
+ ifname = vrf->name;
+ }
- if (fd < 0 || name == NULL)
- return fd;
- /* the device should exist
- * otherwise we should return
- * case ifname = vrf in netns mode => return
- */
- ifp = if_lookup_by_name(name, vrf_id);
- if (!ifp)
- return fd;
#ifdef SO_BINDTODEVICE
- ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name)+1);
+ ret = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, ifname,
+ strlen(ifname) + 1);
if (ret < 0)
- zlog_debug("bind to interface %s failed, errno=%d", name,
- errno);
+ zlog_err("bind to interface %s failed, errno=%d", ifname,
+ errno);
#endif /* SO_BINDTODEVICE */
return ret;
}
const char *name);
/*
- * Binds a socket to a VRF device.
+ * Binds a socket to an interface (ifname) in a VRF (vrf_id).
*
- * If name is null, the socket is not bound, irrespective of any other
- * arguments.
+ * If ifname is NULL or is equal to the VRF name then bind to a VRF device.
+ * Otherwise, bind to the specified interface in the specified VRF.
*
- * name should be the name of the VRF device. vrf_id should be the
- * corresponding vrf_id (the ifindex of the device).
+ * Returns 0 on success and -1 on failure.
*/
-extern int vrf_bind(vrf_id_t vrf_id, int fd, const char *name);
+extern int vrf_bind(vrf_id_t vrf_id, int fd, const char *ifname);
/* VRF ioctl operations */
extern int vrf_getaddrinfo(const char *node, const char *service,
{
vty->xpath_index = 0;
- /* Perform pending commit if any. */
- nb_cli_pending_commit_check(vty);
+ /* Perform any pending commits. */
+ (void)nb_cli_pending_commit_check(vty);
/* Check if there's a pending confirmed commit. */
if (vty->t_confirmed_commit_timeout) {
struct nb_config *candidate_config_base;
/* Dynamic transaction information. */
- struct timeval backoff_start;
- size_t backoff_cmd_count;
- struct thread *t_pending_commit;
+ bool pending_allowed;
+ bool pending_commit;
char *pending_cmds_buf;
size_t pending_cmds_buflen;
size_t pending_cmds_bufpos;
#include "lib_errors.h"
#include "srte.h"
#include "printfrr.h"
+#include "srv6.h"
DEFINE_MTYPE_STATIC(LIB, ZCLIENT, "Zclient");
DEFINE_MTYPE_STATIC(LIB, REDIST_INST, "Redistribution instance IDs");
return zclient_send_message(zclient);
}
+/*
+ * Install or remove an SRv6 local SID by sending a ZAPI route message.
+ *
+ * The SID is encoded as an IPv6 /128 host route in VRF_DEFAULT with
+ * type ZEBRA_ROUTE_BGP, SAFI unicast.  When action is
+ * ZEBRA_SEG6_LOCAL_ACTION_UNSPEC the route is deleted; otherwise a
+ * single interface nexthop (oif) carrying the seg6local action/context
+ * is attached and the route is added.
+ */
+enum zclient_send_status zclient_send_localsid(struct zclient *zclient,
+		const struct in6_addr *sid, ifindex_t oif,
+		enum seg6local_action_t action,
+		const struct seg6local_context *context)
+{
+	struct prefix_ipv6 p = {};
+	struct zapi_route api = {};
+	struct nexthop nh = {};
+
+	p.family = AF_INET6;
+	p.prefixlen = 128;
+	p.prefix = *sid;
+
+	api.vrf_id = VRF_DEFAULT;
+	api.type = ZEBRA_ROUTE_BGP;
+	api.instance = 0;
+	api.safi = SAFI_UNICAST;
+	memcpy(&api.prefix, &p, sizeof(p));
+
+	/* UNSPEC action means "remove this localsid route" */
+	if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+		return zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, &api);
+
+	SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
+	SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
+
+	nh.type = NEXTHOP_TYPE_IFINDEX;
+	nh.ifindex = oif;
+	SET_FLAG(nh.flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
+	nexthop_add_srv6_seg6local(&nh, action, context);
+
+	zapi_nexthop_from_nexthop(&api.nexthops[0], &nh);
+	api.nexthop_num = 1;
+
+	return zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api);
+}
+
/* Send register requests to zebra daemon for the information in a VRF. */
void zclient_send_reg_requests(struct zclient *zclient, vrf_id_t vrf_id)
{
return memcmp(next1->labels, next2->labels, next1->label_num);
}
+/*
+ * Three-way compare of the SRv6 attributes of two zapi nexthops:
+ * seg6 segment address first, then seg6local action, then seg6local
+ * context.  Returns <0/0/>0; the ordering (memcmp-based) is arbitrary
+ * but stable, which is all the nexthop sort requires.
+ */
+static int zapi_nexthop_srv6_cmp(const struct zapi_nexthop *next1,
+				 const struct zapi_nexthop *next2)
+{
+	int ret = 0;
+
+	ret = memcmp(&next1->seg6_segs, &next2->seg6_segs,
+		     sizeof(struct in6_addr));
+	if (ret != 0)
+		return ret;
+
+	if (next1->seg6local_action > next2->seg6local_action)
+		return 1;
+
+	if (next1->seg6local_action < next2->seg6local_action)
+		return -1;
+
+	return memcmp(&next1->seg6local_ctx, &next2->seg6local_ctx,
+		      sizeof(struct seg6local_context));
+}
+
static int zapi_nexthop_cmp_no_labels(const struct zapi_nexthop *next1,
const struct zapi_nexthop *next2)
{
return ret;
ret = zapi_nexthop_labels_cmp(next1, next2);
+ if (ret != 0)
+ return ret;
+
+ ret = zapi_nexthop_srv6_cmp(next1, next2);
return ret;
}
stream_putc(s, api_nh->backup_idx[i]);
}
+ if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)) {
+ stream_putl(s, api_nh->seg6local_action);
+ stream_write(s, &api_nh->seg6local_ctx,
+ sizeof(struct seg6local_context));
+ }
+
+ if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6))
+ stream_write(s, &api_nh->seg6_segs,
+ sizeof(struct in6_addr));
+
done:
return ret;
}
+/*
+ * Serialize a locator chunk onto a ZAPI stream.  Wire format:
+ * u16 name length, name bytes (no NUL), u16 prefix length, 16-byte
+ * IPv6 prefix, then four u8 fields (block/node/function/argument bit
+ * lengths).  Must stay in sync with zapi_srv6_locator_chunk_decode().
+ */
+int zapi_srv6_locator_chunk_encode(struct stream *s,
+				   const struct srv6_locator_chunk *c)
+{
+	stream_putw(s, strlen(c->locator_name));
+	stream_put(s, c->locator_name, strlen(c->locator_name));
+	stream_putw(s, c->prefix.prefixlen);
+	stream_put(s, &c->prefix.prefix, sizeof(c->prefix.prefix));
+	stream_putc(s, c->block_bits_length);
+	stream_putc(s, c->node_bits_length);
+	stream_putc(s, c->function_bits_length);
+	stream_putc(s, c->argument_bits_length);
+	return 0;
+}
+
+/*
+ * Parse a locator chunk from a ZAPI stream (inverse of
+ * zapi_srv6_locator_chunk_encode()).  Returns 0 on success, -1 if the
+ * stream is short or the advertised name length is out of range.
+ *
+ * NOTE(review): the bound check accepts len == SRV6_LOCNAME_SIZE, which
+ * would fill locator_name completely and leave no room for a NUL
+ * terminator ('>=' looks intended) — confirm senders never reach this.
+ */
+int zapi_srv6_locator_chunk_decode(struct stream *s,
+				   struct srv6_locator_chunk *c)
+{
+	uint16_t len = 0;
+
+	c->prefix.family = AF_INET6;
+
+	STREAM_GETW(s, len);
+	if (len > SRV6_LOCNAME_SIZE)
+		goto stream_failure;
+
+	STREAM_GET(c->locator_name, s, len);
+	STREAM_GETW(s, c->prefix.prefixlen);
+	STREAM_GET(&c->prefix.prefix, s, sizeof(c->prefix.prefix));
+	STREAM_GETC(s, c->block_bits_length);
+	STREAM_GETC(s, c->node_bits_length);
+	STREAM_GETC(s, c->function_bits_length);
+	STREAM_GETC(s, c->argument_bits_length);
+	return 0;
+
+stream_failure:
+	return -1;
+}
+
static int zapi_nhg_encode(struct stream *s, int cmd, struct zapi_nhg *api_nhg)
{
int i;
STREAM_GETC(s, api_nh->backup_idx[i]);
}
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)) {
+ STREAM_GETL(s, api_nh->seg6local_action);
+ STREAM_GET(&api_nh->seg6local_ctx, s,
+ sizeof(struct seg6local_context));
+ }
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6))
+ STREAM_GET(&api_nh->seg6_segs, s,
+ sizeof(struct in6_addr));
+
/* Success */
ret = 0;
memcpy(n->backup_idx, znh->backup_idx, n->backup_num);
}
+ if (znh->seg6local_action != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(n, znh->seg6local_action,
+ &znh->seg6local_ctx);
+
+ if (!sid_zero(&znh->seg6_segs))
+ nexthop_add_srv6_seg6(n, &znh->seg6_segs);
+
return n;
}
memcpy(znh->backup_idx, nh->backup_idx, znh->backup_num);
}
+ if (nh->nh_srv6) {
+ if (nh->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL);
+ znh->seg6local_action = nh->nh_srv6->seg6local_action;
+ memcpy(&znh->seg6local_ctx,
+ &nh->nh_srv6->seg6local_ctx,
+ sizeof(struct seg6local_context));
+ }
+
+ if (!sid_zero(&nh->nh_srv6->seg6_segs)) {
+ SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6);
+ memcpy(&znh->seg6_segs, &nh->nh_srv6->seg6_segs,
+ sizeof(struct in6_addr));
+ }
+ }
+
return 0;
}
return -1;
}
+/**
+ * Function to request a srv6-locator chunk in an asynchronous way
+ *
+ * @param zclient Zclient used to connect to the SRv6 manager (zebra)
+ * @param locator_name Name of SRv6-locator
+ * @result 0 on success, -1 otherwise
+ */
+int srv6_manager_get_locator_chunk(struct zclient *zclient,
+				   const char *locator_name)
+{
+	struct stream *s;
+	const size_t len = strlen(locator_name);
+
+	if (zclient_debug)
+		zlog_debug("Getting SRv6-Locator Chunk %s", locator_name);
+
+	/* not connected to zebra */
+	if (zclient->sock < 0)
+		return -1;
+
+	/* send request */
+	s = zclient->obuf;
+	stream_reset(s);
+	zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK,
+			      VRF_DEFAULT);
+
+	/* locator_name */
+	stream_putw(s, len);
+	stream_put(s, locator_name, len);
+
+	/* Put length at the first point of the stream. */
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	return zclient_send_message(zclient);
+}
+
+/**
+ * Function to release a srv6-locator chunk
+ *
+ * @param zclient Zclient used to connect to the SRv6 manager (zebra)
+ * @param locator_name Name of SRv6-locator
+ * @result 0 on success, -1 otherwise
+ */
+int srv6_manager_release_locator_chunk(struct zclient *zclient,
+				       const char *locator_name)
+{
+	struct stream *s;
+	const size_t len = strlen(locator_name);
+
+	if (zclient_debug)
+		zlog_debug("Releasing SRv6-Locator Chunk %s", locator_name);
+
+	/* not connected to zebra */
+	if (zclient->sock < 0)
+		return -1;
+
+	/* send request */
+	s = zclient->obuf;
+	stream_reset(s);
+	zclient_create_header(s, ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK,
+			      VRF_DEFAULT);
+
+	/* locator_name */
+	stream_putw(s, len);
+	stream_put(s, locator_name, len);
+
+	/* Put length at the first point of the stream. */
+	stream_putw_at(s, 0, stream_get_endp(s));
+
+	return zclient_send_message(zclient);
+}
+
/*
* Asynchronous label chunk request
*
enum zclient_send_status zclient_send_mlag_deregister(struct zclient *client)
{
- return zebra_message_send(client, ZEBRA_MLAG_CLIENT_UNREGISTER, VRF_DEFAULT);
+ return zebra_message_send(client, ZEBRA_MLAG_CLIENT_UNREGISTER,
+ VRF_DEFAULT);
}
enum zclient_send_status zclient_send_mlag_data(struct zclient *client,
case ZEBRA_MLAG_FORWARD_MSG:
zclient_mlag_handle_msg(command, zclient, length, vrf_id);
break;
+ case ZEBRA_SRV6_LOCATOR_ADD:
+ if (zclient->srv6_locator_add)
+ (*zclient->srv6_locator_add)(command, zclient, length,
+ vrf_id);
+ break;
+ case ZEBRA_SRV6_LOCATOR_DELETE:
+ if (zclient->srv6_locator_delete)
+ (*zclient->srv6_locator_delete)(command, zclient,
+ length, vrf_id);
+ break;
+ case ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK:
+ if (zclient->process_srv6_locator_chunk)
+ (*zclient->process_srv6_locator_chunk)(command, zclient,
+ length, vrf_id);
+ break;
case ZEBRA_ERROR:
zclient_handle_error(command, zclient, length, vrf_id);
break;
#include "mlag.h"
#include "srte.h"
+#include "srv6.h"
#ifdef __cplusplus
extern "C" {
ZEBRA_NHG_NOTIFY_OWNER,
ZEBRA_EVPN_REMOTE_NH_ADD,
ZEBRA_EVPN_REMOTE_NH_DEL,
+ ZEBRA_SRV6_LOCATOR_ADD,
+ ZEBRA_SRV6_LOCATOR_DELETE,
+ ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK,
+ ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK,
ZEBRA_ERROR,
ZEBRA_CLIENT_CAPABILITIES,
ZEBRA_OPAQUE_MESSAGE,
int (*mlag_process_down)(void);
int (*mlag_handle_msg)(struct stream *msg, int len);
int (*nhg_notify_owner)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_locator_add)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_locator_delete)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_function_add)(ZAPI_CALLBACK_ARGS);
+ int (*srv6_function_delete)(ZAPI_CALLBACK_ARGS);
+ void (*process_srv6_locator_chunk)(ZAPI_CALLBACK_ARGS);
int (*handle_error)(enum zebra_error_types error);
int (*opaque_msg_handler)(ZAPI_CALLBACK_ARGS);
int (*opaque_register_handler)(ZAPI_CALLBACK_ARGS);
/* SR-TE color. */
uint32_t srte_color;
+
+ /* SRv6 localsid info for Endpoint-behaviour */
+ uint32_t seg6local_action;
+ struct seg6local_context seg6local_ctx;
+
+ /* SRv6 Headend-behaviour */
+ struct in6_addr seg6_segs;
};
/*
#define ZAPI_NEXTHOP_FLAG_LABEL 0x02
#define ZAPI_NEXTHOP_FLAG_WEIGHT 0x04
#define ZAPI_NEXTHOP_FLAG_HAS_BACKUP 0x08 /* Nexthop has a backup */
+#define ZAPI_NEXTHOP_FLAG_SEG6 0x10
+#define ZAPI_NEXTHOP_FLAG_SEG6LOCAL 0x20
/*
* ZAPI Nexthop Group. For use with protocol creation of nexthop groups.
zclient_send_vrf_label(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi,
mpls_label_t label, enum lsp_types_t ltype);
+extern enum zclient_send_status
+zclient_send_localsid(struct zclient *zclient, const struct in6_addr *sid,
+ ifindex_t oif, enum seg6local_action_t action,
+ const struct seg6local_context *context);
+
extern void zclient_send_reg_requests(struct zclient *, vrf_id_t);
extern void zclient_send_dereg_requests(struct zclient *, vrf_id_t);
extern enum zclient_send_status
uint32_t *start, uint32_t *end);
extern int tm_release_table_chunk(struct zclient *zclient, uint32_t start,
uint32_t end);
+extern int srv6_manager_get_locator_chunk(struct zclient *zclient,
+ const char *locator_name);
+extern int srv6_manager_release_locator_chunk(struct zclient *zclient,
+ const char *locator_name);
extern enum zclient_send_status zebra_send_sr_policy(struct zclient *zclient,
int cmd,
struct zapi_labels *zl);
extern int zapi_labels_decode(struct stream *s, struct zapi_labels *zl);
+extern int zapi_srv6_locator_chunk_encode(struct stream *s,
+ const struct srv6_locator_chunk *c);
+extern int zapi_srv6_locator_chunk_decode(struct stream *s,
+ struct srv6_locator_chunk *c);
+
extern enum zclient_send_status zebra_send_pw(struct zclient *zclient,
int command, struct zapi_pw *pw);
extern int zebra_read_pw_status_update(ZAPI_CALLBACK_ARGS,
bfd_sess_set_ipv6_addrs(on->bfd_session, on->ospf6_if->linklocal_addr,
&on->linklocal_addr);
bfd_sess_set_interface(on->bfd_session, oi->interface->name);
+ bfd_sess_set_vrf(on->bfd_session, oi->interface->vrf_id);
bfd_sess_set_profile(on->bfd_session, oi->bfd_config.profile);
}
{
if (if_is_pointopoint(ifp))
return OSPF_IFTYPE_POINTOPOINT;
- else if (if_is_loopback(ifp))
+ else if (if_is_loopback_or_vrf(ifp))
return OSPF_IFTYPE_LOOPBACK;
else
return OSPF_IFTYPE_BROADCAST;
if (if_is_operative(ifp)
&& (ospf6_interface_get_linklocal_address(oi->interface)
- || if_is_loopback(oi->interface)))
+ || if_is_loopback_or_vrf(oi->interface)))
thread_execute(master, interface_up, oi, 0);
else
thread_execute(master, interface_down, oi, 0);
/* check interface has a link-local address */
if (!(ospf6_interface_get_linklocal_address(oi->interface)
- || if_is_loopback(oi->interface))) {
+ || if_is_loopback_or_vrf(oi->interface))) {
if (IS_OSPF6_DEBUG_INTERFACE)
zlog_debug(
"Interface %s has no link local address, can't execute [InterfaceUp]",
/* Schedule Hello */
if (!CHECK_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE)
- && !if_is_loopback(oi->interface)) {
+ && !if_is_loopback_or_vrf(oi->interface)) {
oi->thread_send_hello = NULL;
thread_add_event(master, ospf6_hello_send, oi, 0,
&oi->thread_send_hello);
THREAD_OFF(oi->thread_sso);
/* don't send hellos over loopback interface */
- if (!if_is_loopback(oi->interface))
+ if (!if_is_loopback_or_vrf(oi->interface))
thread_add_event(master, ospf6_hello_send, oi, 0,
&oi->thread_send_hello);
{
struct listnode *node, *nnode;
struct ospf6_area *oa;
+ struct vrf *vrf;
QOBJ_UNREG(o);
ospf6_distance_reset(o);
route_table_finish(o->distance_table);
+ if (o->vrf_id != VRF_UNKNOWN) {
+ vrf = vrf_lookup_by_id(o->vrf_id);
+ if (vrf)
+ ospf6_vrf_unlink(o, vrf);
+ }
+
XFREE(MTYPE_OSPF6_TOP, o->name);
XFREE(MTYPE_OSPF6_TOP, o);
}
/* Final cleanup of ospf instance */
static void ospf_finish_final(struct ospf *ospf)
{
- struct vrf *vrf;
+ struct vrf *vrf = vrf_lookup_by_id(ospf->vrf_id);
struct route_node *rn;
struct ospf_nbr_nbma *nbr_nbma;
struct ospf_lsa *lsa;
+ struct interface *ifp;
struct ospf_interface *oi;
struct ospf_area *area;
struct ospf_vl_data *vl_data;
if (ospf->vrf_id == VRF_DEFAULT)
ospf_ldp_sync_gbl_exit(ospf, true);
+ /* Remove ospf interface config params: only passive-interface */
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ struct ospf_if_params *params;
+
+ params = IF_DEF_PARAMS(ifp);
+ if (OSPF_IF_PARAM_CONFIGURED(params, passive_interface))
+ UNSET_IF_PARAM(params, passive_interface);
+ }
+
/* Reset interface. */
for (ALL_LIST_ELEMENTS(ospf->oiflist, node, nnode, oi))
ospf_if_free(oi);
ospf->max_multipath = MULTIPATH_NUM;
ospf_delete(ospf);
- if (ospf->name) {
- vrf = vrf_lookup_by_name(ospf->name);
- if (vrf)
- ospf_vrf_unlink(ospf, vrf);
- XFREE(MTYPE_OSPF_TOP, ospf->name);
- } else {
- vrf = vrf_lookup_by_id(VRF_DEFAULT);
- if (vrf)
- ospf_vrf_unlink(ospf, vrf);
- }
+ if (vrf)
+ ospf_vrf_unlink(ospf, vrf);
+ if (ospf->name)
+ XFREE(MTYPE_OSPF_TOP, ospf->name);
XFREE(MTYPE_OSPF_TOP, ospf);
}
dnode, "./constraints/bandwidth/value");
required = yang_dnode_get_bool(
dnode, "./constraints/bandwidth/required");
- vty_out(vty, " %sbandwidth",
- required ? "required " : "");
+ vty_out(vty, " bandwidth");
config_write_float(vty, bandwidth);
+ if (required)
+ vty_out(vty, " required");
vty_out(vty, "\n");
}
if (yang_dnode_exists(dnode,
session_logic_handle
->session_event_queue);
pthread_mutex_unlock(
- &(session_logic_handle_
+ &(session_logic_handle
->session_list_mutex));
continue;
}
session_logic_handle->session_event_queue);
pthread_mutex_unlock(
- &(session_logic_handle_->session_list_mutex));
+ &(session_logic_handle->session_list_mutex));
}
session_logic_handle->session_logic_condition = false;
pim_ifp->bfd_config.min_rx, pim_ifp->bfd_config.min_tx);
bfd_sess_set_ipv4_addrs(neigh->bfd_session, NULL, &neigh->source_addr);
bfd_sess_set_interface(neigh->bfd_session, neigh->interface->name);
+ bfd_sess_set_vrf(neigh->bfd_session, neigh->interface->vrf_id);
bfd_sess_set_profile(neigh->bfd_session, pim_ifp->bfd_config.profile);
bfd_sess_install(neigh->bfd_session);
}
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, NULL);
+ return nb_cli_apply_changes_clear_pending(vty, NULL);
}
void cli_show_router_rip(struct vty *vty, struct lyd_node *dnode,
nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, NULL);
+ return nb_cli_apply_changes_clear_pending(vty, NULL);
}
void cli_show_router_ripng(struct vty *vty, struct lyd_node *dnode,
#ifndef __SHARP_GLOBAL_H__
#define __SHARP_GLOBAL_H__
+#include "lib/srv6.h"
+
DECLARE_MGROUP(SHARPD);
struct sharp_routes {
uint32_t removed_routes;
int32_t repeat;
+ /* ZAPI_ROUTE's flag */
+ uint32_t flags;
+
uint8_t inst;
vrf_id_t vrf_id;
char opaque[ZAPI_MESSAGE_OPAQUE_LENGTH];
};
+struct sharp_srv6_locator {
+ /* name of locator */
+ char name[SRV6_LOCNAME_SIZE];
+
+ /* list of struct prefix_ipv6 */
+ struct list *chunks;
+};
+
struct sharp_global {
/* Global data about route install/deletions */
struct sharp_routes r;
/* Traffic Engineering Database */
struct ls_ted *ted;
+
+ /* list of sharp_srv6_locator */
+ struct list *srv6_locators;
};
extern struct sharp_global sg;
memset(&sg, 0, sizeof(sg));
sg.nhs = list_new();
sg.ted = NULL;
+ sg.srv6_locators = list_new();
}
static void sharp_start_configuration(void)
#include "sharpd/sharp_vty_clippy.c"
#endif
+DEFINE_MTYPE_STATIC(SHARPD, SRV6_LOCATOR, "SRv6 Locator");
+
DEFPY(watch_redistribute, watch_redistribute_cmd,
"sharp watch [vrf NAME$vrf_name] redistribute " FRR_REDIST_STR_SHARPD,
"Sharp routing Protocol\n"
rts = routes;
sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, nhgid,
&sg.r.nhop_group, &sg.r.backup_nhop_group,
- rts, sg.r.opaque);
+ rts, 0, sg.r.opaque);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (install_seg6_routes,
+ install_seg6_routes_cmd,
+ "sharp install seg6-routes [vrf NAME$vrf_name]\
+ <A.B.C.D$start4|X:X::X:X$start6>\
+ nexthop-seg6 X:X::X:X$seg6_nh6 encap X:X::X:X$seg6_seg\
+ (1-1000000)$routes [repeat (2-1000)$rpt]",
+ "Sharp routing Protocol\n"
+ "install some routes\n"
+ "Routes to install\n"
+ "The vrf we would like to install into if non-default\n"
+ "The NAME of the vrf\n"
+ "v4 Address to start /32 generation at\n"
+ "v6 Address to start /32 generation at\n"
+ "Nexthop-seg6 to use\n"
+ "V6 Nexthop address to use\n"
+ "Encap mode\n"
+ "Segment List to use\n"
+ "How many to create\n"
+ "Should we repeat this command\n"
+ "How many times to repeat this command\n")
+{
+ struct vrf *vrf;
+ struct prefix prefix;
+ uint32_t route_flags = 0;
+
+ sg.r.total_routes = routes;
+ sg.r.installed_routes = 0;
+
+ if (rpt >= 2)
+ sg.r.repeat = rpt * 2;
+ else
+ sg.r.repeat = 0;
+
+ memset(&prefix, 0, sizeof(prefix));
+ memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
+ memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
+ sg.r.opaque[0] = '\0';
+ sg.r.inst = 0;
+
+ if (start4.s_addr != INADDR_ANY) {
+ prefix.family = AF_INET;
+ prefix.prefixlen = 32;
+ prefix.u.prefix4 = start4;
+ } else {
+ prefix.family = AF_INET6;
+ prefix.prefixlen = 128;
+ prefix.u.prefix6 = start6;
+ }
+ sg.r.orig_prefix = prefix;
+
+ if (!vrf_name)
+ vrf_name = VRF_DEFAULT_NAME;
+
+ vrf = vrf_lookup_by_name(vrf_name);
+ if (!vrf) {
+ vty_out(vty, "The vrf NAME specified: %s does not exist\n",
+ vrf_name);
+ return CMD_WARNING;
+ }
+
+ sg.r.nhop.type = NEXTHOP_TYPE_IPV6;
+ sg.r.nhop.gate.ipv6 = seg6_nh6;
+ sg.r.nhop.vrf_id = vrf->vrf_id;
+ sg.r.nhop_group.nexthop = &sg.r.nhop;
+ nexthop_add_srv6_seg6(&sg.r.nhop, &seg6_seg);
+
+ sg.r.vrf_id = vrf->vrf_id;
+ sharp_install_routes_helper(&prefix, sg.r.vrf_id, sg.r.inst, 0,
+ &sg.r.nhop_group, &sg.r.backup_nhop_group,
+ routes, route_flags, sg.r.opaque);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (install_seg6local_routes,
+ install_seg6local_routes_cmd,
+ "sharp install seg6local-routes [vrf NAME$vrf_name]\
+ X:X::X:X$start6\
+ nexthop-seg6local NAME$seg6l_oif\
+ <End$seg6l_end|\
+ End_X$seg6l_endx X:X::X:X$seg6l_endx_nh6|\
+ End_T$seg6l_endt (1-4294967295)$seg6l_endt_table|\
+ End_DX4$seg6l_enddx4 A.B.C.D$seg6l_enddx4_nh4|\
+ End_DT6$seg6l_enddt6 (1-4294967295)$seg6l_enddt6_table>\
+ (1-1000000)$routes [repeat (2-1000)$rpt]",
+ "Sharp routing Protocol\n"
+ "install some routes\n"
+ "Routes to install\n"
+ "The vrf we would like to install into if non-default\n"
+ "The NAME of the vrf\n"
+ "v6 Address to start /32 generation at\n"
+ "Nexthop-seg6local to use\n"
+ "Output device to use\n"
+ "SRv6 End function to use\n"
+ "SRv6 End.X function to use\n"
+ "V6 Nexthop address to use\n"
+ "SRv6 End.T function to use\n"
+ "Redirect table id to use\n"
+ "SRv6 End.DX4 function to use\n"
+ "V4 Nexthop address to use\n"
+ "SRv6 End.DT6 function to use\n"
+ "Redirect table id to use\n"
+ "How many to create\n"
+ "Should we repeat this command\n"
+ "How many times to repeat this command\n")
+{
+ struct vrf *vrf;
+ uint32_t route_flags = 0;
+ struct seg6local_context ctx = {};
+ enum seg6local_action_t action;
+
+ sg.r.total_routes = routes;
+ sg.r.installed_routes = 0;
+
+ if (rpt >= 2)
+ sg.r.repeat = rpt * 2;
+ else
+ sg.r.repeat = 0;
+
+ memset(&sg.r.orig_prefix, 0, sizeof(sg.r.orig_prefix));
+ memset(&sg.r.nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.nhop_group, 0, sizeof(sg.r.nhop_group));
+ memset(&sg.r.backup_nhop, 0, sizeof(sg.r.nhop));
+ memset(&sg.r.backup_nhop_group, 0, sizeof(sg.r.nhop_group));
+ sg.r.opaque[0] = '\0';
+ sg.r.inst = 0;
+ sg.r.orig_prefix.family = AF_INET6;
+ sg.r.orig_prefix.prefixlen = 128;
+ sg.r.orig_prefix.u.prefix6 = start6;
+
+ if (!vrf_name)
+ vrf_name = VRF_DEFAULT_NAME;
+
+ vrf = vrf_lookup_by_name(vrf_name);
+ if (!vrf) {
+ vty_out(vty, "The vrf NAME specified: %s does not exist\n",
+ vrf_name);
+ return CMD_WARNING;
+ }
+
+ if (seg6l_enddx4) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_DX4;
+ ctx.nh4 = seg6l_enddx4_nh4;
+ } else if (seg6l_endx) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_X;
+ ctx.nh6 = seg6l_endx_nh6;
+ } else if (seg6l_endt) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_T;
+ ctx.table = seg6l_endt_table;
+ } else if (seg6l_enddt6) {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END_DT6;
+ ctx.table = seg6l_enddt6_table;
+ } else {
+ action = ZEBRA_SEG6_LOCAL_ACTION_END;
+ }
+
+ sg.r.nhop.type = NEXTHOP_TYPE_IFINDEX;
+ sg.r.nhop.ifindex = ifname2ifindex(seg6l_oif, vrf->vrf_id);
+ sg.r.nhop.vrf_id = vrf->vrf_id;
+ sg.r.nhop_group.nexthop = &sg.r.nhop;
+ nexthop_add_srv6_seg6local(&sg.r.nhop, action, &ctx);
+
+ sg.r.vrf_id = vrf->vrf_id;
+ sharp_install_routes_helper(&sg.r.orig_prefix, sg.r.vrf_id, sg.r.inst,
+ 0, &sg.r.nhop_group,
+ &sg.r.backup_nhop_group, routes,
+ route_flags, sg.r.opaque);
return CMD_SUCCESS;
}
return CMD_SUCCESS;
}
+DEFPY (sharp_srv6_manager_get_locator_chunk,
+ sharp_srv6_manager_get_locator_chunk_cmd,
+ "sharp srv6-manager get-locator-chunk NAME$locator_name",
+ SHARP_STR
+ "Segment-Routing IPv6\n"
+ "Get SRv6 locator-chunk\n"
+ "SRv6 Locator name\n")
+{
+ int ret;
+ struct listnode *node;
+ struct sharp_srv6_locator *loc;
+ struct sharp_srv6_locator *loc_found = NULL;
+
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, node, loc)) {
+ if (strcmp(loc->name, locator_name))
+ continue;
+ loc_found = loc;
+ break;
+ }
+ if (!loc_found) {
+ loc = XCALLOC(MTYPE_SRV6_LOCATOR,
+ sizeof(struct sharp_srv6_locator));
+ loc->chunks = list_new();
+ snprintf(loc->name, SRV6_LOCNAME_SIZE, "%s", locator_name);
+ listnode_add(sg.srv6_locators, loc);
+ }
+
+ ret = sharp_zebra_srv6_manager_get_locator_chunk(locator_name);
+ if (ret < 0)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ return CMD_SUCCESS;
+}
+
DEFUN (show_sharp_ted,
show_sharp_ted_cmd,
"show sharp ted [<vertex [A.B.C.D]|edge [A.B.C.D]|subnet [A.B.C.D/M]>] [verbose|json]",
json, JSON_C_TO_STRING_PRETTY));
json_object_free(json);
}
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (sharp_srv6_manager_release_locator_chunk,
+ sharp_srv6_manager_release_locator_chunk_cmd,
+ "sharp srv6-manager release-locator-chunk NAME$locator_name",
+ SHARP_STR
+ "Segment-Routing IPv6\n"
+ "Release SRv6 locator-chunk\n"
+ "SRv6 Locator name\n")
+{
+ int ret;
+ struct listnode *loc_node;
+ struct sharp_srv6_locator *loc;
+
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
+ if (!strcmp(loc->name, locator_name)) {
+ list_delete_all_node(loc->chunks);
+ list_delete(&loc->chunks);
+ listnode_delete(sg.srv6_locators, loc);
+ break;
+ }
+ }
+
+ ret = sharp_zebra_srv6_manager_release_locator_chunk(locator_name);
+ if (ret < 0)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ return CMD_SUCCESS;
+}
+
+DEFPY (show_sharp_segment_routing_srv6,
+ show_sharp_segment_routing_srv6_cmd,
+ "show sharp segment-routing srv6 [json]",
+ SHOW_STR
+ SHARP_STR
+ "Segment-Routing\n"
+ "Segment-Routing IPv6\n"
+ JSON_STR)
+{
+ char str[256];
+ struct listnode *loc_node;
+ struct listnode *chunk_node;
+ struct sharp_srv6_locator *loc;
+ struct prefix_ipv6 *chunk;
+ bool uj = use_json(argc, argv);
+ json_object *jo_locs = NULL;
+ json_object *jo_loc = NULL;
+ json_object *jo_chunks = NULL;
+
+ if (uj) {
+ jo_locs = json_object_new_array();
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
+ jo_loc = json_object_new_object();
+ json_object_array_add(jo_locs, jo_loc);
+ json_object_string_add(jo_loc, "name", loc->name);
+ jo_chunks = json_object_new_array();
+ json_object_object_add(jo_loc, "chunks", jo_chunks);
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node,
+ chunk)) {
+ prefix2str(chunk, str, sizeof(str));
+ json_array_string_add(jo_chunks, str);
+ }
+ }
+
+ vty_out(vty, "%s\n", json_object_to_json_string_ext(
+ jo_locs, JSON_C_TO_STRING_PRETTY));
+ json_object_free(jo_locs);
+ } else {
+ for (ALL_LIST_ELEMENTS_RO(sg.srv6_locators, loc_node, loc)) {
+ vty_out(vty, "Locator %s has %d prefix chunks\n",
+ loc->name, listcount(loc->chunks));
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node,
+ chunk)) {
+ prefix2str(chunk, str, sizeof(str));
+ vty_out(vty, " %s\n", str);
+ }
+ vty_out(vty, "\n");
+ }
+ }
+
return CMD_SUCCESS;
}
{
install_element(ENABLE_NODE, &install_routes_data_dump_cmd);
install_element(ENABLE_NODE, &install_routes_cmd);
+ install_element(ENABLE_NODE, &install_seg6_routes_cmd);
+ install_element(ENABLE_NODE, &install_seg6local_routes_cmd);
install_element(ENABLE_NODE, &remove_routes_cmd);
install_element(ENABLE_NODE, &vrf_label_cmd);
install_element(ENABLE_NODE, &sharp_nht_data_dump_cmd);
install_element(ENABLE_NODE, &show_debugging_sharpd_cmd);
install_element(ENABLE_NODE, &show_sharp_ted_cmd);
+ install_element(ENABLE_NODE, &sharp_srv6_manager_get_locator_chunk_cmd);
+ install_element(ENABLE_NODE,
+ &sharp_srv6_manager_release_locator_chunk_cmd);
+ install_element(ENABLE_NODE, &show_sharp_segment_routing_srv6_cmd);
+
return;
}
vrf_id_t vrf_id;
uint8_t instance;
uint32_t nhgid;
+ uint32_t flags;
const struct nexthop_group *nhg;
const struct nexthop_group *backup_nhg;
enum where_to_restart restart;
*/
static bool route_add(const struct prefix *p, vrf_id_t vrf_id, uint8_t instance,
uint32_t nhgid, const struct nexthop_group *nhg,
- const struct nexthop_group *backup_nhg, char *opaque)
+ const struct nexthop_group *backup_nhg, uint32_t flags,
+ char *opaque)
{
struct zapi_route api;
struct zapi_nexthop *api_nh;
api.safi = SAFI_UNICAST;
memcpy(&api.prefix, p, sizeof(*p));
+ api.flags = flags;
SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION);
SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP);
uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
- uint32_t routes, char *opaque)
+ uint32_t routes, uint32_t flags,
+ char *opaque)
{
uint32_t temp, i;
bool v4 = false;
for (i = count; i < routes; i++) {
bool buffered = route_add(p, vrf_id, (uint8_t)instance, nhgid,
- nhg, backup_nhg, opaque);
+ nhg, backup_nhg, flags, opaque);
if (v4)
p->u.prefix4.s_addr = htonl(++temp);
else
wb.instance = instance;
wb.nhgid = nhgid;
wb.nhg = nhg;
+ wb.flags = flags;
wb.backup_nhg = backup_nhg;
wb.opaque = opaque;
wb.restart = SHARP_INSTALL_ROUTES_RESTART;
uint8_t instance, uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
- uint32_t routes, char *opaque)
+ uint32_t routes, uint32_t flags, char *opaque)
{
zlog_debug("Inserting %u routes", routes);
monotime(&sg.r.t_start);
sharp_install_routes_restart(p, 0, vrf_id, instance, nhgid, nhg,
- backup_nhg, routes, opaque);
+ backup_nhg, routes, flags, opaque);
}
static void sharp_remove_routes_restart(struct prefix *p, uint32_t count,
sharp_install_routes_helper(&p, sg.r.vrf_id, sg.r.inst,
sg.r.nhgid, &sg.r.nhop_group,
&sg.r.backup_nhop_group,
- sg.r.total_routes, sg.r.opaque);
+ sg.r.total_routes, sg.r.flags,
+ sg.r.opaque);
}
}
case SHARP_INSTALL_ROUTES_RESTART:
sharp_install_routes_restart(
&wb.p, wb.count, wb.vrf_id, wb.instance, wb.nhgid,
- wb.nhg, wb.backup_nhg, wb.routes, wb.opaque);
+ wb.nhg, wb.backup_nhg, wb.routes, wb.flags,
+ wb.opaque);
return;
case SHARP_DELETE_ROUTES_RESTART:
sharp_remove_routes_restart(&wb.p, wb.count, wb.vrf_id,
return 0;
}
+int sharp_zebra_srv6_manager_get_locator_chunk(const char *locator_name)
+{
+ return srv6_manager_get_locator_chunk(zclient, locator_name);
+}
+
+int sharp_zebra_srv6_manager_release_locator_chunk(const char *locator_name)
+{
+ return srv6_manager_release_locator_chunk(zclient, locator_name);
+}
+
+static void sharp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS)
+{
+ struct stream *s = NULL;
+ struct srv6_locator_chunk s6c = {};
+ struct listnode *node, *nnode;
+ struct sharp_srv6_locator *loc;
+
+ s = zclient->ibuf;
+ zapi_srv6_locator_chunk_decode(s, &s6c);
+
+ for (ALL_LIST_ELEMENTS(sg.srv6_locators, node, nnode, loc)) {
+ struct prefix_ipv6 *chunk = NULL;
+ struct listnode *chunk_node;
+ struct prefix_ipv6 *c;
+
+ if (strcmp(loc->name, s6c.locator_name) != 0) {
+ zlog_err("%s: Locator name unmatch %s:%s", __func__,
+ loc->name, s6c.locator_name);
+ continue;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node, c))
+ if (!prefix_cmp(c, &s6c.prefix))
+ return;
+
+ chunk = prefix_ipv6_new();
+ *chunk = s6c.prefix;
+ listnode_add(loc->chunks, chunk);
+ return;
+ }
+
+ zlog_err("%s: can't get locator_chunk!!", __func__);
+}
+
void sharp_zebra_init(void)
{
struct zclient_options opt = {.receive_notify = true};
zclient->redistribute_route_add = sharp_redistribute_route;
zclient->redistribute_route_del = sharp_redistribute_route;
zclient->opaque_msg_handler = sharp_opaque_handler;
+ zclient->process_srv6_locator_chunk =
+ sharp_zebra_process_srv6_locator_chunk;
}
uint8_t instance, uint32_t nhgid,
const struct nexthop_group *nhg,
const struct nexthop_group *backup_nhg,
- uint32_t routes, char *opaque);
+ uint32_t routes, uint32_t flags,
+ char *opaque);
extern void sharp_remove_routes_helper(struct prefix *p, vrf_id_t vrf_id,
uint8_t instance, uint32_t routes);
extern void sharp_redistribute_vrf(struct vrf *vrf, int source);
+extern int sharp_zebra_srv6_manager_get_locator_chunk(const char *lname);
+extern int sharp_zebra_srv6_manager_release_locator_chunk(const char *lname);
+extern void sharp_install_seg6local_route_helper(struct prefix *p,
+ uint8_t instance,
+ enum seg6local_action_t act,
+ struct seg6local_context *ctx);
+
#endif
/* Execute command (non-strict). */
ret = cmd_execute_command(vline, test->vty, NULL, 0);
+ if (ret == CMD_SUCCESS) {
+ /* Commit any pending changes, ignore error */
+ ret = nb_cli_pending_commit_check(test->vty);
+ }
if (ret != CMD_SUCCESS) {
test->state = TEST_COMMAND_ERROR;
test->error = str_printf(
cmd, ret);
}
- nb_cli_pending_commit_check(test->vty);
-
/* Free memory. */
cmd_free_strvec(vline);
XFREE(MTYPE_TMP, cmd);
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from lib.common_config import adjust_router_l3mdev
# Required to instantiate the topology builder class.
from mininet.topo import Topo
logger.info("result: " + output)
router = tgen.gears["r2"]
- adjust_router_l3mdev(tgen, "r2")
for cmd in cmds_vrflite:
logger.info("cmd to r2: " + cmd.format("r2"))
output = router.run(cmd.format("r2"))
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.1.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [
+ {"redist_type": "static"}
+ ],
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "keepalivetimer": 1,
+ "holddowntimer": 3
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "static_routes":[
+ {
+ "network":"100.0.10.1/32",
+ "no_of_ip":5,
+ "next_hop":"Null0"
+ },
+ {
+ "network":"1::1/128",
+ "no_of_ip":5,
+ "next_hop":"Null0"
+ }]
+ },
+ "r2": {
+ "links": {
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "r1": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "300",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4": {
+ "links": {
+ "r2": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"}
+ },
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r4": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+{\r
+ "ipv4base": "10.0.0.0",\r
+ "ipv4mask": 24,\r
+ "ipv6base": "fd00::",\r
+ "ipv6mask": 64,\r
+ "link_ip_start": {\r
+ "ipv4": "10.0.1.0",\r
+ "v4mask": 24,\r
+ "ipv6": "fd00::",\r
+ "v6mask": 64\r
+ },\r
+ "lo_prefix": {\r
+ "ipv4": "1.0.",\r
+ "v4mask": 32,\r
+ "ipv6": "2001:DB8:F::",\r
+ "v6mask": 128\r
+ },\r
+ "routers": {\r
+ "r1": {\r
+ "links": {\r
+ "r2": {"ipv4": "auto", "ipv6": "auto"},\r
+ "r3": {"ipv4": "auto", "ipv6": "auto"}\r
+ },\r
+ "bgp": {\r
+ "local_as": "100",\r
+ "address_family": {\r
+ "ipv4": {\r
+ "unicast": {\r
+ "redistribute": [\r
+ {"redist_type": "static"}\r
+ ],\r
+ "neighbor": {\r
+ "r2": {\r
+ "dest_link": {\r
+ "r1": {\r
+ "keepalivetimer": 1,\r
+ "holddowntimer": 3\r
+ }\r
+ }\r
+ },\r
+ "r3": {\r
+ "dest_link": {\r
+ "r1": {\r
+ "keepalivetimer": 1,\r
+ "holddowntimer": 3\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "ipv6": {\r
+ "unicast": {\r
+ "redistribute": [\r
+ {"redist_type": "static"}\r
+ ],\r
+ "neighbor": {\r
+ "r2": {\r
+ "dest_link": {\r
+ "r1": {\r
+ "keepalivetimer": 1,\r
+ "holddowntimer": 3\r
+ }\r
+ }\r
+ },\r
+ "r3": {\r
+ "dest_link": {\r
+ "r1": {\r
+ "keepalivetimer": 1,\r
+ "holddowntimer": 3\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "static_routes":[\r
+ {\r
+ "network":"100.0.10.1/32",\r
+ "no_of_ip":5,\r
+ "next_hop":"Null0"\r
+ },\r
+ {\r
+ "network":"1::1/128",\r
+ "no_of_ip":5,\r
+ "next_hop":"Null0"\r
+ }]\r
+ },\r
+ "r2": {\r
+ "links": {\r
+ "r1": {"ipv4": "auto", "ipv6": "auto"},\r
+ "r4": {"ipv4": "auto", "ipv6": "auto"}\r
+ },\r
+ "bgp": {\r
+ "local_as": "100",\r
+ "address_family": {\r
+ "ipv4": {\r
+ "unicast": {\r
+ "neighbor": {\r
+ "r1": {\r
+ "dest_link": {\r
+ "r2": {}\r
+ }\r
+ },\r
+ "r4": {\r
+ "dest_link": {\r
+ "r2": {}\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "ipv6": {\r
+ "unicast": {\r
+ "neighbor": {\r
+ "r1": {\r
+ "dest_link": {\r
+ "r2": {}\r
+ }\r
+ },\r
+ "r4": {\r
+ "dest_link": {\r
+ "r2": {}\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "r3": {\r
+ "links": {\r
+ "r1": {"ipv4": "auto", "ipv6": "auto"},\r
+ "r4": {"ipv4": "auto", "ipv6": "auto"}\r
+ },\r
+ "bgp": {\r
+ "local_as": "100",\r
+ "address_family": {\r
+ "ipv4": {\r
+ "unicast": {\r
+ "neighbor": {\r
+ "r1": {\r
+ "dest_link": {\r
+ "r3": {}\r
+ }\r
+ },\r
+ "r4": {\r
+ "dest_link": {\r
+ "r3": {}\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "ipv6": {\r
+ "unicast": {\r
+ "neighbor": {\r
+ "r1": {\r
+ "dest_link": {\r
+ "r3": {}\r
+ }\r
+ },\r
+ "r4": {\r
+ "dest_link": {\r
+ "r3": {}\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "r4": {\r
+ "links": {\r
+ "r2": {"ipv4": "auto", "ipv6": "auto"},\r
+ "r3": {"ipv4": "auto", "ipv6": "auto"}\r
+ },\r
+ "bgp": {\r
+ "local_as": "200",\r
+ "address_family": {\r
+ "ipv4": {\r
+ "unicast": {\r
+ "neighbor": {\r
+ "r2": {\r
+ "dest_link": {\r
+ "r4": {}\r
+ }\r
+ },\r
+ "r3": {\r
+ "dest_link": {\r
+ "r4": {}\r
+ }\r
+ }\r
+ }\r
+ }\r
+ },\r
+ "ipv6": {\r
+ "unicast": {\r
+ "neighbor": {\r
+ "r2": {\r
+ "dest_link": {\r
+ "r4": {}\r
+ }\r
+ },\r
+ "r3": {\r
+ "dest_link": {\r
+ "r4": {}\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+ }\r
+}
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test BGP graceful-shutdown functionality.
+1. Verify graceful-shutdown functionality with eBGP peers
+2. Verify graceful-shutdown functionality when daemons
+ bgpd/zebra/staticd and frr services are restarted with eBGP peers
+"""
+
+import os
+import sys
+import time
+import json
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+from time import sleep
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ interface_status,
+ reset_config_on_routers,
+ step,
+ get_frr_ipv6_linklocal,
+ kill_router_daemons,
+ start_router_daemons,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_bgp_community_lists,
+ delete_route_maps,
+ required_linux_kernel_version,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp,
+ verify_bgp_rib,
+ verify_bgp_attributes,
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ebgp_gshut_topo1.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ logger.info("Could not read file: %s", jsonFile)
+
+# Global variables
+NETWORK = {"ipv4": "100.0.10.1/32", "ipv6": "1::1/128"}
+NEXT_HOP_IP_1 = {"ipv4": "10.0.2.1", "ipv6": "fd00:0:0:1::1"}
+NEXT_HOP_IP_2 = {"ipv4": "10.0.4.2", "ipv6": "fd00:0:0:3::2"}
+PREFERRED_NEXT_HOP = "link_local"
+BGP_CONVERGENCE = False
+
+
+class GenerateTopo(Topo):
+ """
+ Test topology builder
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # This function only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Create topology (setup module)
+ # Creating 4 routers topology, r1, r2, r3, r4 in eBGP
+ # Bring up topology
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ global ADDR_TYPES
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.16")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(GenerateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start daemons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+
+###########################
+# Local APIs
+###########################
+
+
+def next_hop_per_address_family(
+ tgen, dut, peer, addr_type, next_hop_dict, preferred_next_hop=PREFERRED_NEXT_HOP
+):
+ """
+ This function returns link_local or global next_hop per address-family
+ """
+
+ interface = topo["routers"][peer]["links"]["{}".format(dut)]["interface"]
+ if addr_type == "ipv6" and "link_local" in preferred_next_hop:
+ next_hop = get_frr_ipv6_linklocal(tgen, peer, intf=interface)
+ else:
+ next_hop = next_hop_dict[addr_type]
+
+ return next_hop
+
+
+###########################
+# TESTCASES
+###########################
+
+
+def test_verify_graceful_shutdown_functionality_with_eBGP_peers_p0(request):
+ """
+ Verify graceful-shutdown functionality with eBGP peers
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ reset_config_on_routers(tgen)
+
+ step("Done in base config: Configure base config as per the topology")
+ step("Base config should be up, verify using BGP convergence")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Done in base config: Advertise prefixes from R1")
+ step("Verify BGP routes are received at R3 with best path from R3 to R1")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r3"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r3", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r3", "r4", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("On R1 configure:")
+ step("Create standard bgp community-list to permit graceful-shutdown:")
+ input_dict_1 = {
+ "r1": {
+ "bgp_community_lists": [
+ {
+ "community_type": "standard",
+ "action": "permit",
+ "name": "GSHUT",
+ "value": "graceful-shutdown",
+ }
+ ]
+ }
+ }
+
+ result = create_bgp_community_lists(tgen, input_dict_1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Create route-map to set community GSHUT in OUT direction")
+
+ input_dict_2 = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "set": {"community": {"num": "graceful-shutdown"}},
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "FRR is setting local-pref to 0 by-default on receiver GSHUT community, "
+ "below step is not needed, but keeping for reference"
+ )
+ step(
+ "On R3, apply route-map IN direction to match GSHUT community "
+ "and set local-preference to 0."
+ )
+
+ step(
+ "Verify BGP convergence on R3 and ensure all the neighbours state "
+ "is established"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify BGP routes on R3:")
+ step("local pref for routes coming from R1 is set to 0.")
+
+ for addr_type in ADDR_TYPES:
+ rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+
+ static_routes = [NETWORK[addr_type]]
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, "GSHUT-OUT", rmap_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ensure that best path is selected from R4 to R3.")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r3"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r3", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r3", "r4", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_restarting_zebra_bgpd_staticd_frr_with_eBGP_peers_p0(request):
+ """
+ Verify graceful-shutdown functionality when daemons bgpd/zebra/staticd and
+ frr services are restarted with eBGP peers
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ reset_config_on_routers(tgen)
+
+ step("Done in base config: Configure base config as per the topology")
+ step("Base config should be up, verify using BGP convergence")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Done in base config: Advertise prefixes from R1")
+ step("Verify BGP routes are received at R3 with best path from R3 to R1")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r3"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r3", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r3", "r4", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("On R1 configure:")
+ step("Create standard bgp community-list to permit graceful-shutdown:")
+ input_dict_1 = {
+ "r1": {
+ "bgp_community_lists": [
+ {
+ "community_type": "standard",
+ "action": "permit",
+ "name": "GSHUT",
+ "value": "graceful-shutdown",
+ }
+ ]
+ }
+ }
+
+ result = create_bgp_community_lists(tgen, input_dict_1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Create route-map to set community GSHUT in OUT direction")
+
+ input_dict_2 = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "set": {"community": {"num": "graceful-shutdown"}},
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "FRR is setting local-pref to 0 by-default on receiver GSHUT community, "
+ "below step is not needed, but keeping for reference"
+ )
+ step(
+ "On R3, apply route-map IN direction to match GSHUT community "
+ "and set local-preference to 0."
+ )
+
+ step(
+ "Verify BGP convergence on R3 and ensure all the neighbours state "
+ "is established"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify BGP routes on R3:")
+ step("local pref for routes coming from R1 is set to 0.")
+
+ for addr_type in ADDR_TYPES:
+ rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}]}}}
+
+ static_routes = [NETWORK[addr_type]]
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, "GSHUT-OUT", rmap_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ensure that best path is selected from R4 to R3.")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r3"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r3", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r3", "r4", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Restart daemons and frr services")
+
+ for daemon in ["bgpd", "zebra", "staticd", "frr"]:
+ if daemon != "frr":
+ kill_router_daemons(tgen, "r3", ["staticd"])
+ start_router_daemons(tgen, "r3", ["staticd"])
+ else:
+ stop_router(tgen, "r3")
+ start_router(tgen, "r3")
+
+ step(
+ "Verify BGP convergence on R3 and ensure all the neighbours state "
+ "is established"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Verify BGP routes on R3:")
+ step("local pref for routes coming from R1 is set to 0.")
+
+ for addr_type in ADDR_TYPES:
+ rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}]}}}
+
+ static_routes = [NETWORK[addr_type]]
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, "GSHUT-OUT", rmap_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ensure that best path is selected from R4 to R3.")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r3"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r3", "r1", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r3", "r4", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test ecmp functionality on BGP GSHUT.
+1. Verify graceful-shutdown functionality with iBGP peers
+2. Verify graceful-shutdown functionality after
+ deleting/re-adding route-map with iBGP peers
+"""
+
+import os
+import sys
+import time
+import json
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from mininet.topo import Topo
+from time import sleep
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ interface_status,
+ reset_config_on_routers,
+ step,
+ get_frr_ipv6_linklocal,
+ kill_router_daemons,
+ start_router_daemons,
+ stop_router,
+ start_router,
+ create_route_maps,
+ create_bgp_community_lists,
+ delete_route_maps,
+ required_linux_kernel_version,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp,
+ verify_bgp_rib,
+ verify_bgp_attributes,
+)
+from lib.topojson import build_topo_from_json, build_config_from_json
+
+# Reading the data from JSON File for topology and configuration creation
+jsonFile = "{}/ibgp_gshut_topo1.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ logger.info("Could not read file:", jsonFile)
+
+# Global variables
+NETWORK = {"ipv4": "100.0.10.1/32", "ipv6": "1::1/128"}
+NEXT_HOP_IP_1 = {"ipv4": "10.0.3.1", "ipv6": "fd00:0:0:3::1"}
+NEXT_HOP_IP_2 = {"ipv4": "10.0.4.1", "ipv6": "fd00:0:0:2::1"}
+PREFERRED_NEXT_HOP = "link_local"
+BGP_CONVERGENCE = False
+
+
+class GenerateTopo(Topo):
+    """
+    Test topology builder
+
+    * `Topo`: Topology object
+    """
+
+    def build(self, *_args, **_opts):
+        "Build function: create the topology defined in the input JSON file."
+        tgen = get_topogen(self)
+
+        # This function's only purpose is to create the topology
+        # as defined in the input json file.
+        #
+        # Create topology (setup module)
+        # Creating the routers topology (r1, r2, ... in IBGP)
+        # Bring up topology
+
+        # Building topology from json file
+        build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+
+ global ADDR_TYPES
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("4.16")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(GenerateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+###########################
+# Local APIs
+###########################
+
+
+def next_hop_per_address_family(
+ tgen, dut, peer, addr_type, next_hop_dict, preferred_next_hop=PREFERRED_NEXT_HOP
+):
+ """
+ This function returns link_local or global next_hop per address-family
+ """
+
+ intferface = topo["routers"][peer]["links"]["{}".format(dut)]["interface"]
+ if addr_type == "ipv6" and "link_local" in preferred_next_hop:
+ next_hop = get_frr_ipv6_linklocal(tgen, peer, intf=intferface)
+ else:
+ next_hop = next_hop_dict[addr_type]
+
+ return next_hop
+
+
+###########################
+# TESTCASES
+###########################
+
+
+def test_verify_graceful_shutdown_functionality_with_iBGP_peers_p0(request):
+ """
+ Verify graceful-shutdown functionality with iBGP peers
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ reset_config_on_routers(tgen)
+
+ step("Done in base config: Configure base config as per the topology")
+ step("Base config should be up, verify using BGP convergence")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Done in base config: Advertise prefixes from R1")
+ step("Verify BGP routes are received at R4 with best path from R3 to R1")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r4"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r4", "r2", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r4", "r3", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("On R1 configure:")
+ step("Create standard bgp community-list to permit graceful-shutdown:")
+ input_dict_1 = {
+ "r1": {
+ "bgp_community_lists": [
+ {
+ "community_type": "standard",
+ "action": "permit",
+ "name": "GSHUT",
+ "value": "graceful-shutdown",
+ }
+ ]
+ }
+ }
+
+ result = create_bgp_community_lists(tgen, input_dict_1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Create route-map to set community GSHUT in OUT direction")
+
+ input_dict_2 = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "set": {"community": {"num": "graceful-shutdown"}},
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "FRR is setting local-pref to 0 by-default on receiver GSHUT community, "
+ "below step is not needed, but keeping for reference"
+ )
+ step(
+ "On R3, apply route-map IN direction to match GSHUT community "
+ "and set local-preference to 0."
+ )
+
+ step(
+ "Verify BGP convergence on R4 and ensure all the neighbours state "
+ "is established"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify BGP routes on R4:")
+ step("local pref for routes coming from R1 is set to 0.")
+
+ for addr_type in ADDR_TYPES:
+ rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+
+ static_routes = [NETWORK[addr_type]]
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, "GSHUT-OUT", rmap_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ensure that best path is selected from R4 to R3.")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r4"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r4", "r2", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r4", "r3", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_verify_deleting_re_adding_route_map_with_iBGP_peers_p0(request):
+ """
+ Verify graceful-shutdown functionality after deleting/re-adding route-map
+ with iBGP peers
+ """
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ reset_config_on_routers(tgen)
+
+ step("Done in base config: Configure base config as per the topology")
+ step("Base config should be up, verify using BGP convergence")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Done in base config: Advertise prefixes from R1")
+ step("Verify BGP routes are received at R4 with best path from R3 to R1")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r4"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r4", "r2", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r4", "r3", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("On R1 configure:")
+ step("Create standard bgp community-list to permit graceful-shutdown:")
+ input_dict_1 = {
+ "r1": {
+ "bgp_community_lists": [
+ {
+ "community_type": "standard",
+ "action": "permit",
+ "name": "GSHUT",
+ "value": "graceful-shutdown",
+ }
+ ]
+ }
+ }
+
+ result = create_bgp_community_lists(tgen, input_dict_1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Create route-map to set community GSHUT in OUT direction")
+
+ input_dict_2 = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "set": {"community": {"num": "graceful-shutdown"}},
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_3 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "GSHUT-OUT",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "FRR is setting local-pref to 0 by-default on receiver GSHUT community, "
+ "below step is not needed, but keeping for reference"
+ )
+ step(
+ "On R3, apply route-map IN direction to match GSHUT community "
+ "and set local-preference to 0."
+ )
+
+ step(
+ "Verify BGP convergence on R4 and ensure all the neighbours state "
+ "is established"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify BGP routes on R4:")
+ step("local pref for routes coming from R1 is set to 0.")
+
+ for addr_type in ADDR_TYPES:
+ rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+
+ static_routes = [NETWORK[addr_type]]
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, "GSHUT-OUT", rmap_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ensure that best path is selected from R4 to R3.")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r4"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r4", "r2", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r4", "r3", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Delete route-map from R1")
+ del_rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "set": {
+ "community": {"num": "graceful-shutdown", "delete": True}
+ },
+ }
+ ]
+ }
+ }
+ }
+
+ result = create_route_maps(tgen, del_rmap_dict)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify BGP convergence on R3 and ensure that all neighbor state "
+ "is established"
+ )
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify BGP routes on R4:")
+ step("Ensure that best path is selected from R1->R3")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r4"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r4", "r2", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r4", "r3", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(tgen, addr_type, dut, input_topo, next_hop=[next_hop1])
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=[next_hop1])
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Re-add route-map in R1")
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify BGP convergence on R3 and ensure all the neighbours state "
+ "is established"
+ )
+
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify BGP routes on R4:")
+ step("local pref for routes coming from R1 is set to 0.")
+
+ for addr_type in ADDR_TYPES:
+ rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}]}}}
+
+ static_routes = [NETWORK[addr_type]]
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, "GSHUT-OUT", rmap_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Ensure that best path is selected from R4 to R3.")
+
+ for addr_type in ADDR_TYPES:
+ dut = "r4"
+ next_hop1 = next_hop_per_address_family(
+ tgen, "r4", "r2", addr_type, NEXT_HOP_IP_1
+ )
+ next_hop2 = next_hop_per_address_family(
+ tgen, "r4", "r3", addr_type, NEXT_HOP_IP_2
+ )
+
+ input_topo = {key: topo["routers"][key] for key in ["r1"]}
+ result = verify_bgp_rib(
+ tgen, addr_type, dut, input_topo, next_hop=[next_hop1, next_hop2]
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(tgen, addr_type, dut, input_topo, next_hop=next_hop1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.ltemplate import ltemplateRtrCmd
-from lib.common_config import adjust_router_l3mdev
# Required to instantiate the topology builder class.
from mininet.topo import Topo
"ip link set dev {0}-cust1 up",
]
for rtr in rtrs:
- # adjust handling of VRF traffic
- adjust_router_l3mdev(tgen, rtr)
-
for cmd in cmds:
cc.doCmd(tgen, rtr, cmd.format(rtr))
cc.doCmd(tgen, rtr, "ip link set dev {0}-eth4 master {0}-cust1".format(rtr))
"ip link set dev {0}-cust2 up",
]
for rtr in rtrs:
- # adjust handling of VRF traffic
- adjust_router_l3mdev(tgen, rtr)
-
for cmd in cmds:
cc.doCmd(tgen, rtr, cmd.format(rtr))
cc.doCmd(tgen, rtr, "ip link set dev {0}-eth0 master {0}-cust2".format(rtr))
from lib.lutil import luCommand
-from lib.common_config import kernel_requires_l3mdev_adjustment
-
-l3mdev_accept = kernel_requires_l3mdev_adjustment()
-l3mdev_rtrs = ["r1", "r3", "r4", "ce4"]
-for rtr in l3mdev_rtrs:
- luCommand(rtr, "sysctl net.ipv4.tcp_l3mdev_accept", " = \d*", "none", "")
- found = luLast()
- luCommand(
- rtr, "ss -naep", ":179", "pass", "IPv4:bgp, l3mdev{}".format(found.group(0))
- )
- luCommand(rtr, "ss -naep", ":.*:179", "pass", "IPv6:bgp")
- luCommand(
- rtr,
- "sysctl net.ipv4.tcp_l3mdev_accept",
- " = {}".format(l3mdev_accept),
- "pass",
- "l3mdev matches expected (real/expected{}/{})".format(
- found.group(0), l3mdev_accept
- ),
- )
rtrs = ["r1", "r3", "r4"]
for rtr in rtrs:
--- /dev/null
+group controller {
+ neighbor 10.0.0.1 {
+ router-id 10.0.0.101;
+ local-address 10.0.0.101;
+ local-as 2;
+ peer-as 1;
+
+ family {
+ ipv6 mpls-vpn;
+ }
+
+ static {
+ route 2001:1::/64 {
+ rd 2:10;
+ next-hop 2001::2;
+ extended-community [ target:2:10 ];
+ label 3;
+ attribute [0x28 0xc0 0x0500150020010db800010001000000000000000100ffff00 ];
+ }
+ route 2001:2::/64 {
+ rd 2:10;
+ next-hop 2001::2;
+ extended-community [ target:2:10 ];
+ label 3;
+ attribute [0x28 0xc0 0x0500150020010db800010001000000000000000100ffff00 ];
+ }
+ }
+ }
+}
--- /dev/null
+
+[exabgp.api]
+encoder = text
+highres = false
+respawn = false
+socket = ''
+
+[exabgp.bgp]
+openwait = 60
+
+[exabgp.cache]
+attributes = true
+nexthops = true
+
+[exabgp.daemon]
+daemonize = true
+pid = '/var/run/exabgp/exabgp.pid'
+user = 'exabgp'
+
+[exabgp.log]
+all = false
+configuration = true
+daemon = true
+destination = '/var/log/exabgp.log'
+enable = true
+level = INFO
+message = false
+network = true
+packets = false
+parser = false
+processes = true
+reactor = true
+rib = false
+routes = false
+short = false
+timers = false
+
+[exabgp.pdb]
+enable = false
+
+[exabgp.profile]
+enable = false
+file = ''
+
+[exabgp.reactor]
+speed = 1.0
+
+[exabgp.tcp]
+acl = false
+bind = ''
+delay = 0
+once = false
+port = 179
--- /dev/null
+log stdout notifications
+log monitor notifications
+!log commands
+!
+!debug bgp zebra
+!debug bgp neighbor-events
+!debug bgp vnc verbose
+!debug bgp update-groups
+!debug bgp updates in
+!debug bgp updates out
+!debug bgp vpn label
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp vpn rmap-event
+!
+router bgp 1
+ bgp router-id 10.0.0.1
+ no bgp default ipv4-unicast
+ no bgp ebgp-requires-policy
+ neighbor 10.0.0.101 remote-as 2
+ neighbor 10.0.0.101 timers 3 10
+ !
+ address-family ipv6 vpn
+ neighbor 10.0.0.101 activate
+ exit-address-family
+!
--- /dev/null
+{
+ "2:10":{
+ "prefix":"2001:1::\/64",
+ "advertisedTo":{
+ "10.0.0.101":{
+ }
+ },
+ "paths":[
+ {
+ "aspath":{
+ "string":"2",
+ "segments":[
+ {
+ "type":"as-sequence",
+ "list":[
+ 2
+ ]
+ }
+ ],
+ "length":1
+ },
+ "origin":"IGP",
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"RT:2:10"
+ },
+ "remoteLabel":3,
+ "remoteSid":"2001:db8:1:1::1",
+ "nexthops":[
+ {
+ "ip":"2001::2",
+ "afi":"ipv6",
+ "scope":"global",
+ "metric":0,
+ "accessible":true,
+ "used":true
+ }
+ ],
+ "peer":{
+ "peerId":"10.0.0.101",
+ "routerId":"10.0.0.101",
+ "type":"external"
+ }
+ }
+ ]
+ }
+}
--- /dev/null
+{
+ "2:10":{
+ "prefix":"2001:2::\/64",
+ "advertisedTo":{
+ "10.0.0.101":{
+ }
+ },
+ "paths":[
+ {
+ "aspath":{
+ "string":"2",
+ "segments":[
+ {
+ "type":"as-sequence",
+ "list":[
+ 2
+ ]
+ }
+ ],
+ "length":1
+ },
+ "origin":"IGP",
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"RT:2:10"
+ },
+ "remoteLabel":3,
+ "remoteSid":"2001:db8:1:1::1",
+ "nexthops":[
+ {
+ "ip":"2001::2",
+ "afi":"ipv6",
+ "scope":"global",
+ "metric":0,
+ "accessible":true,
+ "used":true
+ }
+ ],
+ "peer":{
+ "peerId":"10.0.0.101",
+ "routerId":"10.0.0.101",
+ "type":"external"
+ }
+ }
+ ]
+ }
+}
--- /dev/null
+hostname r1
+!
+interface r1-eth0
+ ip address 10.0.0.1/24
+ no shutdown
+!
+line vty
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_bgp_prefix_sid2.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by LINE Corporation
+# Copyright (c) 2020 by Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_prefix_sid2.py: Test BGP topology with EBGP on prefix-sid
+"""
+
+import json
+import os
+import sys
+import functools
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+
+class TemplateTopo(Topo):
+ def build(self, **_opts):
+ tgen = get_topogen(self)
+ router = tgen.add_router("r1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(router)
+
+ switch = tgen.gears["s1"]
+ peer1 = tgen.add_exabgp_peer(
+ "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
+ )
+ switch.add_link(peer1)
+
+
+def setup_module(module):
+ tgen = Topogen(TemplateTopo, module.__name__)
+ tgen.start_topology()
+
+ router = tgen.gears["r1"]
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, "{}/zebra.conf".format("r1"))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP,
+ os.path.join(CWD, "{}/bgpd.conf".format("r1"))
+ )
+ router.start()
+
+ logger.info("starting exaBGP")
+ peer_list = tgen.exabgp_peers()
+ for pname, peer in peer_list.items():
+ logger.info("starting exaBGP on {}".format(pname))
+ peer_dir = os.path.join(CWD, pname)
+ env_file = os.path.join(CWD, pname, "exabgp.env")
+ logger.info("Running ExaBGP peer on {}".format(pname))
+ peer.start(peer_dir, env_file)
+ logger.info(pname)
+
+
+def teardown_module(module):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def test_r1_rib():
+ def _check(name, cmd, expected_file):
+ logger.info("polling")
+ tgen = get_topogen()
+ router = tgen.gears[name]
+ output = json.loads(router.vtysh_cmd(cmd))
+ expected = open_json_file("{}/{}".format(CWD, expected_file))
+ return topotest.json_cmp(output, expected)
+
+ def check(name, cmd, expected_file):
+ logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, cmd, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
+ assert result is None, 'Failed'
+
+ check("r1", "show bgp ipv6 vpn 2001:1::/64 json", "r1/vpnv6_rib_entry1.json")
+ check("r1", "show bgp ipv6 vpn 2001:2::/64 json", "r1/vpnv6_rib_entry2.json")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ ret = pytest.main(args)
+ sys.exit(ret)
--- /dev/null
+frr defaults traditional
+!
+hostname ce1
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
--- /dev/null
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:1::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:1::/64": [
+ {
+ "prefix": "2001:1::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname ce1
+!
+interface eth0
+ ipv6 address 2001:1::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:1::1
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname ce2
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
--- /dev/null
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:2::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:2::/64": [
+ {
+ "prefix": "2001:2::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname ce2
+!
+interface eth0
+ ipv6 address 2001:2::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:2::1
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname ce3
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
--- /dev/null
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:3::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "prefix": "2001:3::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname ce3
+!
+interface eth0
+ ipv6 address 2001:3::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:3::1
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname ce4
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
--- /dev/null
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:4::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:4::/64": [
+ {
+ "prefix": "2001:4::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname ce4
+!
+interface eth0
+ ipv6 address 2001:4::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:4::1
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname ce5
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
--- /dev/null
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:5::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:5::/64": [
+ {
+ "prefix": "2001:5::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname ce5
+!
+interface eth0
+ ipv6 address 2001:5::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:5::1
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname ce6
+password zebra
+!
+log stdout notifications
+log commands
+log file bgpd.log
--- /dev/null
+{
+ "::/0": [
+ {
+ "prefix": "::/0",
+ "protocol": "static",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 1,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 73,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "ip": "2001:6::1",
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "active": true,
+ "weight": 1
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "prefix": "2001:6::/64",
+ "protocol": "connected",
+ "vrfId": 0,
+ "vrfName": "default",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 254,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth0",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname ce6
+!
+interface eth0
+ ipv6 address 2001:6::2/64
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route ::/0 2001:6::1
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname r1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+!debug bgp neighbor-events
+!debug bgp zebra
+!debug bgp vnc verbose
+!debug bgp update-groups
+!debug bgp updates in
+!debug bgp updates out
+!debug bgp vpn label
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp vpn rmap-event
+!
+router bgp 1
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor 2001::2 remote-as 2
+ neighbor 2001::2 timers 3 10
+ neighbor 2001::2 timers connect 1
+ !
+ address-family ipv6 vpn
+ neighbor 2001::2 activate
+ exit-address-family
+ !
+ segment-routing srv6
+ locator loc1
+ !
+!
+router bgp 1 vrf vrf10
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 1:10
+ rt vpn both 99:99
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
+router bgp 1 vrf vrf20
+ bgp router-id 1.1.1.1
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 1:20
+ rt vpn both 88:88
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "tableVersion": 2,
+ "routerId": "1.1.1.1",
+ "defaultLocPrf": 100,
+ "localAS": 1,
+ "routes": {
+ "routeDistinguishers": {
+ "1:10": {
+ "2001:1::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:1::",
+ "prefixLen": 64,
+ "network": "2001:1::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:3::",
+ "prefixLen": 64,
+ "network": "2001:3::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "1:20": {
+ "2001:5::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:5::",
+ "prefixLen": 64,
+ "network": "2001:5::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf20",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:10": {
+ "2001:2::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:2::",
+ "prefixLen": 64,
+ "network": "2001:2::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::2",
+ "path": "2",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::2",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:20": {
+ "2001:4::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:4::",
+ "prefixLen": 64,
+ "network": "2001:4::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::2",
+ "path": "2",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::2",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:6::",
+ "prefixLen": 64,
+ "network": "2001:6::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::2",
+ "path": "2",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::2",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
--- /dev/null
+{
+ "2001:1::/64": [
+ {
+ "prefix": "2001:1::/64",
+ "protocol": "connected",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth1",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:2::/64": [
+ {
+ "prefix": "2001:2::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:2:2::100"
+ }
+ }
+ ],
+ "asPath": "2"
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "prefix": "2001:3::/64",
+ "protocol": "connected",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth2",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "2001:4::/64": [
+ {
+ "prefix": "2001:4::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:2:2::200"
+ }
+ }
+ ],
+ "asPath": "2"
+ }
+ ],
+ "2001:5::/64": [
+ {
+ "prefix": "2001:5::/64",
+ "protocol": "connected",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth3",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "prefix": "2001:6::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:2:2::200"
+ }
+ }
+ ],
+ "asPath": "2"
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname r1
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel
+!
+interface eth0
+ ipv6 address 2001::1/64
+!
+interface eth1 vrf vrf10
+ ipv6 address 2001:1::1/64
+!
+interface eth2 vrf vrf10
+ ipv6 address 2001:3::1/64
+!
+interface eth3 vrf vrf20
+ ipv6 address 2001:5::1/64
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:1:1::/64
+ !
+ !
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route 2001:db8:2:2::/64 2001::2
+!
+line vty
+!
--- /dev/null
+frr defaults traditional
+!
+hostname r2
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+!debug bgp neighbor-events
+!debug bgp zebra
+!debug bgp vnc verbose
+!debug bgp update-groups
+!debug bgp updates in
+!debug bgp updates out
+!debug bgp updates
+!debug bgp vpn label
+!debug bgp vpn leak-from-vrf
+!debug bgp vpn leak-to-vrf
+!debug bgp vpn rmap-event
+!
+router bgp 2
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ neighbor 2001::1 remote-as 1
+ neighbor 2001::1 timers 3 10
+ neighbor 2001::1 timers connect 1
+ !
+ address-family ipv6 vpn
+ neighbor 2001::1 activate
+ exit-address-family
+ !
+ segment-routing srv6
+ locator loc1
+ !
+!
+router bgp 2 vrf vrf10
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 2:10
+ rt vpn both 99:99
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
+router bgp 2 vrf vrf20
+ bgp router-id 2.2.2.2
+ no bgp ebgp-requires-policy
+ no bgp default ipv4-unicast
+ !
+ address-family ipv6 unicast
+ sid vpn export auto
+ rd vpn export 2:20
+ rt vpn both 88:88
+ import vpn
+ export vpn
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+{
+ "vrfId": 0,
+ "vrfName": "default",
+ "tableVersion": 2,
+ "routerId": "2.2.2.2",
+ "defaultLocPrf": 100,
+ "localAS": 2,
+ "routes": {
+ "routeDistinguishers": {
+ "1:10": {
+ "2001:1::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:1::",
+ "prefixLen": 64,
+ "network": "2001:1::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::1",
+ "path": "1",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::1",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:3::",
+ "prefixLen": 64,
+ "network": "2001:3::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::1",
+ "path": "1",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::1",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "1:20": {
+ "2001:5::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:5::",
+ "prefixLen": 64,
+ "network": "2001:5::/64",
+ "metric": 0,
+ "weight": 0,
+ "peerId": "2001::1",
+ "path": "1",
+ "origin": "incomplete",
+ "nexthops": [
+ {
+ "ip": "2001::1",
+ "hostname": "r1",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:10": {
+ "2001:2::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:2::",
+ "prefixLen": 64,
+ "network": "2001:2::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf10",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ },
+ "2:20": {
+ "2001:4::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:4::",
+ "prefixLen": 64,
+ "network": "2001:4::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf20",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "selectionReason": "First path received",
+ "pathFrom": "external",
+ "prefix": "2001:6::",
+ "prefixLen": 64,
+ "network": "2001:6::/64",
+ "metric": 0,
+ "weight": 32768,
+ "peerId": "(unspec)",
+ "path": "",
+ "origin": "incomplete",
+ "announceNexthopSelf": true,
+ "nhVrfName": "vrf20",
+ "nexthops": [
+ {
+ "ip": "::",
+ "hostname": "r2",
+ "afi": "ipv6",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
--- /dev/null
+{
+ "2001:1::/64": [
+ {
+ "prefix": "2001:1::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:1:1::100"
+ }
+ }
+ ],
+ "asPath": "1"
+ }
+ ],
+ "2001:2::/64": [
+ {
+ "prefix": "2001:2::/64",
+ "protocol": "connected",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth1",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:3::/64": [
+ {
+ "prefix": "2001:3::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf10",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 10,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:1:1::100"
+ }
+ }
+ ],
+ "asPath": "1"
+ }
+ ]
+}
--- /dev/null
+{
+ "2001:4::/64": [
+ {
+ "prefix": "2001:4::/64",
+ "protocol": "connected",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth2",
+ "active": true
+ }
+ ]
+ }
+ ],
+ "2001:5::/64": [
+ {
+ "prefix": "2001:5::/64",
+ "protocol": "bgp",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 20,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "afi": "ipv6",
+ "interfaceName": "eth0",
+ "vrf": "default",
+ "active": true,
+ "labels": [
+ 3
+ ],
+ "weight": 1,
+ "seg6": {
+ "segs": "2001:db8:1:1::200"
+ }
+ }
+ ],
+ "asPath": "1"
+ }
+ ],
+ "2001:6::/64": [
+ {
+ "prefix": "2001:6::/64",
+ "protocol": "connected",
+ "vrfName": "vrf20",
+ "selected": true,
+ "destSelected": true,
+ "distance": 0,
+ "metric": 0,
+ "installed": true,
+ "table": 20,
+ "internalStatus": 16,
+ "internalFlags": 8,
+ "internalNextHopNum": 1,
+ "internalNextHopActiveNum": 1,
+ "nexthops": [
+ {
+ "flags": 3,
+ "fib": true,
+ "directlyConnected": true,
+ "interfaceName": "eth3",
+ "active": true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname r2
+password zebra
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel
+!
+interface eth0
+ ipv6 address 2001::2/64
+!
+interface eth1 vrf vrf10
+ ipv6 address 2001:2::1/64
+!
+interface eth2 vrf vrf20
+ ipv6 address 2001:4::1/64
+!
+interface eth3 vrf vrf20
+ ipv6 address 2001:6::1/64
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:2:2::/64
+ !
+ !
+!
+ip forwarding
+ipv6 forwarding
+!
+ipv6 route 2001:db8:1:1::/64 2001::1
+!
+line vty
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2018, LabN Consulting, L.L.C.
+# Authored by Lou Berger <lberger@labn.net>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+import os
+import re
+import sys
+import json
+import functools
+import pytest
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import required_linux_kernel_version
+from mininet.topo import Topo
+
+
+class Topology(Topo):
+ """
+ CE1 CE3 CE5
+ (eth0) (eth0) (eth0)
+ :2 :2 :2
+ | | |
+ 2001: 2001: 2001:
+ 1::/64 3::/64 5::/64
+ | | |
+ :1 :1 :1
+ +-(eth1)--(eth2)---(eth3)-+
+ | \ / | |
+ | (vrf10) (vrf20) |
+ | R1 |
+ +----------(eth0)---------+
+ :1
+ |
+ 2001::/64
+ |
+ :2
+ (eth0)
+ +----------(eth0)--------------+
+ | R2 |
+ | (vrf10) (vrf20) |
+ | / / \ |
+ +-(eth1)-----(eth2)-----(eth3)-+
+ :1 :1 :1
+ | | |
+ +------+ +------+ +------+
+ / 2001: \ / 2001: \ / 2001: \
+ \ 2::/64 / \ 4::/64 / \ 6::/64 /
+ +------+ +------+ +------+
+ | | |
+ :2 :2 :2
+ (eth0) (eth0) (eth0)
+ CE2 CE4 CE6
+ """
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("ce1")
+ tgen.add_router("ce2")
+ tgen.add_router("ce3")
+ tgen.add_router("ce4")
+ tgen.add_router("ce5")
+ tgen.add_router("ce6")
+
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "eth0", "eth0")
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "eth0", "eth1")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r2"], "eth0", "eth1")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r1"], "eth0", "eth2")
+ tgen.add_link(tgen.gears["ce4"], tgen.gears["r2"], "eth0", "eth2")
+ tgen.add_link(tgen.gears["ce5"], tgen.gears["r1"], "eth0", "eth3")
+ tgen.add_link(tgen.gears["ce6"], tgen.gears["r2"], "eth0", "eth3")
+
+
+def setup_module(mod):
+ result = required_linux_kernel_version("4.15")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
+ tgen = Topogen(Topology, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_config(TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_BGP,
+ os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+
+ tgen.gears["r1"].run("ip link add vrf10 type vrf table 10")
+ tgen.gears["r1"].run("ip link set vrf10 up")
+ tgen.gears["r1"].run("ip link add vrf20 type vrf table 20")
+ tgen.gears["r1"].run("ip link set vrf20 up")
+ tgen.gears["r1"].run("ip link set eth1 master vrf10")
+ tgen.gears["r1"].run("ip link set eth2 master vrf10")
+ tgen.gears["r1"].run("ip link set eth3 master vrf20")
+
+ tgen.gears["r2"].run("ip link add vrf10 type vrf table 10")
+ tgen.gears["r2"].run("ip link set vrf10 up")
+ tgen.gears["r2"].run("ip link add vrf20 type vrf table 20")
+ tgen.gears["r2"].run("ip link set vrf20 up")
+ tgen.gears["r2"].run("ip link set eth1 master vrf10")
+ tgen.gears["r2"].run("ip link set eth2 master vrf20")
+ tgen.gears["r2"].run("ip link set eth3 master vrf20")
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+def test_rib():
+ def _check(name, cmd, expected_file):
+ logger.info("polling")
+ tgen = get_topogen()
+ router = tgen.gears[name]
+ output = json.loads(router.vtysh_cmd(cmd))
+ expected = open_json_file("{}/{}".format(CWD, expected_file))
+ return topotest.json_cmp(output, expected)
+
+ def check(name, cmd, expected_file):
+ logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, cmd, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
+ assert result is None, 'Failed'
+
+ check("r1", "show bgp ipv6 vpn json", "r1/vpnv6_rib.json")
+ check("r2", "show bgp ipv6 vpn json", "r2/vpnv6_rib.json")
+ check("r1", "show ipv6 route vrf vrf10 json", "r1/vrf10_rib.json")
+ check("r1", "show ipv6 route vrf vrf20 json", "r1/vrf20_rib.json")
+ check("r2", "show ipv6 route vrf vrf10 json", "r2/vrf10_rib.json")
+ check("r2", "show ipv6 route vrf vrf20 json", "r2/vrf20_rib.json")
+ check("ce1", "show ipv6 route json", "ce1/ipv6_rib.json")
+ check("ce2", "show ipv6 route json", "ce2/ipv6_rib.json")
+ check("ce3", "show ipv6 route json", "ce3/ipv6_rib.json")
+ check("ce4", "show ipv6 route json", "ce4/ipv6_rib.json")
+ check("ce5", "show ipv6 route json", "ce5/ipv6_rib.json")
+ check("ce6", "show ipv6 route json", "ce6/ipv6_rib.json")
+
+
+def test_ping():
+ def _check(name, dest_addr, match):
+ tgen = get_topogen()
+ output = tgen.gears[name].run("ping6 {} -c 1 -w 1".format(dest_addr))
+ logger.info(output)
+ assert match in output, "ping fail"
+
+ def check(name, dest_addr, match):
+ logger.info("[+] check {} {} {}".format(name, dest_addr, match))
+ tgen = get_topogen()
+ func = functools.partial(_check, name, dest_addr, match)
+ success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
+ assert result is None, 'Failed'
+
+ check("ce1", "2001:2::2", " 0% packet loss")
+ check("ce1", "2001:3::2", " 0% packet loss")
+ check("ce1", "2001:4::2", " 100% packet loss")
+ check("ce1", "2001:5::2", " 100% packet loss")
+ check("ce1", "2001:6::2", " 100% packet loss")
+ check("ce4", "2001:1::2", " 100% packet loss")
+ check("ce4", "2001:2::2", " 100% packet loss")
+ check("ce4", "2001:3::2", " 100% packet loss")
+ check("ce4", "2001:5::2", " 0% packet loss")
+ check("ce4", "2001:6::2", " 0% packet loss")
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from lib.common_config import adjust_router_l3mdev
+from lib.common_config import required_linux_kernel_version
# Required to instantiate the topology builder class.
from mininet.topo import Topo
def setup_module(mod):
"Sets up the pytest environment"
+
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("5.0")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
tgen = Topogen(BGPIPV6RTADVVRFTopo, mod.__name__)
tgen.start_topology()
for cmd in cmds:
output = tgen.net[rname].cmd(cmd.format(rname))
- # adjust handling of vrf traffic
- adjust_router_l3mdev(tgen, rname)
-
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.topotest import iproute2_is_vrf_capable
-from lib.common_config import (
- required_linux_kernel_version,
- adjust_router_l3mdev,
-)
+from lib.common_config import required_linux_kernel_version
from mininet.topo import Topo
for cmd in cmds:
output = tgen.net[rname].cmd(cmd.format(rname))
- # adjust handling of vrf traffic
- adjust_router_l3mdev(tgen, rname)
-
for rname, router in tgen.routers().items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
logger.debug("Exiting lib API: verify_ip_nht()")
return False
-
-
-def kernel_requires_l3mdev_adjustment():
- """
- Checks if the L3 master device needs to be adjusted to handle VRF traffic
- based on kernel version.
-
- Returns
- -------
- 1 or 0
- """
-
- if version_cmp(platform.release(), "4.15") >= 0:
- return 1
- return 0
-
-
-def adjust_router_l3mdev(tgen, router):
- """
- Adjusts a routers L3 master device to handle VRF traffic depending on kernel
- version.
-
- Parameters
- ----------
- * `tgen` : tgen object
- * `router` : router id to be configured.
-
- Returns
- -------
- True
- """
-
- l3mdev_accept = kernel_requires_l3mdev_adjustment()
-
- logger.info(
- "router {0}: setting net.ipv4.tcp_l3mdev_accept={1}".format(
- router, l3mdev_accept
- )
- )
-
- output = tgen.net[router].cmd("sysctl -n net.ipv4.tcp_l3mdev_accept")
- logger.info("router {0}: existing tcp_l3mdev_accept was {1}".format(router, output))
-
- tgen.net[router].cmd(
- "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept)
- )
-
- return True
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.topotest import iproute2_is_vrf_capable
-from lib.common_config import (
- required_linux_kernel_version,
- adjust_router_l3mdev,
-)
+from lib.common_config import required_linux_kernel_version
#####################################################
##
def setup_module(mod):
"Sets up the pytest environment"
+ # Required linux kernel version for this suite to run.
+ result = required_linux_kernel_version("5.0")
+ if result is not True:
+ pytest.skip("Kernel requirements are not met")
+
tgen = Topogen(NetworkTopo, mod.__name__)
tgen.start_topology()
for cmd in cmds2:
output = tgen.net[rname].cmd(cmd.format(rname))
- # adjust handling of vrf traffic
- adjust_router_l3mdev(tgen, rname)
-
for rname, router in tgen.routers().items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
--- /dev/null
+[
+ {
+ "name": "loc1",
+ "chunks": [
+ "2001:db8:1:1::/64"
+ ]
+ }
+]
--- /dev/null
+[
+ {
+ "name": "loc3",
+ "chunks": []
+ }
+]
--- /dev/null
+[
+ {
+ "name": "loc3",
+ "chunks": [
+ "2001:db8:3:3::/64"
+ ]
+ }
+]
--- /dev/null
+{
+ "2001:db8:1:1:1::/80":[
+ {
+ "prefix":"2001:db8:1:1:1::/80",
+ "protocol":"static",
+ "selected":true,
+ "installed":true,
+ "nexthops":[{
+ "fib":true,
+ "active":true,
+ "seg6local":{ "action":"End" }
+ }]
+ }
+ ],
+ "2001:db8:2:2:1::/80":[
+ {
+ "prefix":"2001:db8:2:2:1::/80",
+ "protocol":"static",
+ "selected":true,
+ "installed":true,
+ "nexthops":[{
+ "fib":true,
+ "active":true,
+ "seg6local":{ "action":"End" }
+
+ }]
+ }
+ ]
+}
--- /dev/null
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "sharp"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name":"loc3",
+ "statusUp":false,
+ "chunks":[
+ {
+ "proto":"sharp"
+ }
+ ]
+ }
+ ]
+}
+
--- /dev/null
+{
+ "locators":[
+ {
+ "name": "loc1",
+ "prefix": "2001:db8:1:1::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:1:1::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc2",
+ "prefix": "2001:db8:2:2::/64",
+ "statusUp": true,
+ "chunks": [
+ {
+ "prefix": "2001:db8:2:2::/64",
+ "proto": "system"
+ }
+ ]
+ },
+ {
+ "name": "loc3",
+ "prefix": "2001:db8:3:3::/64",
+ "statusUp": true,
+ "chunks":[
+ {
+ "prefix": "2001:db8:3:3::/64",
+ "proto": "sharp"
+ }
+ ]
+ }
+ ]
+}
+
--- /dev/null
+ip link add dummy0 type dummy
+ip link set dummy0 up
--- /dev/null
+hostname r1
+!
+log stdout notifications
+log monitor notifications
+log commands
+log file sharpd.log debugging
+!
--- /dev/null
+hostname r1
+!
+debug zebra events
+debug zebra rib detailed
+!
+log stdout notifications
+log monitor notifications
+log commands
+log file zebra.log debugging
+!
+segment-routing
+ srv6
+ locators
+ locator loc1
+ prefix 2001:db8:1:1::/64
+ !
+ locator loc2
+ prefix 2001:db8:2:2::/64
+ !
+ !
+ !
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_srv6_manager.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2020 by
+# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_srv6_manager.py:
+Test for SRv6 manager on zebra
+"""
+
+import os
+import sys
+import json
+import time
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router('r1')
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, '{}/sharpd.conf'.format(rname)))
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_srv6():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ router = tgen.gears['r1']
+
+ def _check_srv6_locator(router, expected_locator_file):
+ logger.info("checking zebra locator status")
+ output = json.loads(router.vtysh_cmd("show segment-routing srv6 locator json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_locator_file))
+ return topotest.json_cmp(output, expected)
+
+ def _check_sharpd_chunk(router, expected_chunk_file):
+ logger.info("checking sharpd locator chunk status")
+ output = json.loads(router.vtysh_cmd("show sharp segment-routing srv6 json"))
+ expected = open_json_file("{}/{}".format(CWD, expected_chunk_file))
+ return topotest.json_cmp(output, expected)
+
+ def check_srv6_locator(router, expected_file):
+ func = functools.partial(_check_srv6_locator, router, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
+ assert result is None, 'Failed'
+
+ def check_sharpd_chunk(router, expected_file):
+ func = functools.partial(_check_sharpd_chunk, router, expected_file)
+ success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
+ assert result is None, 'Failed'
+
+ logger.info("Test1 for Locator Configuration")
+ check_srv6_locator(router, "expected_locators1.json")
+ check_sharpd_chunk(router, "expected_chunks1.json")
+
+ logger.info("Test2 get chunk for locator loc1")
+ router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc1")
+ check_srv6_locator(router, "expected_locators2.json")
+ check_sharpd_chunk(router, "expected_chunks2.json")
+
+ logger.info("Test3 release chunk for locator loc1")
+ router.vtysh_cmd("sharp srv6-manager release-locator-chunk loc1")
+ check_srv6_locator(router, "expected_locators3.json")
+ check_sharpd_chunk(router, "expected_chunks3.json")
+
+ logger.info("Test4 get chunk for non-exist locator by zclient")
+ router.vtysh_cmd("sharp srv6-manager get-locator-chunk loc3")
+ check_srv6_locator(router, "expected_locators4.json")
+ check_sharpd_chunk(router, "expected_chunks4.json")
+
+ logger.info("Test5 Test for Zclient. after locator loc3 was configured")
+ router.vtysh_cmd(
+ """
+ configure terminal
+ segment-routing
+ srv6
+ locators
+ locator loc3
+ prefix 2001:db8:3:3::/64
+ """
+ )
+ check_srv6_locator(router, "expected_locators5.json")
+ check_sharpd_chunk(router, "expected_chunks5.json")
+
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+[
+ {
+ "in": {
+ "dest": "1::1",
+ "nh": "2001::1",
+ "sid": "a::"
+ },
+ "out":[{
+ "prefix":"1::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "seg6": { "segs": "a::" }
+ }]
+ }]
+ }
+]
--- /dev/null
+ip link add vrf10 type vrf table 10
+ip link set vrf10 up
+ip link add dum0 type dummy
+ip link set dum0 up
+sysctl -w net.ipv6.conf.dum0.disable_ipv6=0
--- /dev/null
+log file zebra.log
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel msgdump
+!
+interface dum0
+ ipv6 address 2001::1/64
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_zebra_seg6_route.py
+#
+# Copyright (c) 2020 by
+# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_zebra_seg6_route.py: Test seg6 route addition with zapi.
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+import platform
+from functools import partial
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import shutdown_bringup_interface
+from mininet.topo import Topo
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+class TemplateTopo(Topo):
+ def build(self, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router("r1")
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_zebra_seg6local_routes():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Test for seg6local route install via ZAPI was start.")
+ r1 = tgen.gears["r1"]
+
+ def check(router, dest, nh, sid, expected):
+ router.vtysh_cmd("sharp install seg6-routes {} "\
+ "nexthop-seg6 {} encap {} 1".format(dest, nh, sid))
+ output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
+ output = output.get('{}/128'.format(dest))
+ if output is None:
+ return False
+ return topotest.json_cmp(output, expected)
+
+ manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
+ for manifest in manifests:
+ logger.info("CHECK {} {} {}".format(manifest['in']['dest'],
+ manifest['in']['nh'],
+ manifest['in']['sid']))
+ test_func = partial(check, r1,
+ manifest['in']['dest'],
+ manifest['in']['nh'],
+ manifest['in']['sid'],
+ manifest['out'])
+ success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
+ assert result is None, 'Failed'
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+[
+ {
+ "in": {
+ "dest": "1::1",
+ "context": "End"
+ },
+ "out":[{
+ "prefix":"1::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End" }
+ }]
+ }]
+ },
+ {
+ "in": {
+ "dest": "2::1",
+ "context": "End_X 2001::1"
+ },
+ "out":[{
+ "prefix":"2::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End.X" }
+ }]
+ }]
+ },
+ {
+ "in": {
+ "dest": "3::1",
+ "context": "End_T 10"
+ },
+ "out":[{
+ "prefix":"3::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End.T" }
+ }]
+ }]
+ },
+ {
+ "in": {
+ "dest": "4::1",
+ "context": "End_DX4 10.0.0.1"
+ },
+ "out":[{
+ "prefix":"4::1/128",
+ "protocol":"sharp",
+ "selected":true,
+ "destSelected":true,
+ "distance":150,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "nexthops":[{
+ "flags":3,
+ "fib":true,
+ "active":true,
+ "directlyConnected":true,
+ "interfaceName": "dum0",
+ "seg6local": { "action": "End.DX4" }
+ }]
+ }]
+ }
+]
--- /dev/null
+ip link add dum0 type dummy
+ip link set dum0 up
+sysctl -w net.ipv6.conf.dum0.disable_ipv6=0
--- /dev/null
+log file zebra.log
+!
+log stdout notifications
+log monitor notifications
+log commands
+!
+debug zebra packet
+debug zebra dplane
+debug zebra kernel msgdump
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_zebra_seg6local_route.py
+#
+# Copyright (c) 2020 by
+# LINE Corporation, Hiroki Shirokura <slank.dev@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_zebra_seg6local_route.py: Test seg6local route addition with zapi.
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+import platform
+from functools import partial
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from lib.common_config import shutdown_bringup_interface
+from mininet.topo import Topo
+
+
+def open_json_file(filename):
+ try:
+ with open(filename, "r") as f:
+ return json.load(f)
+ except IOError:
+ assert False, "Could not read file {}".format(filename)
+
+
+class TemplateTopo(Topo):
+ def build(self, **_opts):
+ tgen = get_topogen(self)
+ tgen.add_router("r1")
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+ router_list = tgen.routers()
+ for rname, router in tgen.routers().items():
+ router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
+ router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_zebra_seg6local_routes():
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ logger.info("Test for seg6local route install via ZAPI was start.")
+ r1 = tgen.gears["r1"]
+
+ def check(router, dest, context, expected):
+ router.vtysh_cmd("sharp install seg6local-routes {} "\
+ "nexthop-seg6local dum0 {} 1".format(dest, context))
+ output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
+ output = output.get('{}/128'.format(dest))
+ if output is None:
+ return False
+ return topotest.json_cmp(output, expected)
+
+ manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
+ for manifest in manifests:
+ logger.info("CHECK {} {}".format(manifest['in']['dest'],
+ manifest['in']['context']))
+ test_func = partial(check, r1,
+ manifest['in']['dest'],
+ manifest['in']['context'],
+ manifest['out'])
+ success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
+ assert result is None, 'Failed'
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
/frrcommon.sh
/frr.service
/frr@.service
+/frr-llvm-cg
#include <json-c/json.h>
+#include "frr-llvm-debuginfo.h"
+
/* if you want to use this without the special FRRouting defines,
* remove the following #define
*/
#define FRR_SPECIFIC
+static struct dbginfo *dbginfo;
+
static void dbgloc_add(struct json_object *jsobj, LLVMValueRef obj)
{
unsigned file_len = 0;
return ret;
}
+static bool try_struct_fptr(struct json_object *js_call, LLVMValueRef gep,
+ const char *prefix)
+{
+ unsigned long long val = 0;
+ bool ret = false;
+ LLVMTypeRef ptrtype = LLVMTypeOf(LLVMGetOperand(gep, 0));
+ LLVMValueRef idx;
+
+ /* middle steps like struct a -> struct b a_member; -> fptr */
+ for (int i = 1; ptrtype && i < LLVMGetNumOperands(gep) - 1; i++) {
+ if (LLVMGetTypeKind(ptrtype) == LLVMPointerTypeKind
+ || LLVMGetTypeKind(ptrtype) == LLVMArrayTypeKind
+ || LLVMGetTypeKind(ptrtype) == LLVMVectorTypeKind) {
+ ptrtype = LLVMGetElementType(ptrtype);
+ continue;
+ }
+
+ if (LLVMGetTypeKind(ptrtype) != LLVMStructTypeKind)
+ return false;
+
+ idx = LLVMGetOperand(gep, i);
+ if (!LLVMIsConstant(idx))
+ return false;
+ val = LLVMConstIntGetZExtValue(idx);
+
+ unsigned n = LLVMGetNumContainedTypes(ptrtype);
+ LLVMTypeRef arr[n];
+
+ if (val > n)
+ return false;
+
+ LLVMGetSubtypes(ptrtype, arr);
+ ptrtype = arr[val];
+ }
+
+ if (!ptrtype)
+ return false;
+
+ idx = LLVMGetOperand(gep, LLVMGetNumOperands(gep) - 1);
+ if (!LLVMIsConstant(idx))
+ return false;
+
+ val = LLVMConstIntGetZExtValue(idx);
+
+ char *sname = NULL, *mname = NULL;
+
+ if (dbginfo_struct_member(dbginfo, ptrtype, val, &sname, &mname)) {
+ fprintf(stderr, "%s: call to struct %s->%s\n", prefix, sname,
+ mname);
+
+ json_object_object_add(js_call, "type",
+ json_object_new_string("struct_memb"));
+ json_object_object_add(js_call, "struct",
+ json_object_new_string(sname));
+ json_object_object_add(js_call, "member",
+ json_object_new_string(mname));
+ ret = true;
+ }
+ free(sname);
+ free(mname);
+
+ return ret;
+}
+
static bool details_fptr_vars = false;
static bool details_fptr_consts = true;
prefix, hdr_written);
return;
+ case LLVMConstantExprValueKind:
+ switch (LLVMGetConstOpcode(value)) {
+ case LLVMGetElementPtr:
+ if (try_struct_fptr(js_call, value, prefix)) {
+ *hdr_written = true;
+ return;
+ }
+
+ fprintf(stderr,
+ "%s: calls function pointer from unhandled const GEP\n",
+ prefix);
+ *hdr_written = true;
+ /* fallthru */
+ default:
+ /* to help the user / development */
+ if (!*hdr_written) {
+ fprintf(stderr,
+ "%s: calls function pointer from constexpr\n",
+ prefix);
+ *hdr_written = true;
+ }
+ dump = LLVMPrintValueToString(value);
+ fprintf(stderr, "%s- [opcode=%d] %s\n", prefix,
+ LLVMGetConstOpcode(value), dump);
+ LLVMDisposeMessage(dump);
+ }
+ return;
+
default:
/* to help the user / development */
if (!*hdr_written) {
#ifdef FRR_SPECIFIC
static bool is_thread_sched(const char *name, size_t len)
{
-#define thread_prefix "funcname_"
+#define thread_prefix "_"
static const char *const names[] = {
thread_prefix "thread_add_read_write",
thread_prefix "thread_add_timer",
}
#endif
+static bool _check_val(bool cond, const char *text, LLVMValueRef dumpval)
+{
+ if (cond)
+ return true;
+
+ char *dump = LLVMPrintValueToString(dumpval);
+ fprintf(stderr, "check failed: %s\ndump:\n\t%s\n", text, dump);
+ LLVMDisposeMessage(dump);
+ return false;
+}
+
+#define check_val(cond, dump) \
+ if (!_check_val(cond, #cond, dump)) \
+ return;
+
+static char *get_string(LLVMValueRef value)
+{
+ if (!LLVMIsAConstant(value))
+ return strdup("!NOT-A-CONST");
+
+ if (LLVMGetValueKind(value) == LLVMConstantExprValueKind
+ && LLVMGetConstOpcode(value) == LLVMGetElementPtr) {
+ value = LLVMGetOperand(value, 0);
+
+ if (!LLVMIsAConstant(value))
+ return strdup("!NOT-A-CONST-2");
+ }
+
+ if (LLVMIsAGlobalVariable(value))
+ value = LLVMGetInitializer(value);
+
+ size_t len = 0;
+ const char *sval = LLVMGetAsString(value, &len);
+
+ return strndup(sval, len);
+}
+
+static void handle_yang_module(struct json_object *js_special,
+ LLVMValueRef yang_mod)
+{
+ check_val(LLVMIsAGlobalVariable(yang_mod), yang_mod);
+
+ LLVMValueRef value;
+
+ value = LLVMGetInitializer(yang_mod);
+ LLVMValueKind kind = LLVMGetValueKind(value);
+
+ check_val(kind == LLVMConstantStructValueKind, value);
+
+ size_t var_len = 0;
+ const char *var_name = LLVMGetValueName2(yang_mod, &var_len);
+ char buf_name[var_len + 1];
+
+ memcpy(buf_name, var_name, var_len);
+ buf_name[var_len] = '\0';
+
+ struct json_object *js_yang, *js_yangmod, *js_items;
+
+ js_yang = js_get_or_make(js_special, "yang", json_object_new_object);
+ js_yangmod = js_get_or_make(js_yang, buf_name, json_object_new_object);
+ js_items = js_get_or_make(js_yangmod, "items", json_object_new_array);
+
+ char *mod_name = get_string(LLVMGetOperand(value, 0));
+ json_object_object_add(js_yangmod, "name",
+ json_object_new_string(mod_name));
+ free(mod_name);
+
+ value = LLVMGetOperand(value, 1);
+ kind = LLVMGetValueKind(value);
+ check_val(kind == LLVMConstantArrayValueKind, value);
+
+ unsigned len = LLVMGetArrayLength(LLVMTypeOf(value));
+
+ for (unsigned i = 0; i < len - 1; i++) {
+ struct json_object *js_item, *js_cbs;
+ LLVMValueRef item = LLVMGetOperand(value, i);
+ char *xpath = get_string(LLVMGetOperand(item, 0));
+
+ js_item = json_object_new_object();
+ json_object_array_add(js_items, js_item);
+
+ json_object_object_add(js_item, "xpath",
+ json_object_new_string(xpath));
+ js_cbs = js_get_or_make(js_item, "cbs", json_object_new_object);
+
+ free(xpath);
+
+ LLVMValueRef cbs = LLVMGetOperand(item, 1);
+
+ check_val(LLVMGetValueKind(cbs) == LLVMConstantStructValueKind,
+ value);
+
+ LLVMTypeRef cbs_type = LLVMTypeOf(cbs);
+ unsigned cblen = LLVMCountStructElementTypes(cbs_type);
+
+ for (unsigned i = 0; i < cblen; i++) {
+ LLVMValueRef cb = LLVMGetOperand(cbs, i);
+
+ char *sname = NULL;
+ char *mname = NULL;
+
+ if (dbginfo_struct_member(dbginfo, cbs_type, i, &sname,
+ &mname)) {
+ (void)0;
+ }
+
+ if (LLVMIsAFunction(cb)) {
+ size_t fn_len;
+ const char *fn_name;
+
+ fn_name = LLVMGetValueName2(cb, &fn_len);
+
+ json_object_object_add(
+ js_cbs, mname,
+ json_object_new_string_len(fn_name,
+ fn_len));
+ }
+
+ free(sname);
+ free(mname);
+ }
+ }
+}
+
+static void handle_daemoninfo(struct json_object *js_special,
+ LLVMValueRef daemoninfo)
+{
+ check_val(LLVMIsAGlobalVariable(daemoninfo), daemoninfo);
+
+ LLVMTypeRef type;
+ LLVMValueRef value;
+ unsigned len;
+
+ type = LLVMGlobalGetValueType(daemoninfo);
+ value = LLVMGetInitializer(daemoninfo);
+ LLVMValueKind kind = LLVMGetValueKind(value);
+
+ check_val(kind == LLVMConstantStructValueKind, value);
+
+ int yang_idx = -1;
+
+ len = LLVMCountStructElementTypes(type);
+
+ LLVMTypeRef fieldtypes[len];
+ LLVMGetSubtypes(type, fieldtypes);
+
+ for (unsigned i = 0; i < len; i++) {
+ LLVMTypeRef t = fieldtypes[i];
+
+ if (LLVMGetTypeKind(t) != LLVMPointerTypeKind)
+ continue;
+ t = LLVMGetElementType(t);
+ if (LLVMGetTypeKind(t) != LLVMPointerTypeKind)
+ continue;
+ t = LLVMGetElementType(t);
+ if (LLVMGetTypeKind(t) != LLVMStructTypeKind)
+ continue;
+
+ const char *name = LLVMGetStructName(t);
+ if (!strcmp(name, "struct.frr_yang_module_info"))
+ yang_idx = i;
+ }
+
+ if (yang_idx == -1)
+ return;
+
+ LLVMValueRef yang_mods = LLVMGetOperand(value, yang_idx);
+ LLVMValueRef yang_size = LLVMGetOperand(value, yang_idx + 1);
+
+ check_val(LLVMIsConstant(yang_size), yang_size);
+
+ unsigned long long ival = LLVMConstIntGetZExtValue(yang_size);
+
+ check_val(LLVMGetValueKind(yang_mods) == LLVMConstantExprValueKind
+ && LLVMGetConstOpcode(yang_mods) == LLVMGetElementPtr,
+ yang_mods);
+
+ yang_mods = LLVMGetOperand(yang_mods, 0);
+
+ check_val(LLVMIsAGlobalVariable(yang_mods), yang_mods);
+
+ yang_mods = LLVMGetInitializer(yang_mods);
+
+ check_val(LLVMGetValueKind(yang_mods) == LLVMConstantArrayValueKind,
+ yang_mods);
+
+ len = LLVMGetArrayLength(LLVMTypeOf(yang_mods));
+
+ if (len != ival)
+ fprintf(stderr, "length mismatch - %llu vs. %u\n", ival, len);
+
+ for (unsigned i = 0; i < len; i++) {
+ char *dump;
+
+ LLVMValueRef item = LLVMGetOperand(yang_mods, i);
+ LLVMValueKind kind = LLVMGetValueKind(item);
+
+ check_val(kind == LLVMGlobalVariableValueKind
+ || kind == LLVMConstantExprValueKind,
+ item);
+
+ if (kind == LLVMGlobalVariableValueKind)
+ continue;
+
+ LLVMOpcode opcode = LLVMGetConstOpcode(item);
+ switch (opcode) {
+ case LLVMBitCast:
+ item = LLVMGetOperand(item, 0);
+ handle_yang_module(js_special, item);
+ break;
+
+ default:
+ dump = LLVMPrintValueToString(item);
+ printf("[%u] = [opcode=%u] %s\n", i, opcode, dump);
+ LLVMDisposeMessage(dump);
+ }
+ }
+}
+
static void process_call(struct json_object *js_calls,
struct json_object *js_special,
LLVMValueRef instr,
LLVMValueRef called = LLVMGetCalledValue(instr);
+ if (LLVMIsAInlineAsm(called))
+ return;
+
if (LLVMIsAConstantExpr(called)) {
LLVMOpcode opcode = LLVMGetConstOpcode(called);
snprintf(prefix, sizeof(prefix), "%.*s:%d:%.*s()",
(int)file_len, file, line, (int)name_len, name_c);
+ if (LLVMIsALoadInst(called)
+ && LLVMIsAGetElementPtrInst(LLVMGetOperand(called, 0))
+ && try_struct_fptr(js_call, LLVMGetOperand(called, 0),
+ prefix))
+ goto out_struct_fptr;
+
while (LLVMIsALoadInst(last) || LLVMIsAGetElementPtrInst(last))
/* skipping over details for GEP here, but meh. */
last = LLVMGetOperand(last, 0);
prefix);
} else {
char *dump = LLVMPrintValueToString(called);
- printf("\t%s\n", dump);
+ fprintf(stderr, "%s: ??? %s\n", prefix, dump);
LLVMDisposeMessage(dump);
}
- return;
#ifdef FRR_SPECIFIC
- } else if (!strcmp(called_name, "install_element")) {
+ } else if (!strcmp(called_name, "_install_element")) {
called_type = FN_INSTALL_ELEMENT;
LLVMValueRef param0 = LLVMGetOperand(instr, 0);
json_object_new_string_len(called_name, called_len));
LLVMValueRef fparam;
- if (strstr(called_name, "_read_"))
- fparam = LLVMGetOperand(instr, 2);
- else
- fparam = LLVMGetOperand(instr, 1);
+ fparam = LLVMGetOperand(instr, 2);
assert(fparam);
size_t target_len = 0;
* - zclient->* ?
*/
#endif /* FRR_SPECIFIC */
+ } else if (!strcmp(called_name, "frr_preinit")) {
+ LLVMValueRef daemoninfo = LLVMGetOperand(instr, 0);
+
+ handle_daemoninfo(js_special, daemoninfo);
+
+ json_object_object_add(
+ js_call, "target",
+ json_object_new_string_len(called_name, called_len));
} else {
json_object_object_add(
js_call, "target",
json_object_new_string_len(called_name, called_len));
}
+out_struct_fptr:
for (unsigned argno = 0; argno < n_args; argno++) {
LLVMValueRef param = LLVMGetOperand(instr, argno);
size_t target_len;
// done with the memory buffer now, so dispose of it
LLVMDisposeMemoryBuffer(memoryBuffer);
+ dbginfo = dbginfo_load(module);
+
struct json_object *js_root, *js_funcs, *js_special;
js_root = json_object_new_object();
--- /dev/null
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <http://unlicense.org/>
+
+#include <llvm-c/BitReader.h>
+#include <llvm-c/BitWriter.h>
+#include <llvm-c/Core.h>
+#include <llvm-c/DebugInfo.h>
+
+#include <llvm/IR/Module.h>
+#include <llvm/IR/Value.h>
+#include <llvm/IR/Type.h>
+#include <llvm/IR/DebugInfo.h>
+#include <llvm/IR/DebugInfoMetadata.h>
+#include <llvm/Support/raw_ostream.h>
+
+#include <map>
+
+#include "frr-llvm-debuginfo.h"
+
+/* llvm::DebugInfoFinder is unfortunately not exposed in the llvm-c API... */
+
+struct dbginfo {
+ llvm::DebugInfoFinder finder;
+ std::map<std::string, llvm::DICompositeType *> tab;
+};
+
+struct dbginfo *dbginfo_load(LLVMModuleRef _mod)
+{
+ llvm::Module *mod = llvm::unwrap(_mod);
+ struct dbginfo *info = new dbginfo();
+
+ info->finder.processModule(*mod);
+
+ for (auto ty : info->finder.types()) {
+ if (ty->getMetadataID() != llvm::Metadata::DICompositeTypeKind)
+ continue;
+
+ llvm::DICompositeType *cty = (llvm::DICompositeType *)ty;
+ /* empty forward declarations aka "struct foobar;" */
+ if (cty->getElements().size() == 0)
+ continue;
+
+ info->tab.emplace(std::move(ty->getName().str()), cty);
+ }
+
+ return info;
+}
+
+bool dbginfo_struct_member(struct dbginfo *info, LLVMTypeRef _typ,
+ unsigned long long idx, char **struct_name,
+ char **member_name)
+{
+ *struct_name = NULL;
+ *member_name = NULL;
+
+ llvm::Type *typ = llvm::unwrap(_typ);
+
+ if (!typ->isStructTy())
+ return false;
+
+ llvm::StructType *styp = (llvm::StructType *)typ;
+ auto sname = styp->getStructName();
+
+ if (!sname.startswith("struct."))
+ return false;
+ sname = sname.drop_front(7);
+
+ size_t dot = sname.find_last_of(".");
+ if (dot != sname.npos)
+ sname = sname.take_front(dot);
+
+ auto item = info->tab.find(sname.str());
+ if (item == info->tab.end())
+ return false;
+
+ auto elements = item->second->getElements();
+ if (idx >= elements.size())
+ return false;
+
+ auto elem = elements[idx];
+
+ if (elem->getMetadataID() != llvm::Metadata::DIDerivedTypeKind)
+ return false;
+
+ llvm::DIDerivedType *dtyp = (llvm::DIDerivedType *)elem;
+
+ *struct_name = strdup(sname.str().c_str());
+ *member_name = strdup(dtyp->getName().str().c_str());
+ return true;
+}
--- /dev/null
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+//
+// In jurisdictions that recognize copyright laws, the author or authors
+// of this software dedicate any and all copyright interest in the
+// software to the public domain. We make this dedication for the benefit
+// of the public at large and to the detriment of our heirs and
+// successors. We intend this dedication to be an overt act of
+// relinquishment in perpetuity of all present and future rights to this
+// software under copyright law.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+// IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+// OTHER DEALINGS IN THE SOFTWARE.
+//
+// For more information, please refer to <http://unlicense.org/>
+
+#ifndef _FRR_LLVM_DEBUGINFO_H
+#define _FRR_LLVM_DEBUGINFO_H
+
+#include <stdbool.h>
+#include <llvm-c/Core.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct dbginfo;
+
+extern struct dbginfo *dbginfo_load(LLVMModuleRef mod);
+extern bool dbginfo_struct_member(struct dbginfo *di, LLVMTypeRef typ,
+ unsigned long long idx, char **struct_name,
+ char **member_name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_LLVM_DEBUGINFO_H */
llvm_version = $(shell echo __clang_major__ | $(CC) -xc -P -E -)
tools_frr_llvm_cg_CPPFLAGS = $(CPPFLAGS_BASE)
tools_frr_llvm_cg_CFLAGS = $(AM_CFLAGS) `llvm-config-$(llvm_version) --cflags`
+tools_frr_llvm_cg_CXXFLAGS = $(AM_CXXFLAGS) -O0 -ggdb3 `llvm-config-$(llvm_version) --cxxflags`
tools_frr_llvm_cg_LDFLAGS = `llvm-config-$(llvm_version) --ldflags --libs`
tools_frr_llvm_cg_SOURCES = \
tools/frr-llvm-cg.c \
+ tools/frr-llvm-debuginfo.cpp \
+ # end
+
+noinst_HEADERS += \
+ tools/frr-llvm-debuginfo.h \
# end
EXTRA_DIST += \
.prompt = "%s(config-route-map)# ",
};
+static struct cmd_node srv6_node = {
+ .name = "srv6",
+ .node = SRV6_NODE,
+ .parent_node = SEGMENT_ROUTING_NODE,
+ .prompt = "%s(config-srv6)# ",
+};
+
+static struct cmd_node srv6_locs_node = {
+ .name = "srv6-locators",
+ .node = SRV6_LOCS_NODE,
+ .parent_node = SRV6_NODE,
+ .prompt = "%s(config-srv6-locators)# ",
+};
+
+static struct cmd_node srv6_loc_node = {
+ .name = "srv6-locator",
+ .node = SRV6_LOC_NODE,
+ .parent_node = SRV6_LOCS_NODE,
+ .prompt = "%s(config-srv6-locator)# ",
+};
+
#ifdef HAVE_PBRD
static struct cmd_node pbr_map_node = {
.name = "pbr-map",
.parent_node = BGP_NODE,
.prompt = "%s(config-bgp-bmp)# "
};
+
+/* "segment-routing srv6" node under router-bgp. */
+static struct cmd_node bgp_srv6_node = {
+	.name = "bgp srv6",
+	.node = BGP_SRV6_NODE,
+	.parent_node = BGP_NODE,
+	.prompt = "%s(config-router-srv6)# ",
+};
#endif /* HAVE_BGPD */
#ifdef HAVE_OSPFD
return vtysh_end();
}
+/* Enter the SRv6 configuration node ("srv6" under segment-routing). */
+DEFUNSH(VTYSH_SR, srv6, srv6_cmd,
+	"srv6",
+	"Segment-Routing SRv6 configuration\n")
+{
+	vty->node = SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/* Enter the SRv6 locators configuration node ("locators" under srv6). */
+DEFUNSH(VTYSH_SR, srv6_locators, srv6_locators_cmd,
+	"locators",
+	"Segment-Routing SRv6 locators configuration\n")
+{
+	vty->node = SRV6_LOCS_NODE;
+	return CMD_SUCCESS;
+}
+
+/* Enter a per-locator configuration node ("locator WORD" under locators). */
+DEFUNSH(VTYSH_SR, srv6_locator, srv6_locator_cmd,
+	"locator WORD",
+	"Segment Routing SRv6 locator\n"
+	"Specify locator-name\n")
+{
+	vty->node = SRV6_LOC_NODE;
+	return CMD_SUCCESS;
+}
+
+
#ifdef HAVE_BGPD
DEFUNSH(VTYSH_BGPD, router_bgp, router_bgp_cmd,
"router bgp [(1-4294967295) [<view|vrf> WORD]]",
return CMD_SUCCESS;
}
+/* Enter the BGP SRv6 node ("segment-routing srv6" under router-bgp). */
+DEFUNSH(VTYSH_BGPD,
+	bgp_srv6,
+	bgp_srv6_cmd,
+	"segment-routing srv6",
+	"Segment-Routing configuration\n"
+	"Segment-Routing SRv6 configuration\n")
+{
+	vty->node = BGP_SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/* Leave the BGP SRv6 node, returning to router-bgp. */
+DEFUNSH(VTYSH_BGPD,
+	exit_bgp_srv6,
+	exit_bgp_srv6_cmd,
+	"exit",
+	"exit Segment-Routing SRv6 configuration\n")
+{
+	if (vty->node == BGP_SRV6_NODE)
+		vty->node = BGP_NODE;
+	return CMD_SUCCESS;
+}
+
+/* "quit" alias for leaving the BGP SRv6 node. */
+DEFUNSH(VTYSH_BGPD,
+	quit_bgp_srv6,
+	quit_bgp_srv6_cmd,
+	"quit",
+	"quit Segment-Routing SRv6 configuration\n")
+{
+	if (vty->node == BGP_SRV6_NODE)
+		vty->node = BGP_NODE;
+	return CMD_SUCCESS;
+}
+
DEFUNSH(VTYSH_BGPD, address_family_evpn, address_family_evpn_cmd,
"address-family <l2vpn evpn>",
"Enter Address Family command mode\n"
#endif /* HAVE_FABRICD */
#if defined(HAVE_PATHD)
-DEFUNSH(VTYSH_PATHD, segment_routing, segment_routing_cmd,
+DEFUNSH(VTYSH_SR, segment_routing, segment_routing_cmd,
"segment-routing",
"Configure segment routing\n")
{
return CMD_SUCCESS;
}
+/* Leave the srv6 node, returning to segment-routing. */
+DEFUNSH(VTYSH_SR, exit_srv6_config, exit_srv6_config_cmd, "exit",
+	"Exit from SRv6 configuration mode\n")
+{
+	if (vty->node == SRV6_NODE)
+		vty->node = SEGMENT_ROUTING_NODE;
+	return CMD_SUCCESS;
+}
+
+/* Leave the srv6-locators node, returning to srv6. */
+DEFUNSH(VTYSH_SR, exit_srv6_locs_config, exit_srv6_locs_config_cmd, "exit",
+	"Exit from SRv6-locators configuration mode\n")
+{
+	if (vty->node == SRV6_LOCS_NODE)
+		vty->node = SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/* Leave a per-locator node, returning to srv6-locators. */
+DEFUNSH(VTYSH_SR, exit_srv6_loc_config, exit_srv6_loc_config_cmd, "exit",
+	"Exit from SRv6-locator configuration mode\n")
+{
+	if (vty->node == SRV6_LOC_NODE)
+		vty->node = SRV6_LOCS_NODE;
+	return CMD_SUCCESS;
+}
+
#ifdef HAVE_RIPD
DEFUNSH(VTYSH_RIPD, vtysh_exit_ripd, vtysh_exit_ripd_cmd, "exit",
"Exit current mode and down to previous mode\n")
install_element(BMP_NODE, &bmp_exit_cmd);
install_element(BMP_NODE, &bmp_quit_cmd);
install_element(BMP_NODE, &vtysh_end_all_cmd);
+
+ install_node(&bgp_srv6_node);
+ install_element(BGP_NODE, &bgp_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &exit_bgp_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &quit_bgp_srv6_cmd);
+ install_element(BGP_SRV6_NODE, &vtysh_end_all_cmd);
#endif /* HAVE_BGPD */
/* ripd */
install_element(CONFIG_NODE, &vtysh_end_all_cmd);
install_element(ENABLE_NODE, &vtysh_end_all_cmd);
+ /* SRv6 Data-plane */
+ install_node(&srv6_node);
+ install_element(SEGMENT_ROUTING_NODE, &srv6_cmd);
+ install_element(SRV6_NODE, &srv6_locators_cmd);
+ install_element(SRV6_NODE, &exit_srv6_config_cmd);
+ install_element(SRV6_NODE, &vtysh_end_all_cmd);
+
+ install_node(&srv6_locs_node);
+ install_element(SRV6_LOCS_NODE, &srv6_locator_cmd);
+ install_element(SRV6_LOCS_NODE, &exit_srv6_locs_config_cmd);
+ install_element(SRV6_LOCS_NODE, &vtysh_end_all_cmd);
+
+ install_node(&srv6_loc_node);
+ install_element(SRV6_LOC_NODE, &exit_srv6_loc_config_cmd);
+ install_element(SRV6_LOC_NODE, &vtysh_end_all_cmd);
+
install_element(ENABLE_NODE, &vtysh_show_running_config_cmd);
install_element(ENABLE_NODE, &vtysh_copy_running_config_cmd);
install_element(ENABLE_NODE, &vtysh_copy_to_running_cmd);
#define VTYSH_KEYS VTYSH_RIPD|VTYSH_EIGRPD
/* Daemons who can process nexthop-group configs */
#define VTYSH_NH_GROUP VTYSH_PBRD|VTYSH_SHARPD
+#define VTYSH_SR VTYSH_ZEBRA|VTYSH_PATHD
enum vtysh_write_integrated {
WRITE_INTEGRATED_UNSPECIFIED,
config = config_get(PROTOCOL_NODE, line);
else if (strncmp(line, "mpls", strlen("mpls")) == 0)
config = config_get(MPLS_NODE, line);
+ else if (strncmp(line, "segment-routing",
+ strlen("segment-routing"))
+ == 0)
+ config = config_get(SEGMENT_ROUTING_NODE, line);
else if (strncmp(line, "bfd", strlen("bfd")) == 0)
config = config_get(BFD_NODE, line);
else {
#include "zebra/zebra_nb.h"
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
+#include "zebra/zebra_srv6.h"
+#include "zebra/zebra_srv6_vty.h"
#define ZEBRA_PTM_SUPPORT
zebra_pbr_init();
zebra_opaque_init();
zebra_srte_init();
+ zebra_srv6_init();
+ zebra_srv6_vty_init();
/* For debug purpose. */
/* SET_FLAG (zebra_debug_event, ZEBRA_DEBUG_EVENT); */
#ifdef HAVE_NETLINK
+/* The following definition is to workaround an issue in the Linux kernel
+ * header files with redefinition of 'struct in6_addr' in both
+ * netinet/in.h and linux/in6.h.
+ * Reference - https://sourceware.org/ml/libc-alpha/2013-01/msg00599.html
+ */
+#define _LINUX_IN6_H
+
#include <net/if_arp.h>
#include <linux/lwtunnel.h>
#include <linux/mpls_iptunnel.h>
+#include <linux/seg6_iptunnel.h>
+#include <linux/seg6_local.h>
#include <linux/neighbour.h>
#include <linux/rtnetlink.h>
#include <linux/nexthop.h>
#include "if.h"
#include "log.h"
#include "prefix.h"
+#include "plist.h"
+#include "plist_int.h"
#include "connected.h"
#include "table.h"
#include "memory.h"
return num_labels;
}
+/*
+ * Parse a nested LWTUNNEL_ENCAP_SEG6_LOCAL RTA_ENCAP attribute.
+ *
+ * Copies any SEG6_LOCAL_NH4/NH6/TABLE arguments into @ctx and returns
+ * the advertised action, or ZEBRA_SEG6_LOCAL_ACTION_UNSPEC when the
+ * kernel did not include a SEG6_LOCAL_ACTION attribute.
+ */
+static enum seg6local_action_t
+parse_encap_seg6local(struct rtattr *tb,
+		      struct seg6local_context *ctx)
+{
+	/* 256 slots: comfortably larger than any SEG6_LOCAL_* type id. */
+	struct rtattr *tb_encap[256] = {};
+	enum seg6local_action_t act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+
+	netlink_parse_rtattr_nested(tb_encap, 256, tb);
+
+	if (tb_encap[SEG6_LOCAL_ACTION])
+		act = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_ACTION]);
+
+	if (tb_encap[SEG6_LOCAL_NH4])
+		ctx->nh4 = *(struct in_addr *)RTA_DATA(
+				tb_encap[SEG6_LOCAL_NH4]);
+
+	if (tb_encap[SEG6_LOCAL_NH6])
+		ctx->nh6 = *(struct in6_addr *)RTA_DATA(
+				tb_encap[SEG6_LOCAL_NH6]);
+
+	if (tb_encap[SEG6_LOCAL_TABLE])
+		ctx->table = *(uint32_t *)RTA_DATA(tb_encap[SEG6_LOCAL_TABLE]);
+
+	return act;
+}
+
+/*
+ * Parse a nested LWTUNNEL_ENCAP_SEG6 RTA_ENCAP attribute.
+ *
+ * On success stores the first segment of the SRH in @segs and returns 1
+ * (the number of segments parsed); returns 0 when no SRH is present.
+ */
+static int parse_encap_seg6(struct rtattr *tb, struct in6_addr *segs)
+{
+	struct rtattr *tb_encap[256] = {};
+	struct seg6_iptunnel_encap *ipt = NULL;
+	struct in6_addr *segments = NULL;
+
+	netlink_parse_rtattr_nested(tb_encap, 256, tb);
+
+	/*
+	 * TODO: Multiple SID lists are not supported yet; only the first
+	 * segment of the first SRH is extracted.
+	 */
+	if (tb_encap[SEG6_IPTUNNEL_SRH]) {
+		ipt = (struct seg6_iptunnel_encap *)
+			RTA_DATA(tb_encap[SEG6_IPTUNNEL_SRH]);
+		segments = ipt->srh[0].segments;
+		*segs = segments[0];
+		return 1;
+	}
+
+	return 0;
+}
+
+
+
+
static struct nexthop
parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb,
enum blackhole_type bh_type, int index, void *prefsrc,
struct nexthop nh = {0};
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
int num_labels = 0;
+ enum seg6local_action_t seg6l_act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+ struct seg6local_context seg6l_ctx = {};
+ struct in6_addr seg6_segs = {};
+ int num_segs = 0;
vrf_id_t nh_vrf_id = vrf_id;
size_t sz = (afi == AFI_IP) ? 4 : 16;
== LWTUNNEL_ENCAP_MPLS) {
num_labels = parse_encap_mpls(tb[RTA_ENCAP], labels);
}
+ if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6_LOCAL) {
+ seg6l_act = parse_encap_seg6local(tb[RTA_ENCAP], &seg6l_ctx);
+ }
+ if (tb[RTA_ENCAP] && tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6) {
+ num_segs = parse_encap_seg6(tb[RTA_ENCAP], &seg6_segs);
+ }
if (rtm->rtm_flags & RTNH_F_ONLINK)
SET_FLAG(nh.flags, NEXTHOP_FLAG_ONLINK);
if (num_labels)
nexthop_add_labels(&nh, ZEBRA_LSP_STATIC, num_labels, labels);
+ if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(&nh, seg6l_act, &seg6l_ctx);
+
+ if (num_segs)
+ nexthop_add_srv6_seg6(&nh, &seg6_segs);
+
return nh;
}
/* MPLS labels */
mpls_label_t labels[MPLS_MAX_LABELS] = {0};
int num_labels = 0;
+ enum seg6local_action_t seg6l_act = ZEBRA_SEG6_LOCAL_ACTION_UNSPEC;
+ struct seg6local_context seg6l_ctx = {};
+ struct in6_addr seg6_segs = {};
+ int num_segs = 0;
struct rtattr *rtnh_tb[RTA_MAX + 1] = {};
int len = RTA_PAYLOAD(tb[RTA_MULTIPATH]);
num_labels = parse_encap_mpls(
rtnh_tb[RTA_ENCAP], labels);
}
+ if (rtnh_tb[RTA_ENCAP] && rtnh_tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(rtnh_tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6_LOCAL) {
+ seg6l_act = parse_encap_seg6local(
+ rtnh_tb[RTA_ENCAP], &seg6l_ctx);
+ }
+ if (rtnh_tb[RTA_ENCAP] && rtnh_tb[RTA_ENCAP_TYPE]
+ && *(uint16_t *)RTA_DATA(rtnh_tb[RTA_ENCAP_TYPE])
+ == LWTUNNEL_ENCAP_SEG6) {
+ num_segs = parse_encap_seg6(rtnh_tb[RTA_ENCAP],
+ &seg6_segs);
+ }
}
if (gate && rtm->rtm_family == AF_INET) {
nexthop_add_labels(nh, ZEBRA_LSP_STATIC,
num_labels, labels);
+ if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC)
+ nexthop_add_srv6_seg6local(nh, seg6l_act,
+ &seg6l_ctx);
+
+ if (num_segs)
+ nexthop_add_srv6_seg6(nh, &seg6_segs);
+
if (rtnh->rtnh_flags & RTNH_F_ONLINK)
SET_FLAG(nh->flags, NEXTHOP_FLAG_ONLINK);
return true;
}
+/*
+ * Serialize a single-SID SRv6 encapsulation into @buffer as a
+ * "struct seg6_iptunnel_encap" followed by an SRH, the layout the
+ * kernel expects inside a SEG6_IPTUNNEL_SRH netlink attribute.
+ *
+ * Returns the number of bytes written, or -1 if @buflen is too small.
+ */
+static ssize_t fill_seg6ipt_encap(char *buffer, size_t buflen,
+				  const struct in6_addr *seg)
+{
+	struct seg6_iptunnel_encap *ipt;
+	struct ipv6_sr_hdr *srh;
+	/* 8-byte SRH fixed header plus exactly one 16-byte SID. */
+	const size_t srhlen = 24;
+
+	/*
+	 * Caution: Support only SINGLE-SID, not MULTI-SID
+	 * This function only supports the case where segs represents
+	 * a single SID. If you want to extend the SRv6 functionality,
+	 * you should improve the Boundary Check.
+	 * Ex. In case of set a SID-List include multiple-SIDs as an
+	 * argument of the Transit Behavior, we must support variable
+	 * boundary check for buflen.
+	 */
+	if (buflen < (sizeof(struct seg6_iptunnel_encap) +
+		      sizeof(struct ipv6_sr_hdr) + 16))
+		return -1;
+
+	memset(buffer, 0, buflen);
+
+	ipt = (struct seg6_iptunnel_encap *)buffer;
+	ipt->mode = SEG6_IPTUN_MODE_ENCAP;
+	srh = ipt->srh;
+	/* hdrlen is in 8-octet units, not counting the first 8 octets. */
+	srh->hdrlen = (srhlen >> 3) - 1;
+	/* 4 = Segment Routing Header routing type (RFC 8754). */
+	srh->type = 4;
+	srh->segments_left = 0;
+	srh->first_segment = 0;
+	memcpy(&srh->segments[0], seg, sizeof(struct in6_addr));
+
+	/* NOTE(review): the +4 presumably accounts for the
+	 * seg6_iptunnel_encap "mode" prefix before the SRH - confirm.
+	 */
+	return srhlen + 4;
+}
+
+
/* This function takes a nexthop as argument and adds
* the appropriate netlink attributes to an existing
* netlink message.
sizeof(label_buf)))
return false;
+ if (nexthop->nh_srv6) {
+ if (nexthop->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
+ struct rtattr *nest;
+ const struct seg6local_context *ctx;
+
+ ctx = &nexthop->nh_srv6->seg6local_ctx;
+ if (!nl_attr_put16(nlmsg, req_size, RTA_ENCAP_TYPE,
+ LWTUNNEL_ENCAP_SEG6_LOCAL))
+ return false;
+
+ nest = nl_attr_nest(nlmsg, req_size, RTA_ENCAP);
+ if (!nest)
+ return false;
+
+ switch (nexthop->nh_srv6->seg6local_action) {
+ case ZEBRA_SEG6_LOCAL_ACTION_END:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_X:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_X))
+ return false;
+ if (!nl_attr_put(nlmsg, req_size,
+ SEG6_LOCAL_NH6, &ctx->nh6,
+ sizeof(struct in6_addr)))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_T:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_T))
+ return false;
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DX4:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DX4))
+ return false;
+ if (!nl_attr_put(nlmsg, req_size,
+ SEG6_LOCAL_NH4, &ctx->nh4,
+ sizeof(struct in_addr)))
+ return false;
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DT6:
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DT6))
+ return false;
+ if (!nl_attr_put32(nlmsg, req_size,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return false;
+ break;
+ default:
+ zlog_err("%s: unsupport seg6local behaviour action=%u",
+ __func__,
+ nexthop->nh_srv6->seg6local_action);
+ return false;
+ }
+ nl_attr_nest_end(nlmsg, nest);
+ }
+
+ if (!sid_zero(&nexthop->nh_srv6->seg6_segs)) {
+ char tun_buf[4096];
+ ssize_t tun_len;
+ struct rtattr *nest;
+
+ if (!nl_attr_put16(nlmsg, req_size, RTA_ENCAP_TYPE,
+ LWTUNNEL_ENCAP_SEG6))
+ return false;
+ nest = nl_attr_nest(nlmsg, req_size, RTA_ENCAP);
+ if (!nest)
+ return false;
+ tun_len = fill_seg6ipt_encap(tun_buf, sizeof(tun_buf),
+ &nexthop->nh_srv6->seg6_segs);
+ if (tun_len < 0)
+ return false;
+ if (!nl_attr_put(nlmsg, req_size, SEG6_IPTUNNEL_SRH,
+ tun_buf, tun_len))
+ return false;
+ nl_attr_nest_end(nlmsg, nest);
+ }
+ }
+
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
rtmsg->rtm_flags |= RTNH_F_ONLINK;
nl_attr_nest_end(&req->n, nest);
}
+ if (nh->nh_srv6) {
+ if (nh->nh_srv6->seg6local_action !=
+ ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) {
+ uint32_t action;
+ uint16_t encap;
+ struct rtattr *nest;
+ const struct seg6local_context *ctx;
+
+ req->nhm.nh_family = AF_INET6;
+ action = nh->nh_srv6->seg6local_action;
+ ctx = &nh->nh_srv6->seg6local_ctx;
+ encap = LWTUNNEL_ENCAP_SEG6_LOCAL;
+ if (!nl_attr_put(&req->n, buflen,
+ NHA_ENCAP_TYPE,
+ &encap,
+ sizeof(uint16_t)))
+ return 0;
+
+ nest = nl_attr_nest(&req->n, buflen,
+ NHA_ENCAP | NLA_F_NESTED);
+ if (!nest)
+ return 0;
+
+ switch (action) {
+ case SEG6_LOCAL_ACTION_END:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_X:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_X))
+ return 0;
+ if (!nl_attr_put(
+ &req->n, buflen,
+ SEG6_LOCAL_NH6, &ctx->nh6,
+ sizeof(struct in6_addr)))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_T:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_T))
+ return 0;
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_DX4:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DX4))
+ return 0;
+ if (!nl_attr_put(
+ &req->n, buflen,
+ SEG6_LOCAL_NH4, &ctx->nh4,
+ sizeof(struct in_addr)))
+ return 0;
+ break;
+ case SEG6_LOCAL_ACTION_END_DT6:
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_ACTION,
+ SEG6_LOCAL_ACTION_END_DT6))
+ return 0;
+ if (!nl_attr_put32(
+ &req->n, buflen,
+ SEG6_LOCAL_TABLE,
+ ctx->table))
+ return 0;
+ break;
+ default:
+ zlog_err("%s: unsupport seg6local behaviour action=%u",
+ __func__, action);
+ return 0;
+ }
+ nl_attr_nest_end(&req->n, nest);
+ }
+
+ if (!sid_zero(&nh->nh_srv6->seg6_segs)) {
+ char tun_buf[4096];
+ ssize_t tun_len;
+ struct rtattr *nest;
+
+ if (!nl_attr_put16(&req->n, buflen,
+ NHA_ENCAP_TYPE,
+ LWTUNNEL_ENCAP_SEG6))
+ return 0;
+ nest = nl_attr_nest(&req->n, buflen,
+ NHA_ENCAP | NLA_F_NESTED);
+ if (!nest)
+ return 0;
+ tun_len = fill_seg6ipt_encap(tun_buf,
+ sizeof(tun_buf),
+ &nh->nh_srv6->seg6_segs);
+ if (tun_len < 0)
+ return 0;
+ if (!nl_attr_put(&req->n, buflen,
+ SEG6_IPTUNNEL_SRH,
+ tun_buf, tun_len))
+ return 0;
+ nl_attr_nest_end(&req->n, nest);
+ }
+ }
+
nexthop_done:
if (IS_ZEBRA_DEBUG_KERNEL)
zebra/zebra_mlag_vty.c \
zebra/zebra_evpn_mh.c \
zebra/zebra_mpls_vty.c \
+ zebra/zebra_srv6_vty.c \
zebra/zebra_ptm.c \
zebra/zebra_pw.c \
zebra/zebra_routemap.c \
zebra/zebra_mpls_openbsd.c \
zebra/zebra_mpls_null.c \
zebra/zebra_mpls_vty.c \
+ zebra/zebra_srv6.c \
+ zebra/zebra_srv6_vty.c \
zebra/zebra_mroute.c \
zebra/zebra_nb.c \
zebra/zebra_nb_config.c \
zebra/zebra_mlag_vty.c \
zebra/zebra_routemap.c \
zebra/zebra_vty.c \
+ zebra/zebra_srv6_vty.c \
# end
noinst_HEADERS += \
zebra/zebra_mlag.h \
zebra/zebra_mlag_vty.h \
zebra/zebra_mpls.h \
+ zebra/zebra_srv6.h \
+ zebra/zebra_srv6_vty.h \
zebra/zebra_mroute.h \
zebra/zebra_nb.h \
zebra/zebra_netns_id.h \
#include "zebra/connected.h"
#include "zebra/zebra_opaque.h"
#include "zebra/zebra_srte.h"
+#include "zebra/zebra_srv6.h"
DEFINE_MTYPE_STATIC(ZEBRA, OPAQUE, "Opaque Data");
&api_nh->labels[0]);
}
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL)
+ && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: adding seg6local action %s",
+ __func__,
+ seg6local_action2str(
+ api_nh->seg6local_action));
+
+ nexthop_add_srv6_seg6local(nexthop,
+ api_nh->seg6local_action,
+ &api_nh->seg6local_ctx);
+ }
+
+ if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6)
+ && api_nh->type != NEXTHOP_TYPE_BLACKHOLE) {
+ if (IS_ZEBRA_DEBUG_RECV)
+ zlog_debug("%s: adding seg6", __func__);
+
+ nexthop_add_srv6_seg6(nexthop, &api_nh->seg6_segs);
+ }
+
if (IS_ZEBRA_DEBUG_RECV) {
labelbuf[0] = '\0';
nhbuf[0] = '\0';
return zserv_send_message(client, s);
}
+/*
+ * Send a ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK response to @client,
+ * describing locator @loc.  The chunk is stamped with the client's
+ * proto/instance as owner and keep=0 (released on disconnect).
+ * Returns the zserv_send_message() result.
+ */
+int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
+						  vrf_id_t vrf_id,
+						  struct srv6_locator *loc)
+{
+	struct srv6_locator_chunk chunk = {};
+	struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
+
+	strlcpy(chunk.locator_name, loc->name, sizeof(chunk.locator_name));
+	chunk.prefix = loc->prefix;
+	chunk.block_bits_length = loc->block_bits_length;
+	chunk.node_bits_length = loc->node_bits_length;
+	chunk.function_bits_length = loc->function_bits_length;
+	chunk.argument_bits_length = loc->argument_bits_length;
+	chunk.keep = 0;
+	chunk.proto = client->proto;
+	chunk.instance = client->instance;
+
+	zclient_create_header(s, ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK, vrf_id);
+	zapi_srv6_locator_chunk_encode(s, &chunk);
+	/* Backfill the length word at the start of the header. */
+	stream_putw_at(s, 0, stream_get_endp(s));
+	return zserv_send_message(client, s);
+}
+
+
/* Send response to a table manager connect request to client */
static void zread_table_manager_connect(struct zserv *client,
struct stream *msg, vrf_id_t vrf_id)
}
}
+/*
+ * Handle ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK: read the locator name
+ * from the wire and invoke the srv6_manager_get_chunk hook.
+ */
+static void zread_srv6_manager_get_locator_chunk(struct zserv *client,
+						 struct stream *msg,
+						 vrf_id_t vrf_id)
+{
+	struct stream *s = msg;
+	uint16_t len;
+	char locator_name[SRV6_LOCNAME_SIZE] = {0};
+	struct srv6_locator *loc = NULL;
+
+	/* Get data. */
+	STREAM_GETW(s, len);
+
+	/*
+	 * The name length comes from the wire; reject anything that would
+	 * overflow the local buffer (keep room for NUL termination).
+	 */
+	if (len == 0 || len >= sizeof(locator_name))
+		goto stream_failure;
+
+	STREAM_GET(locator_name, s, len);
+
+	/* call hook to get a chunk using wrapper */
+	srv6_manager_get_locator_chunk_call(&loc, client, locator_name, vrf_id);
+
+stream_failure:
+	return;
+}
+
+/*
+ * Handle ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK: read the locator
+ * name from the wire and invoke the srv6_manager_release_chunk hook.
+ */
+static void zread_srv6_manager_release_locator_chunk(struct zserv *client,
+						     struct stream *msg,
+						     vrf_id_t vrf_id)
+{
+	struct stream *s = msg;
+	uint16_t len;
+	char locator_name[SRV6_LOCNAME_SIZE] = {0};
+
+	/* Get data. */
+	STREAM_GETW(s, len);
+
+	/*
+	 * The name length comes from the wire; reject anything that would
+	 * overflow the local buffer (keep room for NUL termination).
+	 */
+	if (len == 0 || len >= sizeof(locator_name))
+		goto stream_failure;
+
+	STREAM_GET(locator_name, s, len);
+
+	/* call hook to release a chunk using wrapper */
+	srv6_manager_release_locator_chunk_call(client, locator_name, vrf_id);
+
+stream_failure:
+	return;
+}
+
+/* Dispatch SRv6-manager ZAPI messages to their specific handlers. */
+static void zread_srv6_manager_request(ZAPI_HANDLER_ARGS)
+{
+	switch (hdr->command) {
+	case ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK:
+		zread_srv6_manager_get_locator_chunk(client, msg,
+						     zvrf_id(zvrf));
+		break;
+	case ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK:
+		zread_srv6_manager_release_locator_chunk(client, msg,
+							 zvrf_id(zvrf));
+		break;
+	default:
+		zlog_err("%s: unknown SRv6 Manager command", __func__);
+		break;
+	}
+}
+
static void zread_pseudowire(ZAPI_HANDLER_ARGS)
{
struct stream *s;
[ZEBRA_MLAG_CLIENT_REGISTER] = zebra_mlag_client_register,
[ZEBRA_MLAG_CLIENT_UNREGISTER] = zebra_mlag_client_unregister,
[ZEBRA_MLAG_FORWARD_MSG] = zebra_mlag_forward_client_msg,
+ [ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK] = zread_srv6_manager_request,
+ [ZEBRA_SRV6_MANAGER_RELEASE_LOCATOR_CHUNK] = zread_srv6_manager_request,
[ZEBRA_CLIENT_CAPABILITIES] = zread_client_capabilities,
[ZEBRA_NEIGH_DISCOVER] = zread_neigh_discover,
[ZEBRA_NHG_ADD] = zread_nhg_add,
#include "zebra/zebra_pbr.h"
#include "zebra/zebra_errors.h"
#include "zebra/label_manager.h"
+#include "zebra/zebra_srv6.h"
#ifdef __cplusplus
extern void zapi_opaque_free(struct opaque *opaque);
+extern int zsend_zebra_srv6_locator_add(struct zserv *client,
+ struct srv6_locator *loc);
+extern int zsend_zebra_srv6_locator_delete(struct zserv *client,
+ struct srv6_locator *loc);
+extern int zsend_srv6_manager_get_locator_chunk_response(struct zserv *client,
+ vrf_id_t vrf_id, struct srv6_locator *loc);
+
#ifdef __cplusplus
}
#endif
.description = "Zebra has detected a situation where there are two vrf devices with the exact same tableid. This is considered a complete misconfiguration of VRF devices and breaks a fundamental assumption in FRR about how VRF's work",
.suggestion = "Use different table id's for the VRF's in question"
},
+ {
+ .code = EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
+ .title = "Zebra did not free any srv6 locator chunks",
+ .description = "Zebra's srv6-locator chunk cleanup procedure ran, but no srv6 locator chunks were released.",
+ .suggestion = "Ignore this error.",
+ },
{
.code = END_FERR,
}
EC_ZEBRA_VRF_MISCONFIGURED,
EC_ZEBRA_ES_CREATE,
EC_ZEBRA_GRE_SET_UPDATE,
+ EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
};
void zebra_error_init(void);
nh = nhg_ctx_get_nh(*ctx);
nexthop_del_labels(nh);
+ nexthop_del_srv6_seg6local(nh);
+ nexthop_del_srv6_seg6(nh);
done:
XFREE(MTYPE_NHG_CTX, *ctx);
/* The copy may have allocated labels; free them if necessary. */
nexthop_del_labels(&lookup);
+ nexthop_del_srv6_seg6local(&lookup);
+ nexthop_del_srv6_seg6(&lookup);
if (IS_ZEBRA_DEBUG_NHG_DETAIL)
zlog_debug("%s: nh %pNHv => %p (%u)",
--- /dev/null
+/*
+ * Zebra SRv6 definitions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ * Copyright (C) 2020 Masakazu Asama
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+
+#include "network.h"
+#include "prefix.h"
+#include "stream.h"
+#include "srv6.h"
+#include "zebra/debug.h"
+#include "zebra/zapi_msg.h"
+#include "zebra/zserv.h"
+#include "zebra/zebra_router.h"
+#include "zebra/zebra_srv6.h"
+#include "zebra/zebra_errors.h"
+
+DEFINE_MGROUP(SRV6_MGR, "SRv6 Manager");
+DEFINE_MTYPE_STATIC(SRV6_MGR, SRV6M_CHUNK, "SRv6 Manager Chunk");
+
+/* define hooks for the basic API, so that it can be specialized or served
+ * externally
+ */
+
+DEFINE_HOOK(srv6_manager_client_connect,
+ (struct zserv *client, vrf_id_t vrf_id),
+ (client, vrf_id));
+DEFINE_HOOK(srv6_manager_client_disconnect,
+ (struct zserv *client), (client));
+DEFINE_HOOK(srv6_manager_get_chunk,
+ (struct srv6_locator **loc,
+ struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id),
+ (loc, client, locator_name, vrf_id));
+DEFINE_HOOK(srv6_manager_release_chunk,
+ (struct zserv *client,
+ const char *locator_name,
+ vrf_id_t vrf_id),
+ (client, locator_name, vrf_id));
+
+/* define wrappers to be called in zapi_msg.c (as hooks must be called in
+ * source file where they were defined)
+ */
+
+/* Wrapper: hooks must be called from the file that defines them. */
+void srv6_manager_client_connect_call(struct zserv *client, vrf_id_t vrf_id)
+{
+	hook_call(srv6_manager_client_connect, client, vrf_id);
+}
+
+/* Wrapper for the srv6_manager_get_chunk hook; *loc receives the result. */
+void srv6_manager_get_locator_chunk_call(struct srv6_locator **loc,
+					 struct zserv *client,
+					 const char *locator_name,
+					 vrf_id_t vrf_id)
+{
+	hook_call(srv6_manager_get_chunk, loc, client, locator_name, vrf_id);
+}
+
+/* Wrapper for the srv6_manager_release_chunk hook. */
+void srv6_manager_release_locator_chunk_call(struct zserv *client,
+					     const char *locator_name,
+					     vrf_id_t vrf_id)
+{
+	hook_call(srv6_manager_release_chunk, client, locator_name, vrf_id);
+}
+
+/* Client-disconnect callback: forward to the disconnect hook. */
+int srv6_manager_client_disconnect_cb(struct zserv *client)
+{
+	hook_call(srv6_manager_client_disconnect, client);
+	return 0;
+}
+
+/* zserv_client_close hook.  NOTE(review): currently a no-op - locator
+ * chunks held by the closing client are not released here even though
+ * release_daemon_srv6_locator_chunks() exists; confirm this is intended.
+ */
+static int zebra_srv6_cleanup(struct zserv *client)
+{
+	return 0;
+}
+
+/*
+ * Register @locator with the default SRv6 instance.  A locator whose
+ * name is already registered is silently ignored (not replaced).
+ */
+void zebra_srv6_locator_add(struct srv6_locator *locator)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *tmp;
+
+	tmp = zebra_srv6_locator_lookup(locator->name);
+	if (!tmp)
+		listnode_add(srv6->locators, locator);
+}
+
+/* Unlink @locator from the default SRv6 instance; does not free it. */
+void zebra_srv6_locator_delete(struct srv6_locator *locator)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+
+	listnode_delete(srv6->locators, locator);
+}
+
+/* Find a registered locator by name; returns NULL when not found. */
+struct srv6_locator *zebra_srv6_locator_lookup(const char *name)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *locator;
+	struct listnode *node;
+
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator))
+		if (!strncmp(name, locator->name, SRV6_LOCNAME_SIZE))
+			return locator;
+	return NULL;
+}
+
+/*
+ * Return the process-wide SRv6 instance, lazily creating the locator
+ * list on first call.  NOTE(review): the lazy init is not thread-safe;
+ * assumes the first call happens on the main thread - confirm.
+ */
+struct zebra_srv6 *zebra_srv6_get_default(void)
+{
+	static struct zebra_srv6 srv6;
+	static bool first_execution = true;
+
+	if (first_execution) {
+		first_execution = false;
+		srv6.locators = list_new();
+	}
+	return &srv6;
+}
+
+/**
+ * Core function, assigns srv6-locator chunks
+ *
+ * It first searches through the list to check if there's one available
+ * (previously released). Otherwise it creates and assigns a new one
+ *
+ * @param proto Daemon protocol of client, to identify the owner
+ * @param instance Instance, to identify the owner
+ * @param session_id SessionID of client
+ * @param name Name of SRv6-locator
+ * @return Pointer to the assigned srv6-locator chunk,
+ * or NULL if the request could not be satisfied
+ */
+static struct srv6_locator *
+assign_srv6_locator_chunk(uint8_t proto,
+			  uint16_t instance,
+			  uint32_t session_id,
+			  const char *locator_name)
+{
+	bool chunk_found = false;
+	struct listnode *node = NULL;
+	struct srv6_locator *loc = NULL;
+	struct srv6_locator_chunk *chunk = NULL;
+
+	loc = zebra_srv6_locator_lookup(locator_name);
+	if (!loc) {
+		zlog_info("%s: locator %s was not found",
+			  __func__, locator_name);
+
+		loc = srv6_locator_alloc(locator_name);
+		if (!loc) {
+			zlog_info("%s: locator %s can't be allocated",
+				  __func__, locator_name);
+			return NULL;
+		}
+
+		/* Not announced by configuration/dataplane yet. */
+		loc->status_up = false;
+		chunk = srv6_locator_chunk_alloc();
+		chunk->proto = 0; /* 0 = unowned */
+		listnode_add(loc->chunks, chunk);
+		zebra_srv6_locator_add(loc);
+	}
+
+	/* Reuse a chunk that is unowned or already owned by this daemon. */
+	for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
+		if (chunk->proto != 0 && chunk->proto != proto)
+			continue;
+		chunk_found = true;
+		break;
+	}
+
+	if (!chunk_found) {
+		zlog_info("%s: locator is already owned", __func__);
+		return NULL;
+	}
+
+	/*
+	 * Record the full owner identity: release_srv6_locator_chunk()
+	 * matches on proto, instance AND session_id, so leaving the
+	 * latter two unset would make the chunk impossible to release
+	 * for clients with a nonzero instance or session id.
+	 */
+	chunk->proto = proto;
+	chunk->instance = instance;
+	chunk->session_id = session_id;
+	return loc;
+}
+
+/*
+ * srv6_manager_get_chunk hook implementation.  Assigns a chunk of
+ * @locator_name to @client; a response is sent immediately only when
+ * the locator is up (otherwise it is deferred).  Returns the send
+ * result, or 0 when no response was sent (including assignment failure).
+ */
+static int zebra_srv6_manager_get_locator_chunk(struct srv6_locator **loc,
+						struct zserv *client,
+						const char *locator_name,
+						vrf_id_t vrf_id)
+{
+	int ret = 0;
+
+	*loc = assign_srv6_locator_chunk(client->proto, client->instance,
+					 client->session_id, locator_name);
+
+	if (!*loc)
+		zlog_err("Unable to assign locator chunk to %s instance %u",
+			 zebra_route_string(client->proto), client->instance);
+	else if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_info("Assigned locator chunk %s to %s instance %u",
+			  (*loc)->name, zebra_route_string(client->proto),
+			  client->instance);
+
+	if (*loc && (*loc)->status_up)
+		ret = zsend_srv6_manager_get_locator_chunk_response(client,
+								    vrf_id,
+								    *loc);
+	return ret;
+}
+
+/**
+ * Core function, release no longer used srv6-locator chunks
+ *
+ * @param proto Daemon protocol of client, to identify the owner
+ * @param instance Instance, to identify the owner
+ * @param session_id Zclient session ID, to identify the zclient session
+ * @param locator_name SRv6-locator name, to identify the actual locator
+ * @return 0 on success, -1 otherwise
+ */
+static int release_srv6_locator_chunk(uint8_t proto, uint16_t instance,
+				      uint32_t session_id,
+				      const char *locator_name)
+{
+	int ret = -1;
+	struct listnode *node;
+	struct srv6_locator_chunk *chunk;
+	struct srv6_locator *loc = NULL;
+
+	loc = zebra_srv6_locator_lookup(locator_name);
+	if (!loc)
+		return -1;
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Releasing srv6-locator on %s", __func__,
+			   locator_name);
+
+	/*
+	 * Clear the ownership fields of the matching chunk; the chunk
+	 * itself stays on the locator's list so it can be reassigned.
+	 */
+	for (ALL_LIST_ELEMENTS_RO((struct list *)loc->chunks, node, chunk)) {
+		if (chunk->proto != proto ||
+		    chunk->instance != instance ||
+		    chunk->session_id != session_id)
+			continue;
+		chunk->proto = NO_PROTO;
+		chunk->instance = 0;
+		chunk->session_id = 0;
+		chunk->keep = 0;
+		ret = 0;
+		break;
+	}
+
+	if (ret != 0)
+		flog_err(EC_ZEBRA_SRV6M_UNRELEASED_LOCATOR_CHUNK,
+			 "%s: SRv6 locator chunk not released", __func__);
+
+	return ret;
+}
+
+/* srv6_manager_release_chunk hook: only the default VRF is supported. */
+static int zebra_srv6_manager_release_locator_chunk(struct zserv *client,
+						    const char *locator_name,
+						    vrf_id_t vrf_id)
+{
+	if (vrf_id != VRF_DEFAULT) {
+		zlog_err("SRv6 locator doesn't support vrf");
+		return -1;
+	}
+
+	return release_srv6_locator_chunk(client->proto, client->instance,
+					  client->session_id, locator_name);
+}
+
+/**
+ * Release srv6-locator chunks from a client.
+ *
+ * Called on client disconnection or reconnection. It only releases chunks
+ * with empty keep value.
+ *
+ * @param client Zserv client whose chunks should be released (matched by
+ *               proto, instance and session_id)
+ * @return Number of chunks released
+ */
+int release_daemon_srv6_locator_chunks(struct zserv *client)
+{
+	int ret;
+	int count = 0;
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct listnode *loc_node;
+	struct listnode *chunk_node;
+	struct srv6_locator *loc;
+	struct srv6_locator_chunk *chunk;
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Releasing chunks for client proto %s, instance %d, session %u",
+			   __func__, zebra_route_string(client->proto),
+			   client->instance, client->session_id);
+
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, loc_node, loc)) {
+		for (ALL_LIST_ELEMENTS_RO(loc->chunks, chunk_node, chunk)) {
+			if (chunk->proto == client->proto &&
+			    chunk->instance == client->instance &&
+			    chunk->session_id == client->session_id &&
+			    chunk->keep == 0) {
+				ret = release_srv6_locator_chunk(
+						chunk->proto, chunk->instance,
+						chunk->session_id, loc->name);
+				if (ret == 0)
+					count++;
+			}
+		}
+	}
+
+	if (IS_ZEBRA_DEBUG_PACKET)
+		zlog_debug("%s: Released %d srv6-locator chunks",
+			   __func__, count);
+
+	return count;
+}
+
+/* Register the SRv6 manager's zserv and hook callbacks. */
+void zebra_srv6_init(void)
+{
+	hook_register(zserv_client_close, zebra_srv6_cleanup);
+	hook_register(srv6_manager_get_chunk,
+		      zebra_srv6_manager_get_locator_chunk);
+	hook_register(srv6_manager_release_chunk,
+		      zebra_srv6_manager_release_locator_chunk);
+}
+
+/* True when at least one SRv6 locator is configured. */
+bool zebra_srv6_is_enable(void)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+
+	return listcount(srv6->locators);
+}
--- /dev/null
+/*
+ * Zebra SRv6 definitions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ZEBRA_SRV6_H
+#define _ZEBRA_SRV6_H
+
+#include <zebra.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+
+#include "qobj.h"
+#include "prefix.h"
+#include <pthread.h>
+#include <plist.h>
+
+/* SRv6 instance structure. */
+struct zebra_srv6 {
+	/* List of configured "struct srv6_locator". */
+	struct list *locators;
+};
+
+/* declare hooks for the basic API, so that it can be specialized or served
+ * externally. Also declare a hook when those functions have been registered,
+ * so that any external module wanting to replace those can react
+ */
+
+DECLARE_HOOK(srv6_manager_client_connect,
+	     (struct zserv *client, vrf_id_t vrf_id),
+	     (client, vrf_id));
+DECLARE_HOOK(srv6_manager_client_disconnect,
+	     (struct zserv *client), (client));
+/*
+ * The second (pass-through) argument list of DECLARE_HOOK must name
+ * exactly the parameters declared in the first list.
+ */
+DECLARE_HOOK(srv6_manager_get_chunk,
+	     (struct srv6_locator **loc,
+	      struct zserv *client,
+	      const char *locator_name,
+	      vrf_id_t vrf_id),
+	     (loc, client, locator_name, vrf_id));
+DECLARE_HOOK(srv6_manager_release_chunk,
+	     (struct zserv *client,
+	      const char *locator_name,
+	      vrf_id_t vrf_id),
+	     (client, locator_name, vrf_id));
+
+
+extern void zebra_srv6_locator_add(struct srv6_locator *locator);
+extern void zebra_srv6_locator_delete(struct srv6_locator *locator);
+extern struct srv6_locator *zebra_srv6_locator_lookup(const char *name);
+
+extern void zebra_srv6_init(void);
+extern struct zebra_srv6 *zebra_srv6_get_default(void);
+extern bool zebra_srv6_is_enable(void);
+
+extern void srv6_manager_client_connect_call(struct zserv *client,
+					     vrf_id_t vrf_id);
+extern void srv6_manager_get_locator_chunk_call(struct srv6_locator **loc,
+						struct zserv *client,
+						const char *locator_name,
+						vrf_id_t vrf_id);
+extern void srv6_manager_release_locator_chunk_call(struct zserv *client,
+						    const char *locator_name,
+						    vrf_id_t vrf_id);
+extern int srv6_manager_client_disconnect_cb(struct zserv *client);
+extern int release_daemon_srv6_locator_chunks(struct zserv *client);
+
+#endif /* _ZEBRA_SRV6_H */
--- /dev/null
+/*
+ * Zebra SRv6 VTY functions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "memory.h"
+#include "if.h"
+#include "prefix.h"
+#include "command.h"
+#include "table.h"
+#include "rib.h"
+#include "nexthop.h"
+#include "vrf.h"
+#include "srv6.h"
+#include "lib/json.h"
+
+#include "zebra/zserv.h"
+#include "zebra/zebra_router.h"
+#include "zebra/zebra_vrf.h"
+#include "zebra/zebra_srv6.h"
+#include "zebra/zebra_srv6_vty.h"
+#include "zebra/zebra_rnh.h"
+#include "zebra/redistribute.h"
+#include "zebra/zebra_routemap.h"
+#include "zebra/zebra_dplane.h"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "zebra/zebra_srv6_vty_clippy.c"
+#endif
+
+static int zebra_sr_config(struct vty *vty);
+
+/* Top-level "segment-routing" CLI node; owns the config writer. */
+static struct cmd_node sr_node = {
+	.name = "sr",
+	.node = SEGMENT_ROUTING_NODE,
+	.parent_node = CONFIG_NODE,
+	.prompt = "%s(config-sr)# ",
+	.config_write = zebra_sr_config,
+};
+
+/* "srv6" node, child of segment-routing. */
+static struct cmd_node srv6_node = {
+	.name = "srv6",
+	.node = SRV6_NODE,
+	.parent_node = SEGMENT_ROUTING_NODE,
+	.prompt = "%s(config-srv6)# ",
+
+};
+
+/* "locators" container node, child of srv6. */
+static struct cmd_node srv6_locs_node = {
+	.name = "srv6-locators",
+	.node = SRV6_LOCS_NODE,
+	.parent_node = SRV6_NODE,
+	.prompt = "%s(config-srv6-locators)# ",
+};
+
+/* Per-locator ("locator WORD") node, child of locators. */
+static struct cmd_node srv6_loc_node = {
+	.name = "srv6-locator",
+	.node = SRV6_LOC_NODE,
+	.parent_node = SRV6_LOCS_NODE,
+	.prompt = "%s(config-srv6-locator)# "
+};
+
+/* "show segment-routing srv6 locator [json]": list all locators. */
+DEFUN (show_srv6_locator,
+       show_srv6_locator_cmd,
+       "show segment-routing srv6 locator [json]",
+       SHOW_STR
+       "Segment Routing\n"
+       "Segment Routing SRv6\n"
+       "Locator Information\n"
+       JSON_STR)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *locator;
+	struct listnode *node;
+	char str[256];
+	int id;
+	json_object *jo_root = NULL;
+	json_object *jo_locs = NULL;
+	json_object *jo_loc = NULL;
+
+	if (use_json(argc, argv)) {
+		/* JSON output: one entry per locator under "locators". */
+		jo_root = json_object_new_object();
+		jo_locs = json_object_new_array();
+		json_object_object_add(jo_root, "locators", jo_locs);
+
+		for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator)) {
+			jo_loc = srv6_locator_json(locator);
+			if (jo_loc)
+				json_object_array_add(jo_locs, jo_loc);
+		}
+
+		vty_out(vty, "%s\n",
+			json_object_to_json_string_ext(
+				jo_root, JSON_C_TO_STRING_PRETTY));
+		json_object_free(jo_root);
+		return CMD_SUCCESS;
+	}
+
+	/* Plain-text table. */
+	vty_out(vty, "Locator:\n");
+	vty_out(vty, "Name ID Prefix Status\n");
+	vty_out(vty, "-------------------- ------- ------------------------ -------\n");
+
+	id = 1;
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator)) {
+		prefix2str(&locator->prefix, str, sizeof(str));
+		vty_out(vty, "%-20s %7d %-24s %s\n", locator->name, id, str,
+			locator->status_up ? "Up" : "Down");
+		++id;
+	}
+	vty_out(vty, "\n");
+
+	return CMD_SUCCESS;
+}
+
+/*
+ * "show segment-routing srv6 locator NAME detail [json]": dump one
+ * locator's prefix, function-bit length and allocated chunks.
+ */
+DEFUN (show_srv6_locator_detail,
+       show_srv6_locator_detail_cmd,
+       "show segment-routing srv6 locator NAME detail [json]",
+       SHOW_STR
+       "Segment Routing\n"
+       "Segment Routing SRv6\n"
+       "Locator Information\n"
+       "Locator Name\n"
+       "Detailed information\n"
+       JSON_STR)
+{
+	const bool uj = use_json(argc, argv);
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *locator;
+	struct listnode *node;
+	char str[256];
+	const char *locator_name = argv[4]->arg;
+
+	if (uj) {
+		vty_out(vty, "JSON format isn't supported\n");
+		return CMD_WARNING;
+	}
+
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator)) {
+		/*
+		 * Use a distinct cursor name for the inner iteration: the
+		 * original declaration shadowed the outer "node" variable
+		 * (-Wshadow hazard).
+		 */
+		struct listnode *chunk_node;
+		struct srv6_locator_chunk *chunk;
+
+		if (strcmp(locator->name, locator_name) != 0)
+			continue;
+
+		prefix2str(&locator->prefix, str, sizeof(str));
+		vty_out(vty, "Name: %s\n", locator->name);
+		vty_out(vty, "Prefix: %s\n", str);
+		vty_out(vty, "Function-Bit-Len: %u\n",
+			locator->function_bits_length);
+
+		vty_out(vty, "Chunks:\n");
+		/* "chunks" is already a struct list *; no cast needed. */
+		for (ALL_LIST_ELEMENTS_RO(locator->chunks, chunk_node,
+					  chunk)) {
+			prefix2str(&chunk->prefix, str, sizeof(str));
+			vty_out(vty, "- prefix: %s, owner: %s\n", str,
+				zebra_route_string(chunk->proto));
+		}
+	}
+
+	/* NOTE(review): silently prints nothing when NAME matches no
+	 * locator; an explicit error message may be preferable. */
+	return CMD_SUCCESS;
+}
+
+/* "segment-routing": enter the segment-routing config node. */
+DEFUN_NOSH (segment_routing,
+       segment_routing_cmd,
+       "segment-routing",
+       "Segment Routing\n")
+{
+	vty->node = SEGMENT_ROUTING_NODE;
+	return CMD_SUCCESS;
+}
+
+/* "srv6": enter the srv6 config node. */
+DEFUN_NOSH (srv6,
+       srv6_cmd,
+       "srv6",
+       "Segment Routing SRv6\n")
+{
+	vty->node = SRV6_NODE;
+	return CMD_SUCCESS;
+}
+
+/* "locators": enter the locators container node. */
+DEFUN_NOSH (srv6_locators,
+       srv6_locators_cmd,
+       "locators",
+       "Segment Routing SRv6 locators\n")
+{
+	vty->node = SRV6_LOCS_NODE;
+	return CMD_SUCCESS;
+}
+
+/*
+ * "locator WORD": enter (and create on first use) a named locator node.
+ * An existing locator is simply re-entered and re-marked up.
+ */
+DEFUN_NOSH (srv6_locator,
+       srv6_locator_cmd,
+       "locator WORD",
+       "Segment Routing SRv6 locator\n"
+       "Specify locator-name\n")
+{
+	struct srv6_locator *locator = NULL;
+
+	locator = zebra_srv6_locator_lookup(argv[1]->arg);
+	if (locator) {
+		VTY_PUSH_CONTEXT(SRV6_LOC_NODE, locator);
+		locator->status_up = true;
+		return CMD_SUCCESS;
+	}
+
+	locator = srv6_locator_alloc(argv[1]->arg);
+	if (!locator) {
+		vty_out(vty, "%% Alloc failed\n");
+		return CMD_WARNING_CONFIG_FAILED;
+	}
+	locator->status_up = true;
+
+	/* VTY_PUSH_CONTEXT already switches vty->node to SRV6_LOC_NODE;
+	 * the explicit "vty->node = SRV6_LOC_NODE;" that followed was
+	 * redundant (and absent from the lookup branch above). */
+	VTY_PUSH_CONTEXT(SRV6_LOC_NODE, locator);
+	return CMD_SUCCESS;
+}
+
+/*
+ * "prefix X:X::X:X/M [func-bits (16-64)]": set the locator prefix and
+ * function-bit length, seed/update the locator's chunk list, and
+ * re-announce chunks to clients that already own one.
+ * NOTE(review): when func-bits is omitted, func_bit_len is the DEFPY
+ * default (presumably 0) — confirm intended behavior.
+ */
+DEFPY (locator_prefix,
+       locator_prefix_cmd,
+       "prefix X:X::X:X/M$prefix [func-bits (16-64)$func_bit_len]",
+       "Configure SRv6 locator prefix\n"
+       "Specify SRv6 locator prefix\n"
+       "Configure SRv6 locator function length in bits\n"
+       "Specify SRv6 locator function length in bits\n")
+{
+	VTY_DECLVAR_CONTEXT(srv6_locator, locator);
+	struct srv6_locator_chunk *chunk = NULL;
+	struct listnode *node = NULL;
+
+	locator->prefix = *prefix;
+
+	/*
+	 * TODO(slankdev): please support variable node-bit-length.
+	 * In draft-ietf-bess-srv6-services-05#section-3.2.1.
+	 * Locator block length and Locator node length are defined.
+	 * Which are defined as "locator-len == block-len + node-len".
+	 * In current implementation, node bits length is hardcoded as 24.
+	 * It should be supported various val.
+	 *
+	 * Cisco IOS-XR support only following pattern.
+	 *  (1) The locator length should be 64-bits long.
+	 *  (2) The SID block portion (MSBs) cannot exceed 40 bits.
+	 *      If this value is less than 40 bits,
+	 *      user should use a pattern of zeros as a filler.
+	 *  (3) The Node Id portion (LSBs) cannot exceed 24 bits.
+	 */
+	/* Node bits hardcoded to 24 (see TODO above); block = rest. */
+	locator->block_bits_length = prefix->prefixlen - 24;
+	locator->node_bits_length = 24;
+	locator->function_bits_length = func_bit_len;
+	locator->argument_bits_length = 0;
+
+	if (list_isempty(locator->chunks)) {
+		/* First configuration: one unowned chunk for the prefix. */
+		chunk = srv6_locator_chunk_alloc();
+		chunk->prefix = *prefix;
+		chunk->proto = 0;
+		listnode_add(locator->chunks, chunk);
+	} else {
+		for (ALL_LIST_ELEMENTS_RO(locator->chunks, node, chunk)) {
+			uint8_t zero[16] = {0};
+
+			/* A zero prefix marks a chunk handed out before the
+			 * locator prefix was known; fill it in now and
+			 * notify the owning client. */
+			if (memcmp(&chunk->prefix.prefix, zero, 16) == 0) {
+				struct zserv *client;
+				struct listnode *client_node;
+
+				chunk->prefix = *prefix;
+				for (ALL_LIST_ELEMENTS_RO(zrouter.client_list,
+							  client_node,
+							  client)) {
+					struct srv6_locator *tmp;
+
+					if (client->proto != chunk->proto)
+						continue;
+
+					srv6_manager_get_locator_chunk_call(
+							&tmp, client,
+							locator->name,
+							VRF_DEFAULT);
+				}
+			}
+		}
+	}
+
+	zebra_srv6_locator_add(locator);
+	return CMD_SUCCESS;
+}
+
+/* Config writer for the segment-routing node: emit the srv6 locator
+ * hierarchy of the running configuration. */
+static int zebra_sr_config(struct vty *vty)
+{
+	struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+	struct srv6_locator *locator;
+	struct listnode *node;
+	char addr[256];
+
+	vty_out(vty, "!\n");
+	if (!zebra_srv6_is_enable())
+		return 0;
+
+	vty_out(vty, "segment-routing\n");
+	vty_out(vty, " srv6\n");
+	vty_out(vty, "  locators\n");
+	for (ALL_LIST_ELEMENTS_RO(srv6->locators, node, locator)) {
+		inet_ntop(AF_INET6, &locator->prefix.prefix, addr,
+			  sizeof(addr));
+		vty_out(vty, "   locator %s\n", locator->name);
+		vty_out(vty, "    prefix %s/%u\n", addr,
+			locator->prefix.prefixlen);
+		vty_out(vty, "   !\n");
+	}
+	vty_out(vty, "  !\n");
+	vty_out(vty, " !\n");
+	vty_out(vty, "!\n");
+
+	return 0;
+}
+
+/*
+ * Register all SRv6 CLI nodes and commands with the command subsystem.
+ * Called once at startup.
+ */
+void zebra_srv6_vty_init(void)
+{
+	/* Install nodes and its default commands */
+	install_node(&sr_node);
+	install_node(&srv6_node);
+	install_node(&srv6_locs_node);
+	install_node(&srv6_loc_node);
+	install_default(SEGMENT_ROUTING_NODE);
+	install_default(SRV6_NODE);
+	install_default(SRV6_LOCS_NODE);
+	install_default(SRV6_LOC_NODE);
+
+	/* Command for change node */
+	install_element(CONFIG_NODE, &segment_routing_cmd);
+	install_element(SEGMENT_ROUTING_NODE, &srv6_cmd);
+	install_element(SRV6_NODE, &srv6_locators_cmd);
+	install_element(SRV6_LOCS_NODE, &srv6_locator_cmd);
+
+	/* Command for configuration */
+	install_element(SRV6_LOC_NODE, &locator_prefix_cmd);
+
+	/* Command for operation */
+	install_element(VIEW_NODE, &show_srv6_locator_cmd);
+	install_element(VIEW_NODE, &show_srv6_locator_detail_cmd);
+}
--- /dev/null
+/*
+ * Zebra SRv6 VTY functions
+ * Copyright (C) 2020 Hiroki Shirokura, LINE Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ZEBRA_SRV6_VTY_H
+#define _ZEBRA_SRV6_VTY_H
+
+/* Register the SRv6 CLI nodes/commands; called once at zebra startup. */
+extern void zebra_srv6_vty_init(void);
+
+#endif /* _ZEBRA_SRV6_VTY_H */
sizeof(buf), 1));
}
+ if (nexthop->nh_srv6) {
+ seg6local_context2str(buf, sizeof(buf),
+ &nexthop->nh_srv6->seg6local_ctx,
+ nexthop->nh_srv6->seg6local_action);
+ vty_out(vty, ", seg6local %s %s", seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action), buf);
+
+ inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
+ sizeof(buf));
+ vty_out(vty, ", seg6 %s", buf);
+ }
+
if (nexthop->weight)
vty_out(vty, ", weight %u", nexthop->weight);
char buf[SRCDEST2STR_BUFFER];
json_object *json_labels = NULL;
json_object *json_backups = NULL;
+ json_object *json_seg6local = NULL;
+ json_object *json_seg6 = NULL;
int i;
json_object_int_add(json_nexthop, "flags",
if (nexthop->srte_color)
json_object_int_add(json_nexthop, "srteColor",
nexthop->srte_color);
+
+ if (nexthop->nh_srv6) {
+ json_seg6local = json_object_new_object();
+ json_object_string_add(
+ json_seg6local, "action", seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action));
+ json_object_object_add(json_nexthop, "seg6local",
+ json_seg6local);
+
+ json_seg6 = json_object_new_object();
+ inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
+ sizeof(buf));
+ json_object_string_add(json_seg6, "segs", buf);
+ json_object_object_add(json_nexthop, "seg6", json_seg6);
+ }
}
static void vty_show_ip_route(struct vty *vty, struct route_node *rn,