/aclocal.m4
/libtool
/libtool.orig
-/changelog-auto
/test-driver
/test-suite.log
DISTCLEANFILES =
SUFFIXES =
-examplesdir = $(exampledir)
-
bin_PROGRAMS =
sbin_PROGRAMS =
sbin_SCRIPTS =
module_LTLIBRARIES =
pkginclude_HEADERS =
nodist_pkginclude_HEADERS =
-dist_examples_DATA =
dist_yangmodels_DATA =
man_MANS =
vtysh_scan =
m4/README.txt \
m4/libtool-whole-archive.patch \
config.version \
- changelog-auto \
- changelog-auto.in \
\
python/clidef.py \
python/clippy/__init__.py \
--sysconfdir=$_sysconfdir \
--libdir=$_libdir \
--localstatedir=$_localstatedir \
- --enable-systemd=no \
--enable-rpki \
--enable-vtysh \
--enable-multipath=64 \
if (prefix->family == AF_INET) {
flush_interface_routes(ifc->ifp, 0);
babel_ifp = babel_get_if_nfo(ifc->ifp);
- if (babel_ifp->ipv4 != NULL
- && memcmp(babel_ifp->ipv4, &prefix->u.prefix4, 4) == 0) {
- free(babel_ifp->ipv4);
- babel_ifp->ipv4 = NULL;
+ if (babel_ifp->ipv4 != NULL
+ && memcmp(babel_ifp->ipv4, &prefix->u.prefix4, IPV4_MAX_BYTELEN)
+ == 0) {
+ free(babel_ifp->ipv4);
+ babel_ifp->ipv4 = NULL;
}
}
return 0;
FOR_ALL_INTERFACES_ADDRESSES(ifp, connected, node) {
- if(connected->address->family == AF_INET6 &&
- memcmp(&connected->address->u.prefix6, address, 16) == 0)
- return 1;
+ if (connected->address->family == AF_INET6
+ && memcmp(&connected->address->u.prefix6, address,
+ IPV6_MAX_BYTELEN)
+ == 0)
+ return 1;
}
return 0;
babel_prefix_eq(struct prefix *prefix, unsigned char *p, int plen)
{
if(prefix->family == AF_INET6) {
- if(prefix->prefixlen != plen ||
- memcmp(&prefix->u.prefix6, p, 16) != 0)
- return 0;
+ if (prefix->prefixlen != plen
+ || memcmp(&prefix->u.prefix6, p, IPV6_MAX_BYTELEN) != 0)
+ return 0;
} else if(prefix->family == AF_INET) {
- if(plen < 96 || !v4mapped(p) || prefix->prefixlen != plen - 96 ||
- memcmp(&prefix->u.prefix4, p + 12, 4) != 0)
- return 0;
+ if (plen < 96 || !v4mapped(p) || prefix->prefixlen != plen - 96
+ || memcmp(&prefix->u.prefix4, p + 12, IPV4_MAX_BYTELEN) != 0)
+ return 0;
} else {
return 0;
}
show_babel_routes_sub(struct babel_route *route, struct vty *vty,
struct prefix *prefix)
{
- const unsigned char *nexthop =
- memcmp(route->nexthop, route->neigh->address, 16) == 0 ?
- NULL : route->nexthop;
- char channels[100];
-
- if(prefix && !babel_prefix_eq(prefix, route->src->prefix, route->src->plen))
- return;
-
- if(route->channels[0] == 0)
- channels[0] = '\0';
- else {
- int k, j = 0;
- snprintf(channels, sizeof(channels), " chan (");
- j = strlen(channels);
- for(k = 0; k < DIVERSITY_HOPS; k++) {
- if(route->channels[k] == 0)
- break;
- if(k > 0)
- channels[j++] = ',';
- snprintf(channels + j, 100 - j, "%u", route->channels[k]);
- j = strlen(channels);
- }
- snprintf(channels + j, 100 - j, ")");
- if(k == 0)
- channels[0] = '\0';
+ const unsigned char *nexthop =
+ memcmp(route->nexthop, route->neigh->address, IPV6_MAX_BYTELEN)
+ == 0
+ ? NULL
+ : route->nexthop;
+ char channels[100];
+
+ if (prefix
+ && !babel_prefix_eq(prefix, route->src->prefix, route->src->plen))
+ return;
+
+ if (route->channels[0] == 0)
+ channels[0] = '\0';
+ else {
+ int k, j = 0;
+ snprintf(channels, sizeof(channels), " chan (");
+ j = strlen(channels);
+ for (k = 0; k < DIVERSITY_HOPS; k++) {
+ if (route->channels[k] == 0)
+ break;
+ if (k > 0)
+ channels[j++] = ',';
+ snprintf(channels + j, 100 - j, "%u",
+ route->channels[k]);
+ j = strlen(channels);
+ }
+ snprintf(channels + j, 100 - j, ")");
+ if (k == 0)
+ channels[0] = '\0';
}
vty_out (vty,
switch (family) {
case AF_INET:
uchar_to_inaddr(&api_nh->gate.ipv4, gate);
- if (IPV4_ADDR_SAME (&api_nh->gate.ipv4, &quagga_prefix.u.prefix4) &&
- quagga_prefix.prefixlen == 32) {
- api_nh->type = NEXTHOP_TYPE_IFINDEX;
- } else {
- api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
+ if (IPV4_ADDR_SAME(&api_nh->gate.ipv4, &quagga_prefix.u.prefix4)
+ && quagga_prefix.prefixlen == IPV4_MAX_BITLEN) {
+ api_nh->type = NEXTHOP_TYPE_IFINDEX;
+ } else {
+ api_nh->type = NEXTHOP_TYPE_IPV4_IFINDEX;
}
break;
case AF_INET6:
return (int)l;
}
-int
-in_prefix(const unsigned char *restrict address,
- const unsigned char *restrict prefix, unsigned char plen)
-{
- unsigned char m;
-
- if(plen > 128)
- plen = 128;
-
- if(memcmp(address, prefix, plen / 8) != 0)
- return 0;
-
- if(plen % 8 == 0)
- return 1;
-
- m = 0xFF << (8 - (plen % 8));
-
- return ((address[plen / 8] & m) == (prefix[plen / 8] & m));
-}
-
unsigned char *
mask_prefix(unsigned char *restrict ret,
const unsigned char *restrict prefix, unsigned char plen)
{
- if(plen >= 128) {
- memcpy(ret, prefix, 16);
- return ret;
+ if (plen >= IPV6_MAX_BITLEN) {
+ memcpy(ret, prefix, IPV6_MAX_BYTELEN);
+ return ret;
}
memset(ret, 0, 16);
rc = inet_pton(AF_INET6, address, &ina6);
if(rc > 0) {
- memcpy(addr_r, &ina6, 16);
- if(af_r) *af_r = AF_INET6;
- return 0;
+ memcpy(addr_r, &ina6, IPV6_MAX_BYTELEN);
+ if (af_r)
+ *af_r = AF_INET6;
+ return 0;
}
return -1;
void
in6addr_to_uchar(unsigned char *dest, const struct in6_addr *src)
{
- memcpy(dest, src, 16);
+ memcpy(dest, src, IPV6_MAX_BYTELEN);
}
void
uchar_to_in6addr(struct in6_addr *dest, const unsigned char *src)
{
- memcpy(dest, src, 16);
+ memcpy(dest, src, IPV6_MAX_BYTELEN);
}
int
void timeval_min_sec(struct timeval *d, time_t secs);
int parse_nat(const char *string) ATTRIBUTE ((pure));
int parse_msec(const char *string) ATTRIBUTE ((pure));
-int in_prefix(const unsigned char *restrict address,
- const unsigned char *restrict prefix, unsigned char plen)
- ATTRIBUTE ((pure));
unsigned char *mask_prefix(unsigned char *restrict ret,
const unsigned char *restrict prefix,
unsigned char plen);
struct bfd_dplane_ctx *bdc;
bdc = XCALLOC(MTYPE_BFDD_DPLANE_CTX, sizeof(*bdc));
- if (bdc == NULL)
- return NULL;
bdc->sock = sock;
bdc->inbuf = stream_new(BFD_DPLANE_CLIENT_BUF_SIZE);
if (num >= AS_SEGMENT_MAX)
return seg; /* we don't do huge prepends */
- if ((newas = assegment_data_new(seg->length + num)) == NULL)
+ newas = assegment_data_new(seg->length + num);
+ if (newas == NULL)
return seg;
for (i = 0; i < num; i++)
}
/* Check all mandatory well-known attributes are present */
- if ((ret = bgp_attr_check(peer, attr)) < 0)
+ ret = bgp_attr_check(peer, attr);
+ if (ret < 0)
goto done;
/*
new->length = total;
p = new->value;
if (st->family == AF_INET) {
- memcpy(p, &(st->ip_address.v4.s_addr), 4);
- p += 4;
+ memcpy(p, &(st->ip_address.v4.s_addr), IPV4_MAX_BYTELEN);
+ p += IPV4_MAX_BYTELEN;
} else {
assert(st->family == AF_INET6);
- memcpy(p, &(st->ip_address.v6.s6_addr), 16);
- p += 16;
+ memcpy(p, &(st->ip_address.v6.s6_addr), IPV6_MAX_BYTELEN);
+ p += IPV6_MAX_BYTELEN;
}
memcpy(p, &(st->as4), 4);
return new;
}
if (subtlv->length == 8) {
st->family = AF_INET;
- memcpy(&st->ip_address.v4.s_addr, subtlv->value, 4);
+ memcpy(&st->ip_address.v4.s_addr, subtlv->value,
+ IPV4_MAX_BYTELEN);
} else {
st->family = AF_INET6;
- memcpy(&(st->ip_address.v6.s6_addr), subtlv->value, 16);
+ memcpy(&(st->ip_address.v6.s6_addr), subtlv->value,
+ IPV6_MAX_BYTELEN);
}
i = subtlv->length - 4;
ptr_get_be32(subtlv->value + i, &st->as4);
gw_afi = AF_INET;
} else {
SET_IPADDR_V6(&p.prefix.prefix_addr.ip);
- memcpy(&p.prefix.prefix_addr.ip.ipaddr_v6, pfx, 16);
- pfx += 16;
- memcpy(&evpn.gw_ip.ipv6, pfx, 16);
- pfx += 16;
+ memcpy(&p.prefix.prefix_addr.ip.ipaddr_v6, pfx,
+ IPV6_MAX_BYTELEN);
+ pfx += IPV6_MAX_BYTELEN;
+ memcpy(&evpn.gw_ip.ipv6, pfx, IPV6_MAX_BYTELEN);
+ pfx += IPV6_MAX_BYTELEN;
gw_afi = AF_INET6;
}
bool old_active;
bool new_active;
- old_active = !!CHECK_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ACTIVE);
+ old_active = CHECK_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ACTIVE);
/* currently we need an active EVI reference to use the VTEP as
* a nexthop. this may change...
*/
else
UNSET_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ACTIVE);
- new_active = !!CHECK_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ACTIVE);
+ new_active = CHECK_FLAG(es_vtep->flags, BGP_EVPNES_VTEP_ACTIVE);
if ((old_active != new_active) || (new_active && param_change)) {
bool new_active;
uint32_t ead_activity_flags;
- old_active = !!CHECK_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
+ old_active = CHECK_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
if (bgp_mh_info->ead_evi_rx)
/* Both EAD-per-ES and EAD-per-EVI routes must be rxed from a PE
else
UNSET_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
- new_active = !!CHECK_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
+ new_active = CHECK_FLAG(evi_vtep->flags, BGP_EVPN_EVI_VTEP_ACTIVE);
if (old_active == new_active)
return;
enum overlay_index_type *oly)
{
*oly = OVERLAY_INDEX_TYPE_NONE;
- if (argv_find(argv, argc, "gateway-ip", oly_idx)) {
- if (oly)
- *oly = OVERLAY_INDEX_GATEWAY_IP;
- }
+ if (argv_find(argv, argc, "gateway-ip", oly_idx))
+ *oly = OVERLAY_INDEX_GATEWAY_IP;
return 1;
}
BGP_FLOWSPEC_STRING_DISPLAY_MAX);
break;
case BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE:
- if (prefix) {
- if (prefix_local.family == AF_INET)
- PREFIX_COPY_IPV4(prefix, &prefix_local);
- else
- PREFIX_COPY_IPV6(prefix, &prefix_local);
- }
+ if (prefix)
+ prefix_copy(prefix, &prefix_local);
break;
case BGP_FLOWSPEC_VALIDATE_ONLY:
default:
from_peer->last_major_event = last_maj_evt;
peer->remote_id = from_peer->remote_id;
peer->last_reset = from_peer->last_reset;
+ peer->max_packet_size = from_peer->max_packet_size;
peer->peer_gr_present_state = from_peer->peer_gr_present_state;
peer->peer_gr_new_status_flag = from_peer->peer_gr_new_status_flag;
return BGP_NLRI_PARSE_ERROR_LABEL_LENGTH;
}
- if ((afi == AFI_IP && p.prefixlen > 32)
- || (afi == AFI_IP6 && p.prefixlen > 128))
+ if ((afi == AFI_IP && p.prefixlen > IPV4_MAX_BITLEN)
+ || (afi == AFI_IP6 && p.prefixlen > IPV6_MAX_BITLEN))
return BGP_NLRI_PARSE_ERROR_PREFIX_LENGTH;
/* Fetch prefix from NLRI packet */
{
struct bgp_path_info_mpath *mpath;
- if ((mpath = path->mpath) == NULL) {
+ mpath = path->mpath;
+ if (mpath == NULL) {
if (!set || (cum_bw == 0 && !all_paths_lb))
return;
case AF_INET:
/* save */
nexthop_orig.u.prefix4 = path_vpn->attr->mp_nexthop_global_in;
- nexthop_orig.prefixlen = 32;
+ nexthop_orig.prefixlen = IPV4_MAX_BITLEN;
if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
case AF_INET6:
/* save */
nexthop_orig.u.prefix6 = path_vpn->attr->mp_nexthop_global;
- nexthop_orig.prefixlen = 128;
+ nexthop_orig.prefixlen = IPV6_MAX_BITLEN;
if (CHECK_FLAG(bgp_vrf->af_flags[afi][safi],
BGP_CONFIG_VRF_TO_VRF_IMPORT)) {
su2.sin6.sin6_port = 0;
/* For addresses, use the non-extended signature functionality */
- if ((su2.sa.sa_family == AF_INET && prefixlen == IPV4_MAX_PREFIXLEN)
- || (su2.sa.sa_family == AF_INET6
- && prefixlen == IPV6_MAX_PREFIXLEN))
+ if ((su2.sa.sa_family == AF_INET && prefixlen == IPV4_MAX_BITLEN)
+ || (su2.sa.sa_family == AF_INET6 && prefixlen == IPV6_MAX_BITLEN))
ret = sockopt_tcp_signature(socket, &su2, password);
else
ret = sockopt_tcp_signature_ext(socket, &su2, prefixlen,
peer->su.sa.sa_family) {
uint16_t prefixlen =
peer->su.sa.sa_family == AF_INET
- ? IPV4_MAX_PREFIXLEN
- : IPV6_MAX_PREFIXLEN;
+ ? IPV4_MAX_BITLEN
+ : IPV6_MAX_BITLEN;
/*
* if we have stored a BGP vrf instance in the
if (peer->password) {
uint16_t prefixlen = peer->su.sa.sa_family == AF_INET
- ? IPV4_MAX_PREFIXLEN
- : IPV6_MAX_PREFIXLEN;
+ ? IPV4_MAX_BITLEN
+ : IPV6_MAX_BITLEN;
bgp_md5_set_connect(peer->fd, &peer->su, prefixlen,
peer->password);
|| IN6_IS_ADDR_LINKLOCAL(
&pi->attr->mp_nexthop_global)))
p->u.prefix6 = pi->attr->mp_nexthop_local;
- else
+ /* If we receive MR_REACH with (GA)::(LL)
+ * then check for route-map to choose GA or LL
+ */
+ else if (pi->attr->mp_nexthop_len
+ == BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL) {
+ if (pi->attr->mp_nexthop_prefer_global)
+ p->u.prefix6 =
+ pi->attr->mp_nexthop_global;
+ else
+ p->u.prefix6 =
+ pi->attr->mp_nexthop_local;
+ } else
p->u.prefix6 = pi->attr->mp_nexthop_global;
p->prefixlen = IPV6_MAX_BITLEN;
}
|| path->attr->srte_color != 0)
SET_FLAG(path->flags, BGP_PATH_IGP_CHANGED);
- path_valid = !!CHECK_FLAG(path->flags, BGP_PATH_VALID);
+ path_valid = CHECK_FLAG(path->flags, BGP_PATH_VALID);
if (path_valid != bnc_is_valid_nexthop) {
if (path_valid) {
/* No longer valid, clear flag; also for EVPN
if (!table)
continue;
- if ((rm = bgp_node_match(table, &match)) == NULL)
+ rm = bgp_node_match(table, &match);
+ if (rm == NULL)
continue;
const struct prefix *rm_p = bgp_dest_get_prefix(rm);
json_object_free(json_paths);
}
} else {
- if ((dest = bgp_node_match(rib, &match)) != NULL) {
+ dest = bgp_node_match(rib, &match);
+ if (dest != NULL) {
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
if (!prefix_check
|| dest_p->prefixlen == match.prefixlen) {
if (bdistance->distance != distance) {
snprintf(errmsg, errmsg_len,
"Distance does not match configured\n");
+ bgp_dest_unlock_node(dest);
return CMD_WARNING_CONFIG_FAILED;
}
table = bgp_dest_get_bgp_table_info(dest);
if (!table)
continue;
- if ((rm = bgp_node_match(table, &match)) == NULL)
+ rm = bgp_node_match(table, &match);
+ if (rm == NULL)
continue;
const struct prefix *rm_p = bgp_dest_get_prefix(dest);
bgp_dest_unlock_node(rm);
}
} else {
- if ((dest = bgp_node_match(bgp->rib[afi][safi], &match))
- != NULL) {
+ dest = bgp_node_match(bgp->rib[afi][safi], &match);
+ if (dest != NULL) {
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
if (!prefix_check
#include "memory.h"
#include "log.h"
#include "frrlua.h"
+#include "frrscript.h"
#ifdef HAVE_LIBPCREPOSIX
#include <pcreposix.h>
#else
return RMAP_NOMATCH;
}
- enum frrlua_rm_status status_failure = LUA_RM_FAILURE,
+ enum frrlua_rm_status lrm_status = LUA_RM_FAILURE,
status_nomatch = LUA_RM_NOMATCH,
status_match = LUA_RM_MATCH,
status_match_and_change = LUA_RM_MATCH_AND_CHANGE;
- /* Make result values available */
- struct frrscript_env env[] = {
- {"integer", "RM_FAILURE", &status_failure},
- {"integer", "RM_NOMATCH", &status_nomatch},
- {"integer", "RM_MATCH", &status_match},
- {"integer", "RM_MATCH_AND_CHANGE", &status_match_and_change},
- {"integer", "action", &status_failure},
- {"prefix", "prefix", prefix},
- {"attr", "attributes", path->attr},
- {"peer", "peer", path->peer},
- {}};
-
- struct frrscript_env results[] = {
- {"integer", "action"},
- {"attr", "attributes"},
- {},
- };
-
- int result = frrscript_call(fs, env);
+ struct attr newattr = *path->attr;
+
+ int result = frrscript_call(
+ fs, ("RM_FAILURE", (long long *)&lrm_status),
+ ("RM_NOMATCH", (long long *)&status_nomatch),
+ ("RM_MATCH", (long long *)&status_match),
+ ("RM_MATCH_AND_CHANGE", (long long *)&status_match_and_change),
+ ("action", (long long *)&lrm_status), ("prefix", prefix),
+ ("attributes", &newattr), ("peer", path->peer));
if (result) {
zlog_err("Issue running script rule; defaulting to no match");
return RMAP_NOMATCH;
}
- enum frrlua_rm_status *lrm_status =
- frrscript_get_result(fs, &results[0]);
-
int status = RMAP_NOMATCH;
- switch (*lrm_status) {
+ switch (lrm_status) {
case LUA_RM_FAILURE:
zlog_err(
"Executing route-map match script '%s' failed; defaulting to no match",
zlog_debug("Updating attribute based on script's values");
uint32_t locpref = 0;
- struct attr *newattr = frrscript_get_result(fs, &results[1]);
- path->attr->med = newattr->med;
+ path->attr->med = newattr.med;
if (path->attr->flag & ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF))
locpref = path->attr->local_pref;
- if (locpref != newattr->local_pref) {
+ if (locpref != newattr.local_pref) {
SET_FLAG(path->attr->flag,
ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF));
- path->attr->local_pref = newattr->local_pref;
+ path->attr->local_pref = newattr.local_pref;
}
-
- aspath_free(newattr->aspath);
- XFREE(MTYPE_TMP, newattr);
break;
case LUA_RM_MATCH:
status = RMAP_MATCH;
break;
}
- XFREE(MTYPE_TMP, lrm_status);
frrscript_unload(fs);
return status;
if (type != RMAP_EVENT_MATCH_DELETED) {
/* ignore the mundane, the types without any dependency */
if (arg == NULL) {
- if ((tmpstr = route_map_get_match_arg(index, command))
- != NULL)
+ tmpstr = route_map_get_match_arg(index, command);
+ if (tmpstr != NULL)
dep_name =
XSTRDUP(MTYPE_ROUTE_MAP_RULE, tmpstr);
} else {
#include "bgp_aspath.h"
#include "frratomic.h"
#include "frrscript.h"
-#include "frrlua.h"
-static void lua_pushpeer(lua_State *L, const struct peer *peer)
+void lua_pushpeer(lua_State *L, const struct peer *peer)
{
lua_newtable(L);
lua_pushinteger(L, peer->as);
lua_setfield(L, -2, "stats");
}
-static void lua_pushattr(lua_State *L, const struct attr *attr)
+void lua_pushattr(lua_State *L, const struct attr *attr)
{
lua_newtable(L);
lua_pushinteger(L, attr->med);
lua_setfield(L, -2, "localpref");
}
-static void *lua_toattr(lua_State *L, int idx)
+void lua_decode_attr(lua_State *L, int idx, struct attr *attr)
{
- struct attr *attr = XCALLOC(MTYPE_TMP, sizeof(struct attr));
-
lua_getfield(L, -1, "metric");
attr->med = lua_tointeger(L, -1);
lua_pop(L, 1);
lua_getfield(L, -1, "localpref");
attr->local_pref = lua_tointeger(L, -1);
lua_pop(L, 1);
+}
+
+void *lua_toattr(lua_State *L, int idx)
+{
+ struct attr *attr = XCALLOC(MTYPE_TMP, sizeof(struct attr));
+ lua_decode_attr(L, idx, attr);
return attr;
}
#define __BGP_SCRIPT__
#include <zebra.h>
+#include "bgpd.h"
#ifdef HAVE_SCRIPTING
+#include "frrlua.h"
+
/*
* Initialize scripting stuff.
*/
void bgp_script_init(void);
+void lua_pushpeer(lua_State *L, const struct peer *peer);
+
+void lua_pushattr(lua_State *L, const struct attr *attr);
+
+void lua_decode_attr(lua_State *L, int idx, struct attr *attr);
+
+void *lua_toattr(lua_State *L, int idx);
+
#endif /* HAVE_SCRIPTING */
#endif /* __BGP_SCRIPT__ */
key);
key = jhash_1word(peer->v_routeadv, key);
key = jhash_1word(peer->change_local_as, key);
+ key = jhash_1word(peer->max_packet_size, key);
if (peer->group)
key = jhash_1word(jhash(peer->group->name,
struct update_subgroup *subgrp;
struct peer_af *paf;
struct bgp_filter *filter;
+ struct peer *peer = UPDGRP_PEER(updgrp);
int match = 0;
if (!ctx)
CHECK_FLAG(subgrp->flags, SUBGRP_FLAG_NEEDS_REFRESH)
? "R"
: "");
+ if (peer)
+ vty_out(vty, " Max packet size: %d\n",
+ peer->max_packet_size);
if (subgrp->peer_count > 0) {
vty_out(vty, " Peers:\n");
SUBGRP_FOREACH_PEER (subgrp, paf)
return;
/* Lookup existing adjacency */
- if ((adj = adj_lookup(dest, subgrp, addpath_tx_id)) != NULL) {
+ adj = adj_lookup(dest, subgrp, addpath_tx_id);
+ if (adj != NULL) {
/* Clean up previous advertisement. */
if (adj->adv)
bgp_advertise_clean_subgroup(subgrp, adj);
bgp_dest_get_prefix(dest), &tmp_pi);
if (ret == RMAP_DENYMATCH) {
+ /* The aspath belongs to 'attr' */
+ tmp_attr.aspath = NULL;
bgp_attr_flush(&tmp_attr);
continue;
} else {
return ret;
}
+/*
+ * Convert an afi_t/safi_t pair to matching BGP_DEFAULT_AF* flag.
+ *
+ * afi
+ * address-family identifier
+ *
+ * safi
+ * subsequent address-family identifier
+ *
+ * Returns:
+ * default_af string corresponding to the supplied afi/safi pair.
+ * If afi/safi is invalid or if flag for afi/safi doesn't exist,
+ * return -1.
+ */
+static const char *get_bgp_default_af_flag(afi_t afi, safi_t safi)
+{
+ switch (afi) {
+ case AFI_IP:
+ switch (safi) {
+ case SAFI_UNICAST:
+ return "ipv4-unicast";
+ case SAFI_MULTICAST:
+ return "ipv4-multicast";
+ case SAFI_MPLS_VPN:
+ return "ipv4-vpn";
+ case SAFI_ENCAP:
+ return "ipv4-encap";
+ case SAFI_LABELED_UNICAST:
+ return "ipv4-labeled-unicast";
+ case SAFI_FLOWSPEC:
+ return "ipv4-flowspec";
+ default:
+ return "unknown-afi/safi";
+ }
+ break;
+ case AFI_IP6:
+ switch (safi) {
+ case SAFI_UNICAST:
+ return "ipv6-unicast";
+ case SAFI_MULTICAST:
+ return "ipv6-multicast";
+ case SAFI_MPLS_VPN:
+ return "ipv6-vpn";
+ case SAFI_ENCAP:
+ return "ipv6-encap";
+ case SAFI_LABELED_UNICAST:
+ return "ipv6-labeled-unicast";
+ case SAFI_FLOWSPEC:
+ return "ipv6-flowspec";
+ default:
+ return "unknown-afi/safi";
+ }
+ break;
+ case AFI_L2VPN:
+ switch (safi) {
+ case SAFI_EVPN:
+ return "l2vpn-evpn";
+ default:
+ return "unknown-afi/safi";
+ }
+ case AFI_UNSPEC:
+ case AFI_MAX:
+ return "unknown-afi/safi";
+ }
+ /* all AFIs are accounted for above, so this shouldn't happen */
+ return "unknown-afi/safi";
+}
+
int bgp_get_vty(struct bgp **bgp, as_t *as, const char *name,
enum bgp_instance_type inst_type)
{
uint32_t med_admin_val;
vty_out(vty, " bgp max-med administrative");
- if ((med_admin_val =
- yang_dnode_get_uint32(dnode, "./max-med-admin"))
- != BGP_MAXMED_VALUE_DEFAULT)
+ med_admin_val = yang_dnode_get_uint32(dnode, "./max-med-admin");
+ if (med_admin_val != BGP_MAXMED_VALUE_DEFAULT)
vty_out(vty, " %u", med_admin_val);
vty_out(vty, "\n");
}
return CMD_SUCCESS;
}
-/* "no bgp default ipv6-unicast". */
-DEFUN(no_bgp_default_ipv6_unicast, no_bgp_default_ipv6_unicast_cmd,
- "no bgp default ipv6-unicast", NO_STR
+DEFPY(bgp_default_afi_safi, bgp_default_afi_safi_cmd,
+ "[no] bgp default <ipv4-unicast|"
+ "ipv4-multicast|"
+ "ipv4-vpn|"
+ "ipv4-labeled-unicast|"
+ "ipv4-flowspec|"
+ "ipv6-unicast|"
+ "ipv6-multicast|"
+ "ipv6-vpn|"
+ "ipv6-labeled-unicast|"
+ "ipv6-flowspec|"
+ "l2vpn-evpn>$afi_safi",
+ NO_STR
"BGP specific commands\n"
"Configure BGP defaults\n"
- "Activate ipv6-unicast for a peer by default\n")
+ "Activate ipv4-unicast for a peer by default\n"
+ "Activate ipv4-multicast for a peer by default\n"
+ "Activate ipv4-vpn for a peer by default\n"
+ "Activate ipv4-labeled-unicast for a peer by default\n"
+ "Activate ipv4-flowspec for a peer by default\n"
+ "Activate ipv6-unicast for a peer by default\n"
+ "Activate ipv6-multicast for a peer by default\n"
+ "Activate ipv6-vpn for a peer by default\n"
+ "Activate ipv6-labeled-unicast for a peer by default\n"
+ "Activate ipv6-flowspec for a peer by default\n"
+ "Activate l2vpn-evpn for a peer by default\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
- UNSET_FLAG(bgp->flags, BGP_FLAG_DEFAULT_IPV6);
- return CMD_SUCCESS;
-}
+ char afi_safi_str[strlen(afi_safi) + 1];
+ char *afi_safi_str_tok;
-DEFUN(bgp_default_ipv6_unicast, bgp_default_ipv6_unicast_cmd,
- "bgp default ipv6-unicast",
- "BGP specific commands\n"
- "Configure BGP defaults\n"
- "Activate ipv6-unicast for a peer by default\n")
-{
- VTY_DECLVAR_CONTEXT(bgp, bgp);
- SET_FLAG(bgp->flags, BGP_FLAG_DEFAULT_IPV6);
- return CMD_SUCCESS;
-}
+ strlcpy(afi_safi_str, afi_safi, sizeof(afi_safi_str));
+ char *afi_str = strtok_r(afi_safi_str, "-", &afi_safi_str_tok);
+ char *safi_str = strtok_r(NULL, "-", &afi_safi_str_tok);
+ afi_t afi = bgp_vty_afi_from_str(afi_str);
+ safi_t safi;
-/* "no bgp default ipv4-unicast". */
-DEFUN (no_bgp_default_ipv4_unicast,
- no_bgp_default_ipv4_unicast_cmd,
- "no bgp default ipv4-unicast",
- NO_STR
- "BGP specific commands\n"
- "Configure BGP defaults\n"
- "Activate ipv4-unicast for a peer by default\n")
-{
- VTY_DECLVAR_CONTEXT(bgp, bgp);
- SET_FLAG(bgp->flags, BGP_FLAG_NO_DEFAULT_IPV4);
- return CMD_SUCCESS;
-}
+ if (strmatch(safi_str, "labeled"))
+ safi = bgp_vty_safi_from_str("labeled-unicast");
+ else
+ safi = bgp_vty_safi_from_str(safi_str);
+
+ if (no)
+ bgp->default_af[afi][safi] = false;
+ else {
+ if ((safi == SAFI_LABELED_UNICAST
+ && bgp->default_af[afi][SAFI_UNICAST])
+ || (safi == SAFI_UNICAST
+ && bgp->default_af[afi][SAFI_LABELED_UNICAST]))
+ bgp_vty_return(vty, BGP_ERR_PEER_SAFI_CONFLICT);
+ else
+ bgp->default_af[afi][safi] = true;
+ }
-DEFUN (bgp_default_ipv4_unicast,
- bgp_default_ipv4_unicast_cmd,
- "bgp default ipv4-unicast",
- "BGP specific commands\n"
- "Configure BGP defaults\n"
- "Activate ipv4-unicast for a peer by default\n")
-{
- VTY_DECLVAR_CONTEXT(bgp, bgp);
- UNSET_FLAG(bgp->flags, BGP_FLAG_NO_DEFAULT_IPV4);
return CMD_SUCCESS;
}
if (table == NULL)
continue;
- if ((rm = bgp_node_match(table, &match)) != NULL) {
+ rm = bgp_node_match(table, &match);
+ if (rm != NULL) {
const struct prefix *rm_p =
bgp_dest_get_prefix(rm);
}
}
} else {
- if ((dest = bgp_node_match(rib, &match)) != NULL) {
+ dest = bgp_node_match(rib, &match);
+ if (dest != NULL) {
const struct prefix *dest_p = bgp_dest_get_prefix(dest);
if (dest_p->prefixlen == match.prefixlen) {
}
} else {
if (peer->afc[afi][safi]) {
- if ((afi == AFI_IP || afi == AFI_IP6)
- && safi == SAFI_UNICAST) {
- if (afi == AFI_IP
- && CHECK_FLAG(bgp->flags,
- BGP_FLAG_NO_DEFAULT_IPV4)) {
- vty_out(vty, " neighbor %s activate\n",
- addr);
- } else if (afi == AFI_IP6
- && !CHECK_FLAG(
- bgp->flags,
- BGP_FLAG_DEFAULT_IPV6)) {
- vty_out(vty, " neighbor %s activate\n",
- addr);
- }
- } else {
+ if (safi == SAFI_ENCAP)
+ vty_out(vty, " neighbor %s activate\n", addr);
+ else if (!bgp->default_af[afi][safi])
vty_out(vty, " neighbor %s activate\n", addr);
- }
} else {
- if ((afi == AFI_IP || afi == AFI_IP6)
- && safi == SAFI_UNICAST) {
- if (afi == AFI_IP
- && !CHECK_FLAG(bgp->flags,
- BGP_FLAG_NO_DEFAULT_IPV4)) {
- vty_out(vty,
- " no neighbor %s activate\n",
- addr);
- } else if (afi == AFI_IP6
- && CHECK_FLAG(
- bgp->flags,
- BGP_FLAG_DEFAULT_IPV6)) {
- vty_out(vty,
- " no neighbor %s activate\n",
- addr);
- }
- }
+ if (bgp->default_af[afi][safi])
+ vty_out(vty, " no neighbor %s activate\n",
+ addr);
}
}
struct peer *peer;
struct listnode *node, *nnode;
struct listnode *mnode, *mnnode;
+ afi_t afi;
+ safi_t safi;
if (bm->rmap_update_timer != RMAP_DEFAULT_UPDATE_TIMER)
vty_out(vty, "bgp route-map delay-timer %u\n",
? ""
: "no ");
- /* BGP default ipv4-unicast. */
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_NO_DEFAULT_IPV4))
- vty_out(vty, " no bgp default ipv4-unicast\n");
-
- /* BGP default ipv6-unicast. */
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_DEFAULT_IPV6))
- vty_out(vty, " bgp default ipv6-unicast\n");
+ /* BGP default <afi>-<safi> */
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (afi == AFI_IP && safi == SAFI_UNICAST) {
+ if (!bgp->default_af[afi][safi])
+ vty_out(vty, " no bgp default %s\n",
+ get_bgp_default_af_flag(afi,
+ safi));
+ } else if (bgp->default_af[afi][safi])
+ vty_out(vty, " bgp default %s\n",
+ get_bgp_default_af_flag(afi, safi));
+ }
/* BGP default local-preference. */
if (bgp->default_local_pref != BGP_DEFAULT_LOCAL_PREF)
install_element(BGP_NODE, &bgp_bestpath_bw_cmd);
install_element(BGP_NODE, &no_bgp_bestpath_bw_cmd);
- /* "no bgp default ipv4-unicast" commands. */
- install_element(BGP_NODE, &no_bgp_default_ipv4_unicast_cmd);
- install_element(BGP_NODE, &bgp_default_ipv4_unicast_cmd);
-
- /* "no bgp default ipv6-unicast" commands. */
- install_element(BGP_NODE, &no_bgp_default_ipv6_unicast_cmd);
- install_element(BGP_NODE, &bgp_default_ipv6_unicast_cmd);
+ /* "no bgp default <afi>-<safi>" commands. */
+ install_element(BGP_NODE, &bgp_default_afi_safi_cmd);
/* "bgp network import-check" commands. */
install_element(BGP_NODE, &bgp_network_import_check_cmd);
return 0;
}
-static int if_get_ipv6_local(struct interface *ifp, struct in6_addr *addr)
+static bool if_get_ipv6_local(struct interface *ifp, struct in6_addr *addr)
{
struct listnode *cnode;
struct connected *connected;
if (cp->family == AF_INET6)
if (IN6_IS_ADDR_LINKLOCAL(&cp->u.prefix6)) {
memcpy(addr, &cp->u.prefix6, IPV6_MAX_BYTELEN);
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static int if_get_ipv4_address(struct interface *ifp, struct in_addr *addr)
{
int ret = 0;
struct interface *ifp = NULL;
+ bool v6_ll_avail = true;
memset(nexthop, 0, sizeof(struct bgp_nexthop));
* route-map to
* specify the global IPv6 nexthop.
*/
- if_get_ipv6_local(ifp, &nexthop->v6_global);
+ v6_ll_avail =
+ if_get_ipv6_local(ifp, &nexthop->v6_global);
memcpy(&nexthop->v6_local, &nexthop->v6_global,
IPV6_MAX_BYTELEN);
} else
- if_get_ipv6_local(ifp, &nexthop->v6_local);
+ v6_ll_avail =
+ if_get_ipv6_local(ifp, &nexthop->v6_local);
+ /*
+ * If we are a v4 connection and we are not doing unnumbered
+ * not having a v6 LL address is ok
+ */
+ if (!v6_ll_avail && !peer->conf_if)
+ v6_ll_avail = true;
if (if_lookup_by_ipv4(&remote->sin.sin_addr, peer->bgp->vrf_id))
peer->shared_network = 1;
else
remote->sin6.sin6_scope_id,
peer->bgp->vrf_id);
if (direct)
- if_get_ipv6_local(ifp, &nexthop->v6_local);
+ v6_ll_avail = if_get_ipv6_local(
+ ifp, &nexthop->v6_local);
} else
/* Link-local address. */
{
/* If we have identified the local interface, there is no error for now.
*/
- return true;
+ return v6_ll_avail;
}
static struct in6_addr *
peer->bgp = bgp_lock(bgp);
peer = peer_lock(peer); /* initial reference */
peer->password = NULL;
- peer->max_packet_size = BGP_MAX_PACKET_SIZE;
+ peer->max_packet_size = BGP_STANDARD_MESSAGE_MAX_PACKET_SIZE;
/* Set default flags. */
FOREACH_AFI_SAFI (afi, safi) {
(void)peer_sort(peer_dst);
peer_dst->rmap_type = peer_src->rmap_type;
+ peer_dst->max_packet_size = peer_src->max_packet_size;
+
/* Timers */
peer_dst->holdtime = peer_src->holdtime;
peer_dst->keepalive = peer_src->keepalive;
*/
for (ALL_LIST_ELEMENTS_RO(ifp->connected, node, ifc)) {
if (ifc->address && (ifc->address->family == AF_INET)) {
- PREFIX_COPY_IPV4(&p, CONNECTED_PREFIX(ifc));
+ prefix_copy(&p, CONNECTED_PREFIX(ifc));
if (p.prefixlen == 30) {
peer->su.sa.sa_family = AF_INET;
addr = ntohl(p.u.prefix4.s_addr);
SET_FLAG(peer->flags, PEER_FLAG_CONFIG_NODE);
- /* If address family is IPv4 and `bgp default ipv4-unicast` (default),
- * then activate the neighbor for this AF.
- * If address family is IPv6 and `bgp default ipv6-unicast`
- * (non-default), then activate the neighbor for this AF.
+ /* If 'bgp default <afi>-<safi>' is configured, then activate the
+ * neighbor for the corresponding address family. IPv4 Unicast is
+ * the only address family enabled by default without expliict
+ * configuration.
*/
FOREACH_AFI_SAFI (afi, safi) {
- if ((afi == AFI_IP || afi == AFI_IP6) && safi == SAFI_UNICAST) {
- if ((afi == AFI_IP
- && !CHECK_FLAG(bgp->flags,
- BGP_FLAG_NO_DEFAULT_IPV4))
- || (afi == AFI_IP6
- && CHECK_FLAG(bgp->flags,
- BGP_FLAG_DEFAULT_IPV6))) {
- peer->afc[afi][safi] = 1;
- peer_af_create(peer, afi, safi);
- }
+ if (bgp->default_af[afi][safi]) {
+ peer->afc[afi][safi] = 1;
+ peer_af_create(peer, afi, safi);
}
}
{
struct peer_group *group;
afi_t afi;
+ safi_t safi;
group = peer_group_lookup(bgp, name);
if (group)
for (afi = AFI_IP; afi < AFI_MAX; afi++)
group->listen_range[afi] = list_new();
group->conf = peer_new(bgp);
- if (!CHECK_FLAG(bgp->flags, BGP_FLAG_NO_DEFAULT_IPV4))
- group->conf->afc[AFI_IP][SAFI_UNICAST] = 1;
- if (CHECK_FLAG(bgp->flags, BGP_FLAG_DEFAULT_IPV6))
- group->conf->afc[AFI_IP6][SAFI_UNICAST] = 1;
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (bgp->default_af[afi][safi])
+ group->conf->afc[afi][safi] = 1;
+ }
XFREE(MTYPE_BGP_PEER_HOST, group->conf->host);
group->conf->host = XSTRDUP(MTYPE_BGP_PEER_HOST, name);
group->conf->group = group;
afi_t afi;
safi_t safi;
- if ((bgp = XCALLOC(MTYPE_BGP, sizeof(struct bgp))) == NULL)
- return NULL;
+ bgp = XCALLOC(MTYPE_BGP, sizeof(struct bgp));
if (BGP_DEBUG(zebra, ZEBRA)) {
if (inst_type == BGP_INSTANCE_TYPE_DEFAULT)
atomic_store_explicit(&bgp->rpkt_quanta, BGP_READ_PACKET_MAX,
memory_order_relaxed);
bgp->coalesce_time = BGP_DEFAULT_SUBGROUP_COALESCE_TIME;
+ bgp->default_af[AFI_IP][SAFI_UNICAST] = true;
QOBJ_REG(bgp, bgp);
#define BGP_FLAG_DETERMINISTIC_MED (1 << 1)
#define BGP_FLAG_MED_MISSING_AS_WORST (1 << 2)
#define BGP_FLAG_MED_CONFED (1 << 3)
-#define BGP_FLAG_NO_DEFAULT_IPV4 (1 << 4)
-#define BGP_FLAG_NO_CLIENT_TO_CLIENT (1 << 5)
-#define BGP_FLAG_COMPARE_ROUTER_ID (1 << 7)
-#define BGP_FLAG_ASPATH_IGNORE (1 << 8)
-#define BGP_FLAG_IMPORT_CHECK (1 << 9)
-#define BGP_FLAG_NO_FAST_EXT_FAILOVER (1 << 10)
-#define BGP_FLAG_LOG_NEIGHBOR_CHANGES (1 << 11)
+#define BGP_FLAG_NO_CLIENT_TO_CLIENT (1 << 4)
+#define BGP_FLAG_COMPARE_ROUTER_ID (1 << 5)
+#define BGP_FLAG_ASPATH_IGNORE (1 << 6)
+#define BGP_FLAG_IMPORT_CHECK (1 << 7)
+#define BGP_FLAG_NO_FAST_EXT_FAILOVER (1 << 8)
+#define BGP_FLAG_LOG_NEIGHBOR_CHANGES (1 << 9)
/* This flag is set when we have full BGP Graceful-Restart mode enable */
-#define BGP_FLAG_GRACEFUL_RESTART (1 << 12)
-
-#define BGP_FLAG_ASPATH_CONFED (1 << 13)
-#define BGP_FLAG_ASPATH_MULTIPATH_RELAX (1 << 14)
-#define BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY (1 << 15)
-#define BGP_FLAG_DISABLE_NH_CONNECTED_CHK (1 << 16)
-#define BGP_FLAG_MULTIPATH_RELAX_AS_SET (1 << 17)
-#define BGP_FLAG_FORCE_STATIC_PROCESS (1 << 18)
-#define BGP_FLAG_SHOW_HOSTNAME (1 << 19)
-#define BGP_FLAG_GR_PRESERVE_FWD (1 << 20)
-#define BGP_FLAG_GRACEFUL_SHUTDOWN (1 << 21)
-#define BGP_FLAG_DELETE_IN_PROGRESS (1 << 22)
-#define BGP_FLAG_SELECT_DEFER_DISABLE (1 << 23)
-#define BGP_FLAG_GR_DISABLE_EOR (1 << 24)
-#define BGP_FLAG_EBGP_REQUIRES_POLICY (1 << 25)
-#define BGP_FLAG_SHOW_NEXTHOP_HOSTNAME (1 << 26)
+#define BGP_FLAG_GRACEFUL_RESTART (1 << 10)
+
+#define BGP_FLAG_ASPATH_CONFED (1 << 11)
+#define BGP_FLAG_ASPATH_MULTIPATH_RELAX (1 << 12)
+#define BGP_FLAG_RR_ALLOW_OUTBOUND_POLICY (1 << 13)
+#define BGP_FLAG_DISABLE_NH_CONNECTED_CHK (1 << 14)
+#define BGP_FLAG_MULTIPATH_RELAX_AS_SET (1 << 15)
+#define BGP_FLAG_FORCE_STATIC_PROCESS (1 << 16)
+#define BGP_FLAG_SHOW_HOSTNAME (1 << 17)
+#define BGP_FLAG_GR_PRESERVE_FWD (1 << 18)
+#define BGP_FLAG_GRACEFUL_SHUTDOWN (1 << 19)
+#define BGP_FLAG_DELETE_IN_PROGRESS (1 << 20)
+#define BGP_FLAG_SELECT_DEFER_DISABLE (1 << 21)
+#define BGP_FLAG_GR_DISABLE_EOR (1 << 22)
+#define BGP_FLAG_EBGP_REQUIRES_POLICY (1 << 23)
+#define BGP_FLAG_SHOW_NEXTHOP_HOSTNAME (1 << 24)
/* This flag is set if the instance is in administrative shutdown */
-#define BGP_FLAG_SHUTDOWN (1 << 27)
-#define BGP_FLAG_SUPPRESS_FIB_PENDING (1 << 28)
-#define BGP_FLAG_SUPPRESS_DUPLICATES (1 << 29)
-#define BGP_FLAG_DEFAULT_IPV6 (1 << 30)
-#define BGP_FLAG_PEERTYPE_MULTIPATH_RELAX (1 << 31)
+#define BGP_FLAG_SHUTDOWN (1 << 25)
+#define BGP_FLAG_SUPPRESS_FIB_PENDING (1 << 26)
+#define BGP_FLAG_SUPPRESS_DUPLICATES (1 << 27)
+#define BGP_FLAG_PEERTYPE_MULTIPATH_RELAX (1 << 29)
+
+ /* BGP default address-families.
+ * New peers inherit enabled afi/safis from bgp instance.
+ */
+ uint16_t default_af[AFI_MAX][SAFI_MAX];
enum global_mode GLOBAL_GR_FSM[BGP_GLOBAL_GR_MODE]
[BGP_GLOBAL_GR_EVENT_CMD];
memset(&rprefix, 0, sizeof(rprefix));
rprefix.prefix.addr_family = target->addr_family;
if (target->addr_family == AF_INET) {
- rprefix.length = 32;
+ rprefix.length = IPV4_MAX_BITLEN;
} else {
- rprefix.length = 128;
+ rprefix.length = IPV6_MAX_BITLEN;
}
pNHE = rfapiEthRouteTable2NextHopList(
memset(&rprefix, 0, sizeof(rprefix));
rprefix.prefix.addr_family = target->addr_family;
if (target->addr_family == AF_INET) {
- rprefix.length = 32;
+ rprefix.length = IPV4_MAX_BITLEN;
} else {
- rprefix.length = 128;
+ rprefix.length = IPV6_MAX_BITLEN;
}
pNHE = rfapiEthRouteNode2NextHopList(
case 8:
if (p) {
p->family = AF_INET;
- p->prefixlen = 32;
+ p->prefixlen = IPV4_MAX_BITLEN;
memcpy(p->u.val, pEncap->value,
4);
}
case 20:
if (p) {
p->family = AF_INET6;
- p->prefixlen = 128;
+ p->prefixlen = IPV6_MAX_BITLEN;
memcpy(p->u.val, pEncap->value,
16);
}
if (p) {
p->family = bpi->extra->vnc.import.un_family;
p->u.prefix4 = bpi->extra->vnc.import.un.addr4;
- p->prefixlen = 32;
+ p->prefixlen = IPV4_MAX_BITLEN;
}
return 0;
case AF_INET6:
if (p) {
p->family = bpi->extra->vnc.import.un_family;
p->u.prefix6 = bpi->extra->vnc.import.un.addr6;
- p->prefixlen = 128;
+ p->prefixlen = IPV6_MAX_BITLEN;
}
return 0;
default:
switch (p->family = BGP_MP_NEXTHOP_FAMILY(attr->mp_nexthop_len)) {
case AF_INET:
p->u.prefix4 = attr->mp_nexthop_global_in;
- p->prefixlen = 32;
+ p->prefixlen = IPV4_MAX_BITLEN;
break;
case AF_INET6:
p->u.prefix6 = attr->mp_nexthop_global;
- p->prefixlen = 128;
+ p->prefixlen = IPV6_MAX_BITLEN;
break;
default:
{
if (afi == AFI_IP) {
p->family = AF_INET;
- p->prefixlen = 32;
+ p->prefixlen = IPV4_MAX_BITLEN;
p->u.prefix4 = attr->nexthop;
} else {
rfapiNexthop2Prefix(attr, p);
switch (BGP_MP_NEXTHOP_FAMILY(attr->mp_nexthop_len)) {
case AF_INET:
prefix->family = AF_INET;
- prefix->prefixlen = 32;
+ prefix->prefixlen = IPV4_MAX_BITLEN;
prefix->u.prefix4 = attr->mp_nexthop_global_in;
break;
case AF_INET6:
prefix->family = AF_INET6;
- prefix->prefixlen = 128;
+ prefix->prefixlen = IPV6_MAX_BITLEN;
prefix->u.prefix6 = attr->mp_nexthop_global;
break;
default:
#define RFAPI_HOST_PREFIX(prefix) \
(((prefix)->family == AF_INET) \
- ? ((prefix)->prefixlen == 32) \
+ ? ((prefix)->prefixlen == IPV4_MAX_BITLEN) \
: (((prefix)->family == AF_INET6) \
- ? ((prefix)->prefixlen == 128) \
+ ? ((prefix)->prefixlen == IPV6_MAX_BITLEN) \
: 0))
extern int rfapi_find_rfd(struct bgp *bgp, struct rfapi_ip_addr *vn_addr,
raddr->addr_family = qprefix->family;
switch (qprefix->family) {
case AF_INET:
- if (qprefix->prefixlen != 32)
+ if (qprefix->prefixlen != IPV4_MAX_BITLEN)
return -1;
raddr->addr.v4 = qprefix->u.prefix4;
break;
case AF_INET6:
- if (qprefix->prefixlen != 128)
+ if (qprefix->prefixlen != IPV6_MAX_BITLEN)
return -1;
raddr->addr.v6 = qprefix->u.prefix6;
break;
switch (hia->addr_family) {
case AF_INET:
- pfx->prefixlen = 32;
+ pfx->prefixlen = IPV4_MAX_BITLEN;
pfx->u.prefix4 = hia->addr.v4;
break;
case AF_INET6:
- pfx->prefixlen = 128;
+ pfx->prefixlen = IPV6_MAX_BITLEN;
pfx->u.prefix6 = hia->addr.v6;
break;
default:
}
switch (p->family) {
case AF_INET:
- if (p->prefixlen != 32) {
+ if (p->prefixlen != IPV4_MAX_BITLEN) {
vty_out(vty, "Not a host address: \"%s\"%s", str,
HVTYNL);
return CMD_WARNING;
}
break;
case AF_INET6:
- if (p->prefixlen != 128) {
+ if (p->prefixlen != IPV6_MAX_BITLEN) {
vty_out(vty, "Not a host address: \"%s\"%s", str,
HVTYNL);
return CMD_WARNING;
memset((uint8_t *)pfx_ce, 0, sizeof(*pfx_ce));
memcpy(&pfx_ce->u.prefix4, ecp + 2, 4);
pfx_ce->family = AF_INET;
- pfx_ce->prefixlen = 32;
+ pfx_ce->prefixlen = IPV4_MAX_BITLEN;
return 0;
}
orig_nexthop.family =
BGP_MP_NEXTHOP_FAMILY(orig->mp_nexthop_len);
if (orig_nexthop.family == AF_INET) {
- orig_nexthop.prefixlen = 32;
+ orig_nexthop.prefixlen = IPV4_MAX_BITLEN;
orig_nexthop.u.prefix4 = orig->mp_nexthop_global_in;
} else if (orig_nexthop.family == AF_INET6) {
- orig_nexthop.prefixlen = 128;
+ orig_nexthop.prefixlen = IPV6_MAX_BITLEN;
orig_nexthop.u.prefix6 = orig->mp_nexthop_global;
} else {
return -1; /* FAIL - can't compute nexthop */
{
switch (p->family) {
case AF_INET:
- return (p->prefixlen == 32);
+ return (p->prefixlen == IPV4_MAX_BITLEN);
case AF_INET6:
- return (p->prefixlen == 128);
+ return (p->prefixlen == IPV6_MAX_BITLEN);
}
return 0;
}
vnaddr.addr_family = vn_pfx->family;
switch (vn_pfx->family) {
case AF_INET:
- if (vn_pfx->prefixlen != 32) {
+ if (vn_pfx->prefixlen != IPV4_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist VN plen (%d) != 32, skipping",
__func__, vn_pfx->prefixlen);
break;
case AF_INET6:
- if (vn_pfx->prefixlen != 128) {
+ if (vn_pfx->prefixlen != IPV6_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist VN plen (%d) != 128, skipping",
__func__, vn_pfx->prefixlen);
vnaddr.addr_family = vn_pfx->family;
switch (vn_pfx->family) {
case AF_INET:
- if (vn_pfx->prefixlen != 32) {
+ if (vn_pfx->prefixlen != IPV4_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist VN plen (%d) != 32, skipping",
__func__, vn_pfx->prefixlen);
break;
case AF_INET6:
- if (vn_pfx->prefixlen != 128) {
+ if (vn_pfx->prefixlen != IPV6_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist VN plen (%d) != 128, skipping",
__func__, vn_pfx->prefixlen);
vnaddr.addr_family = bgp->rfapi_cfg->rfg_redist->vn_prefix.family;
switch (bgp->rfapi_cfg->rfg_redist->vn_prefix.family) {
case AF_INET:
- if (bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen != 32) {
+ if (bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen
+ != IPV4_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist nve group VN prefix len (%d) != 32, skipping",
__func__,
bgp->rfapi_cfg->rfg_redist->vn_prefix.u.prefix4;
break;
case AF_INET6:
- if (bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen != 128) {
+ if (bgp->rfapi_cfg->rfg_redist->vn_prefix.prefixlen
+ != IPV6_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist nve group VN prefix len (%d) != 128, skipping",
__func__,
switch (pfx_un.prefix.addr_family) {
case AF_INET:
- if (pfx_un.length != 32) {
+ if (pfx_un.length != IPV4_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist nve group UN prefix len (%d) != 32, skipping",
__func__, pfx_un.length);
}
break;
case AF_INET6:
- if (pfx_un.length != 128) {
+ if (pfx_un.length != IPV6_MAX_BITLEN) {
vnc_zlog_debug_verbose(
"%s: redist nve group UN prefix len (%d) != 128, skipping",
__func__, pfx_un.length);
# builds some git commit of FRR in some different configurations
# usage: buildtest.sh [commit [configurations...]]
-basecfg="--prefix=/usr --enable-user=frr --enable-group=frr --enable-vty-group=frr --enable-configfile-mask=0660 --enable-logfile-mask=0640 --enable-vtysh --sysconfdir=/etc/frr --enable-exampledir=/etc/frr/samples --localstatedir=/var/run/frr --libdir=/usr/lib64/frr --enable-rtadv --disable-static --enable-isisd --enable-multipath=0 --enable-pimd --enable-werror"
+basecfg="--prefix=/usr --enable-user=frr --enable-group=frr --enable-vty-group=frr --enable-configfile-mask=0660 --enable-logfile-mask=0640 --enable-vtysh --sysconfdir=/etc/frr --localstatedir=/var/run/frr --libdir=/usr/lib64/frr --enable-rtadv --disable-static --enable-isisd --enable-multipath=0 --enable-pimd --enable-werror"
configs_base="gcc|$basecfg"
+++ /dev/null
-frr (@VERSION@-0) UNRELEASED; urgency=medium
-
- * autoconf changelog entry -- for git autobuilds only.
- remove and replace when creating releases!
- (tools/tarsource.sh will handle this)
-
- -- FRRouting-Dev <dev@lists.frrouting.org> Thu, 25 Oct 2018 16:36:50 +0200
-
-frr (7.5-0) RELEASED; urgency=medium
- BFD
- Profile support
- Minimum ttl support
- BGP
- rpki VRF support
- GR fixes
- Add wide option to display of routes
- Add `maximum-prefix <num> force`
- Add `bestpath-routes` to neighbor command
- Add `bgp shutdown message MSG...` command
- Add v6 Flowspec support
- Add `neighbor <neigh> shutdown rtt` command
- Allow update-delay to be applied globaly
- EVPN
- Beginning of MultiHoming Support
- ISIS
- Segment Routing Support
- VRF Support
- Guard against adj timer display overflow
- Add support for Anycast-SIDs
- Add support for Topology Independent LFA (TI-LFA)
- Add `lsp-gen-interval 2` to isis configuration
- OSPF
- Segment Routing support for ECMP
- Various LSA fixes
- Prevent crash if transferring config amongst instances
- PBR
- Adding json support to commands
- DSCP/ECN based PBR Matching
- PIM
- Add more json support to commands
- Fix missing mesh-group commands
- MSDP SA forwarding
- Clear (s,g,rpt) ifchannel on (*, G) prune received
- Fix igmp querier election and IP address mapping
- Crash fix when RP is removed
- STATIC
- Northbound Support
- YANG
- Filter and route-map Support
- OSPF model definition
- BGP model definition
- VTYSH
- Speed up output across daemons
- Fix build-time errors for some --enable flags
- Speed up output of configuration across daemons
- ZEBRA
- nexthop group support for FPM
- northbound support for rib model
- Backup nexthop support
- netlink batching support
- Allow upper level protocols to request ARP
- Add json output for zebra ES, ES-EVI and access vlan dumps
-
- Upgrade to using libyang1.0.184
-
- RPM
- Moved RPKI to subpackage
- Added SNMP subpackage
-
- As always there are too many bugfixes to list individually. This release
- compromises just over 1k of commits by the community, with contributors from
- 70 people.
-
- -- FRRouting-Dev <dev@lists.frrouting.org> Tue, 2 Nov 2020 08:04:23 +0400
-
-frr (6.0-2) testing; urgency=medium
-
- * add install-info to build deps
- * remove trailing whitespace from control
- * cleanup tcp-zebra configure options
- * drop unused SMUX client OID MIBs
- * remove /proc check
- * remove --enable-poll
- * remove libtool .la files
- * drop texlive-latex-base, texlive-generic-recommended build deps
- * consistently allow python2 or python3
- * remove bad USE_* options, add WERROR
- * drop libncurses5 dep
- * remove backports mechanism
- * use better dependency for pythontools (binNMU compatible)
- * remove bogus shlib:Depends on frr-dbg
- * create frr-snmp and frr-rpki-rtrlib
- * make frr-pythontools a "Recommends:"
- * use redistclean target
- * update to Debian Policy version 4.2.1
- * raise debhelper compat level to 9
- * ditch development-only files
- * modernise dh_missing and use fail mode
- * disable zeromq and FPM
- * always install /etc/init.d/frr
- * put frr-doc package in 'doc' section
- * install HTML docs, drop tools/
- * fix install for {frr,rfptest,ospfclient}
- * add watch file
- * change python dependency and shebang to python3:any
- * use set -e in maintscripts
- * put myself in as maintainer
- * update copyright file
- * closes: #863249
-
- -- David Lamparter <equinox-debian@diac24.net> Thu, 25 Oct 2018 16:36:50 +0200
-
-frr (6.0-1) RELEASED; urgency=medium
-
- * New Enabled: PIM draft Unnumbered
-
- -- FRRouting-Dev <dev@lists.frrouting.org> Wed, 18 Oct 2017 17:01:42 -0700
-
-frr (3.0-1) RELEASED; urgency=medium
-
- * Added Debian 9 Backport
-
- -- FRRouting-Dev <dev@lists.frrouting.org> Mon, 16 Oct 2017 03:28:00 -0700
-
-frr (3.0-0) RELEASED; urgency=medium
-
- * New Enabled: BGP Shutdown Message
- * New Enabled: BGP Large Community
- * New Enabled: BGP RFC 7432 Partial Support w/ Ethernet VPN
- * New Enabled: BGP EVPN RT-5
- * New Enabled: LDP RFC 5561
- * New Enabled: LDP RFC 5918
- * New Enabled: LDP RFC 5919
- * New Enabled: LDP RFC 6667
- * New Enabled: LDP RFC 7473
- * New Enabled: OSPF RFC 4552
- * New Enabled: ISIS SPF Backoff draft
- * New Enabled: PIM Unnumbered Interfaces
- * New Enabled: PIM RFC 4611
- * New Enabled: PIM Sparse Mode
- * New Enabled: NHRP RFC 2332
- * New Enabled: Label Manager
- * Switched from hardening-wrapper to dpkg-buildflags.
-
- -- FRRouting-Dev <dev@lists.frrouting.org> Fri, 13 Oct 2017 16:17:26 -0700
-
-frr (2.0-0) RELEASED; urgency=medium
-
- * Switchover to FRR
-
- -- FRRouting-Dev <dev@lists.frrouting.org> Mon, 23 Jan 2017 16:30:22 -0400
-
-quagga (0.99.24+cl3u5) RELEASED; urgency=medium
-
- * Closes: CM-12846 - Resolve Memory leaks in 'show ip bgp neighbor json'
- * Closes: CM-5878 - Display all ospf peers with 'show ip ospf neighbor detail all'
- * Closes: CM-5794 - Add support for IPv6 static to null0
- * Closes: CM-13060 - Reduce JSON memory usage.
- * Closes: CM-10394 - protect 'could not get instance' error messages with debug
- * Closes: CM-11173 - Move netlink error messages undeer a debug
- * Closes: CM-13328 - Fixes route missing in hardware after reboot
-
- -- dev-support <dev-support@cumulusnetworks.com> Fri, 11 Nov 2016 22:13:29 -0400
-
-quagga (0.99.24+cl3u4) RELEASED; urgency=medium
-
- * Closes: CM-12687 - Buffer overflow in zebra RA code
-
- -- dev-support <dev-support@cumulusnetworks.com> Wed, 31 Aug 2016 12:36:10 -0400
-
-quagga (0.99.24+cl3u3) RELEASED; urgency=medium
-
- * New Enabled: Merge up-to 0.99.24 code from upstream
- * New Enabled: Additional CLI simplification
- * New Enabled: Various Bug Fixes
-
- -- dev-support <dev-support@cumulusnetworks.com> Thu, 04 Aug 2016 08:43:36 -0700
-
-quagga (0.99.23.1-1+cl3u2) RELEASED; urgency=medium
-
- * New Enabled: VRF - See Documentation for how to use
- * New Enabled: Improved interface statistics
- * New Enabled: Various vtysh improvements
- * New Enabled: Numerous compile warnings and SA fixes
- * New Enabled: Improved priviledge handlingA
- * New Enabled: Various OSPF CLI fixes
- * New Enabled: Prefix-list Performance Improvements.
- * New Enabled: Allow more than 1k peers in Quagga
- and Performance Improvements
- * New Enabled: Systemd integration
- * New Enabled: Various ISIS fixes
- * New Enabled: BGP MRT improvements
- * New Enabled: Lowered default MRAI timers
- * New Enabled: Lowered default 'timers connect'
- * New Enabled: 'bgp log-neighbor-changes' enabled by default
- * New Enabled: BGP default keepalive to 3s and holdtime to 9s
- * New Enabled: OSPF spf timers are now '0 50 5000' by default
- * New Enabled: BGP hostname is displayed by default
- * New Enabled: BGP 'no-as-set' is the default for
- 'bgp as-path multipath-relax"
- * New Enabled: RA is on by default if using 5549 on an interface
- * New Enabled: peer-group restrictions relaxed, update-groups determine
- outbund policy anyway
- * New Enabled: BGP enabled 'maximum-paths 64' by default
- * New Enabled: OSPF "log-adjacency-changes" on by default
- * New Enabled: Zebra: Add IPv6 protocol filtering support
- * and setting src of IPv6 routes.
- * New Enabled: BGP and OSPF JSON commands added.
- * New Enabled: BGP Enable multiple instances support by default
- * New Enabled: 'banner motd file' command
- * New Enabled: Remove bad default passwords from default conf
- * New Enabled: BGP addpath TX
- * New Enabled: Simplified configuration for BGP Unnumbered
-
- * New Deprecated: Remove unused 'show memory XXX' functionality
- * New Deprecated: Remove babel protocol
-
- * Closes: CM-10435 Addition on hidden command
- "bfd multihop/singlehop" and "ptm-enable" per interface command
- * Closes: CM-9974 Get route counts right for show ip route summary
- * Closes: CM-9786 BGP memory leak in peer hostname
- * Closes: CM-9340 BGP: Ensure correct sequence of processing at exit
- * Closes: CM-9270 ripd: Fix crash when a default route is passed to rip
- * Closes: CM-9255 BGPD crash around bgp_config_write ()
- * Closes: CM-9134 ospf6d: Fix for crash when non area 0 network
- entered first
- * Closes: CM-8934 OSPFv3: Check area before scheduling SPF
- * Closes: CM-8514 zebra: Crash upon disabling a link
- * Closes: CM-8295 BGP crash in group_announce_route_walkcb
- * Closes: CM-8191 BGP: crash in update_subgroup_merge()
- * Closes: CM-8015 lib: Memory reporting fails over 2GB
- * Closes: CM-7926 BGP: crash from not NULLing freed pointers
-
- -- dev-support <dev-support@cumulusnetworks.com> Wed, 04 May 2016 16:22:52 -0700
-
-quagga (0.99.23.1-1) unstable; urgency=medium
-
- * New upstream release
- * Added .png figures for info files to quagga-doc package.
- * Changed dependency from iproute to iproute2 (thanks to Andreas
- Henriksson). Closes: #753736
- * Added texlive-fonts-recommended to build-depends to get ecrm1095 font
- (thanks to Christoph Biedl). Closes: #651545
-
- -- Christian Brunotte <ch@debian.org> Tue, 30 Sep 2014 00:20:12 +0200
-
-quagga (0.99.23-1) unstable; urgency=low
-
- * New upstream release
- * Removed debian/patches/readline-6.3.diff which was already in upstream.
-
- -- Christian Hammers <ch@debian.org> Tue, 08 Jul 2014 09:15:48 +0200
-
-quagga (0.99.22.4-4) unstable; urgency=medium
-
- * Fix build failure with readline-6.3 (thanks to Matthias Klose).
- Closes: #741774
-
- -- Christian Hammers <ch@debian.org> Sun, 23 Mar 2014 15:28:42 +0100
-
-quagga (0.99.22.4-3) unstable; urgency=low
-
- * Added status to init script (thanks to Peter J. Holzer). Closes: #730625
- * Init script now sources /lib/lsb/init-functions.
- * Switched from hardening-wrapper to dpkg-buildflags.
-
- -- Christian Hammers <ch@debian.org> Wed, 01 Jan 2014 19:12:01 +0100
-
-quagga (0.99.22.4-2) unstable; urgency=low
-
- * Fixed typo in package description (thanks to Davide Prina).
- Closes: #625860
- * Added Italian Debconf translation (thanks to Beatrice Torracca)
- Closes: #729798
-
- -- Christian Hammers <ch@debian.org> Tue, 26 Nov 2013 00:47:11 +0100
-
-quagga (0.99.22.4-1) unstable; urgency=high
-
- * SECURITY:
- "ospfd: CVE-2013-2236, stack overrun in apiserver
-
- the OSPF API-server (exporting the LSDB and allowing announcement of
- Opaque-LSAs) writes past the end of fixed on-stack buffers. This leads
- to an exploitable stack overflow.
-
- For this condition to occur, the following two conditions must be true:
- - Quagga is configured with --enable-opaque-lsa
- - ospfd is started with the "-a" command line option
-
- If either of these does not hold, the relevant code is not executed and
- the issue does not get triggered."
- Closes: #726724
-
- * New upstream release
- - ospfd: protect vs. VU#229804 (malformed Router-LSA)
- (Quagga is said to be non-vulnerable but still adds some protection)
-
- -- Christian Hammers <ch@debian.org> Thu, 24 Oct 2013 22:58:37 +0200
-
-quagga (0.99.22.1-2) unstable; urgency=low
-
- * Added autopkgtests (thanks to Yolanda Robla). Closes: #710147
- * Added "status" command to init script (thanks to James Andrewartha).
- Closes: #690013
- * Added "libsnmp-dev" to Build-Deps. There not needed for the official
- builds but for people who compile Quagga themselves to activate the
- SNMP feature (which for licence reasons cannot be done by Debian).
- Thanks to Ben Winslow). Closes: #694852
- * Changed watchquagga_options to an array so that quotes can finally
- be used as expected. Closes: #681088
- * Fixed bug that prevented restarting only the watchquagga daemon
- (thanks to Harald Kappe). Closes: #687124
-
- -- Christian Hammers <ch@debian.org> Sat, 27 Jul 2013 16:06:25 +0200
-
-quagga (0.99.22.1-1) unstable; urgency=low
-
- * New upstream release
- - ospfd restore nexthop IP for p2p interfaces
- - ospfd: fix LSA initialization for build without opaque LSA
- - ripd: correctly redistribute ifindex routes (BZ#664)
- - bgpd: fix lost passwords of grouped neighbors
- * Removed 91_ld_as_needed.diff as it was found in the upstream source.
-
- -- Christian Hammers <ch@debian.org> Mon, 22 Apr 2013 22:21:20 +0200
-
-quagga (0.99.22-1) unstable; urgency=low
-
- * New upstream release.
- - [bgpd] The semantics of default-originate route-map have changed.
- The route-map is now used to advertise the default route conditionally.
- The old behaviour which allowed to set attributes on the originated
- default route is no longer supported.
- - [bgpd] this version of bgpd implements draft-idr-error-handling. This was
- added in 0.99.21 and may not be desirable. If you need a version
- without this behaviour, please use 0.99.20.1. There will be a
- runtime configuration switch for this in future versions.
- - [isisd] is in "beta" state.
- - [ospf6d] is in "alpha/experimental" state
- - More changes are documented in the upstream changelog!
- * debian/watch: Adjusted to new savannah.gnu.org site, thanks to Bart
- Martens.
- * debian/patches/99_CVE-2012-1820_bgp_capability_orf.diff removed as its
- in the changelog.
- * debian/patches/99_distribute_list.diff removed as its in the changelog.
- * debian/patches/10_doc__Makefiles__makeinfo-force.diff removed as it
- was just for Debian woody.
-
- -- Christian Hammers <ch@debian.org> Thu, 14 Feb 2013 00:22:00 +0100
-
-quagga (0.99.21-4) unstable; urgency=medium
-
- * Fixed regression bug that caused OSPF "distribute-list" statements to be
- silently ignored. The patch has already been applied upstream but there
- has been no new Quagga release since then.
- Thanks to Hans van Kranenburg for reporting. Closes: #697240
-
- -- Christian Hammers <ch@debian.org> Sun, 06 Jan 2013 15:50:32 +0100
-
-quagga (0.99.21-3) unstable; urgency=high
-
- * SECURITY:
- CVE-2012-1820 - Quagga contained a bug in BGP OPEN message handling.
- A denial-of-service condition could be caused by an attacker controlling
- one of the pre-configured BGP peers. In most cases this means, that the
- attack must be originated from an adjacent network. Closes: #676510
-
- -- Christian Hammers <ch@debian.org> Fri, 08 Jun 2012 01:15:32 +0200
-
-quagga (0.99.21-2) unstable; urgency=low
-
- * Renamed babeld.8 to quagga-babeld.8 as it conflicted with the
- original mapage of the babeld package which users might want to
- install in parallel as it is slightly more capable. Closes: #671916
-
- -- Christian Hammers <ch@debian.org> Thu, 10 May 2012 07:53:01 +0200
-
-quagga (0.99.21-1) unstable; urgency=low
-
- * New upstream release
- - [bgpd] BGP multipath support has been merged
- - [bgpd] SAFI (Multicast topology) support has been extended to propagate
- the topology to zebra.
- - [bgpd] AS path limit functionality has been removed
- - [babeld] a new routing daemon implementing the BABEL ad-hoc mesh routing
- protocol has been merged.
- - [isisd] a major overhaul has been picked up. Please note that isisd is
- STILL NOT SUITABLE FOR PRODUCTION USE.
- - a lot of bugs have been fixed
- * Added watchquagga daemon.
- * Added DEP-3 conforming patch comments.
-
- -- Christian Hammers <ch@debian.org> Sun, 06 May 2012 15:33:33 +0200
-
-quagga (0.99.20.1-1) unstable; urgency=high
-
- * SECURITY:
- CVE-2012-0249 - Quagga ospfd DoS on malformed LS-Update packet
- CVE-2012-0250 - Quagga ospfd DoS on malformed Network-LSA data
- CVE-2012-0255 - Quagga bgpd DoS on malformed OPEN message
- * New upstream release. Closes: #664033
-
- -- Christian Hammers <ch@debian.org> Fri, 16 Mar 2012 22:14:05 +0100
-
-quagga (0.99.20-4) unstable; urgency=low
-
- * Switch to dpkg-source 3.0 (quilt) format.
- * Switch to changelog-format-1.0.
-
- -- Christian Hammers <ch@debian.org> Sat, 25 Feb 2012 18:52:06 +0100
-
-quagga (0.99.20-3) unstable; urgency=low
-
- * Added --sysconfdir back to the configure options (thanks to Sven-Haegar
- Koch). Closes: #645649
-
- -- Christian Hammers <ch@debian.org> Tue, 18 Oct 2011 00:24:37 +0200
-
-quagga (0.99.20-2) unstable; urgency=low
-
- * Bumped standards version to 0.9.2.
- * Migrated to "dh" build system.
- * Added quagga-dbg package.
-
- -- Christian Hammers <ch@debian.org> Fri, 14 Oct 2011 23:59:26 +0200
-
-quagga (0.99.20-1) unstable; urgency=low
-
- * New upstream release:
- "The primary focus of this release is a fix of SEGV regression in ospfd,
- which was introduced in 0.99.19. It also features a series of minor
- improvements, including better RFC compliance in bgpd, better support
- of FreeBSD and some enhancements to isisd."
- * Fixes off-by-one bug (removed 20_ospf6_area_argv.dpatch). Closes: #519488
-
- -- Christian Hammers <ch@debian.org> Fri, 30 Sep 2011 00:59:24 +0200
-
-quagga (0.99.19-1) unstable; urgency=high
-
- * SECURITY:
- "This release provides security fixes, which address assorted
- vulnerabilities in bgpd, ospfd and ospf6d (CVE-2011-3323,
- CVE-2011-3324, CVE-2011-3325, CVE-2011-3326 and CVE-2011-3327).
- * New upstream release.
- * Removed incorporated debian/patches/92_opaque_lsa_enable.dpatch.
- * Removed incorporated debian/patches/93_opaque_lsa_fix.dpatch.
- * Removed obsolete debian/README.Debian.Woody and README.Debian.MD5.
-
- -- Christian Hammers <ch@debian.org> Tue, 27 Sep 2011 00:16:27 +0200
-
-quagga (0.99.18-1) unstable; urgency=low
-
- * SECURITY:
- "This release fixes 2 denial of services in bgpd, which can be remotely
- triggered by malformed AS-Pathlimit or Extended-Community attributes.
- These issues have been assigned CVE-2010-1674 and CVE-2010-1675.
- Support for AS-Pathlimit has been removed with this release."
- * Added Brazilian Portuguese debconf translation. Closes: #617735
- * Changed section for quagga-doc from "doc" to "net".
- * Added patch to fix FTBFS with latest GCC. Closes: #614459
-
- -- Christian Hammers <ch@debian.org> Tue, 22 Mar 2011 23:13:34 +0100
-
-quagga (0.99.17-4) unstable; urgency=low
-
- * Added comment to init script (thanks to Marc Haber). Closes: #599524
-
- -- Christian Hammers <ch@debian.org> Thu, 13 Jan 2011 23:53:29 +0100
-
-quagga (0.99.17-3) unstable; urgency=low
-
- * Fix FTBFS with ld --as-needed (thanks to Matthias Klose at Ubuntu).
- Closes: #609555
-
- -- Christian Hammers <ch@debian.org> Thu, 13 Jan 2011 23:27:06 +0100
-
-quagga (0.99.17-2) unstable; urgency=low
-
- * Added Danisch Debconf translation (thanks to Joe Dalton). Closes: #596259
-
- -- Christian Hammers <ch@debian.org> Sat, 18 Sep 2010 12:20:07 +0200
-
-quagga (0.99.17-1) unstable; urgency=high
-
- * SECURITY:
- "This release provides two important bugfixes, which address remote crash
- possibility in bgpd discovered by CROSS team.":
- 1. Stack buffer overflow by processing certain Route-Refresh messages
- CVE-2010-2948
- 2. DoS (crash) while processing certain BGP update AS path messages
- CVE-2010-2949
- Closes: #594262
-
- -- Christian Hammers <ch@debian.org> Wed, 25 Aug 2010 00:52:48 +0200
-
-quagga (0.99.16-1) unstable; urgency=low
-
- * New upstream release. Closes: #574527
- * Added chrpath to debian/rules to fix rpath problems that lintian spottet.
-
- -- Christian Hammers <ch@debian.org> Sun, 21 Mar 2010 17:05:40 +0100
-
-quagga (0.99.15-2) unstable; urgency=low
-
- * Applied patch for off-by-one bug in ospf6d that caused a segmentation
- fault when using the "area a.b.c.d filter-list prefix" command (thanks
- to Steinar H. Gunderson). Closes: 519488
-
- -- Christian Hammers <ch@debian.org> Sun, 14 Feb 2010 20:02:03 +0100
-
-quagga (0.99.15-1) unstable; urgency=low
-
- * New upstream release
- "This fixes some annoying little ospfd and ospf6d regressions, which made
- 0.99.14 a bit of a problem release (...) This release still contains a
- regression in the "no ip address ..." command, at least on Linux.
- See bug #486, which contains a workaround patch. This release should be
- considered a 1.0.0 release candidate. Please test this release as widely
- as possible."
- * Fixed wrong port number in zebra.8 (thanks to Thijs Kinkhorst).
- Closes: #517860
- * Added Russian Debconf tanslation (thanks to Yuri Kozlov).
- Closes: #539464
- * Removed so-version in build-dep to libreadline-dev on request of
- Matthias Klose.
- * Added README.source with reference to dpatch as suggested by lintian.
- * Bumped standards versionto 3.8.3.
-
- -- Christian Hammers <ch@debian.org> Sun, 13 Sep 2009 18:12:06 +0200
-
-quagga (0.99.14-1) unstable; urgency=low
-
- * New upstream release
- "This release contains a regression fix for ospf6d, various small fixes
- and some hopefully very significant bgpd stability fixes.
- This release should be considered a 1.0.0 release candidate. Please test
- this release as widely as possible."
- * Fixes bug with premature LSA aging in ospf6d. Closes: #535030
- * Fixes section number in zebra.8 manpage. Closes: #517860
-
- -- Christian Hammers <ch@debian.org> Sat, 25 Jul 2009 00:40:38 +0200
-
-quagga (0.99.13-2) unstable; urgency=low
-
- * Added Japanese Debconf translation (thanks to Hideki Yamane).
- Closes: #510714
- * When checking for obsoleted config options in preinst, print filename
- where it occures (thanks to Michael Bussmann). Closes: #339489
-
- -- Christian Hammers <ch@debian.org> Sun, 19 Jul 2009 17:13:23 +0200
-
-quagga (0.99.13-1) unstable; urgency=low
-
- * New upstream release
- "This release is contains a number of small fixes, for potentially
- irritating issues, as well as small enhancements to vtysh and support
- for linking to PCRE (a much faster regex library)."
- * Added build-dep to gawk as configure required it for memtypes.awk
- * Replaced build-dep to gs-gpl with ghostscript as requested by lintian
- * Minor changes to copyright and control files to make lintian happy.
-
- -- Christian Hammers <ch@debian.org> Wed, 24 Jun 2009 17:53:28 +0200
-
-quagga (0.99.12-1) unstable; urgency=high
-
- * New upstream release
- "This release fixes an urgent bug in bgpd where it could hit an assert
- if it received a long AS_PATH with a 4-byte ASN." Noteworthy bugfixes:
- + [bgpd] Fix bgp ipv4/ipv6 accept handling
- + [bgpd] AS4 bugfix by Chris Caputo
- + [bgpd] Allow accepted peers to progress even if realpeer is in Connect
- + [ospfd] Switch Fletcher checksum back to old ospfd version
-
- -- Christian Hammers <ch@debian.org> Mon, 22 Jun 2009 00:16:33 +0200
-
-quagga (0.99.11-1) unstable; urgency=low
-
- * New upstream release
- "Most regressions in 0.99 over 0.98 are now believed to be fixed. This
- release should be considered a release-candidate for a new stable series."
- + bgpd: Preliminary UI and Linux-IPv4 support for TCP-MD5 merged
- + zebra: ignore dead routes in RIB update
- + [ospfd] Default route needs to be refreshed after neighbour state change
- + [zebra:netlink] Set proto/scope on all route update messages
- * Removed debian/patches/20_*bgp*md5*.dpatch due to upstream support.
-
- -- Christian Hammers <ch@debian.org> Thu, 09 Oct 2008 22:56:38 +0200
-
-quagga (0.99.10-1) unstable; urgency=medium
-
- * New upstream release
- + bgpd: 4-Byte AS Number support
- + Sessions were incorrectly reset if a partial AS-Pathlimit attribute
- was received.
- + Advertisement of Multi-Protocol prefixes (i.e. non-IPv4) had been
- broken in the 0.99.9 release. Closes: #467656
-
- -- Christian Hammers <ch@debian.org> Tue, 08 Jul 2008 23:32:42 +0200
-
-quagga (0.99.9-6) unstable; urgency=low
-
- * Fixed FTBFS by adding a build-dep to libpcre3-dev (thanks to Luk Claes).
- Closes: #469891
-
- -- Christian Hammers <ch@debian.org> Sat, 12 Apr 2008 12:53:51 +0200
-
-quagga (0.99.9-5) unstable; urgency=low
-
- * C.J. Adams-Collier and Paul Jakma suggested to build against libpcre3
- which is supposed to be faster.
-
- -- Christian Hammers <ch@debian.org> Sun, 02 Mar 2008 13:19:42 +0100
-
-quagga (0.99.9-4) unstable; urgency=low
-
- * Added hardening-wrapper to the build-deps (thanks to Moritz Muehlenhoff).
-
- -- Christian Hammers <ch@debian.org> Tue, 29 Jan 2008 22:33:56 +0100
-
-quagga (0.99.9-3) unstable; urgency=low
-
- * Replaced the BGP patch by a new one so that the package builds again
- with kernels above 2.6.21!
- * debian/control:
- + Moved quagga-doc to section doc to make lintian happy.
- * Added Spanish debconf translation (thanks to Carlos Galisteo de Cabo).
- Closes: #428574
- * debian/control: (thanks to Marco Rodrigues)
- + Bump Standards-Version to 3.7.3 (no changes needed).
- + Add Homepage field.
-
- -- Christian Hammers <ch@debian.org> Mon, 28 Jan 2008 22:29:18 +0100
-
-quagga (0.99.9-2.1) unstable; urgency=low
-
- * Non-maintainer upload.
- * debian/rules: fixed bashisms. (Closes: #459122)
-
- -- Miguel Angel Ruiz Manzano <debianized@gmail.com> Tue, 22 Jan 2008 14:37:21 -0300
-
-quagga (0.99.9-2) unstable; urgency=low
-
- * Added CVE id for the security bug to the last changelog entry.
- Closes: 442133
-
- -- Christian Hammers <ch@debian.org> Tue, 25 Sep 2007 22:01:31 +0200
-
-quagga (0.99.9-1) unstable; urgency=high
-
- * SECURITY:
- "This release fixes two potential DoS conditions in bgpd, reported by Mu
- Security, where a bgpd could be crashed if a peer sent a malformed OPEN
- message or a malformed COMMUNITY attribute. Only configured peers can do
- this, hence we consider these issues to be very low impact." CVE-2007-4826
-
- -- Christian Hammers <ch@debian.org> Wed, 12 Sep 2007 21:12:41 +0200
-
-quagga (0.99.8-1) unstable; urgency=low
-
- * New upstream version.
-
- -- Christian Hammers <ch@debian.org> Fri, 17 Aug 2007 00:07:04 +0200
-
-quagga (0.99.7-3) unstable; urgency=medium
-
- * Applied patch for FTBFS with linux-libc-dev (thanks to Andrew J. Schorr
- and Lucas Nussbaum). Closes: #429003
-
- -- Christian Hammers <ch@debian.org> Fri, 22 Jun 2007 21:34:55 +0200
-
-quagga (0.99.7-2) unstable; urgency=low
-
- * Added Florian Weimar as co-maintainer. Closes: 421977
- * Added Dutch debconf translation (thanks to Bart Cornelis).
- Closes: #420932
- * Added Portuguese debconf translation (thanks to Rui Branco).
- Closes: #421185
- * Improved package description (thanks to Reuben Thomas).
- Closes: #418933
- * Added CVE Id to 0.99.6-5 changelog entry.
-
- -- Christian Hammers <ch@debian.org> Wed, 02 May 2007 20:27:12 +0200
-
-quagga (0.99.7-1) unstable; urgency=low
-
- * New upstream release. Closes: #421553
-
- -- Christian Hammers <ch@debian.org> Mon, 30 Apr 2007 14:22:34 +0200
-
-quagga (0.99.6-6) unstable; urgency=medium
-
- * Fixes FTBFS with tetex-live. Closes: #420468
-
- -- Christian Hammers <ch@debian.org> Mon, 23 Apr 2007 21:34:13 +0200
-
-quagga (0.99.6-5) unstable; urgency=high
-
- * SECURITY:
- The bgpd daemon was vulnerable to a Denial-of-Service. Configured peers
- could cause a Quagga bgpd to, typically, assert() and abort. The DoS
- could be triggered by peers by sending an UPDATE message with a crafted,
- malformed Multi-Protocol reachable/unreachable NLRI attribute.
- This is CVE-2007-1995 and Quagga Bug#354. Closes: #418323
-
- -- Christian Hammers <ch@debian.org> Thu, 12 Apr 2007 23:21:58 +0200
-
-quagga (0.99.6-4) unstable; urgency=low
-
- * Improved note in README.Debian for SNMP self-builders (thanks to Matthias
- Wamser). Closes: #414788
-
- -- Christian Hammers <ch@debian.org> Wed, 14 Mar 2007 02:18:57 +0100
-
-quagga (0.99.6-3) unstable; urgency=low
-
- * Updated German Debconf translation (thanks to Matthias Julius).
- Closes: #409327
-
- -- Christian Hammers <ch@debian.org> Sat, 10 Feb 2007 15:06:16 +0100
-
-quagga (0.99.6-2) unstable; urgency=low
-
- * Updated config.guess/config.sub as suggested by lintian.
- * Corrected README.Debian text regarding the WANT_SNMP flag.
-
- -- Christian Hammers <ch@debian.org> Sun, 17 Dec 2006 01:45:37 +0100
-
-quagga (0.99.6-1) unstable; urgency=low
-
- * New upstream release. Closes: #402361
-
- -- Christian Hammers <ch@debian.org> Mon, 11 Dec 2006 00:28:09 +0100
-
-quagga (0.99.5-5) unstable; urgency=high
-
- * Changed Depends on adduser to Pre-Depends to avoid uninstallability
- in certain cases (thanks to Steve Langasek, Lucas Nussbaum).
- Closes: #398562
-
- -- Christian Hammers <ch@debian.org> Wed, 15 Nov 2006 17:46:34 +0100
-
-quagga (0.99.5-4) unstable; urgency=low
-
- * Added default PAM file and some explanations regarding PAM authentication
- of vtysh which could prevent the start at boot-time when used wrong.
- Now PAM permits anybody to access the vtysh tool (a malicious user could
- build his own vtysh without PAM anyway) and the access is controlled by
- the read/write permissions of the vtysh socket which are only granted to
- users belonging to the quaggavty group (thanks to Wakko Warner).
- Closes: #389496
- * Added "case" to prerm script so that the Debconf question is not called a
- second time in e.g. "new-prerm abort-upgrade" after being NACKed in the
- old-prerm.
-
- -- Christian Hammers <ch@debian.org> Fri, 3 Nov 2006 01:22:15 +0100
-
-quagga (0.99.5-3) unstable; urgency=medium
-
- * Backport CVS fix for an OSPF DD Exchange regression (thanks to Matt
- Brown). Closes: #391040
-
- -- Christian Hammers <ch@debian.org> Wed, 25 Oct 2006 19:47:11 +0200
-
-quagga (0.99.5-2) unstable; urgency=medium
-
- * Added LSB info section to initscript.
- * Removed unnecessary depends to libncurses5 to make checklib happy.
- The one to libcap should remain though as it is just temporarily
- unused.
-
- -- Christian Hammers <ch@debian.org> Thu, 21 Sep 2006 00:04:07 +0200
-
-quagga (0.99.5-1) unstable; urgency=low
-
- * New upstream release. Closes: #38704
- * Upstream fixes ospfd documentary inconsistency. Closes: #347897
- * Changed debconf question in prerm to "high" (thanks to Rafal Pietrak).
-
- -- Christian Hammers <ch@debian.org> Mon, 11 Sep 2006 23:43:42 +0200
-
-quagga (0.99.4-4) unstable; urgency=low
-
- * Recreate /var/run if not present because /var is e.g. on a tmpfs
- filesystem (thanks to Martin Pitt). Closes: #376142
- * Removed nonexistant option from ospfd.8 manpage (thanks to
- David Medberry). Closes: 378274
-
- -- Christian Hammers <ch@debian.org> Sat, 15 Jul 2006 20:22:12 +0200
-
-quagga (0.99.4-3) unstable; urgency=low
-
- * Removed invalid semicolon from rules file (thanks to Philippe Gramoulle).
-
- -- Christian Hammers <ch@debian.org> Tue, 27 Jun 2006 23:36:07 +0200
-
-quagga (0.99.4-2) unstable; urgency=high
-
- * Set urgency to high as 0.99.4-1 fixes a security problem!
- * Fixed building of the info file.
-
- -- Christian Hammers <ch@debian.org> Sun, 14 May 2006 23:04:28 +0200
-
-quagga (0.99.4-1) unstable; urgency=low
-
- * New upstream release to fix a security problem in the telnet interface
- of the BGP daemon which could be used for DoS attacks (CVE-2006-2276).
- Closes: 366980
-
- -- Christian Hammers <ch@debian.org> Sat, 13 May 2006 19:54:40 +0200
-
-quagga (0.99.3-3) unstable; urgency=low
-
- * Added CVE numbers for the security patch in 0.99.3-2.
-
- -- Christian Hammers <ch@debian.org> Sat, 6 May 2006 17:14:22 +0200
-
-quagga (0.99.3-2) unstable; urgency=high
-
- * SECURITY:
- Added security bugfix patch from upstream BTS for security problem
- that could lead to injected routes when using RIPv1.
- CVE-2006-2223 - missing configuration to disable RIPv1 or require
- plaintext or MD5 authentication
- CVE-2006-2224 - lack of enforcement of RIPv2 authentication requirements
- Closes: #365940
- * First amd64 upload.
-
- -- Christian Hammers <ch@debian.org> Thu, 4 May 2006 00:22:09 +0200
-
-quagga (0.99.3-1) unstable; urgency=low
-
- * New upstream release
-
- -- Christian Hammers <ch@debian.org> Wed, 25 Jan 2006 13:37:27 +0100
-
-quagga (0.99.2-1) unstable; urgency=low
-
- * New upstream release
- Closes: #330248, #175553
-
- -- Christian Hammers <ch@debian.org> Wed, 16 Nov 2005 00:25:52 +0100
-
-quagga (0.99.1-7) unstable; urgency=low
-
- * Changed debian/rules check for mounted /proc directory to check
- for /proc/1 as not all systems (e.g. 2.6 arm kernels) have
- /proc/kcore which is a optional feature only (thanks to Lennert
- Buytenhek). Closes: #335695
- * Added Swedish Debconf translation (thanks to Daniel Nylander).
- Closes: #331367
-
- -- Christian Hammers <ch@debian.org> Thu, 27 Oct 2005 20:53:19 +0200
-
-quagga (0.99.1-6) unstable; urgency=low
-
- * Fixed debconf dependency as requested by Joey Hess.
-
- -- Christian Hammers <ch@debian.org> Mon, 26 Sep 2005 20:47:35 +0200
-
-quagga (0.99.1-5) unstable; urgency=low
-
- * Rebuild with libreadline5-dev as build-dep as requested by
- Matthias Klose. Closes: #326306
- * Made initscript more fault tolerant against missing lines in
- /etc/quagga/daemons (thanks to Ralf Hildebrandt). Closes: #323774
- * Added dependency to adduser.
-
- -- Christian Hammers <ch@debian.org> Tue, 13 Sep 2005 21:42:17 +0200
-
-quagga (0.99.1-4) unstable; urgency=low
-
- * Added French Debconf translation (thanks to Mohammed Adnene Trojette).
- Closes: #319324
- * Added Czech Debconf translation (thanks to Miroslav Kure).
- Closes: #318127
-
- -- Christian Hammers <ch@debian.org> Sun, 31 Jul 2005 04:19:41 +0200
-
-quagga (0.99.1-3) unstable; urgency=low
-
- * A Debconf question now asks the admin before upgrading if the daemon
- should really be stopped as this could lead to the loss of network
- connectivity or BGP flaps (thanks to Michael Horn and Achilleas Kotsis).
- Also added a hint about setting Quagga "on hold" to README.Debian.
- Closes: #315467
- * Added patch to build on Linux/ARM.
-
- -- Christian Hammers <ch@debian.org> Sun, 10 Jul 2005 22:19:38 +0200
-
-quagga (0.99.1-2) unstable; urgency=low
-
- * Fixed SNMP enabled command in debian/rules (thanks to Christoph Kluenter).
- Closes: #306840
-
- -- Christian Hammers <ch@debian.org> Sat, 4 Jun 2005 14:04:01 +0200
-
-quagga (0.99.1-1) unstable; urgency=low
-
- * New upstream version. Among others:
- - BGP graceful restart and "match ip route-source" added
- - support for interface renaming
- - improved threading for better responsiveness under load
- * Switched to dpatch to make diffs cleaner.
- * Made autoreconf unnecessary.
- * Replaced quagga.dvi and quagga.ps by quagga.pdf in quagga-doc.
- (the PostScript would have needed Makefile corrections and PDF
- is more preferable anyway)
- * Added isisd to the list of daemons in /etc/init.d/quagga (thanks
- to Ernesto Elbe).
- * Added hint for "netlink-listen: overrun" messages (thanks to
- Hasso Tepper).
- * Added preinst check that bails out if old smux options are in use
- as Quagga would not start up else anyway (thanks to Bjorn Mork).
- Closes: #308320
-
- -- Christian Hammers <ch@debian.org> Fri, 13 May 2005 01:18:24 +0200
-
-quagga (0.98.3-7) unstable; urgency=high
-
- * Removed SNMP support as linking against NetSNMP introduced a dependency
- to OpenSSL which is not compatible to the GPL which governs this
- application (thanks to Faidon Liambotis). See README.Debian for more
- information. Closes: #306840
- * Changed listening address of ospf6d and ripngd from 127.0.0.1 to "::1".
- * Added build-dep to groff to let draft-zebra-00.txt build correctly.
-
- -- Christian Hammers <ch@debian.org> Wed, 4 May 2005 20:08:14 +0200
-
-quagga (0.98.3-6) testing-proposed-updates; urgency=high
-
- * Removed "Recommends kernel-image-2.4" as aptitude then
- installs a kernel-image for an arbitrary architecture as long
- as it fulfills that recommendation, which can obviously be fatal
- at the next reboot :) Also it is a violation of the policy
- which mandates a reference to real packages (thanks to Holger Levsen).
- Closes: #307281
-
- -- Christian Hammers <ch@debian.org> Tue, 3 May 2005 22:53:39 +0200
-
-quagga (0.98.3-5) unstable; urgency=high
-
- * The patch which tried to remove the OpenSSL dependency, which is
- not only unnecessary but also a violation of the licence and thus RC,
- stopped working a while ago, since autoreconf is no longer run before
- building the binaries. So now ./configure is patched directly (thanks
- to Faidon Liambotis for reporting). Closes: #306840
- * Raised Debhelper compatibility level from 3 to 4. Nothing changed.
- * Added build-dep to texinfo (>= 4.7) to ease work for www.backports.org.
-
- -- Christian Hammers <ch@debian.org> Fri, 29 Apr 2005 02:31:03 +0200
-
-quagga (0.98.3-4) unstable; urgency=low
-
- * Removed Debconf upgrade note as it was considered a Debconf abuse
- and apart from that so obvious that it was not even worth to be
- put into NEWS.Debian (thanks to Steve Langasek). Closes: #306384
-
- -- Christian Hammers <ch@debian.org> Wed, 27 Apr 2005 00:10:24 +0200
-
-quagga (0.98.3-3) unstable; urgency=medium
-
- * Adding the debconf module due to a lintian suggestion is a very
- bad idea if no db_stop is called as the script hangs then (thanks
- to Tore Anderson for reporting). Closes: #306324
-
- -- Christian Hammers <ch@debian.org> Mon, 25 Apr 2005 21:55:58 +0200
-
-quagga (0.98.3-2) unstable; urgency=low
-
- * Added debconf confmodule to postinst as lintian suggested.
-
- -- Christian Hammers <ch@debian.org> Sun, 24 Apr 2005 13:16:00 +0200
-
-quagga (0.98.3-1) unstable; urgency=low
-
- * New upstream release.
- Most notably fixes last regression in bgpd (reannounce of prefixes
- with changed attributes works again), race condition in netlink
- handling while using IPv6, MTU changes handling in ospfd and several
- crashes in ospfd, bgpd and ospf6d.
-
- -- Christian Hammers <ch@debian.org> Mon, 4 Apr 2005 12:51:24 +0200
-
-quagga (0.98.2-2) unstable; urgency=low
-
- * Added patch to let Quagga compile with gcc-4.0 (thanks to
- Andreas Jochens). Closes: #300949
-
- -- Christian Hammers <ch@debian.org> Fri, 25 Mar 2005 19:33:30 +0100
-
-quagga (0.98.2-1) unstable; urgency=medium
-
- * Quoting the upstream announcement:
- The 0.98.1 release unfortunately was a brown paper bag release with
- respect to ospfd. [...] 0.98.2 has been released, with one crucial change
- to fix the unfortunate mistake in 0.98.1, which caused problems if
- ospfd became DR.
- * Note: the upstream tarball had a strange problem, apparently redhat.spec
- was twice in it? At least debuild gave a strange error message so I
- unpacked it by hand. No changes were made to the .orig.tar.gz!
-
- -- Christian Hammers <ch@debian.org> Fri, 4 Feb 2005 01:31:36 +0100
-
-quagga (0.98.1-1) unstable; urgency=medium
-
- * New upstream version
- "fixing a fatal OSPF + MD5 auth regression, and a non-fatal high-load
- regression in bgpd which were present in the 0.98.0 release."
- * Upstream version fixes bug in ospfd that could lead to crash when OSPF
- packages had a MTU > 1500. Closes: #290566
- * Added notice regarding capability kernel support to README.Debian
- (thanks to Florian Weimer). Closes: #291509
- * Changed permission setting in postinst script (thanks to Bastian Blank).
- Closes: #292690
-
- -- Christian Hammers <ch@debian.org> Tue, 1 Feb 2005 02:01:27 +0100
-
-quagga (0.98.0-3) unstable; urgency=low
-
- * Fixed problem in init script. Closes: #290317
- * Removed obsolete "smux peer enable" patch.
-
- -- Christian Hammers <ch@debian.org> Fri, 14 Jan 2005 17:37:27 +0100
-
-quagga (0.98.0-2) unstable; urgency=low
-
- * Updated broken TCP MD5 patch for BGP (thanks to John P. Looney
- for telling me).
-
- -- Christian Hammers <ch@debian.org> Thu, 13 Jan 2005 02:03:54 +0100
-
-quagga (0.98.0-1) unstable; urgency=low
-
- * New upstream release
- * Added kernel-image-2.6 as alternative to 2.4 to the recommends
- (thanks to Faidon Liambotis). Closes: #289530
-
- -- Christian Hammers <ch@debian.org> Mon, 10 Jan 2005 19:36:17 +0100
-
-quagga (0.97.5-1) unstable; urgency=low
-
- * New upstream version.
- * Added Czech debconf translation (thanks to Miroslav Kure).
- Closes: #287293
- * Added Brazilian debconf translation (thanks to Andre Luis Lopes).
- Closes: #279352
-
- -- Christian Hammers <ch@debian.org> Wed, 5 Jan 2005 23:49:57 +0100
-
-quagga (0.97.4-2) unstable; urgency=low
-
- * Fixed quagga.info build problem.
-
- -- Christian Hammers <ch@debian.org> Wed, 5 Jan 2005 22:38:01 +0100
-
-quagga (0.97.4-1) unstable; urgency=low
-
- * New upstream release.
-
- -- Christian Hammers <ch@debian.org> Tue, 4 Jan 2005 01:45:22 +0100
-
-quagga (0.97.3-2) unstable; urgency=low
-
- * Included isisd in the daemon list.
- * Wrote an isisd manpage.
- * It is now ensured that zebra is always the last daemon to be stopped.
- * (Thanks to Hasso Tepper for mailing me a long list of suggestions
- which lead to this release)
-
- -- Christian Hammers <ch@debian.org> Sat, 18 Dec 2004 13:14:55 +0100
-
-quagga (0.97.3-1) unstable; urgency=medium
-
- * New upstream version.
- - Fixes important OSPF bug.
- * Added ht-20040911-smux.patch regarding Quagga bug #112.
- * Updated ht-20041109-0.97.3-bgp-md5.patch for BGP with TCP MD5
- (thanks to Matthias Wamser).
-
- -- Christian Hammers <ch@debian.org> Tue, 9 Nov 2004 17:45:26 +0100
-
-quagga (0.97.2-4) unstable; urgency=low
-
- * Added Portuguese debconf translation (thanks to Andre Luis Lopes).
- Closes: #279352
- * Disabled ospfapi server by default on recommendation of Paul Jakma.
-
- -- Christian Hammers <ch@debian.org> Sun, 7 Nov 2004 15:07:05 +0100
-
-quagga (0.97.2-3) unstable; urgency=low
-
- * Added Andrew Schorrs VTY Buffer patch from the [quagga-dev 1729].
-
- -- Christian Hammers <ch@debian.org> Tue, 2 Nov 2004 00:46:56 +0100
-
-quagga (0.97.2-2) unstable; urgency=low
-
- * Changed file and directory permissions and ownerships according to a
- suggestion from Paul Jakma. Still not perfect though.
- * Fixed upstream vtysh.conf.sample file.
- * "ip ospf network broadcast" is now saved correctly. Closes: #244116
- * Daemon options are now in /etc/quagga/debian.conf to be user
- configurable (thanks to Simon Raven and Hasso Tepper). Closes: #266715
-
- -- Christian Hammers <ch@debian.org> Tue, 26 Oct 2004 23:35:45 +0200
-
-quagga (0.97.2-1) unstable; urgency=low
-
- * New upstream version.
- Closes: #254541
- * Fixed warning on unmodular kernels (thanks to Christoph Biedl).
- Closes: #277973
-
- -- Christian Hammers <ch@debian.org> Mon, 25 Oct 2004 00:47:04 +0200
-
-quagga (0.97.1-2) unstable; urgency=low
-
- * Version 0.97 introduced shared libraries. They are now included.
- (thanks to Raf D'Halleweyn). Closes: #277446
-
- -- Christian Hammers <ch@debian.org> Wed, 20 Oct 2004 15:32:06 +0200
-
-quagga (0.97.1-1) unstable; urgency=low
-
- * New upstream version.
- * Removed some obsolete files from debian/patches.
- * Added patch from upstream bug 113. Closes: #254541
- * Added patch from upstream that fixes a compilation problem in the
- ospfclient code (thanks to Hasso Tepper).
- * Updated German debconf translation (thanks to Jens Nachtigall)
- Closes: #277059
-
- -- Christian Hammers <ch@debian.org> Mon, 18 Oct 2004 01:16:35 +0200
-
-quagga (0.96.5-11) unstable; urgency=low
-
- * Fixed /tmp/buildd/* paths in binaries.
- For some unknown reason the upstream Makefile modified a .h file at
- the end of the "debian/rules build" target. During the following
- "make install" one library thus got re*compiled* - with /tmp/buildd
- paths as sysconfdir (thanks to Peder Chr. Norgaard). Closes: #274050
-
- -- Christian Hammers <ch@debian.org> Fri, 1 Oct 2004 01:21:02 +0200
-
-quagga (0.96.5-10) unstable; urgency=medium
-
- * The BGP routing daemon might freeze on network disturbances when
- their peer is also a Quagga/Zebra router.
- Applied patch from http://bugzilla.quagga.net/show_bug.cgi?id=102
- which has been confirmed by the upstream author.
- (thanks to Gunther Stammwitz)
- * Changed --enable-pam to --with-libpam (thanks to Hasso Tepper).
- Closes: #264562
- * Added patch for vtysh (thanks to Hasso Tepper). Closes: #215919
-
- -- Christian Hammers <ch@debian.org> Mon, 9 Aug 2004 15:33:02 +0200
-
-quagga (0.96.5-9) unstable; urgency=low
-
- * Rewrote the documentation chapter about SNMP support. Closes: #195653
- * Added MPLS docs.
-
- -- Christian Hammers <ch@debian.org> Thu, 29 Jul 2004 21:01:52 +0200
-
-quagga (0.96.5-8) unstable; urgency=low
-
- * Adjusted a grep in the initscript to also match a modprobe message
- from older modutils packages (thanks to Faidon Paravoid).
-
- -- Christian Hammers <ch@debian.org> Wed, 28 Jul 2004 21:19:02 +0200
-
-quagga (0.96.5-7) unstable; urgency=low
-
- * Added a "cd /etc/quagga/" to the init script as quagga tries to load
- the config file first from the current working dir and then from the
- config dir which could lead to confusion (thanks to Marco d'Itri).
- Closes: #255078
- * Removed warning regarding problems with the Debian kernels from
- README.Debian as they are no longer valid (thanks to Raphael Hertzog).
- Closes: #257580
- * Added patch from Hasso Tepper that makes "terminal length 0" work
- in vtysh (thanks to Matthias Wamser). Closes: #252579
-
- -- Christian Hammers <ch@debian.org> Thu, 8 Jul 2004 21:53:21 +0200
-
-quagga (0.96.5-6) unstable; urgency=low
-
- * Try to load the capability module as it is needed now.
-
- -- Christian Hammers <ch@debian.org> Tue, 8 Jun 2004 23:25:29 +0200
-
-quagga (0.96.5-5) unstable; urgency=low
-
- * Changed the homedir of the quagga user to /etc/quagga/ to allow
- admins to put ~/.ssh/authorized_keys there (thanks to Matthias Wamser).
- Closes: #252577
-
- -- Christian Hammers <ch@debian.org> Sat, 5 Jun 2004 14:47:31 +0200
-
-quagga (0.96.5-4) unstable; urgency=medium
-
- * Fixed rules file to use the renamed ./configure option --enable-tcp-md5
- (thanks to Matthias Wamser). Closes: #252141
-
- -- Christian Hammers <ch@debian.org> Tue, 1 Jun 2004 22:58:32 +0200
-
-quagga (0.96.5-3) unstable; urgency=low
-
- * Provided default binary package name to all build depends that were
- virtual packages (thanks to Goswin von Brederlow). Closes: #251625
-
- -- Christian Hammers <ch@debian.org> Sat, 29 May 2004 22:48:53 +0200
-
-quagga (0.96.5-2) unstable; urgency=low
-
- * New upstream version.
- * New md5 patch version (thanks to Niklas Jakobsson and Hasso Tepper).
- Closes: #250985
- * Fixes info file generation (thanks to Peder Chr. Norgaard).
- Closes: #250992
- * Added catalan debconf translation (thanks to Aleix Badia i Bosch).
- Closes: #250118
- * PATCHES:
- This release contains BGP4 MD5 support which requires a kernel patch
- to work. See /usr/share/doc/quagga/README.Debian.MD5.
- (The patch is ht-20040525-0.96.5-bgp-md5.patch from Hasso Tepper)
-
- -- Christian Hammers <ch@debian.org> Thu, 27 May 2004 20:09:37 +0200
-
-quagga (0.96.5-1) unstable; urgency=low
-
- * New upstream version.
- * PATCHES:
- This release contains BGP4 MD5 support which also requires a kernel patch.
- See /usr/share/doc/quagga/README.Debian.MD5 and search for CAN-2004-0230.
-
- -- Christian Hammers <ch@debian.org> Sun, 16 May 2004 17:40:40 +0200
-
-quagga (0.96.4x-10) unstable; urgency=low
-
- * SECURITY:
- This release contains support for MD5 for BGP which is one suggested
- prevention of the actually long known TCP SYN/RST attacks which got
- much news in the last days as ideas were revealed that made them much
- easier to mount, especially against BGP sessions, than commonly known.
- There are a lot of arguments against the MD5 approach but some ISPs
- started to require it.
- See: CAN-2004-0230, http://www.us-cert.gov/cas/techalerts/TA04-111A.html
- * PATCHES:
- This release contains the MD5 patch from Hasso Tepper. It also seems to
- required a kernel patch. See /usr/share/doc/quagga/README.Debian.MD5.
-
- -- Christian Hammers <ch@debian.org> Thu, 29 Apr 2004 01:01:38 +0200
-
-quagga (0.96.4x-9) unstable; urgency=low
-
- * Fixed daemon loading order (thanks to Matt Kemner).
- * Fixed typo in init script (thanks to Charlie Brett). Closes: #238582
-
- -- Christian Hammers <ch@debian.org> Sun, 4 Apr 2004 15:32:18 +0200
-
-quagga (0.96.4x-8) unstable; urgency=low
-
- * Patched upstream source so that quagga header files end up in
- /usr/include/quagga/. Closes: #233792
-
- -- Christian Hammers <ch@debian.org> Mon, 23 Feb 2004 01:42:53 +0100
-
-quagga (0.96.4x-7) unstable; urgency=low
-
- * Fixed info file installation (thanks to Holger Dietze). Closes: #227579
- * Added Japanese translation (thanks to Hideki Yamane). Closes: #227812
-
- -- Christian Hammers <ch@debian.org> Sun, 18 Jan 2004 17:28:29 +0100
-
-quagga (0.96.4x-6) unstable; urgency=low
-
- * Added dependency to iproute.
- * Initscript now checks not only for the pid file but also for the
- daemons presence (thanks to Phil Gregory). Closes: #224389
- * Added my patch to configure file permissions.
-
- -- Christian Hammers <ch@debian.org> Mon, 15 Dec 2003 22:34:29 +0100
-
-quagga (0.96.4x-5) unstable; urgency=low
-
- * Added patch which gives bgpd the CAP_NET_RAW capability to allow it
- to bind to special IPv6 link-local interfaces (Thanks to Bastian Blank).
- Closes: #222930
- * Made woody backport easier by applying Colin Watsons po-debconf hack.
- Thanks to Marc Haber for suggesting it. Closes: #223527
- * Made woody backport easier by applying a patch that removes some
- obscure whitespaces inside an C macro. (Thanks to Marc Haber).
- Closes: #223529
- * Now uses /usr/bin/pager. Closes: #204070
- * Added note about the "official woody backports" on my homepage.
-
- -- Christian Hammers <ch@debian.org> Mon, 15 Dec 2003 20:39:06 +0100
-
-quagga (0.96.4x-4) unstable; urgency=high
-
- * SECURITY:
- Fixes another bug that was originally reported against Zebra.
- .
- http://rhn.redhat.com/errata/RHSA-2003-307.html
- Herbert Xu reported that Zebra can accept spoofed messages sent on the
- kernel netlink interface by other users on the local machine. This could
- lead to a local denial of service attack. The Common Vulnerabilities and
- Exposures project (cve.mitre.org) has assigned the name CAN-2003-0858 to
- this issue.
-
- * Minor improvements to init script (thanks to Iustin Pop).
- Closes: #220938
-
- -- Christian Hammers <ch@debian.org> Sat, 22 Nov 2003 13:27:57 +0100
-
-quagga (0.96.4x-3) unstable; urgency=low
-
- * Changed "more" to "/usr/bin/pager" as default pager if $PAGER or
- $VTYSH_PAGER is not set (thanks to Bastian Blank). Closes: #204070
- * Made the directory (but not the config/log files!) world accessible
- again on user request (thanks to Anand Kumria). Closes: #213129
- * No longer providing sample configuration in /etc/quagga/. They are
- now only available in /usr/share/doc/quagga/ to avoid accidentally
- using them without changing the addresses (thanks to Marc Haber).
- Closes: #215918
-
- -- Christian Hammers <ch@debian.org> Sun, 16 Nov 2003 16:59:30 +0100
-
-quagga (0.96.4x-2) unstable; urgency=low
-
- * Fixed permission problem with pidfile (thanks to Kir Kostuchenko).
- Closes: #220938
-
- -- Christian Hammers <ch@debian.org> Sun, 16 Nov 2003 14:24:08 +0100
-
-quagga (0.96.4x-1) unstable; urgency=low
-
- * Reupload of 0.96.4. Last upload-in-a-hurry produced a totally
- crappy .tar.gz file. Closes: #220621
-
- -- Christian Hammers <ch@debian.org> Fri, 14 Nov 2003 19:45:57 +0100
-
-quagga (0.96.4-1) unstable; urgency=high
-
- * SECURITY: Remote DoS of protocol daemons.
- Fix for a remote triggerable crash in vty layer. The management
- ports ("telnet myrouter ospfd") should not be open to the internet!
-
- * New upstream version.
- - OSPF bugfixes.
- - Some improvements for bgp and rip.
-
- -- Christian Hammers <ch@debian.org> Thu, 13 Nov 2003 11:52:27 +0100
-
-quagga (0.96.3-3) unstable; urgency=low
-
- * Fixed pid file generation by substituting the daemons "-d" by the
- start-stop-daemon option "--background" (thanks to Micha Gaisser).
- Closes: #218103
-
- -- Christian Hammers <ch@debian.org> Wed, 29 Oct 2003 05:17:49 +0100
-
-quagga (0.96.3-2) unstable; urgency=low
-
- * Readded GNOME-PRODUCT-ZEBRA-MIB.
-
- -- Christian Hammers <ch@debian.org> Thu, 23 Oct 2003 06:17:03 +0200
-
-quagga (0.96.3-1) unstable; urgency=medium
-
- * New upstream version.
- * Removed -u and -e in postrm due to problems with debhelper and userdel
- (thanks to Adam Majer and Jaakko Niemi). Closes: #216770
- * Removed SNMP MIBs as they are now included in libsnmp-base (thanks to
- David Engel and Peter Gervai). Closes: #216138, #216086
- * Fixed seq command in init script (thanks to Marc Haber). Closes: #215915
- * Improved /proc check (thanks to Marc Haber). Closes: #212331
-
- -- Christian Hammers <ch@debian.org> Thu, 23 Oct 2003 03:42:02 +0200
-
-quagga (0.96.2-9) unstable; urgency=medium
-
- * Removed /usr/share/info/dir.* which were accidentally there and prevented
- the installation by dpkg (thanks to Simon Raven). Closes: #212614
- * Reworded package description (thanks to Anand Kumria). Closes: #213125
- * Added french debconf translation (thanks to Christian Perrier).
- Closes: #212803
-
- -- Christian Hammers <ch@debian.org> Tue, 7 Oct 2003 13:26:58 +0200
-
-quagga (0.96.2-8) unstable; urgency=low
-
- * debian/rules now checks if /proc is mounted as ./configure needs
- it but just fails with an obscure error message if it is absent.
- (Thanks to Norbert Tretkowski). Closes: #212331
-
- -- Christian Hammers <ch@debian.org> Tue, 23 Sep 2003 12:57:38 +0200
-
-quagga (0.96.2-7) unstable; urgency=low
-
- * Last build was rejected due to a buggy dpkg-dev version. Rebuild.
-
- -- Christian Hammers <ch@debian.org> Mon, 22 Sep 2003 20:34:12 +0200
-
-quagga (0.96.2-6) unstable; urgency=low
-
- * Fixed init script so that it is now possible to just start
- the bgpd but not the zebra daemon. Also daemons are now actually
- started in the order defined by their priority. (Thanks to Thomas Kaehn
- and Jochen Friedrich) Closes: #210924
-
- -- Christian Hammers <ch@debian.org> Fri, 19 Sep 2003 21:17:02 +0200
-
-quagga (0.96.2-5) unstable; urgency=low
-
- * For using quagga as BGP route server or similar, it is not
- wanted to have the zebra daemon running too. For this reason
- it can now be disabled in /etc/quagga/daemons, too.
- (Thanks to Jochen Friedrich). Closes: #210924
- * Attached *unapplied* patch for the ISIS protocol. I did not dare
- to apply it as long as upstream does not do it but this way give
- users the possibilities to use it if they like to.
- (Thanks to Remco van Mook)
-
- -- Christian Hammers <ch@debian.org> Wed, 17 Sep 2003 19:57:31 +0200
-
-quagga (0.96.2-4) unstable; urgency=low
-
- * Enabled IPV6 router advertisement feature by default on user request
- (thanks to Jochen Friedrich and Hasso Tepper). Closes: #210732
- * Updated GNU autoconf to let it build on hppa/parisc64 (thanks to
- lamont). Closes: #210492
-
- -- Christian Hammers <ch@debian.org> Sat, 13 Sep 2003 14:11:13 +0200
-
-quagga (0.96.2-3) unstable; urgency=medium
-
- * Removed unnecessary "-lcrypto" to avoid dependency against OpenSSL
- which would require further copyright addtions.
-
- -- Christian Hammers <ch@debian.org> Wed, 10 Sep 2003 01:37:28 +0200
-
-quagga (0.96.2-2) unstable; urgency=low
-
- * Added note that config files of quagga are in /etc/quagga and
- not /etc/zebra for the zebra users that migrate to quagga.
- (Thanks to Roberto Suarez Soto for the idea)
- * Fixed setgid rights in /etc/quagga.
-
- -- Christian Hammers <ch@debian.org> Wed, 27 Aug 2003 14:05:39 +0200
-
-quagga (0.96.2-1) unstable; urgency=low
-
- * This package has formally been known as "zebra-pj"!
- * New upstream release.
- Fixes "anoying OSPF problem".
- * Modified group ownerships so that vtysh can now be used by normal
- uses if they are in the quaggavty group.
-
- -- Christian Hammers <ch@debian.org> Mon, 25 Aug 2003 23:40:14 +0200
-
-quagga (0.96.1-1) unstable; urgency=low
-
- * Zebra-pj, the fork of zebra has been renamed to quagga as the original
- upstream author asked the new project membed not to use "zebra" in the
- name. zebra-pj is obsolete.
-
- -- Christian Hammers <ch@debian.org> Mon, 18 Aug 2003 23:37:20 +0200
-
-zebra-pj (0.94+cvs20030721-1) unstable; urgency=low
-
- * New CVS build.
- - OSPF changes (integration of the OSPF API?)
- - code cleanups (for ipv6?)
- * Tightened Build-Deps to gcc-2.95 as 3.x does not compile a stable ospfd.
- This is a known problem and has been discussed on the mailing list.
- No other solutions so far.
-
- -- Christian Hammers <ch@debian.org> Mon, 21 Jul 2003 23:52:00 +0200
-
-zebra-pj (0.94+cvs20030701-1) unstable; urgency=low
-
- * Initial Release.
-
- -- Christian Hammers <ch@debian.org> Tue, 1 Jul 2003 01:58:06 +0200
AC_PATH_PROG([PERL], [perl])
PKG_PROG_PKG_CONFIG
-dnl default is to match previous behavior
-exampledir=${sysconfdir}
-AC_ARG_ENABLE([exampledir],
- AS_HELP_STRING([--enable-exampledir],
- [specify alternate directory for examples]),
- exampledir="$enableval",)
-dnl XXX add --exampledir to autoconf standard directory list somehow
-AC_SUBST([exampledir])
-
dnl default is to match previous behavior
pkgsrcrcdir=""
AC_ARG_ENABLE([pkgsrcrcdir],
])
fi
+AM_CONDITIONAL([SCRIPTING], [test "$enable_scripting" = "yes"])
+
if test "$enable_scripting" = "yes"; then
AX_PROG_LUA([5.3], [5.4], [], [
AC_MSG_ERROR([Lua 5.3 is required to build with Lua support. No other version is supported.])
AX_LUA_LIBS([
AC_DEFINE([HAVE_SCRIPTING], [1], [Have support for scripting])
LIBS="$LIBS $LUA_LIB"
+ SCRIPTING=true
], [
+ SCRIPTING=false
AC_MSG_ERROR([Lua 5.3 libraries are required to build with Lua support. No other version is supported.])
])
fi
AS_HELP_STRING([--disable-irdp], [disable IRDP server support in zebra (enabled by default if supported)]))
AC_ARG_ENABLE([capabilities],
AS_HELP_STRING([--disable-capabilities], [disable using POSIX capabilities]))
-AC_ARG_ENABLE([rusage],
- AS_HELP_STRING([--disable-rusage], [disable using getrusage]))
AC_ARG_ENABLE([gcc_ultra_verbose],
AS_HELP_STRING([--enable-gcc-ultra-verbose], [enable ultra verbose GCC warnings]))
AC_ARG_ENABLE([backtrace],
AS_HELP_STRING([--enable-fpm], [enable Forwarding Plane Manager support]))
AC_ARG_ENABLE([pcep],
AS_HELP_STRING([--enable-pcep], [enable PCEP support for pathd]))
-AC_ARG_ENABLE([systemd],
- AS_HELP_STRING([--enable-systemd], [enable Systemd support]))
AC_ARG_ENABLE([werror],
AS_HELP_STRING([--enable-werror], [enable -Werror (recommended for developers only)]))
AC_ARG_ENABLE([cumulus],
;;
esac
-case "${enable_systemd}" in
- "no") ;;
- "yes")
- AC_CHECK_LIB([systemd], [sd_notify], [LIBS="$LIBS -lsystemd"])
- if test "$ac_cv_lib_systemd_sd_notify" = "no"; then
- AC_MSG_ERROR([enable systemd has been specified but systemd development env not found on your system])
- else
- AC_DEFINE([HAVE_SYSTEMD], [1], [Compile systemd support in])
- fi
- ;;
- "*") ;;
-esac
-
if test "$enable_rr_semantics" != "no" ; then
AC_DEFINE([HAVE_V6_RR_SEMANTICS], [1], [Compile in v6 Route Replacement Semantics])
fi
if test "$enable_datacenter" = "yes" ; then
AC_DEFINE([HAVE_DATACENTER], [1], [Compile extensions for a DataCenter])
- AC_MSG_WARN([The --enable-datacenter compile time option is deprecated. Please modify the init script to pass -F datacenter to the daemons instead.])
DFLT_NAME="datacenter"
else
DFLT_NAME="traditional"
AC_DEFINE([HAVE_CLOCK_MONOTONIC], [1], [Have monotonic clock])
], [AC_MSG_RESULT([no])], [FRR_INCLUDES])
+AC_CHECK_DECL([CLOCK_THREAD_CPUTIME_ID], [
+ AC_DEFINE([HAVE_CLOCK_THREAD_CPUTIME_ID], [1], [Have cpu-time clock])
+], [AC_MSG_RESULT([no])], [FRR_INCLUDES])
+
AC_SEARCH_LIBS([clock_nanosleep], [rt], [
AC_DEFINE([HAVE_CLOCK_NANOSLEEP], [1], [Have clock_nanosleep()])
])
AC_CONFIG_FILES([
config.version
- changelog-auto
redhat/frr.spec
alpine/APKBUILD
snapcraft/snapcraft.yaml
linker flags : ${LDFLAGS} ${SAN_FLAGS} ${LIBS} ${LIBCAP} ${LIBREADLINE} ${LIBM}
state file directory : ${frr_statedir}
config file directory : `eval echo \`echo ${sysconfdir}\``
-example directory : `eval echo \`echo ${exampledir}\``
module directory : ${CFG_MODULE}
script directory : ${CFG_SCRIPT}
user to run as : ${enable_user}
The above user and group must have read/write access to the state file
directory and to the config files in the config file directory."
+if test -n "$enable_datacenter"; then
+ AC_MSG_WARN([The --enable-datacenter compile time option is deprecated. Please modify the init script to pass -F datacenter to the daemons instead.])
+fi
+if test -n "$enable_time_check"; then
+ AC_MSG_WARN([The --enable-time-check compile time option is deprecated. Please use the service cputime-stats configuration option instead.])
+fi
+if test -n "$enable_cpu_time"; then
+ AC_MSG_WARN([The --enable-cpu-time compile time option is deprecated. Please use the service cputime-warning NNN configuration option instead.])
+fi
+
if test "$enable_doc" != "no" -a "$frr_py_mod_sphinx" = "false"; then
AC_MSG_WARN([sphinx is missing but required to build documentation])
fi
controls whether the RPKI module is built.
Will be enabled by default at some point, adds some extra dependencies.
-- pkg.frr.nosystemd
- Disables both systemd unit file installation as well as watchfrr sd_notify
- support at startup. Removes libsystemd dependency.
-
Note that all options have a "no" form; if you want to have your decision
be sticky regardless of changes to what it defaults to, then always use one
of the two. For example, all occurrences of <pkg.frr.rtrlib> will at some
The main frr package has the exact same contents regardless of rtrlib or snmp
choices. The options only control frr-snmp and frr-rpki-rtrlib packages.
-The main frr package does NOT have the same contents if pkg.frr.nosystemd is
-used. This option should only be used for systems that do not have systemd,
-e.g. Ubuntu 14.04.
-
* Debian Policy compliance notes
================================
+++ /dev/null
-../changelog-auto
\ No newline at end of file
--- /dev/null
+frr (8.1~dev-1) UNRELEASED; urgency=medium
+
+ * New upstream release...
+
+ -- Ondřej Surý <ondrej@debian.org> Tue, 04 May 2021 22:52:47 +0200
+
+frr (7.5.1-1) unstable; urgency=medium
+
+ * Update the d/gbp.conf for 7.5.1 release
+ * Use wrap-and-sort -a to unify debian/ wrapping and sorting
+ * Work around the sphinx-build error that doesn't copy images to texinfo
+ * Change the upstream-tag in d/gbp.conf to track the upstream tarballs
+
+ -- Ondřej Surý <ondrej@debian.org> Mon, 08 Mar 2021 09:40:19 +0100
+
+frr (7.5-1) unstable; urgency=medium
+
+ * New upstream version 7.5
+
+ -- Ondřej Surý <ondrej@debian.org> Sun, 14 Feb 2021 21:38:50 +0100
+
+frr (7.4-2) unstable; urgency=medium
+
+ * Bump libyang dependency to >= 1.0.184-1~
+ * Make the autopkgtest more resilient (Closes: #980111)
+ * Adjust the ax_python.m4 to hardcode python3.9
+
+ -- Ondřej Surý <ondrej@debian.org> Sun, 07 Feb 2021 13:15:07 +0100
+
+frr (7.4-1.1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+ * Backport upstream fix for FTBFS with Python 3.9. (Closes: #972767)
+
+ -- Adrian Bunk <bunk@debian.org> Thu, 21 Jan 2021 16:06:12 +0200
+
+frr (7.4-1) unstable; urgency=medium
+
+ [ Ondřej Surý ]
+ * Use dh_installinit capabilities to install frr.tmpfile
+ * Remove unused debian/watchfrr.rc file
+ * Add missing lsof dependency
+ * Remove mention of pkg.frr.snmp build profile from debian/README.Debian
+ * Make lsb-base a hard dependency
+ * Update gbp.conf for 7.4 release
+ * Update and simplify d/watch
+ * Change the debian source format from 3.0 (git) to 3.0 (quilt)
+ * Convert the package to dh compat level 10
+ * Add myself to Uploaders
+ * Bump standards version to 4.5.0.2 (latest) - no change
+ * Use wrap-and-sort -a to unify debian/ wrapping and sorting
+ * Work around the sphinx-build error that doesn't copy images to texinfo
+ (Properly closes: #955067)
+ * Depend on debhelper >= 9.20160709 and drop dh-systemd dependency
+ (Closes: #958626)
+
+ -- Ondřej Surý <ondrej@debian.org> Mon, 10 Aug 2020 11:50:45 +0200
+
+frr (7.3.1-1) unstable; urgency=medium
+
+ [ David Lamparter ]
+ * allow cross-compile with sbuild --host
+
+ [ Ondřej Surý ]
+ * Add myself to Uploaders
+ * Add d/gbp.conf
+ * Update changelog for 7.3.1-1~1.gbp2292a4 release
+ * Change the source format from git to quilt to use git-buildpackage
+ * Don't install frr-doc texinfo images, they are gone (Closes: #955067)
+ * Bump the dh_compat to 10
+
+ -- Ondřej Surý <ondrej@debian.org> Mon, 01 Jun 2020 08:41:03 +0200
+
+frr (7.3-1) unstable; urgency=medium
+
+ * new upstream release
+
+ -- David Lamparter <equinox-debian@diac24.net> Tue, 25 Feb 2020 17:45:16 +0100
+
+frr (7.2.1-1) unstable; urgency=medium
+
+ * new upstream release
+ * daemon man pages renamed to frr-* (closes: #944392)
+ * fix/improve multi-arch markers on doc
+ * fix git URLs to point to debian branch
+
+ -- David Lamparter <equinox-debian@diac24.net> Mon, 20 Jan 2020 17:06:21 +0100
+
+frr (7.2-1) unstable; urgency=medium
+
+ * New upstream release
+
+ -- Jafar Al-Gharaibeh <jafar@atcorp.com> Sun, 03 Nov 2019 18:45:23 +0100
+
+frr (6.0.2-2) unstable; urgency=medium
+
+ * remove bogus libjson0 build-dep (closes: #921349)
+ * fix broken systemd dependency spec
+ * add proper Conflicts: for quagga and pimd (closes: #921376)
+
+ -- David Lamparter <equinox-debian@diac24.net> Mon, 04 Feb 2019 22:16:07 +0100
+
+frr (6.0.2-1) unstable; urgency=medium
+
+ * Packaging has been more or less completely reworked, based off the old
+ Quagga packaging that hung around in git. Refer to "changelog-auto.in"
+ in the source root directory for the old changelog.
+ * Initial release of FRR for Debian. (closes: #863249)
+
+ -- David Lamparter <equinox-debian@diac24.net> Sun, 27 Jan 2019 17:27:02 +0100
+
+frr (6.0-2) testing; urgency=medium
+
+ * add install-info to build deps
+ * remove trailing whitespace from control
+ * cleanup tcp-zebra configure options
+ * drop unused SMUX client OID MIBs
+ * remove /proc check
+ * remove --enable-poll
+ * remove libtool .la files
+ * drop texlive-latex-base, texlive-generic-recommended build deps
+ * consistently allow python2 or python3
+ * remove bad USE_* options, add WERROR
+ * drop libncurses5 dep
+ * remove backports mechanism
+ * use better dependency for pythontools (binNMU compatible)
+ * remove bogus shlib:Depends on frr-dbg
+ * create frr-snmp and frr-rpki-rtrlib
+ * make frr-pythontools a "Recommends:"
+ * use redistclean target
+ * update to Debian Policy version 4.2.1
+ * raise debhelper compat level to 9
+ * ditch development-only files
+ * modernise dh_missing and use fail mode
+ * disable zeromq and FPM
+ * always install /etc/init.d/frr
+ * put frr-doc package in 'doc' section
+ * install HTML docs, drop tools/
+ * fix install for {frr,rfptest,ospfclient}
+ * add watch file
+ * change python dependency and shebang to python3:any
+ * use set -e in maintscripts
+ * put myself in as maintainer
+ * update copyright file
+ * closes: #863249
+
+ -- David Lamparter <equinox-debian@diac24.net> Thu, 25 Oct 2018 16:36:50 +0200
+
+frr (6.0-1) RELEASED; urgency=medium
+
+ * New Enabled: PIM draft Unnumbered
+
+ -- FRRouting-Dev <dev@lists.frrouting.org> Wed, 18 Oct 2017 17:01:42 -0700
+
+frr (3.0-1) RELEASED; urgency=medium
+
+ * Added Debian 9 Backport
+
+ -- FRRouting-Dev <dev@lists.frrouting.org> Mon, 16 Oct 2017 03:28:00 -0700
+
+frr (3.0-0) RELEASED; urgency=medium
+
+ * New Enabled: BGP Shutdown Message
+ * New Enabled: BGP Large Community
+ * New Enabled: BGP RFC 7432 Partial Support w/ Ethernet VPN
+ * New Enabled: BGP EVPN RT-5
+ * New Enabled: LDP RFC 5561
+ * New Enabled: LDP RFC 5918
+ * New Enabled: LDP RFC 5919
+ * New Enabled: LDP RFC 6667
+ * New Enabled: LDP RFC 7473
+ * New Enabled: OSPF RFC 4552
+ * New Enabled: ISIS SPF Backoff draft
+ * New Enabled: PIM Unnumbered Interfaces
+ * New Enabled: PIM RFC 4611
+ * New Enabled: PIM Sparse Mode
+ * New Enabled: NHRP RFC 2332
+ * New Enabled: Label Manager
+ * Switched from hardening-wrapper to dpkg-buildflags.
+
+ -- FRRouting-Dev <dev@lists.frrouting.org> Fri, 13 Oct 2017 16:17:26 -0700
+
+frr (2.0-0) RELEASED; urgency=medium
+
+ * Switchover to FRR
+
+ -- FRRouting-Dev <dev@lists.frrouting.org> Mon, 23 Jan 2017 16:30:22 -0400
+
+quagga (0.99.24+cl3u5) RELEASED; urgency=medium
+
+ * Closes: CM-12846 - Resolve Memory leaks in 'show ip bgp neighbor json'
+ * Closes: CM-5878 - Display all ospf peers with 'show ip ospf neighbor detail all'
+ * Closes: CM-5794 - Add support for IPv6 static to null0
+ * Closes: CM-13060 - Reduce JSON memory usage.
+ * Closes: CM-10394 - protect 'could not get instance' error messages with debug
+ * Closes: CM-11173 - Move netlink error messages undeer a debug
+ * Closes: CM-13328 - Fixes route missing in hardware after reboot
+
+ -- dev-support <dev-support@cumulusnetworks.com> Fri, 11 Nov 2016 22:13:29 -0400
+
+quagga (0.99.24+cl3u4) RELEASED; urgency=medium
+
+ * Closes: CM-12687 - Buffer overflow in zebra RA code
+
+ -- dev-support <dev-support@cumulusnetworks.com> Wed, 31 Aug 2016 12:36:10 -0400
+
+quagga (0.99.24+cl3u3) RELEASED; urgency=medium
+
+ * New Enabled: Merge up-to 0.99.24 code from upstream
+ * New Enabled: Additional CLI simplification
+ * New Enabled: Various Bug Fixes
+
+ -- dev-support <dev-support@cumulusnetworks.com> Thu, 04 Aug 2016 08:43:36 -0700
+
+quagga (0.99.23.1-1+cl3u2) RELEASED; urgency=medium
+
+ * New Enabled: VRF - See Documentation for how to use
+ * New Enabled: Improved interface statistics
+ * New Enabled: Various vtysh improvements
+ * New Enabled: Numerous compile warnings and SA fixes
+ * New Enabled: Improved priviledge handlingA
+ * New Enabled: Various OSPF CLI fixes
+ * New Enabled: Prefix-list Performance Improvements.
+ * New Enabled: Allow more than 1k peers in Quagga
+ and Performance Improvements
+ * New Enabled: Systemd integration
+ * New Enabled: Various ISIS fixes
+ * New Enabled: BGP MRT improvements
+ * New Enabled: Lowered default MRAI timers
+ * New Enabled: Lowered default 'timers connect'
+ * New Enabled: 'bgp log-neighbor-changes' enabled by default
+ * New Enabled: BGP default keepalive to 3s and holdtime to 9s
+ * New Enabled: OSPF spf timers are now '0 50 5000' by default
+ * New Enabled: BGP hostname is displayed by default
+ * New Enabled: BGP 'no-as-set' is the default for
+ 'bgp as-path multipath-relax"
+ * New Enabled: RA is on by default if using 5549 on an interface
+ * New Enabled: peer-group restrictions relaxed, update-groups determine
+ outbund policy anyway
+ * New Enabled: BGP enabled 'maximum-paths 64' by default
+ * New Enabled: OSPF "log-adjacency-changes" on by default
+ * New Enabled: Zebra: Add IPv6 protocol filtering support
+ * and setting src of IPv6 routes.
+ * New Enabled: BGP and OSPF JSON commands added.
+ * New Enabled: BGP Enable multiple instances support by default
+ * New Enabled: 'banner motd file' command
+ * New Enabled: Remove bad default passwords from default conf
+ * New Enabled: BGP addpath TX
+ * New Enabled: Simplified configuration for BGP Unnumbered
+
+ * New Deprecated: Remove unused 'show memory XXX' functionality
+ * New Deprecated: Remove babel protocol
+
+ * Closes: CM-10435 Addition on hidden command
+ "bfd multihop/singlehop" and "ptm-enable" per interface command
+ * Closes: CM-9974 Get route counts right for show ip route summary
+ * Closes: CM-9786 BGP memory leak in peer hostname
+ * Closes: CM-9340 BGP: Ensure correct sequence of processing at exit
+ * Closes: CM-9270 ripd: Fix crash when a default route is passed to rip
+ * Closes: CM-9255 BGPD crash around bgp_config_write ()
+ * Closes: CM-9134 ospf6d: Fix for crash when non area 0 network
+ entered first
+ * Closes: CM-8934 OSPFv3: Check area before scheduling SPF
+ * Closes: CM-8514 zebra: Crash upon disabling a link
+ * Closes: CM-8295 BGP crash in group_announce_route_walkcb
+ * Closes: CM-8191 BGP: crash in update_subgroup_merge()
+ * Closes: CM-8015 lib: Memory reporting fails over 2GB
+ * Closes: CM-7926 BGP: crash from not NULLing freed pointers
+
+ -- dev-support <dev-support@cumulusnetworks.com> Wed, 04 May 2016 16:22:52 -0700
+
+quagga (0.99.23.1-1) unstable; urgency=medium
+
+ * New upstream release
+ * Added .png figures for info files to quagga-doc package.
+ * Changed dependency from iproute to iproute2 (thanks to Andreas
+ Henriksson). Closes: #753736
+ * Added texlive-fonts-recommended to build-depends to get ecrm1095 font
+ (thanks to Christoph Biedl). Closes: #651545
+
+ -- Christian Brunotte <ch@debian.org> Tue, 30 Sep 2014 00:20:12 +0200
+
+quagga (0.99.23-1) unstable; urgency=low
+
+ * New upstream release
+ * Removed debian/patches/readline-6.3.diff which was already in upstream.
+
+ -- Christian Hammers <ch@debian.org> Tue, 08 Jul 2014 09:15:48 +0200
+
+quagga (0.99.22.4-4) unstable; urgency=medium
+
+ * Fix build failure with readline-6.3 (thanks to Matthias Klose).
+ Closes: #741774
+
+ -- Christian Hammers <ch@debian.org> Sun, 23 Mar 2014 15:28:42 +0100
+
+quagga (0.99.22.4-3) unstable; urgency=low
+
+ * Added status to init script (thanks to Peter J. Holzer). Closes: #730625
+ * Init script now sources /lib/lsb/init-functions.
+ * Switched from hardening-wrapper to dpkg-buildflags.
+
+ -- Christian Hammers <ch@debian.org> Wed, 01 Jan 2014 19:12:01 +0100
+
+quagga (0.99.22.4-2) unstable; urgency=low
+
+ * Fixed typo in package description (thanks to Davide Prina).
+ Closes: #625860
+ * Added Italian Debconf translation (thanks to Beatrice Torracca)
+ Closes: #729798
+
+ -- Christian Hammers <ch@debian.org> Tue, 26 Nov 2013 00:47:11 +0100
+
+quagga (0.99.22.4-1) unstable; urgency=high
+
+ * SECURITY:
+ "ospfd: CVE-2013-2236, stack overrun in apiserver
+
+ the OSPF API-server (exporting the LSDB and allowing announcement of
+ Opaque-LSAs) writes past the end of fixed on-stack buffers. This leads
+ to an exploitable stack overflow.
+
+ For this condition to occur, the following two conditions must be true:
+ - Quagga is configured with --enable-opaque-lsa
+ - ospfd is started with the "-a" command line option
+
+ If either of these does not hold, the relevant code is not executed and
+ the issue does not get triggered."
+ Closes: #726724
+
+ * New upstream release
+ - ospfd: protect vs. VU#229804 (malformed Router-LSA)
+ (Quagga is said to be non-vulnerable but still adds some protection)
+
+ -- Christian Hammers <ch@debian.org> Thu, 24 Oct 2013 22:58:37 +0200
+
+quagga (0.99.22.1-2) unstable; urgency=low
+
+ * Added autopkgtests (thanks to Yolanda Robla). Closes: #710147
+ * Added "status" command to init script (thanks to James Andrewartha).
+ Closes: #690013
+ * Added "libsnmp-dev" to Build-Deps. There not needed for the official
+ builds but for people who compile Quagga themselves to activate the
+ SNMP feature (which for licence reasons cannot be done by Debian).
+ Thanks to Ben Winslow). Closes: #694852
+ * Changed watchquagga_options to an array so that quotes can finally
+ be used as expected. Closes: #681088
+ * Fixed bug that prevented restarting only the watchquagga daemon
+ (thanks to Harald Kappe). Closes: #687124
+
+ -- Christian Hammers <ch@debian.org> Sat, 27 Jul 2013 16:06:25 +0200
+
+quagga (0.99.22.1-1) unstable; urgency=low
+
+ * New upstream release
+ - ospfd restore nexthop IP for p2p interfaces
+ - ospfd: fix LSA initialization for build without opaque LSA
+ - ripd: correctly redistribute ifindex routes (BZ#664)
+ - bgpd: fix lost passwords of grouped neighbors
+ * Removed 91_ld_as_needed.diff as it was found in the upstream source.
+
+ -- Christian Hammers <ch@debian.org> Mon, 22 Apr 2013 22:21:20 +0200
+
+quagga (0.99.22-1) unstable; urgency=low
+
+ * New upstream release.
+ - [bgpd] The semantics of default-originate route-map have changed.
+ The route-map is now used to advertise the default route conditionally.
+ The old behaviour which allowed to set attributes on the originated
+ default route is no longer supported.
+ - [bgpd] this version of bgpd implements draft-idr-error-handling. This was
+ added in 0.99.21 and may not be desirable. If you need a version
+ without this behaviour, please use 0.99.20.1. There will be a
+ runtime configuration switch for this in future versions.
+ - [isisd] is in "beta" state.
+ - [ospf6d] is in "alpha/experimental" state
+ - More changes are documented in the upstream changelog!
+ * debian/watch: Adjusted to new savannah.gnu.org site, thanks to Bart
+ Martens.
+ * debian/patches/99_CVE-2012-1820_bgp_capability_orf.diff removed as its
+ in the changelog.
+ * debian/patches/99_distribute_list.diff removed as its in the changelog.
+ * debian/patches/10_doc__Makefiles__makeinfo-force.diff removed as it
+ was just for Debian woody.
+
+ -- Christian Hammers <ch@debian.org> Thu, 14 Feb 2013 00:22:00 +0100
+
+quagga (0.99.21-4) unstable; urgency=medium
+
+ * Fixed regression bug that caused OSPF "distribute-list" statements to be
+ silently ignored. The patch has already been applied upstream but there
+ has been no new Quagga release since then.
+ Thanks to Hans van Kranenburg for reporting. Closes: #697240
+
+ -- Christian Hammers <ch@debian.org> Sun, 06 Jan 2013 15:50:32 +0100
+
+quagga (0.99.21-3) unstable; urgency=high
+
+ * SECURITY:
+ CVE-2012-1820 - Quagga contained a bug in BGP OPEN message handling.
+ A denial-of-service condition could be caused by an attacker controlling
+ one of the pre-configured BGP peers. In most cases this means, that the
+ attack must be originated from an adjacent network. Closes: #676510
+
+ -- Christian Hammers <ch@debian.org> Fri, 08 Jun 2012 01:15:32 +0200
+
+quagga (0.99.21-2) unstable; urgency=low
+
+ * Renamed babeld.8 to quagga-babeld.8 as it conflicted with the
+ original mapage of the babeld package which users might want to
+ install in parallel as it is slightly more capable. Closes: #671916
+
+ -- Christian Hammers <ch@debian.org> Thu, 10 May 2012 07:53:01 +0200
+
+quagga (0.99.21-1) unstable; urgency=low
+
+ * New upstream release
+ - [bgpd] BGP multipath support has been merged
+ - [bgpd] SAFI (Multicast topology) support has been extended to propagate
+ the topology to zebra.
+ - [bgpd] AS path limit functionality has been removed
+ - [babeld] a new routing daemon implementing the BABEL ad-hoc mesh routing
+ protocol has been merged.
+ - [isisd] a major overhaul has been picked up. Please note that isisd is
+ STILL NOT SUITABLE FOR PRODUCTION USE.
+ - a lot of bugs have been fixed
+ * Added watchquagga daemon.
+ * Added DEP-3 conforming patch comments.
+
+ -- Christian Hammers <ch@debian.org> Sun, 06 May 2012 15:33:33 +0200
+
+quagga (0.99.20.1-1) unstable; urgency=high
+
+ * SECURITY:
+ CVE-2012-0249 - Quagga ospfd DoS on malformed LS-Update packet
+ CVE-2012-0250 - Quagga ospfd DoS on malformed Network-LSA data
+ CVE-2012-0255 - Quagga bgpd DoS on malformed OPEN message
+ * New upstream release. Closes: #664033
+
+ -- Christian Hammers <ch@debian.org> Fri, 16 Mar 2012 22:14:05 +0100
+
+quagga (0.99.20-4) unstable; urgency=low
+
+ * Switch to dpkg-source 3.0 (quilt) format.
+ * Switch to changelog-format-1.0.
+
+ -- Christian Hammers <ch@debian.org> Sat, 25 Feb 2012 18:52:06 +0100
+
+quagga (0.99.20-3) unstable; urgency=low
+
+ * Added --sysconfdir back to the configure options (thanks to Sven-Haegar
+ Koch). Closes: #645649
+
+ -- Christian Hammers <ch@debian.org> Tue, 18 Oct 2011 00:24:37 +0200
+
+quagga (0.99.20-2) unstable; urgency=low
+
+ * Bumped standards version to 0.9.2.
+ * Migrated to "dh" build system.
+ * Added quagga-dbg package.
+
+ -- Christian Hammers <ch@debian.org> Fri, 14 Oct 2011 23:59:26 +0200
+
+quagga (0.99.20-1) unstable; urgency=low
+
+ * New upstream release:
+ "The primary focus of this release is a fix of SEGV regression in ospfd,
+ which was introduced in 0.99.19. It also features a series of minor
+ improvements, including better RFC compliance in bgpd, better support
+ of FreeBSD and some enhancements to isisd."
+ * Fixes off-by-one bug (removed 20_ospf6_area_argv.dpatch). Closes: #519488
+
+ -- Christian Hammers <ch@debian.org> Fri, 30 Sep 2011 00:59:24 +0200
+
+quagga (0.99.19-1) unstable; urgency=high
+
+ * SECURITY:
+ "This release provides security fixes, which address assorted
+ vulnerabilities in bgpd, ospfd and ospf6d (CVE-2011-3323,
+ CVE-2011-3324, CVE-2011-3325, CVE-2011-3326 and CVE-2011-3327).
+ * New upstream release.
+ * Removed incorporated debian/patches/92_opaque_lsa_enable.dpatch.
+ * Removed incorporated debian/patches/93_opaque_lsa_fix.dpatch.
+ * Removed obsolete debian/README.Debian.Woody and README.Debian.MD5.
+
+ -- Christian Hammers <ch@debian.org> Tue, 27 Sep 2011 00:16:27 +0200
+
+quagga (0.99.18-1) unstable; urgency=low
+
+ * SECURITY:
+ "This release fixes 2 denial of services in bgpd, which can be remotely
+ triggered by malformed AS-Pathlimit or Extended-Community attributes.
+ These issues have been assigned CVE-2010-1674 and CVE-2010-1675.
+ Support for AS-Pathlimit has been removed with this release."
+ * Added Brazilian Portuguese debconf translation. Closes: #617735
+ * Changed section for quagga-doc from "doc" to "net".
+ * Added patch to fix FTBFS with latest GCC. Closes: #614459
+
+ -- Christian Hammers <ch@debian.org> Tue, 22 Mar 2011 23:13:34 +0100
+
+quagga (0.99.17-4) unstable; urgency=low
+
+ * Added comment to init script (thanks to Marc Haber). Closes: #599524
+
+ -- Christian Hammers <ch@debian.org> Thu, 13 Jan 2011 23:53:29 +0100
+
+quagga (0.99.17-3) unstable; urgency=low
+
+ * Fix FTBFS with ld --as-needed (thanks to Matthias Klose at Ubuntu).
+ Closes: #609555
+
+ -- Christian Hammers <ch@debian.org> Thu, 13 Jan 2011 23:27:06 +0100
+
+quagga (0.99.17-2) unstable; urgency=low
+
+ * Added Danisch Debconf translation (thanks to Joe Dalton). Closes: #596259
+
+ -- Christian Hammers <ch@debian.org> Sat, 18 Sep 2010 12:20:07 +0200
+
+quagga (0.99.17-1) unstable; urgency=high
+
+ * SECURITY:
+ "This release provides two important bugfixes, which address remote crash
+ possibility in bgpd discovered by CROSS team.":
+ 1. Stack buffer overflow by processing certain Route-Refresh messages
+ CVE-2010-2948
+ 2. DoS (crash) while processing certain BGP update AS path messages
+ CVE-2010-2949
+ Closes: #594262
+
+ -- Christian Hammers <ch@debian.org> Wed, 25 Aug 2010 00:52:48 +0200
+
+quagga (0.99.16-1) unstable; urgency=low
+
+ * New upstream release. Closes: #574527
+ * Added chrpath to debian/rules to fix rpath problems that lintian spottet.
+
+ -- Christian Hammers <ch@debian.org> Sun, 21 Mar 2010 17:05:40 +0100
+
+quagga (0.99.15-2) unstable; urgency=low
+
+ * Applied patch for off-by-one bug in ospf6d that caused a segmentation
+ fault when using the "area a.b.c.d filter-list prefix" command (thanks
+ to Steinar H. Gunderson). Closes: 519488
+
+ -- Christian Hammers <ch@debian.org> Sun, 14 Feb 2010 20:02:03 +0100
+
+quagga (0.99.15-1) unstable; urgency=low
+
+ * New upstream release
+ "This fixes some annoying little ospfd and ospf6d regressions, which made
+ 0.99.14 a bit of a problem release (...) This release still contains a
+ regression in the "no ip address ..." command, at least on Linux.
+ See bug #486, which contains a workaround patch. This release should be
+ considered a 1.0.0 release candidate. Please test this release as widely
+ as possible."
+ * Fixed wrong port number in zebra.8 (thanks to Thijs Kinkhorst).
+ Closes: #517860
+ * Added Russian Debconf tanslation (thanks to Yuri Kozlov).
+ Closes: #539464
+ * Removed so-version in build-dep to libreadline-dev on request of
+ Matthias Klose.
+ * Added README.source with reference to dpatch as suggested by lintian.
+ * Bumped standards versionto 3.8.3.
+
+ -- Christian Hammers <ch@debian.org> Sun, 13 Sep 2009 18:12:06 +0200
+
+quagga (0.99.14-1) unstable; urgency=low
+
+ * New upstream release
+ "This release contains a regression fix for ospf6d, various small fixes
+ and some hopefully very significant bgpd stability fixes.
+ This release should be considered a 1.0.0 release candidate. Please test
+ this release as widely as possible."
+ * Fixes bug with premature LSA aging in ospf6d. Closes: #535030
+ * Fixes section number in zebra.8 manpage. Closes: #517860
+
+ -- Christian Hammers <ch@debian.org> Sat, 25 Jul 2009 00:40:38 +0200
+
+quagga (0.99.13-2) unstable; urgency=low
+
+ * Added Japanese Debconf translation (thanks to Hideki Yamane).
+ Closes: #510714
+ * When checking for obsoleted config options in preinst, print filename
+ where it occures (thanks to Michael Bussmann). Closes: #339489
+
+ -- Christian Hammers <ch@debian.org> Sun, 19 Jul 2009 17:13:23 +0200
+
+quagga (0.99.13-1) unstable; urgency=low
+
+ * New upstream release
+ "This release is contains a number of small fixes, for potentially
+ irritating issues, as well as small enhancements to vtysh and support
+ for linking to PCRE (a much faster regex library)."
+ * Added build-dep to gawk as configure required it for memtypes.awk
+ * Replaced build-dep to gs-gpl with ghostscript as requested by lintian
+ * Minor changes to copyright and control files to make lintian happy.
+
+ -- Christian Hammers <ch@debian.org> Wed, 24 Jun 2009 17:53:28 +0200
+
+quagga (0.99.12-1) unstable; urgency=high
+
+ * New upstream release
+ "This release fixes an urgent bug in bgpd where it could hit an assert
+ if it received a long AS_PATH with a 4-byte ASN." Noteworthy bugfixes:
+ + [bgpd] Fix bgp ipv4/ipv6 accept handling
+ + [bgpd] AS4 bugfix by Chris Caputo
+ + [bgpd] Allow accepted peers to progress even if realpeer is in Connect
+ + [ospfd] Switch Fletcher checksum back to old ospfd version
+
+ -- Christian Hammers <ch@debian.org> Mon, 22 Jun 2009 00:16:33 +0200
+
+quagga (0.99.11-1) unstable; urgency=low
+
+ * New upstream release
+ "Most regressions in 0.99 over 0.98 are now believed to be fixed. This
+ release should be considered a release-candidate for a new stable series."
+ + bgpd: Preliminary UI and Linux-IPv4 support for TCP-MD5 merged
+ + zebra: ignore dead routes in RIB update
+ + [ospfd] Default route needs to be refreshed after neighbour state change
+ + [zebra:netlink] Set proto/scope on all route update messages
+ * Removed debian/patches/20_*bgp*md5*.dpatch due to upstream support.
+
+ -- Christian Hammers <ch@debian.org> Thu, 09 Oct 2008 22:56:38 +0200
+
+quagga (0.99.10-1) unstable; urgency=medium
+
+ * New upstream release
+ + bgpd: 4-Byte AS Number support
+ + Sessions were incorrectly reset if a partial AS-Pathlimit attribute
+ was received.
+ + Advertisement of Multi-Protocol prefixes (i.e. non-IPv4) had been
+ broken in the 0.99.9 release. Closes: #467656
+
+ -- Christian Hammers <ch@debian.org> Tue, 08 Jul 2008 23:32:42 +0200
+
+quagga (0.99.9-6) unstable; urgency=low
+
+ * Fixed FTBFS by adding a build-dep to libpcre3-dev (thanks to Luk Claes).
+ Closes: #469891
+
+ -- Christian Hammers <ch@debian.org> Sat, 12 Apr 2008 12:53:51 +0200
+
+quagga (0.99.9-5) unstable; urgency=low
+
+ * C.J. Adams-Collier and Paul Jakma suggested to build against libpcre3
+ which is supposed to be faster.
+
+ -- Christian Hammers <ch@debian.org> Sun, 02 Mar 2008 13:19:42 +0100
+
+quagga (0.99.9-4) unstable; urgency=low
+
+ * Added hardening-wrapper to the build-deps (thanks to Moritz Muehlenhoff).
+
+ -- Christian Hammers <ch@debian.org> Tue, 29 Jan 2008 22:33:56 +0100
+
+quagga (0.99.9-3) unstable; urgency=low
+
+ * Replaced the BGP patch by a new one so that the package builds again
+ with kernels above 2.6.21!
+ * debian/control:
+ + Moved quagga-doc to section doc to make lintian happy.
+ * Added Spanish debconf translation (thanks to Carlos Galisteo de Cabo).
+ Closes: #428574
+ * debian/control: (thanks to Marco Rodrigues)
+ + Bump Standards-Version to 3.7.3 (no changes needed).
+ + Add Homepage field.
+
+ -- Christian Hammers <ch@debian.org> Mon, 28 Jan 2008 22:29:18 +0100
+
+quagga (0.99.9-2.1) unstable; urgency=low
+
+ * Non-maintainer upload.
+ * debian/rules: fixed bashisms. (Closes: #459122)
+
+ -- Miguel Angel Ruiz Manzano <debianized@gmail.com> Tue, 22 Jan 2008 14:37:21 -0300
+
+quagga (0.99.9-2) unstable; urgency=low
+
+ * Added CVE id for the security bug to the last changelog entry.
+ Closes: 442133
+
+ -- Christian Hammers <ch@debian.org> Tue, 25 Sep 2007 22:01:31 +0200
+
+quagga (0.99.9-1) unstable; urgency=high
+
+ * SECURITY:
+ "This release fixes two potential DoS conditions in bgpd, reported by Mu
+ Security, where a bgpd could be crashed if a peer sent a malformed OPEN
+ message or a malformed COMMUNITY attribute. Only configured peers can do
+ this, hence we consider these issues to be very low impact." CVE-2007-4826
+
+ -- Christian Hammers <ch@debian.org> Wed, 12 Sep 2007 21:12:41 +0200
+
+quagga (0.99.8-1) unstable; urgency=low
+
+ * New upstream version.
+
+ -- Christian Hammers <ch@debian.org> Fri, 17 Aug 2007 00:07:04 +0200
+
+quagga (0.99.7-3) unstable; urgency=medium
+
+ * Applied patch for FTBFS with linux-libc-dev (thanks to Andrew J. Schorr
+ and Lucas Nussbaum). Closes: #429003
+
+ -- Christian Hammers <ch@debian.org> Fri, 22 Jun 2007 21:34:55 +0200
+
+quagga (0.99.7-2) unstable; urgency=low
+
+ * Added Florian Weimar as co-maintainer. Closes: 421977
+ * Added Dutch debconf translation (thanks to Bart Cornelis).
+ Closes: #420932
+ * Added Portuguese debconf translation (thanks to Rui Branco).
+ Closes: #421185
+ * Improved package description (thanks to Reuben Thomas).
+ Closes: #418933
+ * Added CVE Id to 0.99.6-5 changelog entry.
+
+ -- Christian Hammers <ch@debian.org> Wed, 02 May 2007 20:27:12 +0200
+
+quagga (0.99.7-1) unstable; urgency=low
+
+ * New upstream release. Closes: #421553
+
+ -- Christian Hammers <ch@debian.org> Mon, 30 Apr 2007 14:22:34 +0200
+
+quagga (0.99.6-6) unstable; urgency=medium
+
+ * Fixes FTBFS with tetex-live. Closes: #420468
+
+ -- Christian Hammers <ch@debian.org> Mon, 23 Apr 2007 21:34:13 +0200
+
+quagga (0.99.6-5) unstable; urgency=high
+
+ * SECURITY:
+ The bgpd daemon was vulnerable to a Denial-of-Service. Configured peers
+ could cause a Quagga bgpd to, typically, assert() and abort. The DoS
+ could be triggered by peers by sending an UPDATE message with a crafted,
+ malformed Multi-Protocol reachable/unreachable NLRI attribute.
+ This is CVE-2007-1995 and Quagga Bug#354. Closes: #418323
+
+ -- Christian Hammers <ch@debian.org> Thu, 12 Apr 2007 23:21:58 +0200
+
+quagga (0.99.6-4) unstable; urgency=low
+
+ * Improved note in README.Debian for SNMP self-builders (thanks to Matthias
+ Wamser). Closes: #414788
+
+ -- Christian Hammers <ch@debian.org> Wed, 14 Mar 2007 02:18:57 +0100
+
+quagga (0.99.6-3) unstable; urgency=low
+
+ * Updated German Debconf translation (thanks to Matthias Julius).
+ Closes: #409327
+
+ -- Christian Hammers <ch@debian.org> Sat, 10 Feb 2007 15:06:16 +0100
+
+quagga (0.99.6-2) unstable; urgency=low
+
+ * Updated config.guess/config.sub as suggested by lintian.
+ * Corrected README.Debian text regarding the WANT_SNMP flag.
+
+ -- Christian Hammers <ch@debian.org> Sun, 17 Dec 2006 01:45:37 +0100
+
+quagga (0.99.6-1) unstable; urgency=low
+
+ * New upstream release. Closes: #402361
+
+ -- Christian Hammers <ch@debian.org> Mon, 11 Dec 2006 00:28:09 +0100
+
+quagga (0.99.5-5) unstable; urgency=high
+
+ * Changed Depends on adduser to Pre-Depends to avoid uninstallability
+ in certain cases (thanks to Steve Langasek, Lucas Nussbaum).
+ Closes: #398562
+
+ -- Christian Hammers <ch@debian.org> Wed, 15 Nov 2006 17:46:34 +0100
+
+quagga (0.99.5-4) unstable; urgency=low
+
+ * Added default PAM file and some explanations regarding PAM authentication
+ of vtysh which could prevent the start at boot-time when used wrong.
+ Now PAM permits anybody to access the vtysh tool (a malicious user could
+    build his own vtysh without PAM anyway) and the access is controlled by
+ the read/write permissions of the vtysh socket which are only granted to
+ users belonging to the quaggavty group (thanks to Wakko Warner).
+ Closes: #389496
+ * Added "case" to prerm script so that the Debconf question is not called a
+ second time in e.g. "new-prerm abort-upgrade" after being NACKed in the
+ old-prerm.
+
+ -- Christian Hammers <ch@debian.org> Fri, 3 Nov 2006 01:22:15 +0100
+
+quagga (0.99.5-3) unstable; urgency=medium
+
+ * Backport CVS fix for an OSPF DD Exchange regression (thanks to Matt
+ Brown). Closes: #391040
+
+ -- Christian Hammers <ch@debian.org> Wed, 25 Oct 2006 19:47:11 +0200
+
+quagga (0.99.5-2) unstable; urgency=medium
+
+ * Added LSB info section to initscript.
+ * Removed unnecessary depends to libncurses5 to make checklib happy.
+ The one to libcap should remain though as it is just temporarily
+ unused.
+
+ -- Christian Hammers <ch@debian.org> Thu, 21 Sep 2006 00:04:07 +0200
+
+quagga (0.99.5-1) unstable; urgency=low
+
+ * New upstream release. Closes: #38704
+ * Upstream fixes ospfd documentary inconsistency. Closes: #347897
+ * Changed debconf question in prerm to "high" (thanks to Rafal Pietrak).
+
+ -- Christian Hammers <ch@debian.org> Mon, 11 Sep 2006 23:43:42 +0200
+
+quagga (0.99.4-4) unstable; urgency=low
+
+ * Recreate /var/run if not present because /var is e.g. on a tmpfs
+ filesystem (thanks to Martin Pitt). Closes: #376142
+ * Removed nonexistant option from ospfd.8 manpage (thanks to
+ David Medberry). Closes: 378274
+
+ -- Christian Hammers <ch@debian.org> Sat, 15 Jul 2006 20:22:12 +0200
+
+quagga (0.99.4-3) unstable; urgency=low
+
+ * Removed invalid semicolon from rules file (thanks to Philippe Gramoulle).
+
+ -- Christian Hammers <ch@debian.org> Tue, 27 Jun 2006 23:36:07 +0200
+
+quagga (0.99.4-2) unstable; urgency=high
+
+ * Set urgency to high as 0.99.4-1 fixes a security problem!
+ * Fixed building of the info file.
+
+ -- Christian Hammers <ch@debian.org> Sun, 14 May 2006 23:04:28 +0200
+
+quagga (0.99.4-1) unstable; urgency=low
+
+ * New upstream release to fix a security problem in the telnet interface
+ of the BGP daemon which could be used for DoS attacks (CVE-2006-2276).
+ Closes: 366980
+
+ -- Christian Hammers <ch@debian.org> Sat, 13 May 2006 19:54:40 +0200
+
+quagga (0.99.3-3) unstable; urgency=low
+
+ * Added CVE numbers for the security patch in 0.99.3-2.
+
+ -- Christian Hammers <ch@debian.org> Sat, 6 May 2006 17:14:22 +0200
+
+quagga (0.99.3-2) unstable; urgency=high
+
+ * SECURITY:
+ Added security bugfix patch from upstream BTS for security problem
+ that could lead to injected routes when using RIPv1.
+ CVE-2006-2223 - missing configuration to disable RIPv1 or require
+ plaintext or MD5 authentication
+ CVE-2006-2224 - lack of enforcement of RIPv2 authentication requirements
+ Closes: #365940
+ * First amd64 upload.
+
+ -- Christian Hammers <ch@debian.org> Thu, 4 May 2006 00:22:09 +0200
+
+quagga (0.99.3-1) unstable; urgency=low
+
+ * New upstream release
+
+ -- Christian Hammers <ch@debian.org> Wed, 25 Jan 2006 13:37:27 +0100
+
+quagga (0.99.2-1) unstable; urgency=low
+
+ * New upstream release
+ Closes: #330248, #175553
+
+ -- Christian Hammers <ch@debian.org> Wed, 16 Nov 2005 00:25:52 +0100
+
+quagga (0.99.1-7) unstable; urgency=low
+
+ * Changed debian/rules check for mounted /proc directory to check
+ for /proc/1 as not all systems (e.g. 2.6 arm kernels) have
+ /proc/kcore which is a optional feature only (thanks to Lennert
+ Buytenhek). Closes: #335695
+ * Added Swedish Debconf translation (thanks to Daniel Nylander).
+ Closes: #331367
+
+ -- Christian Hammers <ch@debian.org> Thu, 27 Oct 2005 20:53:19 +0200
+
+quagga (0.99.1-6) unstable; urgency=low
+
+ * Fixed debconf dependency as requested by Joey Hess.
+
+ -- Christian Hammers <ch@debian.org> Mon, 26 Sep 2005 20:47:35 +0200
+
+quagga (0.99.1-5) unstable; urgency=low
+
+ * Rebuild with libreadline5-dev as build-dep as requested by
+ Matthias Klose. Closes: #326306
+ * Made initscript more fault tolerant against missing lines in
+ /etc/quagga/daemons (thanks to Ralf Hildebrandt). Closes: #323774
+ * Added dependency to adduser.
+
+ -- Christian Hammers <ch@debian.org> Tue, 13 Sep 2005 21:42:17 +0200
+
+quagga (0.99.1-4) unstable; urgency=low
+
+ * Added French Debconf translation (thanks to Mohammed Adnene Trojette).
+ Closes: #319324
+ * Added Czech Debconf translation (thanks to Miroslav Kure).
+ Closes: #318127
+
+ -- Christian Hammers <ch@debian.org> Sun, 31 Jul 2005 04:19:41 +0200
+
+quagga (0.99.1-3) unstable; urgency=low
+
+ * A Debconf question now asks the admin before upgrading if the daemon
+ should really be stopped as this could lead to the loss of network
+ connectivity or BGP flaps (thanks to Michael Horn and Achilleas Kotsis).
+ Also added a hint about setting Quagga "on hold" to README.Debian.
+ Closes: #315467
+ * Added patch to build on Linux/ARM.
+
+ -- Christian Hammers <ch@debian.org> Sun, 10 Jul 2005 22:19:38 +0200
+
+quagga (0.99.1-2) unstable; urgency=low
+
+ * Fixed SNMP enabled command in debian/rules (thanks to Christoph Kluenter).
+ Closes: #306840
+
+ -- Christian Hammers <ch@debian.org> Sat, 4 Jun 2005 14:04:01 +0200
+
+quagga (0.99.1-1) unstable; urgency=low
+
+ * New upstream version. Among others:
+ - BGP graceful restart and "match ip route-source" added
+ - support for interface renaming
+    - improved threading for better responsiveness under load
+ * Switched to dpatch to make diffs cleaner.
+ * Made autoreconf unnecessary.
+ * Replaced quagga.dvi and quagga.ps by quagga.pdf in quagga-doc.
+ (the PostScript would have needed Makefile corrections and PDF
+ is more preferable anyway)
+ * Added isisd to the list of daemons in /etc/init.d/quagga (thanks
+ to Ernesto Elbe).
+ * Added hint for "netlink-listen: overrun" messages (thanks to
+ Hasso Tepper).
+ * Added preinst check that bails out if old smux options are in use
+ as Quagga would not start up else anyway (thanks to Bjorn Mork).
+ Closes: #308320
+
+ -- Christian Hammers <ch@debian.org> Fri, 13 May 2005 01:18:24 +0200
+
+quagga (0.98.3-7) unstable; urgency=high
+
+ * Removed SNMP support as linking against NetSNMP introduced a dependency
+ to OpenSSL which is not compatible to the GPL which governs this
+ application (thanks to Faidon Liambotis). See README.Debian for more
+ information. Closes: #306840
+ * Changed listening address of ospf6d and ripngd from 127.0.0.1 to "::1".
+ * Added build-dep to groff to let drafz-zebra-00.txt build correctly.
+
+ -- Christian Hammers <ch@debian.org> Wed, 4 May 2005 20:08:14 +0200
+
+quagga (0.98.3-6) testing-proposed-updates; urgency=high
+
+ * Removed "Recommends kernel-image-2.4" as aptitude then
+    installs a kernel-image for an arbitrary architecture as long
+    as it fulfills that recommendation which can obviously be fatal
+    at the next reboot :) Also it is a violation of the policy
+ which mandates a reference to real packages (thanks to Holger Levsen).
+ Closes: #307281
+
+ -- Christian Hammers <ch@debian.org> Tue, 3 May 2005 22:53:39 +0200
+
+quagga (0.98.3-5) unstable; urgency=high
+
+ * The patch which tried to remove the OpenSSL dependency, which is
+ not only unneccessary but also a violation of the licence and thus RC,
+ stopped working a while ago, since autoreconf is no longer run before
+ building the binaries. So now ./configure is patched directly (thanks
+ to Faidon Liambotis for reporting). Closes: #306840
+ * Raised Debhelper compatibility level from 3 to 4. Nothing changed.
+ * Added build-dep to texinfo (>= 4.7) to ease work for www.backports.org.
+
+ -- Christian Hammers <ch@debian.org> Fri, 29 Apr 2005 02:31:03 +0200
+
+quagga (0.98.3-4) unstable; urgency=low
+
+ * Removed Debconf upgrade note as it was considered a Debconf abuse
+ and apart from that so obvious that it was not even worth to be
+ put into NEWS.Debian (thanks to Steve Langasek). Closes: #306384
+
+ -- Christian Hammers <ch@debian.org> Wed, 27 Apr 2005 00:10:24 +0200
+
+quagga (0.98.3-3) unstable; urgency=medium
+
+ * Adding the debconf module due to a lintian suggestion is a very
+ bad idea if no db_stop is called as the script hangs then (thanks
+ to Tore Anderson for reporting). Closes: #306324
+
+ -- Christian Hammers <ch@debian.org> Mon, 25 Apr 2005 21:55:58 +0200
+
+quagga (0.98.3-2) unstable; urgency=low
+
+ * Added debconf confmodule to postinst as lintian suggested.
+
+ -- Christian Hammers <ch@debian.org> Sun, 24 Apr 2005 13:16:00 +0200
+
+quagga (0.98.3-1) unstable; urgency=low
+
+ * New upstream release.
+    Most notably fixes the last regression in bgpd (reannounce of prefixes
+ with changed attributes works again), race condition in netlink
+ handling while using IPv6, MTU changes handling in ospfd and several
+ crashes in ospfd, bgpd and ospf6d.
+
+ -- Christian Hammers <ch@debian.org> Mon, 4 Apr 2005 12:51:24 +0200
+
+quagga (0.98.2-2) unstable; urgency=low
+
+ * Added patch to let Quagga compile with gcc-4.0 (thanks to
+ Andreas Jochens). Closes: #300949
+
+ -- Christian Hammers <ch@debian.org> Fri, 25 Mar 2005 19:33:30 +0100
+
+quagga (0.98.2-1) unstable; urgency=medium
+
+ * Quoting the upstream announcement:
+ The 0.98.1 release unfortunately was a brown paper bag release with
+ respect to ospfd. [...] 0.98.2 has been released, with one crucial change
+ to fix the unfortunate mistake in 0.98.1, which caused problems if
+ ospfd became DR.
+ * Note: the upstream tarball had a strange problem, apparently redhat.spec
+ was twice in it? At least debuild gave a strange error message so I
+ unpacked it by hand. No changes were made to the .orig.tar.gz!
+
+ -- Christian Hammers <ch@debian.org> Fri, 4 Feb 2005 01:31:36 +0100
+
+quagga (0.98.1-1) unstable; urgency=medium
+
+ * New upstream version
+ "fixing a fatal OSPF + MD5 auth regression, and a non-fatal high-load
+ regression in bgpd which were present in the 0.98.0 release."
+ * Upstream version fixes bug in ospfd that could lead to crash when OSPF
+ packages had a MTU > 1500. Closes: #290566
+ * Added notice regarding capability kernel support to README.Debian
+ (thanks to Florian Weimer). Closes: #291509
+ * Changed permission setting in postinst script (thanks to Bastian Blank).
+ Closes: #292690
+
+ -- Christian Hammers <ch@debian.org> Tue, 1 Feb 2005 02:01:27 +0100
+
+quagga (0.98.0-3) unstable; urgency=low
+
+ * Fixed problem in init script. Closes: #290317
+ * Removed obsolete "smux peer enable" patch.
+
+ -- Christian Hammers <ch@debian.org> Fri, 14 Jan 2005 17:37:27 +0100
+
+quagga (0.98.0-2) unstable; urgency=low
+
+ * Updated broken TCP MD5 patch for BGP (thanks to John P. Looney
+ for telling me).
+
+ -- Christian Hammers <ch@debian.org> Thu, 13 Jan 2005 02:03:54 +0100
+
+quagga (0.98.0-1) unstable; urgency=low
+
+ * New upstream release
+ * Added kernel-image-2.6 as alternative to 2.4 to the recommends
+ (thanks to Faidon Liambotis). Closes: #289530
+
+ -- Christian Hammers <ch@debian.org> Mon, 10 Jan 2005 19:36:17 +0100
+
+quagga (0.97.5-1) unstable; urgency=low
+
+ * New upstream version.
+ * Added Czech debconf translation (thanks to Miroslav Kure).
+ Closes: #287293
+ * Added Brazilian debconf translation (thanks to Andre Luis Lopes).
+ Closes: #279352
+
+ -- Christian Hammers <ch@debian.org> Wed, 5 Jan 2005 23:49:57 +0100
+
+quagga (0.97.4-2) unstable; urgency=low
+
+ * Fixed quagga.info build problem.
+
+ -- Christian Hammers <ch@debian.org> Wed, 5 Jan 2005 22:38:01 +0100
+
+quagga (0.97.4-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Christian Hammers <ch@debian.org> Tue, 4 Jan 2005 01:45:22 +0100
+
+quagga (0.97.3-2) unstable; urgency=low
+
+ * Included isisd in the daemon list.
+ * Wrote an isisd manpage.
+ * It is now ensured that zebra is always the last daemon to be stopped.
+ * (Thanks to Hasso Tepper for mailing me a long list of suggestions
+ which lead to this release)
+
+ -- Christian Hammers <ch@debian.org> Sat, 18 Dec 2004 13:14:55 +0100
+
+quagga (0.97.3-1) unstable; urgency=medium
+
+ * New upstream version.
+ - Fixes important OSPF bug.
+ * Added ht-20040911-smux.patch regarding Quagga bug #112.
+ * Updated ht-20041109-0.97.3-bgp-md5.patch for BGP with TCP MD5
+ (thanks to Matthias Wamser).
+
+ -- Christian Hammers <ch@debian.org> Tue, 9 Nov 2004 17:45:26 +0100
+
+quagga (0.97.2-4) unstable; urgency=low
+
+ * Added Portuguese debconf translation (thanks to Andre Luis Lopes).
+ Closes: #279352
+ * Disabled ospfapi server by default on recommendation of Paul Jakma.
+
+ -- Christian Hammers <ch@debian.org> Sun, 7 Nov 2004 15:07:05 +0100
+
+quagga (0.97.2-3) unstable; urgency=low
+
+ * Added Andrew Schorrs VTY Buffer patch from the [quagga-dev 1729].
+
+ -- Christian Hammers <ch@debian.org> Tue, 2 Nov 2004 00:46:56 +0100
+
+quagga (0.97.2-2) unstable; urgency=low
+
+ * Changed file and directory permissions and ownerships according to a
+ suggestion from Paul Jakma. Still not perfect though.
+ * Fixed upstream vtysh.conf.sample file.
+ * "ip ospf network broadcast" is now saved correctly. Closes: #244116
+ * Daemon options are now in /etc/quagga/debian.conf to be user
+ configurable (thanks to Simon Raven and Hasso Tepper). Closes: #266715
+
+ -- Christian Hammers <ch@debian.org> Tue, 26 Oct 2004 23:35:45 +0200
+
+quagga (0.97.2-1) unstable; urgency=low
+
+ * New upstream version.
+ Closes: #254541
+ * Fixed warning on unmodular kernels (thanks to Christoph Biedl).
+ Closes: #277973
+
+ -- Christian Hammers <ch@debian.org> Mon, 25 Oct 2004 00:47:04 +0200
+
+quagga (0.97.1-2) unstable; urgency=low
+
+ * Version 0.97 introduced shared libraries. They are now included.
+ (thanks to Raf D'Halleweyn). Closes: #277446
+
+ -- Christian Hammers <ch@debian.org> Wed, 20 Oct 2004 15:32:06 +0200
+
+quagga (0.97.1-1) unstable; urgency=low
+
+ * New upstream version.
+ * Removed some obsolete files from debian/patches.
+ * Added patch from upstream bug 113. Closes: #254541
+ * Added patch from upstream that fixes a compilation problem in the
+ ospfclient code (thanks to Hasso Tepper).
+ * Updated German debconf translation (thanks to Jens Nachtigall)
+ Closes: #277059
+
+ -- Christian Hammers <ch@debian.org> Mon, 18 Oct 2004 01:16:35 +0200
+
+quagga (0.96.5-11) unstable; urgency=low
+
+ * Fixed /tmp/buildd/* paths in binaries.
+ For some unknown reason the upstream Makefile modified a .h file at
+ the end of the "debian/rules build" target. During the following
+ "make install" one library got thus be re*compiled* - with /tmp/buildd
+ paths as sysconfdir (thanks to Peder Chr. Norgaard). Closes: #274050
+
+ -- Christian Hammers <ch@debian.org> Fri, 1 Oct 2004 01:21:02 +0200
+
+quagga (0.96.5-10) unstable; urgency=medium
+
+ * The BGP routing daemon might freeze on network disturbances when
+ their peer is also a Quagga/Zebra router.
+ Applied patch from http://bugzilla.quagga.net/show_bug.cgi?id=102
+ which has been confirmed by the upstream author.
+ (thanks to Gunther Stammwitz)
+ * Changed --enable-pam to --with-libpam (thanks to Hasso Tepper).
+ Closes: #264562
+ * Added patch for vtysh (thanks to Hasso Tepper). Closes: #215919
+
+ -- Christian Hammers <ch@debian.org> Mon, 9 Aug 2004 15:33:02 +0200
+
+quagga (0.96.5-9) unstable; urgency=low
+
+ * Rewrote the documentation chapter about SNMP support. Closes: #195653
+ * Added MPLS docs.
+
+ -- Christian Hammers <ch@debian.org> Thu, 29 Jul 2004 21:01:52 +0200
+
+quagga (0.96.5-8) unstable; urgency=low
+
+ * Adjusted a grep in the initscript to also match a modprobe message
+ from older modutils packages (thanks to Faidon Paravoid).
+
+ -- Christian Hammers <ch@debian.org> Wed, 28 Jul 2004 21:19:02 +0200
+
+quagga (0.96.5-7) unstable; urgency=low
+
+ * Added a "cd /etc/quagga/" to the init script as quagga tries to load
+ the config file first from the current working dir and then from the
+ config dir which could lead to confusion (thanks to Marco d'Itri).
+ Closes: #255078
+ * Removed warning regarding problems with the Debian kernels from
+ README.Debian as they are no longer valid (thanks to Raphael Hertzog).
+ Closes: #257580
+ * Added patch from Hasso Tepper that makes "terminal length 0" work
+ in vtysh (thanks to Matthias Wamser). Closes: #252579
+
+ -- Christian Hammers <ch@debian.org> Thu, 8 Jul 2004 21:53:21 +0200
+
+quagga (0.96.5-6) unstable; urgency=low
+
+ * Try to load the capability module as it is needed now.
+
+ -- Christian Hammers <ch@debian.org> Tue, 8 Jun 2004 23:25:29 +0200
+
+quagga (0.96.5-5) unstable; urgency=low
+
+ * Changed the homedir of the quagga user to /etc/quagga/ to allow
+ admins to put ~/.ssh/authorized_keys there (thanks to Matthias Wamser).
+ Closes: #252577
+
+ -- Christian Hammers <ch@debian.org> Sat, 5 Jun 2004 14:47:31 +0200
+
+quagga (0.96.5-4) unstable; urgency=medium
+
+ * Fixed rules file to use the renamed ./configure option --enable-tcp-md5
+ (thanks to Matthias Wamser). Closes: #252141
+
+ -- Christian Hammers <ch@debian.org> Tue, 1 Jun 2004 22:58:32 +0200
+
+quagga (0.96.5-3) unstable; urgency=low
+
+ * Provided default binary package name to all build depends that were
+ virtual packages (thanks to Goswin von Brederlow). Closes: #251625
+
+ -- Christian Hammers <ch@debian.org> Sat, 29 May 2004 22:48:53 +0200
+
+quagga (0.96.5-2) unstable; urgency=low
+
+ * New upstream version.
+ * New md5 patch version (thanks to Niklas Jakobsson and Hasso Tepper).
+ Closes: #250985
+ * Fixes info file generation (thanks to Peder Chr. Norgaard).
+ Closes: #250992
+ * Added catalan debconf translation (thanks to Aleix Badia i Bosch).
+ Closes: #250118
+ * PATCHES:
+ This release contains BGP4 MD5 support which requires a kernel patch
+ to work. See /usr/share/doc/quagga/README.Debian.MD5.
+ (The patch is ht-20040525-0.96.5-bgp-md5.patch from Hasso Tepper)
+
+ -- Christian Hammers <ch@debian.org> Thu, 27 May 2004 20:09:37 +0200
+
+quagga (0.96.5-1) unstable; urgency=low
+
+ * New upstream version.
+ * PATCHES:
+ This release contains BGP4 MD5 support which also requires a kernel patch.
+ See /usr/share/doc/quagga/README.Debian.MD5 and search for CAN-2004-0230.
+
+ -- Christian Hammers <ch@debian.org> Sun, 16 May 2004 17:40:40 +0200
+
+quagga (0.96.4x-10) unstable; urgency=low
+
+ * SECURITY:
+ This release contains support for MD5 for BGP which is one suggested
+ prevention of the actually long known TCP SYN/RST attacks which got
+ much news in the last days as ideas were revealed that made them much
+    easier to carry out against especially the BGP sessions than commonly known.
+    There are a lot of arguments against the MD5 approach but some ISPs
+ started to require it.
+ See: CAN-2004-0230, http://www.us-cert.gov/cas/techalerts/TA04-111A.html
+ * PATCHES:
+ This release contains the MD5 patch from Hasso Tepper. It also seems to
+    require a kernel patch. See /usr/share/doc/quagga/README.Debian.MD5.
+
+ -- Christian Hammers <ch@debian.org> Thu, 29 Apr 2004 01:01:38 +0200
+
+quagga (0.96.4x-9) unstable; urgency=low
+
+ * Fixed daemon loading order (thanks to Matt Kemner).
+ * Fixed typo in init script (thanks to Charlie Brett). Closes: #238582
+
+ -- Christian Hammers <ch@debian.org> Sun, 4 Apr 2004 15:32:18 +0200
+
+quagga (0.96.4x-8) unstable; urgency=low
+
+ * Patched upstream source so that quagga header files end up in
+ /usr/include/quagga/. Closes: #233792
+
+ -- Christian Hammers <ch@debian.org> Mon, 23 Feb 2004 01:42:53 +0100
+
+quagga (0.96.4x-7) unstable; urgency=low
+
+ * Fixed info file installation (thanks to Holger Dietze). Closes: #227579
+ * Added Japanese translation (thanks to Hideki Yamane). Closes: #227812
+
+ -- Christian Hammers <ch@debian.org> Sun, 18 Jan 2004 17:28:29 +0100
+
+quagga (0.96.4x-6) unstable; urgency=low
+
+ * Added dependency to iproute.
+ * Initscript now checks not only for the pid file but also for the
+ daemons presence (thanks to Phil Gregory). Closes: #224389
+ * Added my patch to configure file permissions.
+
+ -- Christian Hammers <ch@debian.org> Mon, 15 Dec 2003 22:34:29 +0100
+
+quagga (0.96.4x-5) unstable; urgency=low
+
+ * Added patch which gives bgpd the CAP_NET_RAW capability to allow it
+ to bind to special IPv6 link-local interfaces (Thanks to Bastian Blank).
+ Closes: #222930
+ * Made woody backport easier by applying Colin Watsons po-debconf hack.
+ Thanks to Marc Haber for suggesting it. Closes: #223527
+ * Made woody backport easier by applying a patch that removes some
+ obscure whitespaces inside an C macro. (Thanks to Marc Haber).
+ Closes: #223529
+ * Now uses /usr/bin/pager. Closes: #204070
+ * Added note about the "official woody backports" on my homepage.
+
+ -- Christian Hammers <ch@debian.org> Mon, 15 Dec 2003 20:39:06 +0100
+
+quagga (0.96.4x-4) unstable; urgency=high
+
+ * SECURITY:
+ Fixes another bug that was originally reported against Zebra.
+ .
+ http://rhn.redhat.com/errata/RHSA-2003-307.html
+ Herbert Xu reported that Zebra can accept spoofed messages sent on the
+ kernel netlink interface by other users on the local machine. This could
+ lead to a local denial of service attack. The Common Vulnerabilities and
+ Exposures project (cve.mitre.org) has assigned the name CAN-2003-0858 to
+ this issue.
+
+ * Minor improvements to init script (thanks to Iustin Pop).
+ Closes: #220938
+
+ -- Christian Hammers <ch@debian.org> Sat, 22 Nov 2003 13:27:57 +0100
+
+quagga (0.96.4x-3) unstable; urgency=low
+
+ * Changed "more" to "/usr/bin/pager" as default pager if $PAGER or
+ $VTYSH_PAGER is not set (thanks to Bastian Blank). Closes: #204070
+ * Made the directory (but not the config/log files!) world accessible
+ again on user request (thanks to Anand Kumria)). Closes: #213129
+ * No longer providing sample configuration in /etc/quagga/. They are
+    now only available in /usr/share/doc/quagga/ to avoid accidentally
+    using them without changing the addresses (thanks to Marc Haber).
+ Closes: #215918
+
+ -- Christian Hammers <ch@debian.org> Sun, 16 Nov 2003 16:59:30 +0100
+
+quagga (0.96.4x-2) unstable; urgency=low
+
+ * Fixed permission problem with pidfile (thanks to Kir Kostuchenko).
+ Closes: #220938
+
+ -- Christian Hammers <ch@debian.org> Sun, 16 Nov 2003 14:24:08 +0100
+
+quagga (0.96.4x-1) unstable; urgency=low
+
+ * Reupload of 0.96.4. Last upload-in-a-hurry produced a totally
+ crappy .tar.gz file. Closes: #220621
+
+ -- Christian Hammers <ch@debian.org> Fri, 14 Nov 2003 19:45:57 +0100
+
+quagga (0.96.4-1) unstable; urgency=high
+
+ * SECURITY: Remote DoS of protocol daemons.
+ Fix for a remote triggerable crash in vty layer. The management
+ ports ("telnet myrouter ospfd") should not be open to the internet!
+
+ * New upstream version.
+ - OSPF bugfixes.
+ - Some improvements for bgp and rip.
+
+ -- Christian Hammers <ch@debian.org> Thu, 13 Nov 2003 11:52:27 +0100
+
+quagga (0.96.3-3) unstable; urgency=low
+
+ * Fixed pid file generation by substituting the daemons "-d" by the
+ start-stop-daemon option "--background" (thanks to Micha Gaisser).
+ Closes: #218103
+
+ -- Christian Hammers <ch@debian.org> Wed, 29 Oct 2003 05:17:49 +0100
+
+quagga (0.96.3-2) unstable; urgency=low
+
+ * Readded GNOME-PRODUCT-ZEBRA-MIB.
+
+ -- Christian Hammers <ch@debian.org> Thu, 23 Oct 2003 06:17:03 +0200
+
+quagga (0.96.3-1) unstable; urgency=medium
+
+ * New upstream version.
+ * Removed -u and -e in postrm due to problems with debhelper and userdel
+ (thanks to Adam Majer and Jaakko Niemi). Closes: #216770
+ * Removed SNMP MIBs as they are now included in libsnmp-base (thanks to
+ David Engel and Peter Gervai). Closes: #216138, #216086
+ * Fixed seq command in init script (thanks to Marc Haber). Closes: #215915
+ * Improved /proc check (thanks to Marc Haber). Closes: #212331
+
+ -- Christian Hammers <ch@debian.org> Thu, 23 Oct 2003 03:42:02 +0200
+
+quagga (0.96.2-9) unstable; urgency=medium
+
+  * Removed /usr/share/info/dir.* which were accidentally there and prevented
+ the installation by dpkg (thanks to Simon Raven). Closes: #212614
+ * Reworded package description (thanks to Anand Kumria). Closes: #213125
+ * Added french debconf translation (thanks to Christian Perrier).
+ Closes: #212803
+
+ -- Christian Hammers <ch@debian.org> Tue, 7 Oct 2003 13:26:58 +0200
+
+quagga (0.96.2-8) unstable; urgency=low
+
+ * debian/rules now checks if /proc is mounted as ./configure needs
+ it but just fails with an obscure error message if it is absent.
+ (Thanks to Norbert Tretkowski). Closes: #212331
+
+ -- Christian Hammers <ch@debian.org> Tue, 23 Sep 2003 12:57:38 +0200
+
+quagga (0.96.2-7) unstable; urgency=low
+
+ * Last build was rejected due to a buggy dpkg-dev version. Rebuild.
+
+ -- Christian Hammers <ch@debian.org> Mon, 22 Sep 2003 20:34:12 +0200
+
+quagga (0.96.2-6) unstable; urgency=low
+
+ * Fixed init script so that is is now possible to just start
+ the bgpd but not the zebra daemon. Also daemons are now actually
+    started in the order defined by their priority. (Thanks to Thomas Kaehn
+ and Jochen Friedrich) Closes: #210924
+
+ -- Christian Hammers <ch@debian.org> Fri, 19 Sep 2003 21:17:02 +0200
+
+quagga (0.96.2-5) unstable; urgency=low
+
+ * For using quagga as BGP route server or similar, it is not
+ wanted to have the zebra daemon running too. For this reason
+ it can now be disabled in /etc/quagga/daemons, too.
+ (Thanks to Jochen Friedrich). Closes: #210924
+ * Attached *unapplied* patch for the ISIS protocol. I did not dare
+    to apply it as long as upstream does not do it but this way gives
+    users the possibility to use it if they like to.
+ (Thanks to Remco van Mook)
+
+ -- Christian Hammers <ch@debian.org> Wed, 17 Sep 2003 19:57:31 +0200
+
+quagga (0.96.2-4) unstable; urgency=low
+
+ * Enabled IPV6 router advertisement feature by default on user request
+ (thanks to Jochen Friedrich and Hasso Tepper). Closes: #210732
+ * Updated GNU autoconf to let it build on hppa/parisc64 (thanks to
+ lamont). Closes: #210492
+
+ -- Christian Hammers <ch@debian.org> Sat, 13 Sep 2003 14:11:13 +0200
+
+quagga (0.96.2-3) unstable; urgency=medium
+
+ * Removed unnecessary "-lcrypto" to avoid dependency against OpenSSL
+    which would require further copyright additions.
+
+ -- Christian Hammers <ch@debian.org> Wed, 10 Sep 2003 01:37:28 +0200
+
+quagga (0.96.2-2) unstable; urgency=low
+
+ * Added note that config files of quagga are in /etc/quagga and
+ not /etc/zebra for the zebra users that migrate to quagga.
+ (Thanks to Roberto Suarez Soto for the idea)
+ * Fixed setgid rights in /etc/quagga.
+
+ -- Christian Hammers <ch@debian.org> Wed, 27 Aug 2003 14:05:39 +0200
+
+quagga (0.96.2-1) unstable; urgency=low
+
+ * This package has formally been known as "zebra-pj"!
+ * New upstream release.
+ Fixes "anoying OSPF problem".
+ * Modified group ownerships so that vtysh can now be used by normal
+ uses if they are in the quaggavty group.
+
+ -- Christian Hammers <ch@debian.org> Mon, 25 Aug 2003 23:40:14 +0200
+
+quagga (0.96.1-1) unstable; urgency=low
+
+ * Zebra-pj, the fork of zebra has been renamed to quagga as the original
+    upstream author asked the new project members not to use "zebra" in the
+ name. zebra-pj is obsolete.
+
+ -- Christian Hammers <ch@debian.org> Mon, 18 Aug 2003 23:37:20 +0200
+
+zebra-pj (0.94+cvs20030721-1) unstable; urgency=low
+
+ * New CVS build.
+ - OSPF changes (integration of the OSPF API?)
+ - code cleanups (for ipv6?)
+ * Tightened Build-Deps to gcc-2.95 as 3.x does not compile a stable ospfd.
+ This is a known problem and has been discussed on the mailing list.
+ No other solutions so far.
+
+ -- Christian Hammers <ch@debian.org> Mon, 21 Jul 2003 23:52:00 +0200
+
+zebra-pj (0.94+cvs20030701-1) unstable; urgency=low
+
+ * Initial Release.
+
+ -- Christian Hammers <ch@debian.org> Tue, 1 Jul 2003 01:58:06 +0200
librtr-dev <!pkg.frr.nortrlib>,
libsnmp-dev,
libssh-dev <!pkg.frr.nortrlib>,
- libsystemd-dev <!pkg.frr.nosystemd>,
libyang2-dev,
lsb-base,
pkg-config,
[DEFAULT]
pristine-tar = False
debian-branch = master
-upstream-tree=SLOPPY
+upstream-tree = SLOPPY
CONF_RPKI=--disable-rpki
endif
-ifeq ($(filter pkg.frr.nosystemd,$(DEB_BUILD_PROFILES)),)
- DH_WITHOUT_SYSTEMD=
- CONF_SYSTEMD=--enable-systemd=yes
-else
- DH_WITHOUT_SYSTEMD=--without=systemd
- CONF_SYSTEMD=--enable-systemd=no
-endif
-
ifeq ($(filter pkg.frr.lua,$(DEB_BUILD_PROFILES)),)
CONF_LUA=--disable-scripting
else
export PYTHON=python3
%:
- dh $@ $(DH_WITHOUT_SYSTEMD)
+ dh $@
override_dh_auto_configure:
$(shell dpkg-buildflags --export=sh); \
LIBTOOLFLAGS="-rpath /usr/lib/$(DEB_HOST_MULTIARCH)/frr" \
--disable-dependency-tracking \
\
- $(CONF_SYSTEMD) \
$(CONF_RPKI) \
$(CONF_LUA) \
--with-libpam \
sed -e '1c #!/usr/bin/python3' -i debian/tmp/usr/lib/frr/generate_support_bundle.py
# let dh_systemd_* and dh_installinit do their thing automatically
-ifeq ($(filter pkg.frr.nosystemd,$(DEB_BUILD_PROFILES)),)
cp tools/frr.service debian/frr.service
-endif
cp tools/frrinit.sh debian/frr.init
-rm -f debian/tmp/usr/lib/frr/frr
service frr reload
-vtysh -c 'show running-config' | grep -q 'ip route 198.51.100.64/28 127.0.0.1'
+# wait for the new config to load
+for __t in $(seq 1 10); do
+ if vtysh -c 'show running-config' | grep -q 'ip route 198.51.100.64/28 127.0.0.1'; then
+ break
+ fi
+ sleep "$__t"
+done
+
+# fail if the old config is still loaded
if vtysh -c 'show running-config' | grep -q 'ip route 198.51.100.0/28 127.0.0.1'; then
exit 1
fi
sudo pacman -S \
git autoconf automake libtool make cmake pcre readline texinfo \
pkg-config pam json-c bison flex python-pytest \
- c-ares python systemd python2-ipaddress python-sphinx \
- systemd-libs net-snmp perl libcap libelf
+ c-ares python python2-ipaddress python-sphinx \
+ net-snmp perl libcap libelf
.. include:: building-libyang.rst
--enable-user=frr \
--enable-group=frr \
--enable-vty-group=frrvty \
- --disable-exampledir \
--disable-ldpd \
--enable-fpm \
--with-pkg-git-version \
sudo yum install git autoconf automake libtool make \
readline-devel texinfo net-snmp-devel groff pkgconfig \
json-c-devel pam-devel bison flex pytest c-ares-devel \
- python-devel systemd-devel python-sphinx libcap-devel \
+ python-devel python-sphinx libcap-devel \
elfutils-libelf-devel
.. include:: building-libyang.rst
--enable-user=frr \
--enable-group=frr \
--enable-vty-group=frrvty \
- --enable-systemd=yes \
- --disable-exampledir \
--disable-ldpd \
--enable-fpm \
--with-pkg-git-version \
sudo dnf install --enablerepo=PowerTools git autoconf pcre-devel \
automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
groff pkgconfig json-c-devel pam-devel bison flex python2-pytest \
- c-ares-devel python2-devel systemd-devel libcap-devel \
+ c-ares-devel python2-devel libcap-devel \
elfutils-libelf-devel
.. include:: building-libyang.rst
--enable-user=frr \
--enable-group=frr \
--enable-vty-group=frrvty \
- --enable-systemd=yes \
- --disable-exampledir \
--disable-ldpd \
--enable-fpm \
--with-pkg-git-version \
sudo apt-get install git autoconf automake libtool make \
libreadline-dev texinfo libjson-c-dev pkg-config bison flex python3-pip \
- libc-ares-dev python3-dev python3-sphinx build-essential libsystemd-dev \
+ libc-ares-dev python3-dev python3-sphinx build-essential \
libsnmp-dev libcap-dev libelf-dev
Install newer pytest (>3.0) from pip
cd frr
./bootstrap.sh
./configure \
- --enable-exampledir=/usr/share/doc/frr/examples/ \
--localstatedir=/var/run/frr \
--sbindir=/usr/lib/frr \
--sysconfdir=/etc/frr \
sudo apt-get install git autoconf automake libtool make \
libreadline-dev texinfo libjson-c-dev pkg-config bison flex \
libc-ares-dev python3-dev python3-pytest python3-sphinx build-essential \
- libsnmp-dev libsystemd-dev libcap-dev libelf-dev
+ libsnmp-dev libcap-dev libelf-dev
.. include:: building-libyang.rst
cd frr
./bootstrap.sh
./configure \
- --enable-exampledir=/usr/share/doc/frr/examples/ \
--localstatedir=/var/opt/frr \
--sbindir=/usr/lib/frr \
--sysconfdir=/etc/frr \
sudo dnf install git autoconf automake libtool make \
readline-devel texinfo net-snmp-devel groff pkgconfig json-c-devel \
pam-devel python3-pytest bison flex c-ares-devel python3-devel \
- python3-sphinx perl-core patch systemd-devel libcap-devel \
+ python3-sphinx perl-core patch libcap-devel \
elfutils-libelf-devel
.. include:: building-libyang.rst
export CPPFLAGS="-I/usr/pkg/include"
./configure \
--sysconfdir=/usr/pkg/etc/frr \
- --enable-exampledir=/usr/pkg/share/examples/frr \
--enable-pkgsrcrcdir=/usr/pkg/share/examples/rc.d \
--localstatedir=/var/run/frr \
--enable-multipath=64 \
export CPPFLAGS="-I/usr/pkg/include"
./configure \
--sysconfdir=/usr/pkg/etc/frr \
- --enable-exampledir=/usr/pkg/share/examples/frr \
--enable-pkgsrcrcdir=/usr/pkg/share/examples/rc.d \
--localstatedir=/var/run/frr \
--enable-multipath=64 \
zypper in git autoconf automake libtool make \
readline-devel texinfo net-snmp-devel groff pkgconfig libjson-c-devel\
pam-devel python3-pytest bison flex c-ares-devel python3-devel\
- python3-Sphinx perl patch systemd-devel libcap-devel libyang-devel \
+ python3-Sphinx perl patch libcap-devel libyang-devel \
libelf-devel
Building & Installing FRR
apt-get install \
git autoconf automake libtool make libreadline-dev texinfo \
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev libsystemd-dev python-ipaddress python3-sphinx \
- install-info build-essential libsystemd-dev libsnmp-dev perl libcap-dev \
+ libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ install-info build-essential libsnmp-dev perl libcap-dev \
libelf-dev
.. include:: building-libyang.rst
sudo apt-get install \
git autoconf automake libtool make libreadline-dev texinfo \
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev libsystemd-dev python-ipaddress python3-sphinx \
- install-info build-essential libsystemd-dev libsnmp-dev perl libcap-dev \
+ libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ install-info build-essential libsnmp-dev perl libcap-dev \
libelf-dev
.. include:: building-libyang.rst
sudo apt-get install \
git autoconf automake libtool make libreadline-dev texinfo \
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev libsystemd-dev python-ipaddress python3-sphinx \
- install-info build-essential libsystemd-dev libsnmp-dev perl \
+ libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ install-info build-essential libsnmp-dev perl \
libcap-dev python2 libelf-dev
Note that Ubuntu 20 no longer installs python 2.x, so it must be
5. Update Changelog for Debian Packages:
- Edit :file:`changelog-auto.in`:
+ Update :file:`debian/changelog`:
- - Change last (top of list) entry from ``@VERSION@`` to the **last**
- released version number. For example, if ``<version>`` is ``7.3`` and the
- last public release was ``7.2``, you would use ``7.2``, changing the file
- like so::
+ - Run the following with the **last** release version number and Debian
+ revision (usually ``-1``) as the argument to ``dch --newversion VERSION``.
+ For example, if ``<version>`` is ``7.3`` then you will run ``dch --newversion 7.3-1``.
- frr (@VERSION@) RELEASED; urgency=medium
-
- to::
-
- frr (7.2) RELEASED; urgency=medium
-
- - Add a new entry to the top of the list with a ``@VERSION@`` tag. Make sure
- to watch the format.
-
- - Add the changelog text below this entry.
+ - The ``dch`` will run an editor, and you should add the changelog text below
+ this entry, usually that would be: **New upstream version**.
- Verify the changelog format using ``dpkg-parsechangelog``. In the
repository root:
FRR's build time options to your liking. The full option listing can be
obtained by running ``./configure -h``. The options shown below are examples.
-.. note::
-
- If your platform uses ``systemd``, please make sure to add
- ``--enable-systemd=yes`` to your configure options.
-
.. code-block:: console
git clone https://github.com/frrouting/frr.git frr
./configure \
--prefix=/usr \
--includedir=\${prefix}/include \
- --enable-exampledir=\${prefix}/share/doc/frr/examples \
--bindir=\${prefix}/bin \
--sbindir=\${prefix}/lib/frr \
--libdir=\${prefix}/lib/frr \
.. code-block:: shell
- sudo mk-build-deps --install debian/control
+ sudo mk-build-deps --install --remove debian/control
Alternatively, you can manually install build dependencies for your
platform as outlined in :ref:`building`.
-4. Run ``tools/tarsource.sh -V``:
+4. Install `git-buildpackage` package:
.. code-block:: shell
- ./tools/tarsource.sh -V
-
- This script sets up the ``debian/changelog-auto`` file with proper version
- information.
+ sudo apt-get install git-buildpackage
5. (optional) Append a distribution identifier if needed (see below under
:ref:`multi-dist`.)
-6. Build Debian Package:
+6. Build Debian Binary and/or Source Packages:
.. code-block:: shell
- dpkg-buildpackage $options
+ gbp buildpackage --git-builder=dpkg-buildpackage --git-debian-branch="$(git rev-parse --abbrev-ref HEAD)" $options
Where `$options` may contain any or all of the following items:
* build profiles specified with ``-P``, e.g.
- ``-Ppkg.frr.nortrlib,pkg.frr.nosystemd``.
+ ``-Ppkg.frr.nortrlib,pkg.frr.rtrlib``.
Multiple values are separated by commas and there must not be a space
after the ``-P``.
+================+===================+=========================================+
| pkg.frr.rtrlib | pkg.frr.nortrlib | builds frr-rpki-rtrlib package (or not) |
+----------------+-------------------+-----------------------------------------+
- | n/a | pkg.frr.nosystemd | removes libsystemd dependency and |
- | | | disables unit file installation |
- +----------------+-------------------+-----------------------------------------+
-
- .. note::
-
- The ``pkg.frr.nosystemd`` option is only intended to support Ubuntu
- 14.04 (and should be enabled when building for that.)
* the ``-uc -us`` options to disable signing the packages with your GPG key
(git builds of the `master` or `stable/X.X` branches won't be signed by
default since their target release is set to ``UNRELEASED``.)
+ * the ``--build=type`` option accepts the following values (see the ``dpkg-buildpackage`` manual page):
+
+ * ``source`` builds the source package
+ * ``any`` builds the architecture specific binary packages
+ * ``all`` builds the architecture independent binary packages
+ * ``binary`` builds the architecture specific and independent binary packages (alias for ``any,all``)
+ * ``full`` builds everything (alias for ``source,any,all``)
+
+ Alternatively, you might want to replace ``dpkg-buildpackage`` with
+ ``debuild`` wrapper that also runs ``lintian`` and ``debsign`` on the final
+ packages.
+
7. Done!
If all worked correctly, then you should end up with the Debian packages in
a manually maintained changelog that contains proper Debian release
versioning.
- Furthermore, official Debian packages are built in ``3.0 (quilt)`` format
- with an "orig" tarball and a "debian" tarball. These tarballs are created
- by the ``tarsource.sh`` tool on any branch. The git repository however
- contains a ``3.0 (git)`` source format specifier to easily allow direct
- git builds.
-
.. _multi-dist:
You can optionally append a distribution identifier in case you want to
make multiple versions of the package available in the same repository.
-Do the following after creating the changelog with `tarsource.sh`:
.. code-block:: shell
yum install rpm-build net-snmp-devel pam-devel libcap-devel
- If your platform uses systemd::
-
- yum install systemd-devel
-
For CentOS 7 and CentOS 8, the package will be built using python3
and requires additional python3 packages::
int status_ok = 0, status_fail = 1;
struct prefix p = ...;
- struct frrscript_env env[] = {
- {"integer", "STATUS_FAIL", &status_fail},
- {"integer", "STATUS_OK", &status_ok},
- {"prefix", "myprefix", &p},
- {}};
-
- int result = frrscript_call(fs, env);
+ int result = frrscript_call(fs,
+ ("STATUS_FAIL", &status_fail),
+ ("STATUS_OK", &status_ok),
+ ("prefix", &p));
To execute a loaded script, we need to define the inputs. These inputs are
-passed by binding values to variable names that will be accessible within the
+passed in by binding values to variable names that will be accessible within the
Lua environment. Basically, all communication with the script takes place via
global variables within the script, and to provide inputs we predefine globals
-before the script runs. This is done by passing ``frrscript_call()`` an array
-of ``struct frrscript_env``. Each struct has three fields. The first identifies
-the type of the value being passed; more on this later. The second defines the
-name of the global variable within the script environment to bind the third
-argument (the value) to.
+before the script runs. This is done by passing ``frrscript_call()`` a list of
+parenthesized pairs, where the first and second fields identify, respectively,
+the name of the global variable within the script environment and the value it
+is bound to.
The script is then executed and returns a general status code. In the success
case this will be 0, otherwise it will be nonzero. The script itself does not
Querying State
^^^^^^^^^^^^^^
-When a chunk is executed, its state at exit is preserved and can be inspected.
-
-After running a script, results may be retrieved by querying the script's
-state. Again this is done by retrieving the values of global variables, which
-are known to the script author to be "output" variables.
-
-A result is retrieved like so:
-
-.. code-block:: c
-
- struct frrscript_env myresult = {"string", "myresult"};
-
- char *myresult = frrscript_get_result(fs, &myresult);
-
- ... do something ...
-
- XFREE(MTYPE_TMP, myresult);
-
+.. todo::
-As with arguments, results are retrieved by providing a ``struct
-frrscript_env`` specifying a type and a global name. No value is necessary, nor
-is it modified by ``frrscript_get_result()``. That function simply extracts the
-requested value from the script state and returns it.
-
-In most cases the returned value will be allocated with ``MTYPE_TMP`` and will
-need to be freed after use.
+ This section will be updated once ``frrscript_get_result`` has been
+ updated to work with the new ``frrscript_call`` and the rest of the new API.
Unloading
frrscript_unload(fs);
-Values returned by ``frrscript_get_result`` are still valid after the script
-they were retrieved from is unloaded.
-
-Note that you must unload and then load the script if you want to reset its
-state, for example to run it again with different inputs. Otherwise the state
-from the previous run carries over into subsequent runs.
-
.. _marshalling:
Marshalling
^^^^^^^^^^^
-Earlier sections glossed over the meaning of the type name field in ``struct
-frrscript_env`` and how data is passed between C and Lua. Lua, as a dynamically
+Earlier sections glossed over the types of values that can be passed into
+``frrscript_call`` and how data is passed between C and Lua. Lua, as a dynamically
typed, garbage collected language, cannot directly use C values without some
kind of marshalling / unmarshalling system to translate types between the two
runtimes.
Lua scripts must provide a function that marshalls the C data into a Lua
representation and pushes it on the stack. C code wishing to retrieve data from
Lua must provide a corresponding unmarshalling function that retrieves a Lua
-value from the stack and converts it to the corresponding C type. These two
-functions, together with a chosen name of the type they operate on, are
-referred to as ``codecs`` in FRR.
-
-A codec is defined as:
-
-.. code-block:: c
-
- typedef void (*encoder_func)(lua_State *, const void *);
- typedef void *(*decoder_func)(lua_State *, int);
-
- struct frrscript_codec {
- const char *typename;
- encoder_func encoder;
- decoder_func decoder;
- };
+value from the stack and converts it to the corresponding C type. These
+functions are known as encoders and decoders in FRR.
-A typename string and two function pointers.
-
-``typename`` can be anything you want. For example, for the combined types of
-``struct prefix`` and its equivalent in Lua I have chosen the name ``prefix``.
-There is no restriction on naming here, it is just a human name used as a key
-and specified when passing and retrieving values.
-
-``encoder`` is a function that takes a ``lua_State *`` and a C type and pushes
+An encoder is a function that takes a ``lua_State *`` and a C type and pushes
onto the Lua stack a value representing the C type. For C structs, the usual
case, this will typically be a Lua table (tables are the only datastructure Lua
has). For example, here is the encoder function for ``struct prefix``:
.. code-block:: c
- void lua_pushprefix(lua_State *L, const struct prefix *prefix)
+ void lua_pushprefix(lua_State *L, struct prefix *prefix)
{
char buffer[PREFIX_STRLEN];
lua_setfield(L, -2, "family");
}
-This function pushes a single value onto the Lua stack. It is a table whose equivalent in Lua is:
+This function pushes a single value onto the Lua stack. It is a table whose
+equivalent in Lua is:
.. code-block:: c
{ ["network"] = "1.2.3.4/24", ["prefixlen"] = 24, ["family"] = 2 }
-``decoder`` does the reverse; it takes a ``lua_State *`` and an index into the
-stack, and unmarshalls a Lua value there into the corresponding C type. Again
-for ``struct prefix``:
+Decoders are a bit more involved. They do the reverse; a decoder function takes
+a ``lua_State *``, pops a value off the Lua stack and converts it back into its
+C type.
+However, since Lua programs have the ability to directly modify their inputs
+(i.e. values passed in via ``frrscript_call``), we need two separate decoder
+functions, called ``lua_decode_*`` and ``lua_to*``.
+A ``lua_decode_*`` function takes a ``lua_State*``, an index, and a C type, and
+unmarshalls a Lua value into that C type.
+Again, for ``struct prefix``:
+
+.. code-block:: c
+
+ void lua_decode_prefix(lua_State *L, int idx, struct prefix *prefix)
+ {
+ lua_getfield(L, idx, "network");
+ (void)str2prefix(lua_tostring(L, -1), prefix);
+ lua_pop(L, 1);
+ /* pop the table */
+ lua_pop(L, 1);
+ }
+
+.. warning::
+
+ ``lua_decode_*`` functions should leave the Lua stack completely empty
+ when they return.
+ For decoders that unmarshall fields from tables, remember to pop the table
+ at the end.
+
+
+A ``lua_to*`` function performs a similar role except that it first allocates
+memory for the new C type before decoding the value from the Lua stack, then
+returns a pointer to the newly allocated C type.
+This function can and should be implemented using ``lua_decode_*``:
.. code-block:: c
{
struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
- lua_getfield(L, idx, "network");
- str2prefix(lua_tostring(L, -1), p);
- lua_pop(L, 1);
-
+ lua_decode_prefix(L, idx, p);
return p;
}
-By convention these functions should be called ``lua_to*``, as this is the
-naming convention used by the Lua C library for the basic types e.g.
-``lua_tointeger`` and ``lua_tostring``.
The returned data must always be copied off the stack and the copy must be
allocated with ``MTYPE_TMP``. This way it is possible to unload the script
(destroy the state) without invalidating any references to values stored in it.
+Note that it is the caller's responsibility to free the data.
-To register a new type with its corresponding encoding functions:
-
-.. code-block:: c
-
- struct frrscript_codec frrscript_codecs_lib[] = {
- {.typename = "prefix",
- .encoder = (encoder_func)lua_pushprefix,
- .decoder = lua_toprefix},
- {.typename = "sockunion",
- .encoder = (encoder_func)lua_pushsockunion,
- .decoder = lua_tosockunion},
- ...
- {}};
+For consistency, we should always name functions of the first type
+``lua_decode_*``.
+Functions of the second type should be named ``lua_to*``, as this is the
+naming convention used by the Lua C library for the basic types e.g.
+``lua_tointeger`` and ``lua_tostring``.
- frrscript_register_type_codecs(frrscript_codecs_lib);
+This two-function design allows the compiler to warn if a value passed into
+``frrscript_call`` does not have an encoder and decoder for that type.
+The ``lua_to*`` functions enable us to easily create decoders for nested
+structures.
+
+To register a new type with its corresponding encoding and decoding functions,
+add the mapping in the following macros in ``frrscript.h``:
+
+.. code-block:: diff
+
+ #define ENCODE_ARGS_WITH_STATE(L, value) \
+ _Generic((value), \
+ ...
+ - struct peer * : lua_pushpeer \
+ + struct peer * : lua_pushpeer, \
+ + struct prefix * : lua_pushprefix \
+ )(L, value)
+
+ #define DECODE_ARGS_WITH_STATE(L, value) \
+ _Generic((value), \
+ ...
+ - struct peer * : lua_decode_peer \
+ + struct peer * : lua_decode_peer, \
+ + struct prefix * : lua_decode_prefix \
+ )(L, -1, value)
+
+
+At compile time, the compiler will search for encoders/decoders for the type of
+each value passed in via ``frrscript_call``. If an encoder/decoder cannot be
+found, it will appear as a compile warning. Note that the types must
+match *exactly*.
+In the above example, we defined encoders/decoders for a value of
+``struct prefix *``, but not ``struct prefix`` or ``const struct prefix *``.
+
+``const`` values are a special case. We want to use them in our Lua scripts
+but not modify them, so creating a decoder for them would be meaningless.
+But we still need a decoder for the type of value so that the compiler will be
+satisfied.
+For that, use ``lua_decode_noop``:
+
+.. code-block:: diff
+
+ #define DECODE_ARGS_WITH_STATE(L, value) \
+ _Generic((value), \
+ ...
+ + const struct prefix * : lua_decode_noop \
+ )(L, -1, value)
-From this point on the type names are available to be used when calling any
-script and getting its results.
.. note::
--prefix=/usr/lib/frr --sysconfdir=/etc/frr \
--localstatedir=/var/run/frr \
--sbindir=/usr/lib/frr --bindir=/usr/lib/frr \
- --enable-exampledir=/usr/lib/frr/examples \
--with-moduledir=/usr/lib/frr/modules \
--enable-multipath=0 --enable-rtadv \
--enable-tcp-zebra --enable-fpm --enable-pimd \
Set domainname of the router. It is only for current ``vtysh``, it will not
be saved to any configuration file even with ``write file``.
-.. clicmd:: domainname DOMAINNAME
-
- Set domainname of the router.
-
.. clicmd:: password PASSWORD
Set password for vty interface. The ``no`` form of the command deletes the
Set enable password. The ``no`` form of the command deletes the enable
password.
+.. clicmd:: service cputime-stats
+
+ Collect CPU usage statistics for individual FRR event handlers and CLI
+ commands. This is enabled by default and can be disabled if the extra
+ overhead causes a noticeable slowdown on your system.
+
+ Disabling these statistics will also make the
+ :clicmd:`service cputime-warning (1-4294967295)` limit non-functional.
+
+.. clicmd:: service cputime-warning (1-4294967295)
+
+ Warn if the CPU usage of an event handler or CLI command exceeds the
+ specified limit (in milliseconds.) Such warnings are generally indicative
+ of some routine in FRR mistakenly blocking/hogging the processing loop and
+ should be reported as a FRR bug.
+
+ The default limit is 5 seconds (i.e. 5000), but this can be changed by the
+ deprecated ``--enable-time-check=...`` compile-time option.
+
+ This command has no effect if :clicmd:`service cputime-stats` is disabled.
+
+.. clicmd:: service walltime-warning (1-4294967295)
+
+ Warn if the total wallclock time spent handling an event or executing a CLI
+ command exceeds the specified limit (in milliseconds.) This includes time
+ spent waiting for I/O or other tasks executing and may produce excessive
+ warnings if the system is overloaded. (This may still be useful to
+ provide an immediate sign that FRR is not operating correctly due to
+ externally caused starvation.)
+
+ The default limit is 5 seconds as above, including the same deprecated
+ ``--enable-time-check=...`` compile-time option.
+
.. clicmd:: log trap LEVEL
These commands are deprecated and are present only for historical
.. clicmd:: bgp default ipv4-unicast
- This command allows the user to specify that v4 peering is turned
- on by default or not. This command defaults to on and is not displayed.
+ This command allows the user to specify that the IPv4 Unicast address
+ family is turned on by default or not. This command defaults to on
+ and is not displayed.
The `no bgp default ipv4-unicast` form of the command is displayed.
+.. clicmd:: bgp default ipv4-multicast
+
+ This command allows the user to specify that the IPv4 Multicast address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default ipv4-multicast` form of the command is displayed.
+
+.. clicmd:: bgp default ipv4-vpn
+
+ This command allows the user to specify that the IPv4 MPLS VPN address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default ipv4-vpn` form of the command is displayed.
+
+.. clicmd:: bgp default ipv4-flowspec
+
+ This command allows the user to specify that the IPv4 Flowspec address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default ipv4-flowspec` form of the command is displayed.
+
.. clicmd:: bgp default ipv6-unicast
- This command allows the user to specify that v6 peering is turned
- on by default or not. This command defaults to off and is not displayed.
+ This command allows the user to specify that the IPv6 Unicast address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
The `bgp default ipv6-unicast` form of the command is displayed.
+.. clicmd:: bgp default ipv6-multicast
+
+ This command allows the user to specify that the IPv6 Multicast address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default ipv6-multicast` form of the command is displayed.
+
+.. clicmd:: bgp default ipv6-vpn
+
+ This command allows the user to specify that the IPv6 MPLS VPN address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default ipv6-vpn` form of the command is displayed.
+
+.. clicmd:: bgp default ipv6-flowspec
+
+ This command allows the user to specify that the IPv6 Flowspec address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default ipv6-flowspec` form of the command is displayed.
+
+.. clicmd:: bgp default l2vpn-evpn
+
+ This command allows the user to specify that the L2VPN EVPN address
+ family is turned on by default or not. This command defaults to off
+ and is not displayed.
+ The `bgp default l2vpn-evpn` form of the command is displayed.
+
.. clicmd:: bgp default show-hostname
This command shows the hostname of the peer in certain BGP commands
PE2 receives this type-5 route and imports it into the vrf based on route
targets. BGP prefix imported into the vrf uses gateway IP as its BGP nexthop.
This route is installed into zebra if following conditions are satisfied:
+
1. Gateway IP nexthop is L3 reachable.
2. PE2 has received EVPN type-2 route with IP field set to gateway IP.
Topology requirements:
+
1. This feature is supported for asymmetric routing model only. While
sending packets to SN1, ingress PE (PE2) performs routing and
egress PE (PE1) performs only bridging.
1. CLI to add gateway IP while generating EVPN type-5 route from a BGP IPv4/IPv6
prefix:
-.. index:: advertise <ipv4|ipv6> unicast [gateway-ip]
-.. clicmd:: [no] advertise <ipv4|ipv6> unicast [gateway-ip]
+.. clicmd:: advertise <ipv4|ipv6> unicast [gateway-ip]
When this CLI is configured for a BGP vrf under L2VPN EVPN address family, EVPN
type-5 routes are generated for BGP prefixes in the vrf. Nexthop of the BGP
2. Add gateway IP to EVPN type-5 route using a route-map:
-.. index:: set evpn gateway-ip <ipv4|ipv6> <addr>
-.. clicmd:: [no] set evpn gateway-ip <ipv4|ipv6> <addr>
+.. clicmd:: set evpn gateway-ip <ipv4|ipv6> <addr>
When route-map with above set clause is applied as outbound policy in BGP, it
will set the gateway-ip in EVPN type-5 NLRI.
"enable-resolve-overlay-index" configuration enabled to recursively resolve the
overlay index nexthop and install the prefix into zebra.
-.. index:: enable-resolve-overlay-index
-.. clicmd:: [no] enable-resolve-overlay-index
+.. clicmd:: enable-resolve-overlay-index
Example configuration:
software available on your machine. This is needed for systemd integration, if you
disable watchfrr you cannot have any systemd integration.
-.. option:: --enable-systemd
-
- Build watchfrr with systemd integration, this will allow FRR to communicate with
- systemd to tell systemd if FRR has come up properly.
-
.. option:: --enable-werror
Build with all warnings converted to errors as a compile option. This
.. option:: --enable-datacenter
+ This option is deprecated as it is superseded by the `-F` (profile) command
+ line option which allows adjusting the setting at startup rather than
+ compile time.
+
Enable system defaults to work as if in a Data Center. See defaults.h
for what is changed by this configure option.
.. option:: --enable-time-check XXX
- When this is enabled with a XXX value in microseconds, any thread that
- runs for over this value will cause a warning to be issued to the log.
- If you do not specify any value or don't include this option then
- the default time is 5 seconds. If --disable-time-check is specified
- then no warning is issued for any thread run length.
+ This option is deprecated as it was replaced by the
+ :clicmd:`service cputime-warning (1-4294967295)` CLI command, which may be
+ adjusted at runtime rather than being a compile-time setting. See there for
+ further detail.
.. option:: --disable-cpu-time
- Disable cpu process accounting, this command also disables the `show thread cpu`
- command. If this option is disabled, --enable-time-check is ignored. This
- disabling of cpu time effectively means that the getrusage call is skipped.
- Since this is a process switch into the kernel, systems with high FRR
- load might see improvement in behavior. Be aware that `show thread cpu`
- is considered a good data gathering tool from the perspective of developers.
+ This option is deprecated as it was replaced by the
+ :clicmd:`service cputime-stats` CLI command, which may be adjusted at
+ runtime rather than being a compile-time setting. See there for further
+ detail.
.. option:: --enable-pcreposix
Set StrongSWAN vici interface socket path [/var/run/charon.vici].
+.. note::
+
+ The former ``--enable-systemd`` option does not exist anymore. Support for
+ systemd is now always available through built-in functions, without
+ depending on libsystemd.
+
Python dependency, documentation and tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
./configure \
--prefix=/usr \
- --enable-exampledir=/usr/share/doc/frr/examples/ \
--localstatedir=/var/run/frr \
--sbindir=/usr/lib/frr \
--sysconfdir=/etc/frr \
Router Advertisement
====================
+.. clicmd:: show ipv6 nd ra-interfaces [vrf <VRFNAME|all>]
+
+ Show configured route advertisement interfaces. VRF subcommand only
+ applicable for netns-based vrfs.
+
.. clicmd:: ipv6 nd suppress-ra
Don't send router advertisement messages. The ``no`` form of this command
of packets to process before returning. The default value of this parameter
is 20.
+.. clicmd:: clear ipv6 ospf6 process [vrf NAME]
+
+ This command clears up the database and routing tables and resets the
+ neighborship by restarting the interface state machine. This will be
+ helpful when there is a change in router-id and if user wants the router-id
+ change to take effect, user can use this cli instead of restarting the
+ ospf6d daemon.
.. _ospf6-area:
OSPF6 area
==========
-.. index:: [no] area A.B.C.D nssa
-.. clicmd:: [no] area A.B.C.D nssa
+.. clicmd:: area A.B.C.D nssa
NSSA Support in OSPFv3
=======================
The configuration of NSSA areas in OSPFv3 is supported using the CLI command
-area A.B.C.D nssa in ospf6 router configuration mode.
+``area A.B.C.D nssa`` in ospf6 router configuration mode.
The following functionalities are implemented as per RFC 3101:
1. Advertising Type-7 LSA into NSSA area when external route is redistributed
2. Processing Type-7 LSA received from neighbor and installing route in the
route table
3. Support for NSSA ABR functionality which is generating Type-5 LSA when
- backbone area is configured. Currently translation od TYpe-7 LSA to Type-5 LSA
+ backbone area is configured. Currently translation of Type-7 LSA to Type-5 LSA
is enabled by default.
4. Support for NSSA Translator functionality when there are multiple NSSA ABR
in an area
To start OSPF process you have to specify the OSPF router.
-.. clicmd:: router ospf [(1-65535)] vrf NAME
+.. clicmd:: router ospf [{(1-65535)|vrf NAME}]
Enable or disable the OSPF process.
+ Multiple instances don't support `vrf NAME`.
+
.. clicmd:: ospf router-id A.B.C.D
:ref:`bfd-pim-peer-config`
-.. _pim-multicast-rib-insertion:
+.. _pim-multicast-rib:
-PIM Multicast RIB insertion:
-============================
+PIM Multicast RIB
+=================
In order to influence Multicast RPF lookup, it is possible to insert
into zebra routes for the Multicast RIB. These routes are only
Commands available for MSDP:
+.. clicmd:: ip msdp timers (2-600) (3-600) [(1-600)]
+
+ Configure global MSDP timers.
+
+ The first value is the keep-alive interval and it must be less than the
+ second value, which is the hold-time. This configures the interval in
+ seconds between keep-alive messages. The default value is 60 seconds.
+
+ The second value is the hold-time and it must be greater than the
+ keep-alive interval. This configures the interval in seconds before
+ closing a non-responding connection. The default value is 75 seconds.
+
+ Third value is the connection retry interval and it is optional. This
+ configures the interval between connection attempts. The default value
+ is 30 seconds.
.. clicmd:: ip msdp mesh-group WORD member A.B.C.D
to configure SRv6 on FRR. Of course SRv6 can be used as standalone,
and this section also helps that case.
-.. index:: show segment-routing srv6 locator [json]
.. clicmd:: show segment-routing srv6 locator [json]
This command dump SRv6-locator configured on zebra. SRv6-locator is used
loc1 1 2001:db8:1:1::/64 Up
loc2 2 2001:db8:2:2::/64 Up
-.. index:: show segment-routing srv6 locator NAME detail [json]
.. clicmd:: show segment-routing srv6 locator NAME detail [json]
As shown in the example, by specifying the name of the locator, you
Chunks:
- prefix: 2001:db8:2:2::/64, owner: sharp
-.. index:: segment-routing
.. clicmd:: segment-routing
Move from configure mode to segment-routing node.
-.. index:: srv6
.. clicmd:: srv6
Move from segment-routing node to srv6 node.
-.. index:: locators
.. clicmd:: locators
Move from srv6 node to locator node. In this locator node, user can
configure detailed settings such as the actual srv6 locator.
-.. index:: locator NAME
.. clicmd:: locator NAME
Create a new locator. If the name of an existing locator is specified,
move to specified locator's configuration node to change the settings it.
-.. index:: prefix X:X::X:X/M [function-bits-length 32]
.. clicmd:: prefix X:X::X:X/M [function-bits-length 32]
Set the ipv6 prefix block of the locator. SRv6 locator is defined by
RUN yum install -y rpm-build autoconf automake libtool make \
readline-devel texinfo net-snmp-devel groff pkgconfig \
json-c-devel pam-devel bison flex pytest c-ares-devel \
- python3-devel python3-sphinx systemd-devel libcap-devel \
+ python3-devel python3-sphinx libcap-devel \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-7-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el7.x86_64.rpm \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-7-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el7.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \
RUN dnf install --enablerepo=powertools -y rpm-build git autoconf pcre-devel \
automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
groff pkgconfig json-c-devel pam-devel bison flex python3-pytest \
- c-ares-devel python3-devel python3-sphinx systemd-devel libcap-devel platform-python-devel \
+ c-ares-devel python3-devel python3-sphinx libcap-devel platform-python-devel \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \
https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \
https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-110/CentOS-7-x86_64-Packages/librtr-0.7.0-1.el7.centos.x86_64.rpm \
apt-get install -y \
git autoconf automake libtool make libreadline-dev texinfo \
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev libsystemd-dev python-ipaddress python3-sphinx \
- install-info build-essential libsystemd-dev libsnmp-dev perl libcap-dev \
+ libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ install-info build-essential libsnmp-dev perl libcap-dev \
libelf-dev \
sudo gdb iputils-ping time \
mininet python-pip iproute2 iperf && \
apt-get install -y \
git autoconf automake libtool make libreadline-dev texinfo \
pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev libsystemd-dev python-ipaddress python3-sphinx \
- install-info build-essential libsystemd-dev libsnmp-dev perl \
+ libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ install-info build-essential libsnmp-dev perl \
libcap-dev python2 libelf-dev \
sudo gdb curl iputils-ping time \
libgrpc++-dev libgrpc-dev protobuf-compiler-grpc \
+ lua5.3 liblua5.3-dev \
mininet iproute2 iperf && \
curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output /tmp/get-pip.py && \
python2 /tmp/get-pip.py && \
--enable-group=frr \
--enable-vty-group=frrvty \
--enable-snmp=agentx \
+ --enable-scripting \
--with-pkg-extra-version=-my-manual-build && \
make -j $(nproc) && \
sudo make install
}
struct prefix *pref = prefix_new();
- PREFIX_COPY_IPV4(pref, p);
+ prefix_copy(pref, p);
rn->info = (void *)pref;
/* Schedule Router ID Update. */
void cli_show_isis_mpls_te(struct vty *vty, struct lyd_node *dnode,
bool show_defaults)
{
- vty_out(vty, " mpls-te on\n");
+ vty_out(vty, " mpls-te on\n");
}
/*
void cli_show_isis_mpls_te_router_addr(struct vty *vty, struct lyd_node *dnode,
bool show_defaults)
{
- vty_out(vty, " mpls-te router-address %s\n",
+ vty_out(vty, " mpls-te router-address %s\n",
yang_dnode_get_string(dnode, NULL));
}
return NB_OK;
}
+struct sysid_iter {
+ struct area_addr *addr;
+ bool same;
+};
+
+static int sysid_iter_cb(const struct lyd_node *dnode, void *arg)
+{
+ struct sysid_iter *iter = arg;
+ struct area_addr addr;
+ const char *net;
+
+ net = yang_dnode_get_string(dnode, NULL);
+ addr.addr_len = dotformat2buff(addr.area_addr, net);
+
+ if (memcmp(GETSYSID(iter->addr), GETSYSID((&addr)), ISIS_SYS_ID_LEN)) {
+ iter->same = false;
+ return YANG_ITER_STOP;
+ }
+
+ return YANG_ITER_CONTINUE;
+}
+
/*
* XPath: /frr-isisd:isis/instance/area-address
*/
struct isis_area *area;
struct area_addr addr, *addrr = NULL, *addrp = NULL;
struct listnode *node;
+ struct sysid_iter iter;
uint8_t buff[255];
const char *net_title = yang_dnode_get_string(args->dnode, NULL);
switch (args->event) {
case NB_EV_VALIDATE:
- area = nb_running_get_entry(args->dnode, NULL, false);
- if (area == NULL)
- return NB_ERR_VALIDATION;
addr.addr_len = dotformat2buff(buff, net_title);
memcpy(addr.area_addr, buff, addr.addr_len);
if (addr.area_addr[addr.addr_len - 1] != 0) {
"nsel byte (last byte) in area address must be 0");
return NB_ERR_VALIDATION;
}
- if (area->isis->sysid_set) {
- /* Check that the SystemID portions match */
- if (memcmp(area->isis->sysid, GETSYSID((&addr)),
- ISIS_SYS_ID_LEN)) {
- snprintf(
- args->errmsg, args->errmsg_len,
- "System ID must not change when defining additional area addresses");
- return NB_ERR_VALIDATION;
- }
+
+ iter.addr = &addr;
+ iter.same = true;
+
+ yang_dnode_iterate(sysid_iter_cb, &iter, args->dnode,
+ "../area-address");
+
+ if (!iter.same) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "System ID must not change when defining additional area addresses");
+ return NB_ERR_VALIDATION;
}
break;
case NB_EV_PREPARE:
int isis_instance_mpls_ldp_sync_create(struct nb_cb_create_args *args)
{
struct isis_area *area;
+ const char *vrfname;
switch (args->event) {
case NB_EV_VALIDATE:
- area = nb_running_get_entry(args->dnode, NULL, false);
- if (area == NULL || area->isis == NULL)
- return NB_ERR_VALIDATION;
+ vrfname = yang_dnode_get_string(
+ lyd_parent(lyd_parent(args->dnode)), "./vrf");
- if (area->isis->vrf_id != VRF_DEFAULT) {
+ if (strcmp(vrfname, VRF_DEFAULT_NAME)) {
snprintf(args->errmsg, args->errmsg_len,
"LDP-Sync only runs on Default VRF");
return NB_ERR_VALIDATION;
{
struct isis_area *area;
uint16_t holddown;
+ const char *vrfname;
switch (args->event) {
case NB_EV_VALIDATE:
- area = nb_running_get_entry(args->dnode, NULL, false);
- if (area == NULL || area->isis == NULL)
- return NB_ERR_VALIDATION;
+ vrfname = yang_dnode_get_string(
+ lyd_parent(lyd_parent(lyd_parent(args->dnode))),
+ "./vrf");
- if (area->isis->vrf_id != VRF_DEFAULT) {
+ if (strcmp(vrfname, VRF_DEFAULT_NAME)) {
snprintf(args->errmsg, args->errmsg_len,
"LDP-Sync only runs on Default VRF");
return NB_ERR_VALIDATION;
struct isis_circuit *circuit;
struct ldp_sync_info *ldp_sync_info;
bool ldp_sync_enable;
- struct interface *ifp;
+ const char *vrfname;
switch (args->event) {
case NB_EV_VALIDATE:
- ifp = nb_running_get_entry(
- lyd_parent(lyd_parent(lyd_parent(args->dnode))), NULL,
- false);
- if (ifp == NULL)
- return NB_ERR_VALIDATION;
- if (if_is_loopback(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "LDP-Sync does not run on loopback interface");
- return NB_ERR_VALIDATION;
- }
-
- circuit = nb_running_get_entry(args->dnode, NULL, false);
- if (circuit == NULL || circuit->area == NULL)
- break;
-
- if (circuit->isis->vrf_id != VRF_DEFAULT) {
+ vrfname = yang_dnode_get_string(
+ lyd_parent(lyd_parent(lyd_parent(args->dnode))),
+ "./vrf");
+ if (strcmp(vrfname, VRF_DEFAULT_NAME)) {
snprintf(args->errmsg, args->errmsg_len,
"LDP-Sync only runs on Default VRF");
return NB_ERR_VALIDATION;
struct isis_circuit *circuit;
struct ldp_sync_info *ldp_sync_info;
uint16_t holddown;
- struct interface *ifp;
+ const char *vrfname;
switch (args->event) {
case NB_EV_VALIDATE:
-
- ifp = nb_running_get_entry(
- lyd_parent(lyd_parent(lyd_parent(args->dnode))), NULL,
- false);
- if (ifp == NULL)
- return NB_ERR_VALIDATION;
- if (if_is_loopback(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "LDP-Sync does not run on loopback interface");
- return NB_ERR_VALIDATION;
- }
-
- circuit = nb_running_get_entry(args->dnode, NULL, false);
- if (circuit == NULL || circuit->area == NULL)
- break;
-
- if (circuit->isis->vrf_id != VRF_DEFAULT) {
+ vrfname = yang_dnode_get_string(
+ lyd_parent(lyd_parent(lyd_parent(args->dnode))),
+ "./vrf");
+ if (strcmp(vrfname, VRF_DEFAULT_NAME)) {
snprintf(args->errmsg, args->errmsg_len,
"LDP-Sync only runs on Default VRF");
return NB_ERR_VALIDATION;
{
struct isis_circuit *circuit;
struct ldp_sync_info *ldp_sync_info;
- struct interface *ifp;
+ const char *vrfname;
switch (args->event) {
case NB_EV_VALIDATE:
- ifp = nb_running_get_entry(
- lyd_parent(lyd_parent(lyd_parent(args->dnode))), NULL,
- false);
- if (ifp == NULL)
- return NB_ERR_VALIDATION;
- if (if_is_loopback(ifp)) {
- snprintf(args->errmsg, args->errmsg_len,
- "LDP-Sync does not run on loopback interface");
- return NB_ERR_VALIDATION;
- }
-
- circuit = nb_running_get_entry(args->dnode, NULL, false);
- if (circuit == NULL || circuit->area == NULL)
- break;
-
- if (circuit->isis->vrf_id != VRF_DEFAULT) {
+ vrfname = yang_dnode_get_string(
+ lyd_parent(lyd_parent(lyd_parent(args->dnode))),
+ "./vrf");
+ if (strcmp(vrfname, VRF_DEFAULT_NAME)) {
snprintf(args->errmsg, args->errmsg_len,
"LDP-Sync only runs on Default VRF");
return NB_ERR_VALIDATION;
/* update neighbor router address */
switch (prefix->family) {
case AF_INET:
- if (depth == 2 && prefix->prefixlen == 32)
+ if (depth == 2 && prefix->prefixlen == IPV4_MAX_BITLEN)
adj->router_address = prefix->u.prefix4;
break;
case AF_INET6:
- if (depth == 2 && prefix->prefixlen == 128
+ if (depth == 2 && prefix->prefixlen == IPV6_MAX_BITLEN
&& (!src_p || !src_p->prefixlen)) {
adj->router_address6 = prefix->u.prefix6;
}
}
p.prefixlen = stream_getc(s);
- if (p.prefixlen > 128) {
+ if (p.prefixlen > IPV6_MAX_BITLEN) {
sbuf_push(log, indent, "Prefixlen %u is implausible for IPv6\n",
p.prefixlen);
return 1;
rv->down = (control & ISIS_EXTENDED_IP_REACH_DOWN);
rv->prefix.family = AF_INET;
rv->prefix.prefixlen = control & 0x3f;
- if (rv->prefix.prefixlen > 32) {
+ if (rv->prefix.prefixlen > IPV4_MAX_BITLEN) {
sbuf_push(log, indent, "Prefixlen %u is implausible for IPv4\n",
rv->prefix.prefixlen);
goto out;
rv->prefix.family = AF_INET6;
rv->prefix.prefixlen = stream_getc(s);
- if (rv->prefix.prefixlen > 128) {
+ if (rv->prefix.prefixlen > IPV6_MAX_BITLEN) {
sbuf_push(log, indent, "Prefixlen %u is implausible for IPv6\n",
rv->prefix.prefixlen);
goto out;
static int isis_zebra_link_params(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp;
+ bool changed = false;
- ifp = zebra_interface_link_params_read(zclient->ibuf, vrf_id);
+ ifp = zebra_interface_link_params_read(zclient->ibuf, vrf_id, &changed);
- if (ifp == NULL)
+ if (ifp == NULL || !changed)
return 0;
/* Update TE TLV */
map->fec.prefix.prefixlen = buf[off];
off += sizeof(uint8_t);
if ((map->fec.prefix.af == AF_IPV4
- && map->fec.prefix.prefixlen > IPV4_MAX_PREFIXLEN)
+ && map->fec.prefix.prefixlen > IPV4_MAX_BITLEN)
|| (map->fec.prefix.af == AF_IPV6
- && map->fec.prefix.prefixlen > IPV6_MAX_PREFIXLEN)) {
+ && map->fec.prefix.prefixlen > IPV6_MAX_BITLEN)) {
session_shutdown(nbr, S_BAD_TLV_VAL, msg->id,
msg->type);
return (-1);
/* should we allocate a label for this fec? */
switch (fn->fec.type) {
case FEC_TYPE_IPV4:
- if ((ldeconf->ipv4.flags & F_LDPD_AF_ALLOCHOSTONLY) &&
- fn->fec.u.ipv4.prefixlen != 32)
+ if ((ldeconf->ipv4.flags & F_LDPD_AF_ALLOCHOSTONLY)
+ && fn->fec.u.ipv4.prefixlen != IPV4_MAX_BITLEN)
return (NO_LABEL);
if (lde_acl_check(ldeconf->ipv4.acl_label_allocate_for,
AF_INET, (union ldpd_addr *)&fn->fec.u.ipv4.prefix,
return (NO_LABEL);
break;
case FEC_TYPE_IPV6:
- if ((ldeconf->ipv6.flags & F_LDPD_AF_ALLOCHOSTONLY) &&
- fn->fec.u.ipv6.prefixlen != 128)
+ if ((ldeconf->ipv6.flags & F_LDPD_AF_ALLOCHOSTONLY)
+ && fn->fec.u.ipv6.prefixlen != IPV6_MAX_BITLEN)
return (NO_LABEL);
if (lde_acl_check(ldeconf->ipv6.acl_label_allocate_for,
AF_INET6, (union ldpd_addr *)&fn->fec.u.ipv6.prefix,
case AF_INET:
if (prefixlen == 0)
return (0);
- if (prefixlen > 32)
+ if (prefixlen > IPV4_MAX_BITLEN)
fatalx("ldp_prefixcmp: bad IPv4 prefixlen");
mask = htonl(prefixlen2mask(prefixlen));
aa = htonl(a->v4.s_addr) & mask;
case AF_INET6:
if (prefixlen == 0)
return (0);
- if (prefixlen > 128)
+ if (prefixlen > IPV6_MAX_BITLEN)
fatalx("ldp_prefixcmp: bad IPv6 prefixlen");
for (i = 0; i < prefixlen / 8; i++)
if (a->v6.s6_addr[i] != b->v6.s6_addr[i])
iov_size =
((iov_index > IOV_MAX) ? IOV_MAX : iov_index);
- if ((nbytes = writev(fd, c_iov, iov_size)) < 0) {
+ nbytes = writev(fd, c_iov, iov_size);
+ if (nbytes < 0) {
flog_err(EC_LIB_SOCKET,
"%s: writev to fd %d failed: %s",
__func__, fd, safe_strerror(errno));
}
}
#else /* IOV_MAX */
- if ((nbytes = writev(fd, iov, iov_index)) < 0)
+ nbytes = writev(fd, iov, iov_index);
+ if (nbytes < 0)
flog_err(EC_LIB_SOCKET, "%s: writev to fd %d failed: %s",
__func__, fd, safe_strerror(errno));
#endif /* IOV_MAX */
/* Buffer is not empty, so do not attempt to write the new data.
*/
nbytes = 0;
- else if ((nbytes = write(fd, p, size)) < 0) {
- if (ERRNO_IO_RETRY(errno))
- nbytes = 0;
- else {
- flog_err(EC_LIB_SOCKET, "%s: write error on fd %d: %s",
- __func__, fd, safe_strerror(errno));
- return BUFFER_ERROR;
+ else {
+ nbytes = write(fd, p, size);
+ if (nbytes < 0) {
+ if (ERRNO_IO_RETRY(errno))
+ nbytes = 0;
+ else {
+ flog_err(EC_LIB_SOCKET,
+ "%s: write error on fd %d: %s",
+ __func__, fd, safe_strerror(errno));
+ return BUFFER_ERROR;
+ }
}
}
/* Add any remaining data to the buffer. */
/* Install top node of command vector. */
void install_node(struct cmd_node *node)
{
+#define CMD_HASH_STR_SIZE 256
+ char hash_name[CMD_HASH_STR_SIZE];
+
vector_set_index(cmdvec, node->node, node);
node->cmdgraph = graph_new();
node->cmd_vector = vector_init(VECTOR_MIN_SIZE);
cmd_token_new(START_TKN, CMD_ATTR_NORMAL, NULL, NULL);
graph_new_node(node->cmdgraph, token,
(void (*)(void *)) & cmd_token_del);
- node->cmd_hash = hash_create_size(16, cmd_hash_key, cmd_hash_cmp,
- "Command Hash");
+
+ snprintf(hash_name, sizeof(hash_name), "Command Hash: %s", node->name);
+ node->cmd_hash =
+ hash_create_size(16, cmd_hash_key, cmd_hash_cmp, hash_name);
}
/* Return prompt character of specified node. */
}
log_config_write(vty);
+ /* print disable always, but enable only if default is flipped
+ * => prep for future removal of compile-time knob
+ */
+ if (!cputime_enabled)
+ vty_out(vty, "no service cputime-stats\n");
+#ifdef EXCLUDE_CPU_TIME
+ else
+ vty_out(vty, "service cputime-stats\n");
+#endif
+
+ if (!cputime_threshold)
+ vty_out(vty, "no service cputime-warning\n");
+#if defined(CONSUMED_TIME_CHECK) && CONSUMED_TIME_CHECK != 5000000
+ else /* again, always print non-default */
+#else
+ else if (cputime_threshold != 5000000)
+#endif
+ vty_out(vty, "service cputime-warning %lu\n",
+ cputime_threshold);
+
+ if (!walltime_threshold)
+ vty_out(vty, "no service walltime-warning\n");
+#if defined(CONSUMED_TIME_CHECK) && CONSUMED_TIME_CHECK != 5000000
+ else /* again, always print non-default */
+#else
+ else if (walltime_threshold != 5000000)
+#endif
+ vty_out(vty, "service walltime-warning %lu\n",
+ walltime_threshold);
+
if (host.advanced)
vty_out(vty, "service advanced-vty\n");
struct prefix p;
(void)str2prefix("1.2.3.4/24", &p);
-
struct frrscript *fs = frrscript_load(argv[1]->arg, NULL);
if (fs == NULL) {
vty_out(vty, "Script '/etc/frr/scripts/%s.lua' not found\n",
argv[1]->arg);
} else {
- int ret = frrscript_call(fs, NULL);
+ int ret = frrscript_call(fs, ("p", &p));
+ char buf[40];
+ prefix2str(&p, buf, sizeof(buf));
+ vty_out(vty, "p: %s\n", buf);
vty_out(vty, "Script result: %d\n", ret);
}
str++;
}
- if (atoi(sp) > 32)
+ if (atoi(sp) > IPV4_MAX_BITLEN)
return no_match;
return exact_match;
if (*endptr != '\0')
return no_match;
- if (mask < 0 || mask > 128)
+ if (mask < 0 || mask > IPV6_MAX_BITLEN)
return no_match;
return exact_match;
#define MACRO_REPEAT(NAME, ...) \
MACRO_VARIANT(_MACRO_REPEAT, ##__VA_ARGS__)(NAME, ##__VA_ARGS__)
+/* per-arglist repeat macro, use like this:
+ * #define foo(...) MAP_LISTS(F, ##__VA_ARGS__)
+ * where F is an n-ary function, n being the number of args in each arglist.
+ * e.g.: MAP_LISTS(f, (a, b), (c, d))
+ * expands to: f(a, b); f(c, d)
+ */
+
+#define ESC(...) __VA_ARGS__
+#define MAP_LISTS(M, ...) \
+ _CONCAT(_MAP_LISTS_, PP_NARG(__VA_ARGS__))(M, ##__VA_ARGS__)
+#define _MAP_LISTS_0(M)
+#define _MAP_LISTS_1(M, _1) ESC(M _1)
+#define _MAP_LISTS_2(M, _1, _2) ESC(M _1; M _2)
+#define _MAP_LISTS_3(M, _1, _2, _3) ESC(M _1; M _2; M _3)
+#define _MAP_LISTS_4(M, _1, _2, _3, _4) ESC(M _1; M _2; M _3; M _4)
+#define _MAP_LISTS_5(M, _1, _2, _3, _4, _5) ESC(M _1; M _2; M _3; M _4; M _5)
+#define _MAP_LISTS_6(M, _1, _2, _3, _4, _5, _6) \
+ ESC(M _1; M _2; M _3; M _4; M _5; M _6)
+#define _MAP_LISTS_7(M, _1, _2, _3, _4, _5, _6, _7) \
+ ESC(M _1; M _2; M _3; M _4; M _5; M _6; M _7)
+#define _MAP_LISTS_8(M, _1, _2, _3, _4, _5, _6, _7, _8) \
+ ESC(M _1; M _2; M _3; M _4; M _5; M _6; M _7; M _8)
+
/*
* for warnings on macros, put in the macro content like this:
* #define MACRO BLA CPP_WARN("MACRO has been deprecated")
char *vm;
snprintf(status_child, sizeof(status_child), "/proc/%d/status", pid);
- if ((fd = open(status_child, O_RDONLY)) < 0)
+ fd = open(status_child, O_RDONLY);
+ if (fd < 0)
return -1;
read(fd, buf, 4095);
masklen2ip(p->prefixlen, &mask);
check_mask = mask.s_addr & ~filter->mask_mask.s_addr;
- if (memcmp(&check_addr, &filter->addr.s_addr, 4) == 0
- && memcmp(&check_mask, &filter->mask.s_addr, 4) == 0)
+ if (memcmp(&check_addr, &filter->addr.s_addr, IPV4_MAX_BYTELEN)
+ == 0
+ && memcmp(&check_mask, &filter->mask.s_addr,
+ IPV4_MAX_BYTELEN)
+ == 0)
return 1;
- } else if (memcmp(&check_addr, &filter->addr.s_addr, 4) == 0)
+ } else if (memcmp(&check_addr, &filter->addr.s_addr, IPV4_MAX_BYTELEN)
+ == 0)
return 1;
return 0;
}
/*
- * Encoders.
- *
* This section has functions that convert internal FRR datatypes into Lua
- * datatypes.
+ * datatypes: one encoder function and two decoder functions for each type.
+ *
*/
void lua_pushprefix(lua_State *L, const struct prefix *prefix)
lua_setfield(L, -2, "family");
}
-void *lua_toprefix(lua_State *L, int idx)
+void lua_decode_prefix(lua_State *L, int idx, struct prefix *prefix)
{
- struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
-
lua_getfield(L, idx, "network");
- (void)str2prefix(lua_tostring(L, -1), p);
+ (void)str2prefix(lua_tostring(L, -1), prefix);
lua_pop(L, 1);
+ /* pop the table */
+ lua_pop(L, 1);
+}
+void *lua_toprefix(lua_State *L, int idx)
+{
+ struct prefix *p = XCALLOC(MTYPE_TMP, sizeof(struct prefix));
+ lua_decode_prefix(L, idx, p);
return p;
}
lua_setfield(L, -2, "linklayer_type");
}
-void *lua_tointerface(lua_State *L, int idx)
+void lua_decode_interface(lua_State *L, int idx, struct interface *ifp)
{
- struct interface *ifp = XCALLOC(MTYPE_TMP, sizeof(struct interface));
-
lua_getfield(L, idx, "name");
strlcpy(ifp->name, lua_tostring(L, -1), sizeof(ifp->name));
lua_pop(L, 1);
lua_getfield(L, idx, "linklayer_type");
ifp->ll_type = lua_tointeger(L, -1);
lua_pop(L, 1);
+ /* pop the table */
+ lua_pop(L, 1);
+}
+void *lua_tointerface(lua_State *L, int idx)
+{
+ struct interface *ifp = XCALLOC(MTYPE_TMP, sizeof(struct interface));
+ lua_decode_interface(L, idx, ifp);
return ifp;
}
void lua_pushinaddr(lua_State *L, const struct in_addr *addr)
{
char buf[INET_ADDRSTRLEN];
+
inet_ntop(AF_INET, addr, buf, sizeof(buf));
lua_newtable(L);
lua_setfield(L, -2, "string");
}
-void *lua_toinaddr(lua_State *L, int idx)
+void lua_decode_inaddr(lua_State *L, int idx, struct in_addr *inaddr)
{
- struct in_addr *inaddr = XCALLOC(MTYPE_TMP, sizeof(struct in_addr));
-
lua_getfield(L, idx, "value");
inaddr->s_addr = lua_tointeger(L, -1);
lua_pop(L, 1);
+ /* pop the table */
+ lua_pop(L, 1);
+}
+void *lua_toinaddr(lua_State *L, int idx)
+{
+ struct in_addr *inaddr = XCALLOC(MTYPE_TMP, sizeof(struct in_addr));
+ lua_decode_inaddr(L, idx, inaddr);
return inaddr;
}
void lua_pushin6addr(lua_State *L, const struct in6_addr *addr)
{
char buf[INET6_ADDRSTRLEN];
+
inet_ntop(AF_INET6, addr, buf, sizeof(buf));
lua_newtable(L);
lua_setfield(L, -2, "string");
}
-void *lua_toin6addr(lua_State *L, int idx)
+void lua_decode_in6addr(lua_State *L, int idx, struct in6_addr *in6addr)
{
- struct in6_addr *in6addr = XCALLOC(MTYPE_TMP, sizeof(struct in6_addr));
-
lua_getfield(L, idx, "string");
inet_pton(AF_INET6, lua_tostring(L, -1), in6addr);
lua_pop(L, 1);
+ /* pop the table */
+ lua_pop(L, 1);
+}
+void *lua_toin6addr(lua_State *L, int idx)
+{
+ struct in6_addr *in6addr = XCALLOC(MTYPE_TMP, sizeof(struct in6_addr));
+ lua_decode_in6addr(L, idx, in6addr);
return in6addr;
}
void lua_pushsockunion(lua_State *L, const union sockunion *su)
{
char buf[SU_ADDRSTRLEN];
+
sockunion2str(su, buf, sizeof(buf));
lua_newtable(L);
lua_setfield(L, -2, "string");
}
-void *lua_tosockunion(lua_State *L, int idx)
+void lua_decode_sockunion(lua_State *L, int idx, union sockunion *su)
{
- union sockunion *su = XCALLOC(MTYPE_TMP, sizeof(union sockunion));
-
lua_getfield(L, idx, "string");
str2sockunion(lua_tostring(L, -1), su);
+ lua_pop(L, 1);
+ /* pop the table */
+ lua_pop(L, 1);
+}
+
+void *lua_tosockunion(lua_State *L, int idx)
+{
+ union sockunion *su = XCALLOC(MTYPE_TMP, sizeof(union sockunion));
+ lua_decode_sockunion(L, idx, su);
return su;
}
lua_pushinteger(L, *time);
}
+void lua_decode_timet(lua_State *L, int idx, time_t *t)
+{
+ *t = lua_tointeger(L, idx);
+ lua_pop(L, 1);
+}
+
void *lua_totimet(lua_State *L, int idx)
{
time_t *t = XCALLOC(MTYPE_TMP, sizeof(time_t));
- *t = lua_tointeger(L, idx);
-
+ lua_decode_timet(L, idx, t);
return t;
}
lua_pushinteger(L, *num);
}
-void *lua_tointegerp(lua_State *L, int idx)
+void lua_decode_integerp(lua_State *L, int idx, long long *num)
{
int isnum;
- long long *num = XCALLOC(MTYPE_TMP, sizeof(long long));
-
*num = lua_tonumberx(L, idx, &isnum);
+ lua_pop(L, 1);
assert(isnum);
+}
+
+void *lua_tointegerp(lua_State *L, int idx)
+{
+ long long *num = XCALLOC(MTYPE_TMP, sizeof(long long));
+ lua_decode_integerp(L, idx, num);
return num;
}
+void lua_decode_stringp(lua_State *L, int idx, char *str)
+{
+ strlcpy(str, lua_tostring(L, idx), strlen(str) + 1);
+ lua_pop(L, 1);
+}
+
void *lua_tostringp(lua_State *L, int idx)
{
char *string = XSTRDUP(MTYPE_TMP, lua_tostring(L, idx));
return string;
}
+/*
+ * Decoder for const values, since we cannot modify them.
+ */
+void lua_decode_noop(lua_State *L, int idx, const void *ptr)
+{
+}
+
/*
* Logging.
*
*/
void lua_pushprefix(lua_State *L, const struct prefix *prefix);
+void lua_decode_prefix(lua_State *L, int idx, struct prefix *prefix);
+
/*
* Converts the Lua value at idx to a prefix.
*
*/
void lua_pushinterface(lua_State *L, const struct interface *ifp);
+void lua_decode_interface(lua_State *L, int idx, struct interface *ifp);
+
/*
* Converts the Lua value at idx to an interface.
*
*/
void lua_pushinaddr(lua_State *L, const struct in_addr *addr);
+void lua_decode_inaddr(lua_State *L, int idx, struct in_addr *addr);
+
/*
* Converts the Lua value at idx to an in_addr.
*
*/
void lua_pushin6addr(lua_State *L, const struct in6_addr *addr);
+void lua_decode_in6addr(lua_State *L, int idx, struct in6_addr *addr);
+
/*
* Converts the Lua value at idx to an in6_addr.
*
*/
void lua_pushtimet(lua_State *L, const time_t *time);
+void lua_decode_timet(lua_State *L, int idx, time_t *time);
+
/*
* Converts the Lua value at idx to a time_t.
*
*/
void lua_pushsockunion(lua_State *L, const union sockunion *su);
+void lua_decode_sockunion(lua_State *L, int idx, union sockunion *su);
+
/*
* Converts the Lua value at idx to a sockunion.
*
*/
void lua_pushintegerp(lua_State *L, const long long *num);
+void lua_decode_integerp(lua_State *L, int idx, long long *num);
+
/*
* Converts the Lua value at idx to an int.
*
*/
void *lua_tointegerp(lua_State *L, int idx);
+void lua_decode_stringp(lua_State *L, int idx, char *str);
+
/*
* Pop string.
*
*/
void *lua_tostringp(lua_State *L, int idx);
+/*
+ * No-op decoder, used for const values that cannot be modified.
+ */
+void lua_decode_noop(lua_State *L, int idx, const void *ptr);
+
/*
* Retrieve an integer from table on the top of the stack.
*
/* Generic script APIs */
-int frrscript_call(struct frrscript *fs, struct frrscript_env *env)
+int _frrscript_call(struct frrscript *fs)
{
- struct frrscript_codec c = {};
- const void *arg;
- const char *bindname;
-
- /* Encode script arguments */
- for (int i = 0; env && env[i].val != NULL; i++) {
- bindname = env[i].name;
- c.typename = env[i].typename;
- arg = env[i].val;
-
- struct frrscript_codec *codec = hash_lookup(codec_hash, &c);
- assert(codec && "No encoder for type");
- codec->encoder(fs->L, arg);
-
- lua_setglobal(fs->L, bindname);
- }
int ret = lua_pcall(fs->L, 0, 0, 0);
#include <lua.h>
#include "frrlua.h"
+#include "../bgpd/bgp_script.h"
#ifdef __cplusplus
extern "C" {
*/
void frrscript_init(const char *scriptdir);
+#define ENCODE_ARGS(name, value) \
+ do { \
+ ENCODE_ARGS_WITH_STATE(L, value); \
+ lua_setglobal(L, name); \
+ } while (0)
+
+#define DECODE_ARGS(name, value) \
+ do { \
+ lua_getglobal(L, name); \
+ DECODE_ARGS_WITH_STATE(L, value); \
+ } while (0)
+
+/*
+ * Maps the type of the value to its encoder/decoder.
+ * Add new mappings here.
+ *
+ * L
+ *    Lua state
+ * value
+ *    pointer to the value to encode onto / decode from the Lua stack
+ */
+#define ENCODE_ARGS_WITH_STATE(L, value) \
+ _Generic((value), \
+long long * : lua_pushintegerp, \
+struct prefix * : lua_pushprefix, \
+struct interface * : lua_pushinterface, \
+struct in_addr * : lua_pushinaddr, \
+struct in6_addr * : lua_pushin6addr, \
+union sockunion * : lua_pushsockunion, \
+time_t * : lua_pushtimet, \
+char * : lua_pushstring_wrapper, \
+struct attr * : lua_pushattr, \
+struct peer * : lua_pushpeer, \
+const struct prefix * : lua_pushprefix \
+)(L, value)
+
+#define DECODE_ARGS_WITH_STATE(L, value) \
+ _Generic((value), \
+long long * : lua_decode_integerp, \
+struct prefix * : lua_decode_prefix, \
+struct interface * : lua_decode_interface, \
+struct in_addr * : lua_decode_inaddr, \
+struct in6_addr * : lua_decode_in6addr, \
+union sockunion * : lua_decode_sockunion, \
+time_t * : lua_decode_timet, \
+char * : lua_decode_stringp, \
+struct attr * : lua_decode_attr, \
+struct peer * : lua_decode_noop, \
+const struct prefix * : lua_decode_noop \
+)(L, -1, value)
/*
* Call script.
* fs
* The script to call; this is obtained from frrscript_load().
*
- * env
- * The script's environment. Specify this as an array of frrscript_env.
- *
* Returns:
* 0 if the script ran successfully, nonzero otherwise.
*/
-int frrscript_call(struct frrscript *fs, struct frrscript_env *env);
+int _frrscript_call(struct frrscript *fs);
+/*
+ * Wrapper for call script. Maps values passed in to their encoder
+ * and decoder types.
+ *
+ * fs
+ * The script to call; this is obtained from frrscript_load().
+ *
+ * Returns:
+ * 0 if the script ran successfully, nonzero otherwise.
+ */
+#define frrscript_call(fs, ...) \
+ ({ \
+ lua_State *L = fs->L; \
+ MAP_LISTS(ENCODE_ARGS, ##__VA_ARGS__); \
+ int ret = _frrscript_call(fs); \
+ if (ret == 0) { \
+ MAP_LISTS(DECODE_ARGS, ##__VA_ARGS__); \
+ } \
+ ret; \
+ })
/*
* Get result from finished script.
#include "frr_pthread.h"
#include "defaults.h"
#include "frrscript.h"
+#include "systemd.h"
DEFINE_HOOK(frr_late_init, (struct thread_master * tm), (tm));
DEFINE_HOOK(frr_config_pre, (struct thread_master * tm), (tm));
startup_fds |= UINT64_C(0x1) << (uint64_t)i;
}
+
+ /* note this doesn't do anything, it just grabs state, so doing it
+ * early in _preinit is perfect.
+ */
+ systemd_init_env();
}
bool frr_is_startup_fd(int fd)
/* Create Edge and add it to the TED */
new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_edge));
- if (!new)
- return NULL;
new->attributes = attributes;
new->key = key;
struct ls_ted *new;
new = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_ted));
- if (new == NULL)
- return new;
/* Set basic information for this ted */
new->key = key;
size_t len;
node = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_node));
- if (node == NULL)
- return NULL;
STREAM_GET(&node->adv, s, sizeof(struct ls_node_id));
STREAM_GETW(s, node->flags);
size_t len;
attr = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_attributes));
- if (attr == NULL)
- return NULL;
attr->srlgs = NULL;
STREAM_GET(&attr->adv, s, sizeof(struct ls_node_id));
size_t len;
ls_pref = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_prefix));
- if (ls_pref == NULL)
- return NULL;
STREAM_GET(&ls_pref->adv, s, sizeof(struct ls_node_id));
STREAM_GETW(s, ls_pref->flags);
struct ls_message *msg;
msg = XCALLOC(MTYPE_LS_DB, sizeof(struct ls_message));
- if (msg == NULL)
- return NULL;
/* Read LS Message header */
STREAM_GETC(s, msg->event);
/* According to the Single UNIX Spec, the return value for F_GETFL
should
never be negative. */
- if ((flags = fcntl(fd, F_GETFL)) < 0) {
+ flags = fcntl(fd, F_GETFL);
+ if (flags < 0) {
flog_err(EC_LIB_SYSTEM_CALL,
"fcntl(F_GETFL) failed for fd %d: %s", fd,
safe_strerror(errno));
bool zero = false;
int byte = bytex, tmp, a, b;
- if ((tmp = byte - 200) >= 0) {
+ tmp = byte - 200;
+ if (tmp >= 0) {
*pos++ = '2';
zero = true;
byte = tmp;
- } else if ((tmp = byte - 100) >= 0) {
- *pos++ = '1';
- zero = true;
- byte = tmp;
+ } else {
+ tmp = byte - 100;
+ if (tmp >= 0) {
+ *pos++ = '1';
+ zero = true;
+ byte = tmp;
+ }
}
/* make sure the compiler knows the value range of "byte" */
/* Get prefix length. */
plen = (uint8_t)atoi(++pnt);
- if (plen > IPV4_MAX_PREFIXLEN)
+ if (plen > IPV4_MAX_BITLEN)
return 0;
p->family = AF_INET;
l = strlen(buf);
buf[l++] = '/';
byte = p->prefixlen;
- if ((tmp = p->prefixlen - 100) >= 0) {
+ tmp = p->prefixlen - 100;
+ if (tmp >= 0) {
buf[l++] = '1';
z = true;
byte = tmp;
destination = ntohl(p->prefix.s_addr);
- if (p->prefixlen == IPV4_MAX_PREFIXLEN)
+ if (p->prefixlen == IPV4_MAX_BITLEN)
;
/* do nothing for host routes */
else if (IN_CLASSC(destination)) {
struct in_addr mask;
masklen2ip(masklen, &mask);
- return (masklen != IPV4_MAX_PREFIXLEN - 1) ?
- /* normal case */
- (hostaddr | ~mask.s_addr)
- :
- /* For prefix 31 return 255.255.255.255 (RFC3021) */
- htonl(0xFFFFFFFF);
+ return (masklen != IPV4_MAX_BITLEN - 1)
+ ?
+ /* normal case */
+ (hostaddr | ~mask.s_addr)
+ :
+ /* For prefix 31 return 255.255.255.255 (RFC3021) */
+ htonl(0xFFFFFFFF);
}
/* Utility function to convert ipv4 netmask to prefixes
/* Max bit/byte length of IPv4 address. */
#define IPV4_MAX_BYTELEN 4
#define IPV4_MAX_BITLEN 32
-#define IPV4_MAX_PREFIXLEN 32
#define IPV4_ADDR_CMP(D,S) memcmp ((D), (S), IPV4_MAX_BYTELEN)
static inline bool ipv4_addr_same(const struct in_addr *a,
/* Max bit/byte length of IPv6 address. */
#define IPV6_MAX_BYTELEN 16
#define IPV6_MAX_BITLEN 128
-#define IPV6_MAX_PREFIXLEN 128
#define IPV6_ADDR_CMP(D,S) memcmp ((D), (S), IPV6_MAX_BYTELEN)
#define IPV6_ADDR_SAME(D,S) (memcmp ((D), (S), IPV6_MAX_BYTELEN) == 0)
#define IPV6_ADDR_COPY(D,S) memcpy ((D), (S), IPV6_MAX_BYTELEN)
extern int str2prefix_ipv4(const char *, struct prefix_ipv4 *);
extern void apply_mask_ipv4(struct prefix_ipv4 *);
-#define PREFIX_COPY(DST, SRC) \
- *((struct prefix *)(DST)) = *((const struct prefix *)(SRC))
-#define PREFIX_COPY_IPV4(DST, SRC) \
- *((struct prefix_ipv4 *)(DST)) = *((const struct prefix_ipv4 *)(SRC))
-
extern int prefix_ipv4_any(const struct prefix_ipv4 *);
extern void apply_classful_mask_ipv4(struct prefix_ipv4 *);
extern int str2prefix_ipv6(const char *, struct prefix_ipv6 *);
extern void apply_mask_ipv6(struct prefix_ipv6 *);
-#define PREFIX_COPY_IPV6(DST, SRC) \
- *((struct prefix_ipv6 *)(DST)) = *((const struct prefix_ipv6 *)(SRC))
-
extern int ip6_masklen(struct in6_addr);
extern void masklen2ip6(const int, struct in6_addr *);
};
DECLARE_QOBJ_TYPE(route_map_index);
+/* Maximum length of a route-map name.  Names end up embedded in
+ * northbound xpaths, so this cannot exceed XPATH_MAXLEN.
+ */
+#define RMAP_NAME_MAXLEN XPATH_MAXLEN
+
/* Route map list structure. */
struct route_map {
/* Name of route map. */
int ret;
#if defined(IP_PKTINFO)
- if ((ret = setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &val, sizeof(val)))
- < 0)
+ ret = setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &val, sizeof(val));
+ if (ret < 0)
flog_err(EC_LIB_SOCKET,
"Can't set IP_PKTINFO option for fd %d to %d: %s",
sock, val, safe_strerror(errno));
#elif defined(IP_RECVIF)
- if ((ret = setsockopt(sock, IPPROTO_IP, IP_RECVIF, &val, sizeof(val)))
- < 0)
+ ret = setsockopt(sock, IPPROTO_IP, IP_RECVIF, &val, sizeof(val));
+ if (ret < 0)
flog_err(EC_LIB_SOCKET,
"Can't set IP_RECVIF option for fd %d to %d: %s", sock,
val, safe_strerror(errno));
#endif /* GNU_LINUX */
- if ((ret = setsockopt(sock, IPPROTO_TCP, optname, &md5sig,
- sizeof(md5sig)))
- < 0) {
- /* ENOENT is harmless. It is returned when we clear a password
- for which
- one was not previously set. */
+ ret = setsockopt(sock, IPPROTO_TCP, optname, &md5sig, sizeof(md5sig));
+ if (ret < 0) {
if (ENOENT == errno)
ret = 0;
else
}
}
-static int in6addr_cmp(const struct in6_addr *addr1,
- const struct in6_addr *addr2)
+int in6addr_cmp(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
unsigned int i;
const uint8_t *p1, *p2;
/* Prototypes. */
extern int str2sockunion(const char *, union sockunion *);
extern const char *sockunion2str(const union sockunion *, char *, size_t);
+int in6addr_cmp(const struct in6_addr *addr1, const struct in6_addr *addr2);
extern int sockunion_cmp(const union sockunion *, const union sockunion *);
extern int sockunion_same(const union sockunion *, const union sockunion *);
extern unsigned int sockunion_hash(const union sockunion *);
return -1;
}
- if ((nbytes = read(fd, s->data + s->endp, size)) >= 0) {
+ nbytes = read(fd, s->data + s->endp, size);
+ if (nbytes >= 0) {
s->endp += nbytes;
return nbytes;
}
return -1;
}
- if ((nbytes = recvfrom(fd, s->data + s->endp, size, flags, from,
- fromlen))
- >= 0) {
+ nbytes = recvfrom(fd, s->data + s->endp, size, flags, from, fromlen);
+ if (nbytes >= 0) {
s->endp += nbytes;
return nbytes;
}
lib/northbound_cli.c \
lib/plist.c \
lib/routemap_cli.c \
+ lib/thread.c \
lib/vty.c \
# end
SUFFIXES += .xref
%.xref: % $(CLIPPY)
- $(AM_V_XRELFO) $(CLIPPY) $(top_srcdir)/python/xrelfo.py $(XRELFO_FLAGS) -o $@ $<
+ $(AM_V_XRELFO) $(CLIPPY) $(top_srcdir)/python/xrelfo.py $(WERROR) $(XRELFO_FLAGS) -o $@ $<
# dependencies added in python/makefile.py
frr.xref:
*/
#include <zebra.h>
+#include <sys/un.h>
#include "thread.h"
#include "systemd.h"
+#include "lib_errors.h"
-#if defined HAVE_SYSTEMD
-#include <systemd/sd-daemon.h>
-#endif
+/* these are cleared from env so they don't "leak" into things we fork(),
+ * particularly for watchfrr starting individual daemons
+ *
+ * watchdog_pid is currently not used since watchfrr starts forking.
+ * (TODO: handle that better, somehow?)
+ */
+static pid_t watchdog_pid = -1;
+static intmax_t watchdog_msec;
+
+/* not used yet, but can trigger auto-switch to journald logging */
+bool sd_stdout_is_journal;
+bool sd_stderr_is_journal;
+
+static char *notify_socket;
-/*
- * Wrapper this silliness if we
- * don't have systemd
+/* talk to whatever entity claims to be systemd ;)
+ *
+ * refer to sd_notify docs for messages systemd accepts over this socket.
+ * This function should be functionally equivalent to sd_notify().
*/
static void systemd_send_information(const char *info)
{
-#if defined HAVE_SYSTEMD
-	sd_notify(0, info);
-#else
-	return;
-#endif
-}
+	int sock;
+	struct sockaddr_un sun;
-/*
- * A return of 0 means that we are not watchdoged
- */
-static int systemd_get_watchdog_time(int the_process)
-{
-#if defined HAVE_SYSTEMD
-	uint64_t usec;
-	char *watchdog = NULL;
-	int ret;
-
-	ret = sd_watchdog_enabled(0, &usec);
-
-	/*
-	 * If return is 0 -> we don't want watchdog
-	 * if return is < 0, some sort of failure occurred
-	 */
-	if (ret < 0)
-		return 0;
-
-	/*
-	 * systemd can return that this process
-	 * is not the expected sender of the watchdog timer
-	 * If we set the_process = 0 then we expect to
-	 * be able to send the watchdog to systemd
-	 * irrelevant of the pid of this process.
-	 */
-	if (ret == 0 && the_process)
-		return 0;
-
-	if (ret == 0 && !the_process) {
-		watchdog = getenv("WATCHDOG_USEC");
-		if (!watchdog)
-			return 0;
-
-		usec = atol(watchdog);
-	}
+	/* NOTIFY_SOCKET unset means we were not started under systemd */
+	if (!notify_socket)
+		return;
+
+	sock = socket(AF_UNIX, SOCK_DGRAM, 0);
+	if (sock < 0)
+		return;
+
+	sun.sun_family = AF_UNIX;
+	strlcpy(sun.sun_path, notify_socket, sizeof(sun.sun_path));
-	return (usec / 1000000) / 3;
-#else
-	return 0;
-#endif
+	/* linux abstract unix socket namespace */
+	if (sun.sun_path[0] == '@')
+		sun.sun_path[0] = '\0';
+
+	/* NOTE(review): sizeof(sun) is passed to sendto(), so for an
+	 * abstract-namespace socket the trailing NULs become part of the
+	 * name -- confirm systemd tolerates this */
+	/* nothing we can do if this errors out... */
+	sendto(sock, info, strlen(info), 0, (struct sockaddr *)&sun,
+	       sizeof(sun));
+
+	close(sock);
}
void systemd_send_stopping(void)
systemd_send_information("STOPPING=1");
}
-/*
- * How many seconds should we wait between watchdog sends
- */
-static int wsecs = 0;
static struct thread_master *systemd_master = NULL;
static int systemd_send_watchdog(struct thread *t)
{
	systemd_send_information("WATCHDOG=1");
-	thread_add_timer(systemd_master, systemd_send_watchdog, NULL, wsecs,
-			 NULL);
-
+	/* self-rescheduling keepalive; watchdog_msec is set up by
+	 * systemd_init_env() before this timer is first armed */
+	assert(watchdog_msec > 0);
+	thread_add_timer_msec(systemd_master, systemd_send_watchdog, NULL,
+			      watchdog_msec, NULL);
	return 1;
}
-void systemd_send_started(struct thread_master *m, int the_process)
+void systemd_send_started(struct thread_master *m)
{
assert(m != NULL);
- wsecs = systemd_get_watchdog_time(the_process);
systemd_master = m;
systemd_send_information("READY=1");
- if (wsecs != 0) {
- systemd_send_information("WATCHDOG=1");
- thread_add_timer(m, systemd_send_watchdog, m, wsecs, NULL);
- }
+ if (watchdog_msec > 0)
+ systemd_send_watchdog(NULL);
}
void systemd_send_status(const char *status)
snprintf(buffer, sizeof(buffer), "STATUS=%s", status);
systemd_send_information(buffer);
}
+
+/* parse an environment variable as an integer.
+ *
+ * Returns dflt if the variable is unset, empty, or not fully numeric
+ * (strtoimax must consume the entire value).
+ */
+static intmax_t getenv_int(const char *varname, intmax_t dflt)
+{
+	char *val, *err;
+	intmax_t intval;
+
+	val = getenv(varname);
+	if (!val)
+		return dflt;
+
+	intval = strtoimax(val, &err, 0);
+	/* *err != '\0' => trailing garbage; !*val => empty string */
+	if (*err || !*val)
+		return dflt;
+	return intval;
+}
+
+/* grab systemd-related startup state from environment variables.
+ *
+ * Should run early, before systemd_send_started() and before anything
+ * forks, since it also scrubs the watchdog variables from the
+ * environment so they don't leak into child processes.
+ */
+void systemd_init_env(void)
+{
+	char *tmp;
+	uintmax_t dev, ino;
+	int len;
+	struct stat st;
+
+	notify_socket = getenv("NOTIFY_SOCKET");
+
+	/* no point in setting up watchdog w/o notify socket */
+	if (notify_socket) {
+		intmax_t watchdog_usec;
+
+		watchdog_pid = getenv_int("WATCHDOG_PID", -1);
+		if (watchdog_pid <= 0)
+			watchdog_pid = -1;
+
+		/* note this is the deadline, hence the divide by 3 */
+		watchdog_usec = getenv_int("WATCHDOG_USEC", 0);
+		if (watchdog_usec >= 3000)
+			watchdog_msec = watchdog_usec / 3000;
+		else {
+			if (watchdog_usec != 0)
+				flog_err(
+					EC_LIB_UNAVAILABLE,
+					"systemd expects a %jd microsecond watchdog timer, but FRR only supports millisecond resolution!",
+					watchdog_usec);
+			watchdog_msec = 0;
+		}
+	}
+
+	/* JOURNAL_STREAM is "<dev>:<ino>"; compare against fstat() of
+	 * stdout/stderr to detect journald-connected streams.  The %n +
+	 * strlen() check rejects values with trailing garbage. */
+	tmp = getenv("JOURNAL_STREAM");
+	if (tmp && sscanf(tmp, "%ju:%ju%n", &dev, &ino, &len) == 2
+	    && (size_t)len == strlen(tmp)) {
+		if (fstat(1, &st) == 0 && st.st_dev == (dev_t)dev
+		    && st.st_ino == (ino_t)ino)
+			sd_stdout_is_journal = true;
+		if (fstat(2, &st) == 0 && st.st_dev == (dev_t)dev
+		    && st.st_ino == (ino_t)ino)
+			sd_stderr_is_journal = true;
+	}
+
+	/* these should *not* be passed to any other process we start */
+	unsetenv("WATCHDOG_PID");
+	unsetenv("WATCHDOG_USEC");
+}
*
* Design point is that if systemd is not being used on this system
* then these functions becomes a no-op.
- *
- * To turn on systemd compilation, use --enable-systemd on
- * configure run.
*/
void systemd_send_stopping(void);
* the_process - Should we send watchdog if we are not the requested
* process?
*/
-void systemd_send_started(struct thread_master *master, int the_process);
+void systemd_send_started(struct thread_master *master);
/*
* status - A status string to send to systemd
*/
void systemd_send_status(const char *status);
+/*
+ * grab startup state from env vars
+ */
+void systemd_init_env(void);
+
#ifdef __cplusplus
}
#endif
memset(&p, 0, sizeof(struct prefix_ipv4));
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = *addr;
return route_node_match(table, (struct prefix *)&p);
memset(&p, 0, sizeof(struct prefix_ipv6));
p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
p.prefix = *addr;
return route_node_match(table, &p);
static void thread_free(struct thread_master *master, struct thread *thread);
+/* runtime defaults for the CPU statistics knobs.  The old build-time
+ * macros EXCLUDE_CPU_TIME / CONSUMED_TIME_CHECK are kept as seeds for
+ * backwards compatibility with existing build configurations.
+ */
+#ifndef EXCLUDE_CPU_TIME
+#define EXCLUDE_CPU_TIME 0
+#endif
+#ifndef CONSUMED_TIME_CHECK
+#define CONSUMED_TIME_CHECK 0
+#endif
+
+bool cputime_enabled = !EXCLUDE_CPU_TIME;
+/* warning thresholds; 0 disables the respective warning */
+unsigned long cputime_threshold = CONSUMED_TIME_CHECK;
+unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
+
/* CLI start ---------------------------------------------------------------- */
+#ifndef VTYSH_EXTRACT_PL
+#include "lib/thread_clippy.c"
+#endif
+
static unsigned int cpu_record_hash_key(const struct cpu_thread_history *a)
{
int size = sizeof(a->func);
XFREE(MTYPE_THREAD_STATS, hist);
}
-#ifndef EXCLUDE_CPU_TIME
static void vty_out_cpu_thread_history(struct vty *vty,
struct cpu_thread_history *a)
{
struct thread_master *m;
struct listnode *ln;
+ if (!cputime_enabled)
+ vty_out(vty,
+ "\n"
+ "Collecting CPU time statistics is currently disabled. Following statistics\n"
+ "will be zero or may display data from when collection was enabled. Use the\n"
+ " \"service cputime-stats\" command to start collecting data.\n"
+ "\nCounters and wallclock times are always maintained and should be accurate.\n");
+
memset(&tmp, 0, sizeof(tmp));
tmp.funcname = "TOTAL";
tmp.types = filter;
if (tmp.total_calls > 0)
vty_out_cpu_thread_history(vty, &tmp);
}
-#endif
static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
{
return filter;
}
-#ifndef EXCLUDE_CPU_TIME
DEFUN_NOSH (show_thread_cpu,
show_thread_cpu_cmd,
"show thread cpu [FILTER]",
cpu_record_print(vty, filter);
return CMD_SUCCESS;
}
-#endif
+
+/* runtime toggle for per-task CPU time accounting */
+DEFPY (service_cputime_stats,
+       service_cputime_stats_cmd,
+       "[no] service cputime-stats",
+       NO_STR
+       "Set up miscellaneous service\n"
+       "Collect CPU usage statistics\n")
+{
+	cputime_enabled = !no;
+	return CMD_SUCCESS;
+}
+
+/* CLI argument is milliseconds; the stored threshold is presumably in
+ * microseconds (hence the * 1000) -- cf. thread_consumed_time() */
+DEFPY (service_cputime_warning,
+       service_cputime_warning_cmd,
+       "[no] service cputime-warning (1-4294967295)",
+       NO_STR
+       "Set up miscellaneous service\n"
+       "Warn for tasks exceeding CPU usage threshold\n"
+       "Warning threshold in milliseconds\n")
+{
+	if (no)
+		cputime_threshold = 0;
+	else
+		cputime_threshold = cputime_warning * 1000;
+	return CMD_SUCCESS;
+}
+
+/* plain "no" form without the threshold argument */
+ALIAS (service_cputime_warning,
+       no_service_cputime_warning_cmd,
+       "no service cputime-warning",
+       NO_STR
+       "Set up miscellaneous service\n"
+       "Warn for tasks exceeding CPU usage threshold\n")
+
+/* wallclock variant; unlike cpu time, wallclock is always measured */
+DEFPY (service_walltime_warning,
+       service_walltime_warning_cmd,
+       "[no] service walltime-warning (1-4294967295)",
+       NO_STR
+       "Set up miscellaneous service\n"
+       "Warn for tasks exceeding total wallclock threshold\n"
+       "Warning threshold in milliseconds\n")
+{
+	if (no)
+		walltime_threshold = 0;
+	else
+		walltime_threshold = walltime_warning * 1000;
+	return CMD_SUCCESS;
+}
+
+/* plain "no" form without the threshold argument */
+ALIAS (service_walltime_warning,
+       no_service_walltime_warning_cmd,
+       "no service walltime-warning",
+       NO_STR
+       "Set up miscellaneous service\n"
+       "Warn for tasks exceeding total wallclock threshold\n")
static void show_thread_poll_helper(struct vty *vty, struct thread_master *m)
{
void thread_cmd_init(void)
{
-#ifndef EXCLUDE_CPU_TIME
install_element(VIEW_NODE, &show_thread_cpu_cmd);
-#endif
install_element(VIEW_NODE, &show_thread_poll_cmd);
install_element(ENABLE_NODE, &clear_thread_cpu_cmd);
+
+ install_element(CONFIG_NODE, &service_cputime_stats_cmd);
+ install_element(CONFIG_NODE, &service_cputime_warning_cmd);
+ install_element(CONFIG_NODE, &no_service_cputime_warning_cmd);
+ install_element(CONFIG_NODE, &service_walltime_warning_cmd);
+ install_element(CONFIG_NODE, &no_service_walltime_warning_cmd);
}
/* CLI end ------------------------------------------------------------------ */
unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
				   unsigned long *cputime)
{
+#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+	/* r->cpu is a timespec here; convert the ns delta to microseconds */
+	*cputime = (now->cpu.tv_sec - start->cpu.tv_sec) * TIMER_SECOND_MICRO
+		   + (now->cpu.tv_nsec - start->cpu.tv_nsec) / 1000;
+#else
	/* This is 'user + sys' time. */
	*cputime = timeval_elapsed(now->cpu.ru_utime, start->cpu.ru_utime)
		   + timeval_elapsed(now->cpu.ru_stime, start->cpu.ru_stime);
+#endif
	return timeval_elapsed(now->real, start->real);
}
void thread_getrusage(RUSAGE_T *r)
{
+	/* wallclock is always sampled; r->cpu is zeroed while cpu-time
+	 * collection is disabled */
+	monotime(&r->real);
+	if (!cputime_enabled) {
+		memset(&r->cpu, 0, sizeof(r->cpu));
+		return;
+	}
+
+#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+	/* not currently implemented in Linux's vDSO, but maybe at some point
+	 * in the future?
+	 */
+	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &r->cpu);
+#else /* !HAVE_CLOCK_THREAD_CPUTIME_ID */
#if defined RUSAGE_THREAD
#define FRR_RUSAGE RUSAGE_THREAD
#else
#define FRR_RUSAGE RUSAGE_SELF
#endif
-	monotime(&r->real);
-#ifndef EXCLUDE_CPU_TIME
	getrusage(FRR_RUSAGE, &(r->cpu));
#endif
}
*/
void thread_call(struct thread *thread)
{
-#ifndef EXCLUDE_CPU_TIME
- _Atomic unsigned long realtime, cputime;
- unsigned long exp;
- unsigned long helper;
-#endif
RUSAGE_T before, after;
+ /* if the thread being called is the CLI, it may change cputime_enabled
+ * ("service cputime-stats" command), which can result in nonsensical
+ * and very confusing warnings
+ */
+ bool cputime_enabled_here = cputime_enabled;
+
if (thread->master->ready_run_loop)
before = thread->master->last_getrusage;
else
GETRUSAGE(&after);
thread->master->last_getrusage = after;
-#ifndef EXCLUDE_CPU_TIME
- realtime = thread_consumed_time(&after, &before, &helper);
- cputime = helper;
+ unsigned long walltime, cputime;
+ unsigned long exp;
+
+ walltime = thread_consumed_time(&after, &before, &cputime);
- /* update realtime */
- atomic_fetch_add_explicit(&thread->hist->real.total, realtime,
+ /* update walltime */
+ atomic_fetch_add_explicit(&thread->hist->real.total, walltime,
memory_order_seq_cst);
exp = atomic_load_explicit(&thread->hist->real.max,
memory_order_seq_cst);
- while (exp < realtime
+ while (exp < walltime
&& !atomic_compare_exchange_weak_explicit(
- &thread->hist->real.max, &exp, realtime,
- memory_order_seq_cst, memory_order_seq_cst))
+ &thread->hist->real.max, &exp, walltime,
+ memory_order_seq_cst, memory_order_seq_cst))
;
- /* update cputime */
- atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
- memory_order_seq_cst);
- exp = atomic_load_explicit(&thread->hist->cpu.max,
- memory_order_seq_cst);
- while (exp < cputime
- && !atomic_compare_exchange_weak_explicit(
- &thread->hist->cpu.max, &exp, cputime,
- memory_order_seq_cst, memory_order_seq_cst))
- ;
+ if (cputime_enabled_here && cputime_enabled) {
+ /* update cputime */
+ atomic_fetch_add_explicit(&thread->hist->cpu.total, cputime,
+ memory_order_seq_cst);
+ exp = atomic_load_explicit(&thread->hist->cpu.max,
+ memory_order_seq_cst);
+ while (exp < cputime
+ && !atomic_compare_exchange_weak_explicit(
+ &thread->hist->cpu.max, &exp, cputime,
+ memory_order_seq_cst, memory_order_seq_cst))
+ ;
+ }
atomic_fetch_add_explicit(&thread->hist->total_calls, 1,
memory_order_seq_cst);
atomic_fetch_or_explicit(&thread->hist->types, 1 << thread->add_type,
memory_order_seq_cst);
-#ifdef CONSUMED_TIME_CHECK
- if (cputime > CONSUMED_TIME_CHECK) {
+ if (cputime_enabled_here && cputime_enabled && cputime_threshold
+ && cputime > cputime_threshold) {
/*
- * We have a CPU Hog on our hands. The time FRR
- * has spent doing actual work ( not sleeping )
- * is greater than 5 seconds.
	 * We have a CPU Hog on our hands. The time FRR has spent
	 * doing actual work (not sleeping) exceeds the warning threshold.
* Whinge about it now, so we're aware this is yet another task
* to fix.
*/
EC_LIB_SLOW_THREAD_CPU,
"CPU HOG: task %s (%lx) ran for %lums (cpu time %lums)",
thread->xref->funcname, (unsigned long)thread->func,
- realtime / 1000, cputime / 1000);
- } else if (realtime > CONSUMED_TIME_CHECK) {
+ walltime / 1000, cputime / 1000);
+
+ } else if (walltime_threshold && walltime > walltime_threshold) {
/*
- * The runtime for a task is greater than 5 seconds, but
- * the cpu time is under 5 seconds. Let's whine
- * about this because this could imply some sort of
- * scheduling issue.
	 * The task's wallclock runtime exceeded the warning threshold,
	 * but its cpu time did not. Let's whine about this because
	 * this could imply some sort of scheduling issue.
*/
atomic_fetch_add_explicit(&thread->hist->total_wall_warn,
1, memory_order_seq_cst);
EC_LIB_SLOW_THREAD_WALL,
"STARVATION: task %s (%lx) ran for %lums (cpu time %lums)",
thread->xref->funcname, (unsigned long)thread->func,
- realtime / 1000, cputime / 1000);
+ walltime / 1000, cputime / 1000);
}
-#endif /* CONSUMED_TIME_CHECK */
-#endif /* Exclude CPU Time */
}
/* Execute thread */
extern "C" {
#endif
+extern bool cputime_enabled;
+extern unsigned long cputime_threshold;
+/* capturing wallclock time is always enabled since it is fast (reading
+ * hardware TSC w/o syscalls)
+ */
+extern unsigned long walltime_threshold;
+
struct rusage_t {
+#ifdef HAVE_CLOCK_THREAD_CPUTIME_ID
+ struct timespec cpu;
+#else
struct rusage cpu;
+#endif
struct timeval real;
};
#define RUSAGE_T struct rusage_t
zlog_notice("%s%s", prompt_str, buf);
}
-#ifdef CONSUMED_TIME_CHECK
- {
- RUSAGE_T before;
- RUSAGE_T after;
- unsigned long realtime, cputime;
+ RUSAGE_T before;
+ RUSAGE_T after;
+ unsigned long walltime, cputime;
- GETRUSAGE(&before);
-#endif /* CONSUMED_TIME_CHECK */
+ /* cmd_execute() may change cputime_enabled if we're executing the
+ * "service cputime-stats" command, which can result in nonsensical
+ * and very confusing warnings
+ */
+ bool cputime_enabled_here = cputime_enabled;
- ret = cmd_execute(vty, buf, NULL, 0);
+ GETRUSAGE(&before);
- /* Get the name of the protocol if any */
- protocolname = frr_protoname;
+ ret = cmd_execute(vty, buf, NULL, 0);
-#ifdef CONSUMED_TIME_CHECK
- GETRUSAGE(&after);
- realtime = thread_consumed_time(&after, &before, &cputime);
- if (cputime > CONSUMED_TIME_CHECK) {
- /* Warn about CPU hog that must be fixed. */
- flog_warn(
- EC_LIB_SLOW_THREAD_CPU,
- "CPU HOG: command took %lums (cpu time %lums): %s",
- realtime / 1000, cputime / 1000, buf);
- } else if (realtime > CONSUMED_TIME_CHECK) {
- flog_warn(
- EC_LIB_SLOW_THREAD_WALL,
- "STARVATION: command took %lums (cpu time %lums): %s",
- realtime / 1000, cputime / 1000, buf);
- }
- }
-#endif /* CONSUMED_TIME_CHECK */
+ GETRUSAGE(&after);
+
+ walltime = thread_consumed_time(&after, &before, &cputime);
+
+ if (cputime_enabled_here && cputime_enabled && cputime_threshold
+ && cputime > cputime_threshold)
+ /* Warn about CPU hog that must be fixed. */
+ flog_warn(EC_LIB_SLOW_THREAD_CPU,
+ "CPU HOG: command took %lums (cpu time %lums): %s",
+ walltime / 1000, cputime / 1000, buf);
+ else if (walltime_threshold && walltime > walltime_threshold)
+ flog_warn(EC_LIB_SLOW_THREAD_WALL,
+ "STARVATION: command took %lums (cpu time %lums): %s",
+ walltime / 1000, cputime / 1000, buf);
+
+ /* Get the name of the protocol if any */
+ protocolname = frr_protoname;
if (ret != CMD_SUCCESS)
switch (ret) {
struct nexthop nh = {};
p.family = AF_INET6;
- p.prefixlen = 128;
+ p.prefixlen = IPV6_MAX_BITLEN;
p.prefix = *sid;
api.vrf_id = VRF_DEFAULT;
STREAM_GETC(s, api->prefix.prefixlen);
switch (api->prefix.family) {
case AF_INET:
- if (api->prefix.prefixlen > IPV4_MAX_PREFIXLEN) {
+ if (api->prefix.prefixlen > IPV4_MAX_BITLEN) {
flog_err(
EC_LIB_ZAPI_ENCODE,
"%s: V4 prefixlen is %d which should not be more than 32",
}
break;
case AF_INET6:
- if (api->prefix.prefixlen > IPV6_MAX_PREFIXLEN) {
+ if (api->prefix.prefixlen > IPV6_MAX_BITLEN) {
flog_err(
EC_LIB_ZAPI_ENCODE,
"%s: v6 prefixlen is %d which should not be more than 128",
if (CHECK_FLAG(api->message, ZAPI_MESSAGE_SRCPFX)) {
api->src_prefix.family = AF_INET6;
STREAM_GETC(s, api->src_prefix.prefixlen);
- if (api->src_prefix.prefixlen > IPV6_MAX_PREFIXLEN) {
+ if (api->src_prefix.prefixlen > IPV6_MAX_BITLEN) {
flog_err(
EC_LIB_ZAPI_ENCODE,
"%s: SRC Prefix prefixlen received: %d is too large",
}
struct interface *zebra_interface_link_params_read(struct stream *s,
- vrf_id_t vrf_id)
+ vrf_id_t vrf_id,
+ bool *changed)
{
struct if_link_params *iflp;
+ struct if_link_params iflp_copy;
ifindex_t ifindex;
+ bool params_changed = false;
STREAM_GETL(s, ifindex);
return NULL;
}
+ if (ifp->link_params == NULL)
+ params_changed = true;
+
if ((iflp = if_link_params_get(ifp)) == NULL)
return NULL;
+ memcpy(&iflp_copy, iflp, sizeof(iflp_copy));
+
if (link_params_set_value(s, iflp) != 0)
goto stream_failure;
+ if (memcmp(&iflp_copy, iflp, sizeof(iflp_copy)))
+ params_changed = true;
+
+ if (changed)
+ *changed = params_changed;
+
return ifp;
stream_failure:
zclient->t_read = NULL;
/* Read zebra header (if we don't have it already). */
- if ((already = stream_get_endp(zclient->ibuf)) < ZEBRA_HEADER_SIZE) {
+ already = stream_get_endp(zclient->ibuf);
+ if (already < ZEBRA_HEADER_SIZE) {
ssize_t nbyte;
if (((nbyte = stream_read_try(zclient->ibuf, zclient->sock,
ZEBRA_HEADER_SIZE - already))
return zclient_failed(zclient);
}
if (nbyte != (ssize_t)(ZEBRA_HEADER_SIZE - already)) {
- /* Try again later. */
zclient_event(ZCLIENT_READ, zclient);
return 0;
}
extern int zebra_router_id_update_read(struct stream *s, struct prefix *rid);
extern struct interface *zebra_interface_link_params_read(struct stream *s,
- vrf_id_t vrf_id);
+ vrf_id_t vrf_id,
+ bool *changed);
extern size_t zebra_interface_link_params_write(struct stream *,
struct interface *);
extern enum zclient_send_status
+ OSPF6_PREFIX_SPACE(external->prefix.prefix_length);
memset(&fwd_addr, 0, sizeof(struct prefix));
fwd_addr.family = AF_INET6;
- fwd_addr.prefixlen = IPV6_MAX_PREFIXLEN;
+ fwd_addr.prefixlen = IPV6_MAX_BITLEN;
memcpy(&fwd_addr.u.prefix6, (caddr_t)external + offset,
sizeof(struct in6_addr));
+/* timer callback: re-resolve route-maps and bounce redistribution for
+ * every route type flagged via ospf6_asbr_distribute_list_update() */
static int ospf6_asbr_routemap_update_timer(struct thread *thread)
{
-	void **arg;
-	int arg_type;
-	struct ospf6 *ospf6;
+	struct ospf6 *ospf6 = THREAD_ARG(thread);
	struct ospf6_redist *red;
-
-	arg = THREAD_ARG(thread);
-	ospf6 = (struct ospf6 *)arg[0];
-	arg_type = (int)(intptr_t)arg[1];
+	int type;
	ospf6->t_distribute_update = NULL;
-	red = ospf6_redist_lookup(ospf6, arg_type, 0);
+	for (type = 0; type < ZEBRA_ROUTE_MAX; type++) {
+		red = ospf6_redist_lookup(ospf6, type, 0);
-	if (red && ROUTEMAP_NAME(red))
-		ROUTEMAP(red) = route_map_lookup_by_name(ROUTEMAP_NAME(red));
-	if (red && ROUTEMAP(red)) {
-		if (IS_OSPF6_DEBUG_ASBR)
-			zlog_debug("%s: route-map %s update, reset redist %s",
-				   __func__, ROUTEMAP_NAME(red),
-				   ZROUTE_NAME(arg_type));
+		if (!red)
+			continue;
+
+		/* only touch types flagged since the timer was scheduled */
+		if (!CHECK_FLAG(red->flag, OSPF6_IS_RMAP_CHANGED))
+			continue;
+
+		if (ROUTEMAP_NAME(red))
+			ROUTEMAP(red) =
+				route_map_lookup_by_name(ROUTEMAP_NAME(red));
-		ospf6_zebra_no_redistribute(arg_type, ospf6->vrf_id);
-		ospf6_zebra_redistribute(arg_type, ospf6->vrf_id);
+		if (ROUTEMAP(red)) {
+			if (IS_OSPF6_DEBUG_ASBR)
+				zlog_debug(
+					"%s: route-map %s update, reset redist %s",
+					__func__, ROUTEMAP_NAME(red),
+					ZROUTE_NAME(type));
+
+			ospf6_zebra_no_redistribute(type, ospf6->vrf_id);
+			ospf6_zebra_redistribute(type, ospf6->vrf_id);
+		}
+
+		UNSET_FLAG(red->flag, OSPF6_IS_RMAP_CHANGED);
	}
-	XFREE(MTYPE_OSPF6_DIST_ARGS, arg);
	return 0;
}
+/* flag a route type for redistribution reset; the actual work is
+ * debounced into ospf6_asbr_routemap_update_timer() */
-void ospf6_asbr_distribute_list_update(int type, struct ospf6 *ospf6)
+void ospf6_asbr_distribute_list_update(struct ospf6 *ospf6,
+				       struct ospf6_redist *red)
{
-	void **args = NULL;
+	SET_FLAG(red->flag, OSPF6_IS_RMAP_CHANGED);
+	/* timer already pending: the flag above is picked up when it runs */
	if (ospf6->t_distribute_update)
		return;
-	args = XCALLOC(MTYPE_OSPF6_DIST_ARGS, sizeof(void *) * 2);
-
-	args[0] = ospf6;
-	args[1] = (void *)((ptrdiff_t)type);
-
	if (IS_OSPF6_DEBUG_ASBR)
-		zlog_debug("%s: trigger redistribute %s reset thread", __func__,
-			   ZROUTE_NAME(type));
+		zlog_debug("%s: trigger redistribute reset thread", __func__);
	ospf6->t_distribute_update = NULL;
-	thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, args,
+	thread_add_timer_msec(master, ospf6_asbr_routemap_update_timer, ospf6,
			      OSPF_MIN_LS_INTERVAL,
			      &ospf6->t_distribute_update);
}
type));
route_map_counter_increment(ROUTEMAP(red));
-
- ospf6_asbr_distribute_list_update(type, ospf6);
+ ospf6_asbr_distribute_list_update(ospf6, red);
} else {
/*
* if the mapname matches a
red = ospf6_redist_lookup(ospf6, type, 0);
if (red && ROUTEMAP_NAME(red)
&& (strcmp(ROUTEMAP_NAME(red), name) == 0))
- ospf6_asbr_distribute_list_update(type, ospf6);
+ ospf6_asbr_distribute_list_update(ospf6, red);
}
}
}
/* create/update binding in external_id_table */
prefix_id.family = AF_INET;
- prefix_id.prefixlen = 32;
+ prefix_id.prefixlen = IPV4_MAX_BITLEN;
prefix_id.u.prefix4.s_addr = htonl(info->id);
node = route_node_get(ospf6->external_id_table, &prefix_id);
node->info = match;
/* create/update binding in external_id_table */
prefix_id.family = AF_INET;
- prefix_id.prefixlen = 32;
+ prefix_id.prefixlen = IPV4_MAX_BITLEN;
prefix_id.u.prefix4.s_addr = htonl(info->id);
node = route_node_get(ospf6->external_id_table, &prefix_id);
node->info = route;
/* remove binding in external_id_table */
prefix_id.family = AF_INET;
- prefix_id.prefixlen = 32;
+ prefix_id.prefixlen = IPV4_MAX_BITLEN;
prefix_id.u.prefix4.s_addr = htonl(info->id);
node = route_node_lookup(ospf6->external_id_table, &prefix_id);
assert(node);
install_element(OSPF6_NODE, &no_ospf6_redistribute_cmd);
}
-void ospf6_asbr_redistribute_reset(struct ospf6 *ospf6)
+void ospf6_asbr_redistribute_disable(struct ospf6 *ospf6)
{
int type;
struct ospf6_redist *red;
}
}
+/* re-apply redistribution for every configured route type, preserving
+ * any attached route-map (by name, since unset clears it) */
+void ospf6_asbr_redistribute_reset(struct ospf6 *ospf6)
+{
+	int type;
+	struct ospf6_redist *red;
+	char buf[RMAP_NAME_MAXLEN];
+
+	/* <= on purpose: DEFAULT_ROUTE is handled inside the loop and
+	 * presumably sits at/above ZEBRA_ROUTE_MAX -- TODO confirm */
+	for (type = 0; type <= ZEBRA_ROUTE_MAX; type++) {
+		buf[0] = '\0';
+		if (type == ZEBRA_ROUTE_OSPF6)
+			continue;
+		red = ospf6_redist_lookup(ospf6, type, 0);
+		if (!red)
+			continue;
+
+		if (type == DEFAULT_ROUTE) {
+			ospf6_redistribute_default_set(
+				ospf6, ospf6->default_originate);
+			continue;
+		}
+		if (ROUTEMAP_NAME(red))
+			strlcpy(buf, ROUTEMAP_NAME(red), sizeof(buf));
+
+		/* unset+set bounces redistribution; the saved route-map
+		 * name is restored before the set */
+		ospf6_asbr_redistribute_unset(ospf6, red, type);
+		if (buf[0])
+			ospf6_asbr_routemap_set(red, buf);
+		ospf6_asbr_redistribute_set(ospf6, type);
+	}
+}
+
void ospf6_asbr_terminate(void)
{
/* Cleanup route maps */
struct ospf6 *ospf6);
extern void ospf6_asbr_init(void);
+extern void ospf6_asbr_redistribute_disable(struct ospf6 *ospf6);
extern void ospf6_asbr_redistribute_reset(struct ospf6 *ospf6);
extern void ospf6_asbr_terminate(void);
extern void ospf6_asbr_send_externals_to_area(struct ospf6_area *);
extern void ospf6_asbr_update_route_ecmp_path(struct ospf6_route *old,
struct ospf6_route *route,
struct ospf6 *ospf6);
-extern void ospf6_asbr_distribute_list_update(int type, struct ospf6 *ospf6);
+extern void ospf6_asbr_distribute_list_update(struct ospf6 *ospf6,
+ struct ospf6_redist *red);
struct ospf6_redist *ospf6_redist_lookup(struct ospf6 *ospf6, int type,
unsigned short instance);
extern void ospf6_asbr_routemap_update(const char *mapname);
}
/* Clear the specified interface structure */
-static void ospf6_interface_clear(struct vty *vty, struct interface *ifp)
+void ospf6_interface_clear(struct interface *ifp)
{
struct ospf6_interface *oi;
if (argc == 4) /* Clear all the ospfv3 interfaces. */
{
FOR_ALL_INTERFACES (vrf, ifp)
- ospf6_interface_clear(vty, ifp);
+ ospf6_interface_clear(ifp);
} else /* Interface name is specified. */
{
if ((ifp = if_lookup_by_name(argv[idx_ifname]->arg,
argv[idx_ifname]->arg);
return CMD_WARNING;
}
- ospf6_interface_clear(vty, ifp);
+ ospf6_interface_clear(ifp);
}
return CMD_SUCCESS;
extern int neighbor_change(struct thread *);
extern void ospf6_interface_init(void);
+extern void ospf6_interface_clear(struct interface *ifp);
extern void install_element_ospf6_clear_interface(void);
adv_router);
json_object_int_add(json_obj, "lsSequenceNumber",
(unsigned long)ntohl(lsa->header->seqnum));
- json_object_int_add(json_obj, "checkSum",
+ json_object_int_add(json_obj, "checksum",
ntohs(lsa->header->checksum));
json_object_int_add(json_obj, "length",
ntohs(lsa->header->length));
return OSPF6_READ_CONTINUE;
}
+ /*
+ * Drop packet destined to another VRF.
+ * This happens when raw_l3mdev_accept is set to 1.
+ */
+ if (ospf6->vrf_id != oi->interface->vrf_id)
+ return OSPF6_READ_CONTINUE;
+
oh = (struct ospf6_header *)recvbuf;
if (ospf6_rxpacket_examin(oi, oh, len) != MSG_OK)
return OSPF6_READ_CONTINUE;
{ .val_bool = false },
);
+#ifndef VTYSH_EXTRACT_PL
+#include "ospf6d/ospf6_top_clippy.c"
+#endif
+
/* global ospf6d variable */
static struct ospf6_master ospf6_master;
struct ospf6_master *om6;
/* XXX: This also changes persistent settings */
/* Unregister redistribution */
- ospf6_asbr_redistribute_reset(o);
+ ospf6_asbr_redistribute_disable(o);
ospf6_lsdb_remove_all(o->lsdb);
ospf6_route_remove_all(o->route_table);
if (ospf6->router_id_static != 0)
ospf6->router_id = ospf6->router_id_static;
else
- ospf6->router_id = om6->zebra_router_id;
+ ospf6->router_id = ospf6->router_id_zebra;
}
/* start ospf6 */
return CMD_SUCCESS;
}
+/*
+ * Flush the whole OSPFv3 database for one process: the per-interface
+ * LSDBs (lsdb, lsdb_self, lsupdate_list, lsack_list) of every operative
+ * interface in the process' VRF, each area's LSDBs together with its
+ * SPF and route tables, and finally the process-level LSDBs plus the
+ * route and border-router tables.
+ */
+static void ospf6_db_clear(struct ospf6 *ospf6)
+{
+ struct ospf6_interface *oi;
+ struct interface *ifp;
+ struct vrf *vrf = vrf_lookup_by_id(ospf6->vrf_id);
+ struct listnode *node, *nnode;
+ struct ospf6_area *oa;
+
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ if (if_is_operative(ifp) && ifp->info != NULL) {
+ oi = (struct ospf6_interface *)ifp->info;
+ ospf6_lsdb_remove_all(oi->lsdb);
+ ospf6_lsdb_remove_all(oi->lsdb_self);
+ ospf6_lsdb_remove_all(oi->lsupdate_list);
+ ospf6_lsdb_remove_all(oi->lsack_list);
+ }
+ }
+
+ for (ALL_LIST_ELEMENTS(ospf6->area_list, node, nnode, oa)) {
+ ospf6_lsdb_remove_all(oa->lsdb);
+ ospf6_lsdb_remove_all(oa->lsdb_self);
+
+ ospf6_spf_table_finish(oa->spf_table);
+ ospf6_route_remove_all(oa->route_table);
+ }
+
+ ospf6_lsdb_remove_all(ospf6->lsdb);
+ ospf6_lsdb_remove_all(ospf6->lsdb_self);
+ ospf6_route_remove_all(ospf6->route_table);
+ ospf6_route_remove_all(ospf6->brouter_table);
+}
+
+/*
+ * Restart an OSPFv3 process in place: flush self-originated LSAs, clear
+ * the instance-shutdown flag and the whole database, re-derive the
+ * router-id (static config or zebra-learned), reset redistribution and
+ * bounce every interface in the process' VRF.
+ */
+static void ospf6_process_reset(struct ospf6 *ospf6)
+{
+ struct interface *ifp;
+ struct vrf *vrf = vrf_lookup_by_id(ospf6->vrf_id);
+
+ ospf6_flush_self_originated_lsas_now(ospf6);
+ ospf6->inst_shutdown = 0;
+ ospf6_db_clear(ospf6);
+
+ ospf6_router_id_update(ospf6);
+
+ ospf6_asbr_redistribute_reset(ospf6);
+ FOR_ALL_INTERFACES (vrf, ifp)
+ ospf6_interface_clear(ifp);
+}
+
+/*
+ * "clear ipv6 ospf6 process [vrf NAME]": reset the OSPFv3 process of
+ * the given VRF (default VRF when none is specified).  Reports an
+ * error on the vty when OSPFv3 is not configured for that VRF.
+ */
+DEFPY (clear_router_ospf6,
+ clear_router_ospf6_cmd,
+ "clear ipv6 ospf6 process [vrf NAME$name]",
+ CLEAR_STR
+ IP6_STR
+ OSPF6_STR
+ "Reset OSPF Process\n"
+ VRF_CMD_HELP_STR)
+{
+ struct ospf6 *ospf6;
+ const char *vrf_name = VRF_DEFAULT_NAME;
+
+ if (name != NULL)
+ vrf_name = name;
+
+ ospf6 = ospf6_lookup_by_vrf_name(vrf_name);
+ if (ospf6 == NULL)
+ vty_out(vty, "OSPFv3 is not configured\n");
+ else
+ ospf6_process_reset(ospf6);
+
+ return CMD_SUCCESS;
+}
+
/* change Router_ID commands. */
DEFUN(ospf6_router_id,
ospf6_router_id_cmd,
for (ALL_LIST_ELEMENTS_RO(o->area_list, node, oa)) {
if (oa->full_nbrs) {
vty_out(vty,
- "For this router-id change to take effect, save config and restart ospf6d\n");
+ "For this router-id change to take effect, run the \"clear ipv6 ospf6 process\" command\n");
return CMD_SUCCESS;
}
}
for (ALL_LIST_ELEMENTS_RO(o->area_list, node, oa)) {
if (oa->full_nbrs) {
vty_out(vty,
- "For this router-id change to take effect, save config and restart ospf6d\n");
+ "For this router-id change to take effect, run the \"clear ipv6 ospf6 process\" command\n");
return CMD_SUCCESS;
}
}
o->router_id = 0;
- if (o->router_id_zebra.s_addr)
- o->router_id = (uint32_t)o->router_id_zebra.s_addr;
+ if (o->router_id_zebra)
+ o->router_id = o->router_id_zebra;
return CMD_SUCCESS;
}
.config_write = config_write_ospf6,
};
+/* Register the "clear ipv6 ospf6 process" command in the enable node. */
+void install_element_ospf6_clear_process(void)
+{
+ install_element(ENABLE_NODE, &clear_router_ospf6_cmd);
+}
+
/* Install ospf related commands. */
void ospf6_top_init(void)
{
struct list *ospf6;
/* OSPFv3 thread master. */
struct thread_master *master;
- in_addr_t zebra_router_id;
};
/* ospf6->config_flags */
OSPF6_LOG_ADJACENCY_DETAIL = (1 << 1),
};
+/* For processing route-map change update in the callback */
+#define OSPF6_IS_RMAP_CHANGED 0x01
struct ospf6_redist {
uint8_t instance;
+ uint8_t flag;
/* Redistribute metric info. */
struct {
int type; /* External metric type (E1 or E2). */
/* static router id */
in_addr_t router_id_static;
- struct in_addr router_id_zebra;
+ in_addr_t router_id_zebra;
/* start time */
struct timeval starttime;
/* prototypes */
extern void ospf6_master_init(struct thread_master *master);
+extern void install_element_ospf6_clear_process(void);
extern void ospf6_top_init(void);
extern void ospf6_delete(struct ospf6 *o);
extern void ospf6_router_id_update(struct ospf6 *ospf6);
zebra_router_id_update_read(zclient->ibuf, &router_id);
- om6->zebra_router_id = router_id.u.prefix4.s_addr;
+ if (IS_OSPF6_DEBUG_ZEBRA(RECV))
+ zlog_debug("Zebra router-id update %pI4 vrf %s id %u",
+ &router_id.u.prefix4, ospf6_vrf_id_to_name(vrf_id),
+ vrf_id);
+
o = ospf6_lookup_by_vrf_id(vrf_id);
if (o == NULL)
return 0;
- o->router_id_zebra = router_id.u.prefix4;
- if (IS_OSPF6_DEBUG_ZEBRA(RECV))
- zlog_debug("%s: zebra router-id %pI4 update", __func__,
- &router_id.u.prefix4);
+ o->router_id_zebra = router_id.u.prefix4.s_addr;
ospf6_router_id_update(o);
install_element_ospf6_debug_flood();
install_element_ospf6_debug_nssa();
+ install_element_ospf6_clear_process();
install_element_ospf6_clear_interface();
install_element(ENABLE_NODE, &show_debugging_ospf6_cmd);
ospf6d_ospf6d_snmp_la_LIBADD = lib/libfrrsnmp.la
clippy_scan += \
+ ospf6d/ospf6_top.c \
ospf6d/ospf6_asbr.c \
# end
return false;
/*Host route shouldn't be configured as summary addres*/
- if (p->prefixlen == IPV4_MAX_PREFIXLEN)
+ if (p->prefixlen == IPV4_MAX_BITLEN)
return false;
return true;
#include "ospfd/ospf_dump.h"
#include "ospfd/ospf_ldp_sync.h"
#include "ospfd/ospf_route.h"
+#include "ospfd/ospf_te.h"
DEFINE_QOBJ_TYPE(ospf_interface);
DEFINE_HOOK(ospf_vl_add, (struct ospf_vl_data * vd), (vd));
struct ospf_interface *rninfo = NULL;
p = *prefix;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
/* route_node_get implicitely locks */
if ((rn = route_node_lookup(IF_OIFS(ifp), &p))) {
struct prefix p;
p = *oi->address;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
apply_mask(&p);
rn = route_node_get(IF_OIFS(ifp), &p);
struct prefix p;
p = *oi->address;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
rn = route_node_lookup(IF_OIFS(oi->ifp), &p);
assert(rn);
struct route_node *rn;
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = addr;
rn = route_node_lookup(IF_OIFS_PARAMS(ifp), (struct prefix *)&p);
if (!rn || !rn->info)
struct route_node *rn;
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = addr;
rn = route_node_lookup(IF_OIFS_PARAMS(ifp), (struct prefix *)&p);
struct route_node *rn;
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = addr;
apply_mask_ipv4(&p);
ospf_if_update(ospf, ifp);
+ if (HAS_LINK_PARAMS(ifp))
+ ospf_mpls_te_update_if(ifp);
+
hook_call(ospf_if_update, ifp);
return 0;
ospf_if_up(oi);
}
+ if (HAS_LINK_PARAMS(ifp))
+ ospf_mpls_te_update_if(ifp);
+
return 0;
}
/**
* Delete Subnet that correspond to the given IPv4 address and export deletion
- * information before removal. Prefix length is fixed to IPV4_MAX_PREFIXLEN.
+ * information before removal. Prefix length is fixed to IPV4_MAX_BITLEN.
*
* @param ted Links State Database
* @param addr IPv4 address
/* Search subnet that correspond to the address/32 as prefix */
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.u.prefix4 = addr;
subnet = ls_find_subnet(ted, p);
ntohs(rl->link[i].metric));
/* Add corresponding subnet */
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.u.prefix4 = rl->link[i].link_data;
metric = ntohs(rl->link[i].metric);
ospf_te_update_subnet(ted, vertex, p, metric);
case LSA_LINK_TYPE_STUB:
/* Keep only /32 prefix */
p.prefixlen = ip_masklen(rl->link[i].link_data);
- if (p.prefixlen == IPV4_MAX_PREFIXLEN) {
+ if (p.prefixlen == IPV4_MAX_BITLEN) {
p.family = AF_INET;
p.u.prefix4 = rl->link[i].link_id;
metric = ntohs(rl->link[i].metric);
/* Update corresponding Subnets */
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.u.prefix4 = attr->standard.local;
ospf_te_update_subnet(ted, edge->source, p, attr->standard.te_metric);
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.u.prefix4 = attr->standard.remote_addr;
ospf_te_update_subnet(ted, vertex, p, attr->standard.te_metric);
stub_prefix.family = AF_INET;
child_prefix.family = AF_INET;
- child_prefix.prefixlen = IPV4_MAX_PREFIXLEN;
+ child_prefix.prefixlen = IPV4_MAX_BITLEN;
p = ((uint8_t *)root->lsa) + OSPF_LSA_HEADER_SIZE + 4;
lim = ((uint8_t *)root->lsa) + ntohs(root->lsa->length);
if (id == NULL)
lp->prefixlen = 0;
else if (adv_router == NULL) {
- lp->prefixlen = 32;
+ lp->prefixlen = IPV4_MAX_BITLEN;
lp->id = *id;
} else {
lp->prefixlen = 64;
"N E1");
json_object_int_add(json_route, "cost",
er->cost);
+ json_object_int_add(json_route, "tag",
+ er->u.ext.tag);
} else {
vty_out(vty,
"N E1 %-18s [%d] tag: %" ROUTE_TAG_PRI
"N E2");
json_object_int_add(json_route, "cost",
er->cost);
+ json_object_int_add(json_route, "type2cost",
+ er->u.ext.type2_cost);
+ json_object_int_add(json_route, "tag",
+ er->u.ext.tag);
} else {
vty_out(vty,
"N E2 %-18s [%d/%d] tag: %" ROUTE_TAG_PRI
ifp = c->ifp;
p = *c->address;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
rn = route_node_lookup(IF_OIFS(ifp), &p);
if (!rn) {
static int ospf_interface_link_params(ZAPI_CALLBACK_ARGS)
{
struct interface *ifp;
+ bool changed = false;
- ifp = zebra_interface_link_params_read(zclient->ibuf, vrf_id);
+ ifp = zebra_interface_link_params_read(zclient->ibuf, vrf_id, &changed);
- if (ifp == NULL)
+ if (ifp == NULL || !changed)
return 0;
/* Update TE TLV */
candidate->preference = preference;
candidate->policy = policy;
candidate->type = SRTE_CANDIDATE_TYPE_UNDEFINED;
- candidate->discriminator = rand();
+ candidate->discriminator = frr_weak_random();
candidate->protocol_origin = origin;
if (originator != NULL) {
strlcpy(candidate->originator, originator,
api_nh->ifindex = nhop->ifindex;
break;
case NEXTHOP_TYPE_IPV6:
- memcpy(&api_nh->gate.ipv6, &nhop->gate.ipv6, 16);
+ memcpy(&api_nh->gate.ipv6, &nhop->gate.ipv6,
+ IPV6_MAX_BYTELEN);
break;
case NEXTHOP_TYPE_IPV6_IFINDEX:
api_nh->ifindex = nhop->ifindex;
- memcpy(&api_nh->gate.ipv6, &nhop->gate.ipv6, 16);
+ memcpy(&api_nh->gate.ipv6, &nhop->gate.ipv6,
+ IPV6_MAX_BYTELEN);
break;
case NEXTHOP_TYPE_BLACKHOLE:
api_nh->bh_type = nhop->bh_type;
case NEXTHOP_TYPE_IPV4_IFINDEX:
p.family = AF_INET;
p.u.prefix4.s_addr = nhop->gate.ipv4.s_addr;
- p.prefixlen = 32;
+ p.prefixlen = IPV4_MAX_BITLEN;
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
p.family = AF_INET6;
- memcpy(&p.u.prefix6, &nhop->gate.ipv6, 16);
- p.prefixlen = 128;
+ memcpy(&p.u.prefix6, &nhop->gate.ipv6, IPV6_MAX_BYTELEN);
+ p.prefixlen = IPV6_MAX_BITLEN;
if (IN6_IS_ADDR_LINKLOCAL(&nhop->gate.ipv6))
/*
* Don't bother tracking link locals, just track their
bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
sizeof(bsr_str));
- if (bshdr->hm_len > 32) {
+ if (bshdr->hm_len > IPV4_MAX_BITLEN) {
zlog_warn("Bad hashmask length for IPv4; got %hhu, expected value in range 0-32",
bshdr->hm_len);
pim->bsm_dropped++;
if (!uj && !found_oif) {
vty_out(vty,
- "%-15s %-15s %-15s %-6s %-16s %-16s %-3d %8s\n",
+ "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
src_str, grp_str, state_str, "none", in_ifname,
"none", 0, "--:--:--");
}
json_ifp_out);
} else {
vty_out(vty,
- "%-15s %-15s %-6s %-16s %-16s %-3d %8s %s\n",
- src_str, grp_str, proto, in_ifname,
- out_ifname, ttl, oif_uptime,
- pim->vrf->name);
+ "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
+ src_str, grp_str, "-", proto, in_ifname,
+ out_ifname, ttl, oif_uptime);
if (first && !fill) {
src_str[0] = '\0';
grp_str[0] = '\0';
if (!uj && !found_oif) {
vty_out(vty,
- "%-15s %-15s %-6s %-16s %-16s %-3d %8s %s\n",
- src_str, grp_str, proto, in_ifname, "none", 0,
- "--:--:--", pim->vrf->name);
+ "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
+ src_str, grp_str, "-", proto, in_ifname, "none",
+ 0, "--:--:--");
}
}
"Desired min transmit interval\n")
#endif /* !HAVE_BFDD */
- DEFUN (ip_msdp_peer,
- ip_msdp_peer_cmd,
- "ip msdp peer A.B.C.D source A.B.C.D",
- IP_STR
- CFG_MSDP_STR
- "Configure MSDP peer\n"
- "peer ip address\n"
- "Source address for TCP connection\n"
- "local ip address\n")
+DEFPY(ip_msdp_peer, ip_msdp_peer_cmd,
+ "ip msdp peer A.B.C.D$peer source A.B.C.D$source",
+ IP_STR
+ CFG_MSDP_STR
+ "Configure MSDP peer\n"
+ "Peer IP address\n"
+ "Source address for TCP connection\n"
+ "Local IP address\n")
{
const char *vrfname;
char temp_xpath[XPATH_MAXLEN];
return CMD_WARNING_CONFIG_FAILED;
snprintf(msdp_peer_source_xpath, sizeof(msdp_peer_source_xpath),
- FRR_PIM_AF_XPATH,
- "frr-pim:pimd", "pim", vrfname, "frr-routing:ipv4");
+ FRR_PIM_AF_XPATH, "frr-pim:pimd", "pim", vrfname,
+ "frr-routing:ipv4");
snprintf(temp_xpath, sizeof(temp_xpath),
- "/msdp-peer[peer-ip='%s']/source-ip",
- argv[3]->arg);
+ "/msdp-peer[peer-ip='%s']/source-ip", peer_str);
strlcat(msdp_peer_source_xpath, temp_xpath,
sizeof(msdp_peer_source_xpath));
nb_cli_enqueue_change(vty, msdp_peer_source_xpath, NB_OP_MODIFY,
- argv[5]->arg);
+ source_str);
return nb_cli_apply_changes(vty, NULL);
}
+/*
+ * "ip msdp timers KA HOLD [CONNRETRY]": configure the per-VRF MSDP
+ * keep-alive, hold-time and (optionally) connection-retry periods via
+ * the northbound.  Omitting the connection-retry destroys the node so
+ * it falls back to its default.
+ */
+DEFPY(ip_msdp_timers, ip_msdp_timers_cmd,
+ "ip msdp timers (2-600)$keepalive (3-600)$holdtime [(1-600)$connretry]",
+ IP_STR
+ CFG_MSDP_STR
+ "MSDP timers configuration\n"
+ "Keep alive period (in seconds)\n"
+ "Hold time period (in seconds)\n"
+ "Connection retry period (in seconds)\n")
+{
+ const char *vrfname;
+ char xpath[XPATH_MAXLEN];
+
+ vrfname = pim_cli_get_vrf_name(vty);
+ if (vrfname == NULL)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ snprintf(xpath, sizeof(xpath), FRR_PIM_MSDP_XPATH, "frr-pim:pimd",
+ "pim", vrfname, "frr-routing:ipv4");
+ nb_cli_enqueue_change(vty, "./hold-time", NB_OP_MODIFY, holdtime_str);
+ nb_cli_enqueue_change(vty, "./keep-alive", NB_OP_MODIFY, keepalive_str);
+ if (connretry_str)
+ nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_MODIFY,
+ connretry_str);
+ else
+ nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_DESTROY,
+ NULL);
+
+ /*
+ * Propagate the northbound commit result instead of discarding it,
+ * so validation failures (e.g. hold-time <= keep-alive) are
+ * reported to the operator rather than masked as CMD_SUCCESS.
+ */
+ return nb_cli_apply_changes(vty, xpath);
+}
+
DEFUN (no_ip_msdp_peer,
no_ip_msdp_peer_cmd,
"no ip msdp peer A.B.C.D",
json_row = json_object_new_object();
json_object_string_add(json_row, "peer", peer_str);
json_object_string_add(json_row, "local", local_str);
- json_object_string_add(json_row, "meshGroupName",
- mp->mesh_group_name);
+ if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ json_object_string_add(json_row,
+ "meshGroupName",
+ mp->mesh_group_name);
json_object_string_add(json_row, "state", state_str);
json_object_string_add(json_row, "upTime", timebuf);
json_object_string_add(json_row, "keepAliveTimer",
} else {
vty_out(vty, "Peer : %s\n", peer_str);
vty_out(vty, " Local : %s\n", local_str);
- vty_out(vty, " Mesh Group : %s\n",
- mp->mesh_group_name);
+ if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ vty_out(vty, " Mesh Group : %s\n",
+ mp->mesh_group_name);
vty_out(vty, " State : %s\n", state_str);
vty_out(vty, " Uptime : %s\n", timebuf);
install_element(CONFIG_NODE, &debug_bsm_cmd);
install_element(CONFIG_NODE, &no_debug_bsm_cmd);
+ install_element(CONFIG_NODE, &ip_msdp_timers_cmd);
+ install_element(VRF_NODE, &ip_msdp_timers_cmd);
install_element(CONFIG_NODE, &ip_msdp_mesh_group_member_cmd);
install_element(VRF_NODE, &ip_msdp_mesh_group_member_cmd);
install_element(CONFIG_NODE, &no_ip_msdp_mesh_group_member_cmd);
p.family = AF_INET;
p.u.prefix4 = addr;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
for (ALL_LIST_ELEMENTS_RO(pim_ifp->pim_neighbor_list, neighnode,
neigh)) {
AFI_IP, pim->spt.plist);
struct prefix g;
g.family = AF_INET;
- g.prefixlen = IPV4_MAX_PREFIXLEN;
+ g.prefixlen = IPV4_MAX_BITLEN;
g.u.prefix4 = up->sg.grp;
if (prefix_list_apply(plist, &g)
g.family = AF_INET;
g.u.prefix4 = rec_group;
- g.prefixlen = 32;
+ g.prefixlen = IPV4_MAX_BITLEN;
/* determine filtering status for group */
filtered = pim_is_group_filtered(ifp->info, &rec_group);
pim_instance_mlag_init(pim);
pim->last_route_change_time = -1;
+
+ /* MSDP global timer defaults. */
+ pim->msdp.hold_time = PIM_MSDP_PEER_HOLD_TIME;
+ pim->msdp.keep_alive = PIM_MSDP_PEER_KA_TIME;
+ pim->msdp.connection_retry = PIM_MSDP_PEER_CONNECT_RETRY_TIME;
+
return pim;
}
return buf;
}
-char *pim_msdp_peer_key_dump(struct pim_msdp_peer *mp, char *buf, int buf_size,
- bool long_format)
-{
- char peer_str[INET_ADDRSTRLEN];
- char local_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<peer?>", mp->peer, peer_str, sizeof(peer_str));
- if (long_format) {
- pim_inet4_dump("<local?>", mp->local, local_str,
- sizeof(local_str));
- snprintf(buf, buf_size, "MSDP peer %s local %s mg %s", peer_str,
- local_str, mp->mesh_group_name);
- } else {
- snprintf(buf, buf_size, "MSDP peer %s", peer_str);
- }
-
- return buf;
-}
-
static void pim_msdp_peer_state_chg_log(struct pim_msdp_peer *mp)
{
char state_str[PIM_MSDP_STATE_STRLEN];
THREAD_OFF(mp->hold_timer);
if (start) {
thread_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb,
- mp, PIM_MSDP_PEER_HOLD_TIME, &mp->hold_timer);
+ mp, pim->msdp.hold_time, &mp->hold_timer);
}
}
if (start) {
thread_add_timer(mp->pim->msdp.master,
pim_msdp_peer_ka_timer_cb, mp,
- PIM_MSDP_PEER_KA_TIME, &mp->ka_timer);
+ mp->pim->msdp.keep_alive, &mp->ka_timer);
}
}
{
THREAD_OFF(mp->cr_timer);
if (start) {
- thread_add_timer(
- mp->pim->msdp.master, pim_msdp_peer_cr_timer_cb, mp,
- PIM_MSDP_PEER_CONNECT_RETRY_TIME, &mp->cr_timer);
+ thread_add_timer(mp->pim->msdp.master,
+ pim_msdp_peer_cr_timer_cb, mp,
+ mp->pim->msdp.connection_retry, &mp->cr_timer);
}
}
}
/* 11.2.A1: create a new peer and transition state to listen or connecting */
-static enum pim_msdp_err pim_msdp_peer_new(struct pim_instance *pim,
- struct in_addr peer_addr,
- struct in_addr local_addr,
- const char *mesh_group_name,
- struct pim_msdp_peer **mp_p)
+struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
+ const struct in_addr *peer,
+ const struct in_addr *local,
+ const char *mesh_group_name)
{
struct pim_msdp_peer *mp;
mp = XCALLOC(MTYPE_PIM_MSDP_PEER, sizeof(*mp));
mp->pim = pim;
- mp->peer = peer_addr;
+ mp->peer = *peer;
pim_inet4_dump("<peer?>", mp->peer, mp->key_str, sizeof(mp->key_str));
pim_msdp_addr2su(&mp->su_peer, mp->peer);
- mp->local = local_addr;
+ mp->local = *local;
/* XXX: originator_id setting needs to move to the mesh group */
- pim->msdp.originator_id = local_addr;
+ pim->msdp.originator_id = *local;
pim_msdp_addr2su(&mp->su_local, mp->local);
- mp->mesh_group_name = XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
+ if (mesh_group_name)
+ mp->mesh_group_name =
+ XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name);
+
mp->state = PIM_MSDP_INACTIVE;
mp->fd = -1;
strlcpy(mp->last_reset, "-", sizeof(mp->last_reset));
} else {
pim_msdp_peer_connect(mp);
}
- if (mp_p) {
- *mp_p = mp;
- }
- return PIM_MSDP_ERR_NONE;
+ return mp;
}
struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim,
return hash_lookup(pim->msdp.peer_hash, &lookup);
}
-/* add peer configuration if it doesn't already exist */
-enum pim_msdp_err pim_msdp_peer_add(struct pim_instance *pim,
- struct in_addr peer_addr,
- struct in_addr local_addr,
- const char *mesh_group_name,
- struct pim_msdp_peer **mp_p)
-{
- struct pim_msdp_peer *mp;
-
- if (mp_p) {
- *mp_p = NULL;
- }
-
- if (peer_addr.s_addr == local_addr.s_addr) {
- /* skip session setup if config is invalid */
- if (PIM_DEBUG_MSDP_EVENTS) {
- char peer_str[INET_ADDRSTRLEN];
-
- pim_inet4_dump("<peer?>", peer_addr, peer_str,
- sizeof(peer_str));
- zlog_debug("%s add skipped as DIP=SIP", peer_str);
- }
- return PIM_MSDP_ERR_SIP_EQ_DIP;
- }
-
- mp = pim_msdp_peer_find(pim, peer_addr);
- if (mp) {
- if (mp_p) {
- *mp_p = mp;
- }
- return PIM_MSDP_ERR_PEER_EXISTS;
- }
-
- return pim_msdp_peer_new(pim, peer_addr, local_addr, mesh_group_name,
- mp_p);
-}
-
/* release all mem associated with a peer */
static void pim_msdp_peer_free(struct pim_msdp_peer *mp)
{
}
/* delete the peer config */
-static enum pim_msdp_err pim_msdp_peer_do_del(struct pim_msdp_peer *mp)
+/*
+ * Tear down and free an MSDP peer: stop its TCP session and timers,
+ * unlink it from the per-instance peer list and hash, free the memory
+ * and NULL the caller's pointer.  Passing a NULL *mp is a no-op.
+ */
+void pim_msdp_peer_del(struct pim_msdp_peer **mp)
{
+ if (*mp == NULL)
+ return;
+
/* stop the tcp connection and shutdown all timers */
- pim_msdp_peer_stop_tcp_conn(mp, true /* chg_state */);
+ pim_msdp_peer_stop_tcp_conn(*mp, true /* chg_state */);
/* remove the session from various tables */
- listnode_delete(mp->pim->msdp.peer_list, mp);
- hash_release(mp->pim->msdp.peer_hash, mp);
+ listnode_delete((*mp)->pim->msdp.peer_list, *mp);
+ hash_release((*mp)->pim->msdp.peer_hash, *mp);
if (PIM_DEBUG_MSDP_EVENTS) {
- zlog_debug("MSDP peer %s deleted", mp->key_str);
+ zlog_debug("MSDP peer %s deleted", (*mp)->key_str);
}
/* free up any associated memory */
- pim_msdp_peer_free(mp);
-
- return PIM_MSDP_ERR_NONE;
+ pim_msdp_peer_free(*mp);
+ *mp = NULL;
}
-enum pim_msdp_err pim_msdp_peer_del(struct pim_instance *pim,
- struct in_addr peer_addr)
+/*
+ * Change a peer's local (source) address.  The active TCP session is
+ * torn down and re-established — as listener or connector depending on
+ * the peer role — using the new address.
+ */
+void pim_msdp_peer_change_source(struct pim_msdp_peer *mp,
+ const struct in_addr *addr)
{
- struct pim_msdp_peer *mp;
+ pim_msdp_peer_stop_tcp_conn(mp, true);
- mp = pim_msdp_peer_find(pim, peer_addr);
- if (!mp) {
- return PIM_MSDP_ERR_NO_PEER;
- }
+ mp->local = *addr;
- return pim_msdp_peer_do_del(mp);
+ if (PIM_MSDP_PEER_IS_LISTENER(mp))
+ pim_msdp_peer_listen(mp);
+ else
+ pim_msdp_peer_connect(mp);
}
/* peer hash and peer list helpers */
{
/* Delete active peer session if any */
if (mbr->mp) {
- pim_msdp_peer_do_del(mbr->mp);
+ pim_msdp_peer_del(&mbr->mp);
}
listnode_delete(mg->mbr_list, mbr);
/* SIP is being removed - tear down all active peer sessions */
for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbr_node, mbr)) {
- if (mbr->mp) {
- pim_msdp_peer_do_del(mbr->mp);
- mbr->mp = NULL;
- }
+ if (mbr->mp)
+ pim_msdp_peer_del(&mbr->mp);
}
if (PIM_DEBUG_MSDP_EVENTS) {
zlog_debug("MSDP mesh-group %s src cleared",
bool written = false;
for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, mp)) {
- /* Non meshed peers have the group name set to 'default'. */
- if (strcmp(mp->mesh_group_name, "default"))
+ /* Skip meshed group peers. */
+ if (mp->flags & PIM_MSDP_PEERF_IN_GROUP)
continue;
vty_out(vty, "%sip msdp peer %pI4 source %pI4\n", spaces,
/* Create data structures and start TCP connection. */
for (ALL_LIST_ELEMENTS_RO(mg->mbr_list, mbr_node, mbr))
- pim_msdp_peer_add(pim, mbr->mbr_ip, mg->src_ip,
- mg->mesh_group_name, &mbr->mp);
+ mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip,
+ mg->mesh_group_name);
if (PIM_DEBUG_MSDP_EVENTS)
zlog_debug("MSDP mesh-group %s src %pI4 set",
/* if valid SIP has been configured add peer session */
if (mg->src_ip.s_addr != INADDR_ANY)
- pim_msdp_peer_add(pim, mbr->mbr_ip, mg->src_ip,
- mg->mesh_group_name, &mbr->mp);
+ mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip,
+ mg->mesh_group_name);
if (PIM_DEBUG_MSDP_EVENTS)
zlog_debug("MSDP mesh-group %s mbr %pI4 created",
PIM_MSDP_PEERF_NONE = 0,
PIM_MSDP_PEERF_LISTENER = (1 << 0),
#define PIM_MSDP_PEER_IS_LISTENER(mp) (mp->flags & PIM_MSDP_PEERF_LISTENER)
- PIM_MSDP_PEERF_SA_JUST_SENT = (1 << 1)
+ PIM_MSDP_PEERF_SA_JUST_SENT = (1 << 1),
+ /** Flag to signalize that peer belongs to a group. */
+ PIM_MSDP_PEERF_IN_GROUP = (1 << 2),
};
struct pim_msdp_peer {
/** List of mesh groups. */
struct pim_mesh_group_list mglist;
+
+ /** MSDP global hold time period. */
+ uint32_t hold_time;
+ /** MSDP global keep alive period. */
+ uint32_t keep_alive;
+ /** MSDP global connection retry period. */
+ uint32_t connection_retry;
};
#define PIM_MSDP_PEER_READ_ON(mp) \
struct pim_instance;
void pim_msdp_init(struct pim_instance *pim, struct thread_master *master);
void pim_msdp_exit(struct pim_instance *pim);
-enum pim_msdp_err pim_msdp_peer_add(struct pim_instance *pim,
- struct in_addr peer, struct in_addr local,
- const char *mesh_group_name,
- struct pim_msdp_peer **mp_p);
-enum pim_msdp_err pim_msdp_peer_del(struct pim_instance *pim,
- struct in_addr peer_addr);
char *pim_msdp_state_dump(enum pim_msdp_peer_state state, char *buf,
int buf_size);
struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim,
void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state);
void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str);
int pim_msdp_write(struct thread *thread);
-char *pim_msdp_peer_key_dump(struct pim_msdp_peer *mp, char *buf, int buf_size,
- bool long_format);
int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty,
const char *spaces);
bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim,
*/
void pim_msdp_mg_mbr_del(struct pim_msdp_mg *mg, struct pim_msdp_mg_mbr *mbr);
+/**
+ * Allocates MSDP peer data structure, adds peer to group name
+ * `mesh_group_name` and starts state machine. If no group name is provided then
+ * the peer will work standalone.
+ *
+ * \param pim PIM instance
+ * \param peer_addr peer address
+ * \param local_addr local listening address
+ * \param mesh_group_name mesh group name (or `NULL` for peers without group).
+ */
+struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim,
+ const struct in_addr *peer_addr,
+ const struct in_addr *local_addr,
+ const char *mesh_group_name);
+
+/**
+ * Stops peer state machine and free memory.
+ */
+void pim_msdp_peer_del(struct pim_msdp_peer **mp);
+
+/**
+ * Changes peer source address.
+ *
+ * NOTE:
+ * This will cause the connection to drop and start again.
+ */
+void pim_msdp_peer_change_source(struct pim_msdp_peer *mp,
+ const struct in_addr *addr);
+
#endif
sg.grp.s_addr = stream_get_ipv4(mp->ibuf);
sg.src.s_addr = stream_get_ipv4(mp->ibuf);
- if (prefix_len != 32) {
+ if (prefix_len != IPV4_MAX_BITLEN) {
/* ignore SA update if the prefix length is not 32 */
flog_err(EC_PIM_MSDP_PACKET,
"rxed sa update with invalid prefix length %d",
* If the message group is not set, i.e. "default", then we assume that
* the message must be forwarded.*/
for (ALL_LIST_ELEMENTS_RO(mp->pim->msdp.peer_list, peer_node, peer)) {
- if (!pim_msdp_peer_rpf_check(peer, rp)
- && (strcmp(mp->mesh_group_name, peer->mesh_group_name)
- || !strcmp(mp->mesh_group_name, "default"))) {
- pim_msdp_pkt_sa_tx_one_to_one_peer(peer, rp, sg);
- }
+ /* Not a RPF peer, so skip it. */
+ if (pim_msdp_peer_rpf_check(peer, rp))
+ continue;
+ /* Don't forward inside the meshed group. */
+ if ((mp->flags & PIM_MSDP_PEERF_IN_GROUP)
+ && strcmp(mp->mesh_group_name, peer->mesh_group_name) == 0)
+ continue;
+
+ pim_msdp_pkt_sa_tx_one_to_one_peer(peer, rp, sg);
}
}
.destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_destroy,
}
},
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/hold-time",
+ .cbs = {
+ .modify = pim_msdp_hold_time_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/keep-alive",
+ .cbs = {
+ .modify = pim_msdp_keep_alive_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/connection-retry",
+ .cbs = {
+ .modify = pim_msdp_connection_retry_modify,
+ }
+ },
{
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups",
.cbs = {
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/source-ip",
.cbs = {
.modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify,
- .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_destroy,
}
},
{
struct nb_cb_create_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ssm_pingd_source_ip_destroy(
struct nb_cb_destroy_args *args);
+int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args);
+int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args);
+int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args);
int pim_msdp_mesh_group_create(struct nb_cb_create_args *args);
int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args);
int pim_msdp_mesh_group_members_create(struct nb_cb_create_args *args);
struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify(
struct nb_cb_modify_args *args);
-int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_destroy(
- struct nb_cb_destroy_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create(
struct nb_cb_create_args *args);
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy(
#define FRR_IGMP_JOIN_XPATH \
"./frr-igmp:igmp/address-family[address-family='%s']/" \
"static-group[group-addr='%s'][source-addr='%s']"
+#define FRR_PIM_MSDP_XPATH FRR_PIM_AF_XPATH "/msdp"
+
#endif /* _FRR_PIM_NB_H_ */
return ret;
}
-static int ip_msdp_peer_cmd_worker(struct pim_instance *pim,
- struct in_addr peer_addr,
- struct in_addr local_addr,
- char *errmsg, size_t errmsg_len)
-{
- enum pim_msdp_err result;
- int ret = NB_OK;
-
- result = pim_msdp_peer_add(pim, peer_addr, local_addr, "default",
- NULL /* mp_p */);
- switch (result) {
- case PIM_MSDP_ERR_NONE:
- break;
- case PIM_MSDP_ERR_OOM:
- ret = NB_ERR;
- snprintf(errmsg, errmsg_len,
- "%% Out of memory");
- break;
- case PIM_MSDP_ERR_PEER_EXISTS:
- ret = NB_ERR;
- snprintf(errmsg, errmsg_len,
- "%% Peer exists");
- break;
- case PIM_MSDP_ERR_MAX_MESH_GROUPS:
- ret = NB_ERR;
- snprintf(errmsg, errmsg_len,
- "%% Only one mesh-group allowed currently");
- break;
- default:
- ret = NB_ERR;
- snprintf(errmsg, errmsg_len,
- "%% peer add failed");
- }
-
- return ret;
-}
-
-static int ip_no_msdp_peer_cmd_worker(struct pim_instance *pim,
- struct in_addr peer_addr,
- char *errmsg, size_t errmsg_len)
-{
- enum pim_msdp_err result;
-
- result = pim_msdp_peer_del(pim, peer_addr);
- switch (result) {
- case PIM_MSDP_ERR_NONE:
- break;
- case PIM_MSDP_ERR_NO_PEER:
- snprintf(errmsg, errmsg_len,
- "%% Peer does not exist");
- break;
- default:
- snprintf(errmsg, errmsg_len,
- "%% peer del failed");
- }
-
- return result ? NB_ERR : NB_OK;
-}
-
static int pim_rp_cmd_worker(struct pim_instance *pim,
struct in_addr rp_addr,
struct prefix group, const char *plist,
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/hold-time
+ */
+/*
+ * Northbound callback for the MSDP global hold-time.  Validation
+ * rejects a hold time that is not strictly greater than the configured
+ * keep-alive; on apply the value is stored in pim->msdp.hold_time.
+ */
+int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if (yang_dnode_get_uint32(args->dnode, NULL)
+ <= yang_dnode_get_uint32(args->dnode, "../keep-alive")) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "Hold time must be greater than keep alive interval");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ pim->msdp.hold_time = yang_dnode_get_uint32(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/keep-alive
+ */
+/*
+ * Northbound callback for the MSDP global keep-alive period.
+ * Validation rejects a keep-alive that is not strictly less than the
+ * configured hold time (mirror of the hold-time check); on apply the
+ * value is stored in pim->msdp.keep_alive.
+ */
+int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ if (yang_dnode_get_uint32(args->dnode, NULL)
+ >= yang_dnode_get_uint32(args->dnode, "../hold-time")) {
+ snprintf(
+ args->errmsg, args->errmsg_len,
+ "Keep alive must be less than hold time interval");
+ return NB_ERR_VALIDATION;
+ }
+ break;
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ pim->msdp.keep_alive = yang_dnode_get_uint32(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/connection-retry
+ */
+/*
+ * Northbound callback for the MSDP global connection-retry period.
+ * No cross-field validation is required; on apply the value is stored
+ * in pim->msdp.connection_retry.
+ */
+int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args)
+{
+ struct pim_instance *pim;
+ struct vrf *vrf;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ pim->msdp.connection_retry =
+ yang_dnode_get_uint32(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_create(
struct nb_cb_create_args *args)
{
+ struct pim_msdp_peer *mp;
+ struct pim_instance *pim;
+ struct vrf *vrf;
+ struct ipaddr peer_ip;
+ struct ipaddr source_ip;
+
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ break;
case NB_EV_APPLY:
+ vrf = nb_running_get_entry(args->dnode, NULL, true);
+ pim = vrf->info;
+ yang_dnode_get_ip(&peer_ip, args->dnode, "./peer-ip");
+ yang_dnode_get_ip(&source_ip, args->dnode, "./source-ip");
+ mp = pim_msdp_peer_add(pim, &peer_ip.ipaddr_v4,
+ &source_ip.ipaddr_v4, NULL);
+ nb_running_set_entry(args->dnode, mp);
break;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_destroy(
struct nb_cb_destroy_args *args)
{
- int result;
- struct pim_instance *pim;
- struct ipaddr peer_ip;
- struct vrf *vrf;
+ struct pim_msdp_peer *mp;
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- vrf = nb_running_get_entry(args->dnode, NULL, true);
- pim = vrf->info;
- yang_dnode_get_ip(&peer_ip, args->dnode, "./peer-ip");
- result = ip_no_msdp_peer_cmd_worker(pim, peer_ip.ip._v4_addr,
- args->errmsg,
- args->errmsg_len);
-
- if (result)
- return NB_ERR_INCONSISTENCY;
-
+ mp = nb_running_unset_entry(args->dnode);
+ pim_msdp_peer_del(&mp);
break;
}
int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify(
struct nb_cb_modify_args *args)
{
- int result;
- struct vrf *vrf;
- struct pim_instance *pim;
- struct ipaddr peer_ip;
+ struct pim_msdp_peer *mp;
struct ipaddr source_ip;
- const struct lyd_node *mesh_group_name_dnode;
- const char *mesh_group_name;
switch (args->event) {
case NB_EV_VALIDATE:
- mesh_group_name_dnode =
- yang_dnode_get(args->dnode,
- "../../msdp-mesh-group/mesh-group-name");
- if (mesh_group_name_dnode) {
- mesh_group_name =
- yang_dnode_get_string(mesh_group_name_dnode,
- ".");
- if (strcmp(mesh_group_name, "default")) {
- /* currently only one mesh-group can exist at a
- * time
- */
- snprintf(args->errmsg, args->errmsg_len,
- "%% Only one mesh-group allowed currently");
- return NB_ERR_VALIDATION;
- }
- }
- break;
case NB_EV_PREPARE:
case NB_EV_ABORT:
break;
case NB_EV_APPLY:
- vrf = nb_running_get_entry(args->dnode, NULL, true);
- pim = vrf->info;
- yang_dnode_get_ip(&peer_ip, args->dnode, "../peer-ip");
+ mp = nb_running_get_entry(args->dnode, NULL, true);
yang_dnode_get_ip(&source_ip, args->dnode, NULL);
-
- result = ip_msdp_peer_cmd_worker(pim, peer_ip.ip._v4_addr,
- source_ip.ip._v4_addr,
- args->errmsg,
- args->errmsg_len);
-
- if (result)
- return NB_ERR_INCONSISTENCY;
-
- break;
- }
-
- return NB_OK;
-}
-
-int routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_destroy(
- struct nb_cb_destroy_args *args)
-{
- switch (args->event) {
- case NB_EV_VALIDATE:
- case NB_EV_PREPARE:
- case NB_EV_ABORT:
- case NB_EV_APPLY:
+ pim_msdp_peer_change_source(mp, &source_ip.ipaddr_v4);
break;
}
p.family = AF_INET;
p.u.prefix4 = sg->src;
- p.prefixlen = 32;
+ p.prefixlen = IPV4_MAX_BITLEN;
length = pim_encode_addr_ucast(b1, &p);
b1length += length;
plist = prefix_list_lookup(AFI_IP, pim->register_plist);
src.family = AF_INET;
- src.prefixlen = IPV4_MAX_PREFIXLEN;
+ src.prefixlen = IPV4_MAX_BITLEN;
src.u.prefix4 = sg.src;
if (prefix_list_apply(plist, &src) == PREFIX_DENY) {
}
rp_info->group.family = AF_INET;
rp_info->rp.rpf_addr.family = AF_INET;
- rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
+ rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
rp_info->rp.rpf_addr.u.prefix4.s_addr = INADDR_NONE;
listnode_add(pim->rp_list, rp_info);
rp_info = XCALLOC(MTYPE_PIM_RP, sizeof(*rp_info));
rp_info->rp.rpf_addr.family = AF_INET;
- rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_PREFIXLEN;
+ rp_info->rp.rpf_addr.prefixlen = IPV4_MAX_BITLEN;
rp_info->rp.rpf_addr.u.prefix4 = rp_addr;
prefix_copy(&rp_info->group, &group);
rp_info->rp_src = rp_src_flag;
memset(&g, 0, sizeof(g));
g.family = AF_INET;
- g.prefixlen = 32;
+ g.prefixlen = IPV4_MAX_BITLEN;
g.u.prefix4 = group;
rp_info = pim_rp_find_match_group(pim, &g);
memset(&g, 0, sizeof(g));
g.family = AF_INET;
- g.prefixlen = 32;
+ g.prefixlen = IPV4_MAX_BITLEN;
g.u.prefix4 = group;
rp_info = pim_rp_find_match_group(pim, &g);
memset(&g, 0, sizeof(g));
g.family = AF_INET;
- g.prefixlen = 32;
+ g.prefixlen = IPV4_MAX_BITLEN;
g.u.prefix4 = group;
rp_info = pim_rp_find_match_group(pim, &g);
p->family = AF_INET; /* notice: AF_INET !=
PIM_MSG_ADDRESS_FAMILY_IPV4 */
memcpy(&p->u.prefix4, addr, sizeof(struct in_addr));
- p->prefixlen = IPV4_MAX_PREFIXLEN;
+ p->prefixlen = IPV4_MAX_BITLEN;
addr += sizeof(struct in_addr);
break;
}
p->family = AF_INET6;
- p->prefixlen = IPV6_MAX_PREFIXLEN;
+ p->prefixlen = IPV6_MAX_BITLEN;
memcpy(&p->u.prefix6, addr, sizeof(struct in6_addr));
addr += sizeof(struct in6_addr);
messages
received with any other mask length.
*/
- if (mask_len != 32) {
+ if (mask_len != IPV4_MAX_BITLEN) {
zlog_warn("%s: IPv4 bad source address mask: %d",
__func__, mask_len);
return -4;
np = prefix_list_lookup(AFI_IP, nlist);
g.family = AF_INET;
- g.prefixlen = IPV4_MAX_PREFIXLEN;
+ g.prefixlen = IPV4_MAX_BITLEN;
frr_each (rb_pim_upstream, &pim->upstream_head, up) {
if (up->sg.src.s_addr != INADDR_ANY)
group.family = AF_INET;
group.u.prefix4 = group_addr;
- group.prefixlen = IPV4_MAX_PREFIXLEN;
+ group.prefixlen = IPV4_MAX_BITLEN;
return prefix_match(&group_224, &group);
}
group.family = AF_INET;
group.u.prefix4 = group_addr;
- group.prefixlen = 32;
+ group.prefixlen = IPV4_MAX_BITLEN;
return prefix_match(&group_all, &group);
}
return false;
grp_pfx.family = AF_INET;
- grp_pfx.prefixlen = 32;
+ grp_pfx.prefixlen = IPV4_MAX_BITLEN;
grp_pfx.u.prefix4 = *grp;
pl = prefix_list_lookup(AFI_IP, pim_ifp->boundary_oil_plist);
}
}
+ if (pim->msdp.hold_time != PIM_MSDP_PEER_HOLD_TIME
+ || pim->msdp.keep_alive != PIM_MSDP_PEER_KA_TIME
+ || pim->msdp.connection_retry != PIM_MSDP_PEER_CONNECT_RETRY_TIME) {
+ vty_out(vty, "%sip msdp timers %u %u", spaces,
+ pim->msdp.hold_time, pim->msdp.keep_alive);
+ if (pim->msdp.connection_retry
+ != PIM_MSDP_PEER_CONNECT_RETRY_TIME)
+ vty_out(vty, " %u", pim->msdp.connection_retry);
+ vty_out(vty, "\n");
+ }
+
return writes;
}
nexthop_tab[num_ifindex].ifindex = stream_getl(s);
p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
memcpy(&p.u.prefix6,
&nexthop_tab[num_ifindex].nexthop_addr.u.prefix6,
sizeof(struct in6_addr));
argp.add_argument('--out-by-file', type=str, help='write by-file JSON output')
argp.add_argument('-Wlog-format', action='store_const', const=True)
argp.add_argument('-Wlog-args', action='store_const', const=True)
+ argp.add_argument('-Werror', action='store_const', const=True)
argp.add_argument('--profile', action='store_const', const=True)
argp.add_argument('binaries', metavar='BINARY', nargs='+', type=str, help='files to read (ELF files or libtool objects)')
args = argp.parse_args()
traceback.print_exc()
for option in dir(args):
- if option.startswith('W'):
+ if option.startswith('W') and option != 'Werror':
checks = sorted(xrelfo.check(args))
sys.stderr.write(''.join([c[-1] for c in checks]))
+
+ if args.Werror and len(checks) > 0:
+ errors += 1
break
BuildRequires: pam-devel
%endif
%if "%{initsystem}" == "systemd"
-BuildRequires: systemd
-BuildRequires: systemd-devel
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
--disable-bgp-vnc \
%endif
--enable-isisd \
-%if "%{initsystem}" == "systemd"
- --enable-systemd \
-%endif
--enable-rpki \
%if %{with_bfdd}
--enable-bfdd \
* destination addr */
to.sin_addr = connected->destination->u.prefix4;
else if (connected->address->prefixlen
- < IPV4_MAX_PREFIXLEN)
+ < IPV4_MAX_BITLEN)
/* calculate the appropriate broadcast
* address */
to.sin_addr.s_addr = ipv4_broadcast_addr(
&rp->p)) {
if ((ifc->address->prefixlen
!= rp->p.prefixlen)
- && (rp->p.prefixlen != 32))
+ && (rp->p.prefixlen
+ != IPV4_MAX_BITLEN))
continue;
} else {
memcpy(&classfull, &rp->p,
/* use specified broadcast or peer destination
* addr */
to.sin_addr = ifc->destination->u.prefix4;
- else if (ifc->address->prefixlen < IPV4_MAX_PREFIXLEN)
+ else if (ifc->address->prefixlen < IPV4_MAX_BITLEN)
/* calculate the appropriate broadcast address
*/
to.sin_addr.s_addr = ipv4_broadcast_addr(
/* - is the prefix length valid (i.e., between 0 and 128,
inclusive) */
- if (rte->prefixlen > 128) {
+ if (rte->prefixlen > IPV6_MAX_BITLEN) {
zlog_warn("Invalid prefix length %pI6/%d from %pI6%%%s",
&rte->addr, rte->prefixlen,
&from->sin6_addr, ifp->name);
if (n) {
type_import = false;
- p.prefixlen = 128;
- memcpy(&p.u.prefix6, &nhop, 16);
+ p.prefixlen = IPV6_MAX_BITLEN;
+ memcpy(&p.u.prefix6, &nhop, IPV6_MAX_BYTELEN);
p.family = AF_INET6;
} else {
type_import = true;
if (n) {
type_import = false;
- p.prefixlen = 32;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.u.prefix4 = nhop;
p.family = AF_INET;
}
if (start4.s_addr != INADDR_ANY) {
prefix.family = AF_INET;
- prefix.prefixlen = 32;
+ prefix.prefixlen = IPV4_MAX_BITLEN;
prefix.u.prefix4 = start4;
} else {
prefix.family = AF_INET6;
- prefix.prefixlen = 128;
+ prefix.prefixlen = IPV6_MAX_BITLEN;
prefix.u.prefix6 = start6;
}
sg.r.orig_prefix = prefix;
if (start4.s_addr != INADDR_ANY) {
prefix.family = AF_INET;
- prefix.prefixlen = 32;
+ prefix.prefixlen = IPV4_MAX_BITLEN;
prefix.u.prefix4 = start4;
} else {
prefix.family = AF_INET6;
- prefix.prefixlen = 128;
+ prefix.prefixlen = IPV6_MAX_BITLEN;
prefix.u.prefix6 = start6;
}
sg.r.orig_prefix = prefix;
if (start4.s_addr != INADDR_ANY) {
prefix.family = AF_INET;
- prefix.prefixlen = 32;
+ prefix.prefixlen = IPV4_MAX_BITLEN;
prefix.u.prefix4 = start4;
} else {
prefix.family = AF_INET6;
- prefix.prefixlen = 128;
+ prefix.prefixlen = IPV6_MAX_BITLEN;
prefix.u.prefix6 = start6;
}
if (dst4.s_addr != INADDR_ANY) {
prefix.family = AF_INET;
- prefix.prefixlen = 32;
+ prefix.prefixlen = IPV4_MAX_BITLEN;
prefix.u.prefix4 = dst4;
} else {
prefix.family = AF_INET6;
nh->nh_valid = !!nh_num;
if (nhp->family == AF_INET6
- && memcmp(&nhp->u.prefix6, &nh->addr.ipv6, 16) == 0)
+ && memcmp(&nhp->u.prefix6, &nh->addr.ipv6, IPV6_MAX_BYTELEN)
+ == 0)
nh->nh_valid = !!nh_num;
if (nh->state == STATIC_START)
/lib/test_atomlist
/lib/test_buffer
/lib/test_checksum
+/lib/test_frrscript
+/lib/test_frrlua
/lib/test_graph
/lib/test_heavy
/lib/test_heavy_thread
#include "bgpd/bgp_zebra.h"
#include "bgpd/bgp_network.h"
#include "lib/routing_nb.h"
+#include "lib/northbound_cli.h"
#include "bgpd/bgp_nb.h"
#ifdef ENABLE_BGP_VNC
--- /dev/null
+/*
+ * frrlua unit tests
+ * Copyright (C) 2021 Donald Lee
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+#include "string.h"
+#include "stdio.h"
+#include "lib/frrlua.h"
+
+static void test_encode_decode(void)
+{
+ lua_State *L = luaL_newstate();
+
+ long long a = 123;
+ long long b = a;
+
+ lua_pushintegerp(L, &a);
+ lua_decode_integerp(L, -1, &a);
+ assert(a == b);
+ assert(lua_gettop(L) == 0);
+
+ time_t time_a = 100;
+ time_t time_b = time_a;
+
+ lua_pushtimet(L, &time_a);
+ lua_decode_timet(L, -1, &time_a);
+ assert(time_a == time_b);
+ assert(lua_gettop(L) == 0);
+
+ char str_b[] = "Hello", str_a[6];
+
+	strlcpy(str_a, str_b, sizeof(str_a));
+ lua_pushstring_wrapper(L, str_a);
+ lua_decode_stringp(L, -1, str_a);
+ assert(strncmp(str_a, str_b, sizeof(str_b)) == 0);
+ assert(lua_gettop(L) == 0);
+
+ char p_b_str[] = "10.0.0.0/24", p_a_str[12];
+ struct prefix p_a;
+
+ strlcpy(p_a_str, p_b_str, sizeof(p_b_str));
+ str2prefix(p_a_str, &p_a);
+ lua_pushprefix(L, &p_a);
+ lua_decode_prefix(L, -1, &p_a);
+	prefix2str(&p_a, p_a_str, sizeof(p_a_str));
+ assert(strncmp(p_a_str, p_b_str, sizeof(p_b_str)) == 0);
+ assert(lua_gettop(L) == 0);
+
+ struct interface ifp_a;
+ struct interface ifp_b = ifp_a;
+
+ lua_pushinterface(L, &ifp_a);
+ lua_decode_interface(L, -1, &ifp_a);
+ assert(strncmp(ifp_a.name, ifp_b.name, sizeof(ifp_b.name)) == 0);
+ assert(ifp_a.ifindex == ifp_b.ifindex);
+ assert(ifp_a.status == ifp_b.status);
+ assert(ifp_a.flags == ifp_b.flags);
+ assert(ifp_a.metric == ifp_b.metric);
+ assert(ifp_a.speed == ifp_b.speed);
+ assert(ifp_a.mtu == ifp_b.mtu);
+ assert(ifp_a.mtu6 == ifp_b.mtu6);
+ assert(ifp_a.bandwidth == ifp_b.bandwidth);
+ assert(ifp_a.link_ifindex == ifp_b.link_ifindex);
+ assert(ifp_a.ll_type == ifp_b.ll_type);
+ assert(lua_gettop(L) == 0);
+
+ struct in_addr addr_a;
+ struct in_addr addr_b = addr_a;
+
+ lua_pushinaddr(L, &addr_a);
+ lua_decode_inaddr(L, -1, &addr_a);
+ assert(addr_a.s_addr == addr_b.s_addr);
+ assert(lua_gettop(L) == 0);
+
+ struct in6_addr in6addr_a;
+ struct in6_addr in6addr_b = in6addr_a;
+
+ lua_pushin6addr(L, &in6addr_a);
+ lua_decode_in6addr(L, -1, &in6addr_a);
+ assert(in6addr_cmp(&in6addr_a, &in6addr_b) == 0);
+ assert(lua_gettop(L) == 0);
+
+ union sockunion su_a, su_b;
+
+ memset(&su_a, 0, sizeof(union sockunion));
+ memset(&su_b, 0, sizeof(union sockunion));
+ lua_pushsockunion(L, &su_a);
+ lua_decode_sockunion(L, -1, &su_a);
+ assert(sockunion_cmp(&su_a, &su_b) == 0);
+ assert(lua_gettop(L) == 0);
+}
+
+int main(int argc, char **argv)
+{
+ test_encode_decode();
+}
--- /dev/null
+import frrtest
+import pytest
+
+if 'S["SCRIPTING_TRUE"]=""\n' not in open("../config.status").readlines():
+ class TestFrrlua:
+ @pytest.mark.skipif(True, reason="Test unsupported")
+ def test_exit_cleanly(self):
+ pass
+else:
+
+ class TestFrrlua(frrtest.TestMultiOut):
+ program = "./test_frrlua"
+
+ TestFrrlua.exit_cleanly()
--- /dev/null
+/*
+ * frrscript unit tests
+ * Copyright (C) 2021 Donald Lee
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "lib/frrscript.h"
+
+int main(int argc, char **argv)
+{
+ frrscript_init("./lib");
+
+ struct frrscript *fs = frrscript_load("script1", NULL);
+ long long a = 100, b = 200;
+ int result = frrscript_call(fs, ("a", &a), ("b", &b));
+
+ assert(result == 0);
+ assert(a == 300);
+ assert(b == 200);
+
+ return 0;
+}
--- /dev/null
+import frrtest
+import pytest
+
+if 'S["SCRIPTING_TRUE"]=""\n' not in open("../config.status").readlines():
+ class TestFrrscript:
+ @pytest.mark.skipif(True, reason="Test unsupported")
+ def test_exit_cleanly(self):
+ pass
+else:
+
+ class TestFrrscript(frrtest.TestMultiOut):
+ program = "./test_frrscript"
+
+ TestFrrscript.exit_cleanly()
IGNORE_ZEBRA = --ignore=zebra/
endif
+if SCRIPTING
+TESTS_SCRIPTING = \
+ tests/lib/test_frrlua \
+ tests/lib/test_frrscript \
+ #end
+else
+TESTS_SCRIPTING =
+endif
+
clippy_scan += \
tests/lib/cli/test_cli.c \
tests/ospf6d/test_lsdb.c \
$(TESTS_OSPFD) \
$(TESTS_OSPF6D) \
$(TESTS_ZEBRA) \
+ $(TESTS_SCRIPTING) \
# end
if GRPC
tests_lib_test_checksum_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_checksum_LDADD = $(ALL_TESTS_LDADD)
tests_lib_test_checksum_SOURCES = tests/lib/test_checksum.c
+if SCRIPTING
+tests_lib_test_frrlua_CFLAGS = $(TESTS_CFLAGS)
+tests_lib_test_frrlua_CPPFLAGS = $(TESTS_CPPFLAGS)
+tests_lib_test_frrlua_LDADD = $(ALL_TESTS_LDADD)
+tests_lib_test_frrlua_SOURCES = tests/lib/test_frrlua.c
+tests_lib_test_frrscript_CFLAGS = $(TESTS_CFLAGS)
+tests_lib_test_frrscript_CPPFLAGS = $(TESTS_CPPFLAGS)
+tests_lib_test_frrscript_LDADD = $(ALL_TESTS_LDADD)
+tests_lib_test_frrscript_SOURCES = tests/lib/test_frrscript.c
+endif
tests_lib_test_graph_CFLAGS = $(TESTS_CFLAGS)
tests_lib_test_graph_CPPFLAGS = $(TESTS_CPPFLAGS)
tests_lib_test_graph_LDADD = $(ALL_TESTS_LDADD)
tests/zebra/test_lm_plugin.refout \
# end
+
+if SCRIPTING
+EXTRA_DIST += \
+ tests/lib/test_frrscript.py \
+ tests/lib/test_frrlua.py \
+ #end
+endif
+
.PHONY: tests/tests.xml
tests/tests.xml: $(check_PROGRAMS)
( cd tests; $(PYTHON) ../$(srcdir)/tests/runtests.py --junitxml=tests.xml -v ../$(srcdir)/tests $(IGNORE_BGPD) $(IGNORE_ISISD) $(IGNORE_OSPF6D); )
--- /dev/null
+!
+router bgp 65001
+!
--- /dev/null
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+ip forwarding
+!
+
--- /dev/null
+!
+router bgp 65001
+ no bgp default ipv4-unicast
+ bgp default ipv6-unicast
+!
--- /dev/null
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+ip forwarding
+!
--- /dev/null
+!
+router bgp 65001
+ no bgp default ipv4-unicast
+ bgp default l2vpn-evpn
+!
--- /dev/null
+!
+interface r3-eth0
+ ip address 192.168.255.3/24
+!
+ip forwarding
+!
--- /dev/null
+!
+router bgp 65001
+ bgp default ipv6-unicast
+ bgp default l2vpn-evpn
+!
--- /dev/null
+!
+interface r3-eth0
+ ip address 192.168.255.3/24
+!
+ip forwarding
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if `bgp default ipv4-unicast`, `bgp default ipv6-unicast`
+and `bgp default l2vpn-evpn` commands work as expected.
+
+STEP 1: 'Check if neighbor 192.168.255.254 is enabled for ipv4 address-family only'
+STEP 2: 'Check if neighbor 192.168.255.254 is enabled for ipv6 address-family only'
+STEP 3: 'Check if neighbor 192.168.255.254 is enabled for l2vpn evpn address-family only'
+STEP 4: 'Check if neighbor 192.168.255.254 is enabled for ipv4/ipv6 unicast and l2vpn evpn address-families'
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = [pytest.mark.bgpd]
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+from mininet.topo import Topo
+from lib.common_config import step
+
+
+class TemplateTopo(Topo):
+ def build(self, *_args, **_opts):
+ tgen = get_topogen(self)
+
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+ tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_default_ipv4_ipv6_unicast():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Check if neighbor 192.168.255.254 is enabled for ipv4 address-family only")
+
+ def _bgp_neighbor_ipv4_af_only():
+ tgen.gears["r1"].vtysh_cmd(
+ "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
+ )
+
+ output = json.loads(tgen.gears["r1"].vtysh_cmd("show bgp summary json"))
+
+ if len(output.keys()) == 1 and "ipv4Unicast" in output:
+ return True
+ return False
+
+ assert _bgp_neighbor_ipv4_af_only() == True
+
+ step("Check if neighbor 192.168.255.254 is enabled for ipv6 address-family only")
+
+ def _bgp_neighbor_ipv6_af_only():
+ tgen.gears["r2"].vtysh_cmd(
+ "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
+ )
+
+ output = json.loads(tgen.gears["r2"].vtysh_cmd("show bgp summary json"))
+
+ if len(output.keys()) == 1 and "ipv6Unicast" in output:
+ return True
+ return False
+
+ assert _bgp_neighbor_ipv6_af_only() == True
+
+ step("Check if neighbor 192.168.255.254 is enabled for evpn address-family only")
+
+ def _bgp_neighbor_evpn_af_only():
+ tgen.gears["r3"].vtysh_cmd(
+ "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
+ )
+
+ output = json.loads(tgen.gears["r3"].vtysh_cmd("show bgp summary json"))
+
+ if len(output.keys()) == 1 and "l2VpnEvpn" in output:
+ return True
+ return False
+
+ assert _bgp_neighbor_evpn_af_only() == True
+
+ step(
+ "Check if neighbor 192.168.255.254 is enabled for ipv4/ipv6 unicast and evpn address-families"
+ )
+
+ def _bgp_neighbor_ipv4_ipv6_and_evpn_af():
+ tgen.gears["r4"].vtysh_cmd(
+ "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
+ )
+
+ output = json.loads(tgen.gears["r4"].vtysh_cmd("show bgp summary json"))
+
+ if (
+ len(output.keys()) == 3
+ and "ipv4Unicast" in output
+ and "ipv6Unicast" in output
+ and "l2VpnEvpn" in output
+ ):
+ return True
+ return False
+
+ assert _bgp_neighbor_ipv4_ipv6_and_evpn_af() == True
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
+++ /dev/null
-!
-router bgp 65001
-!
+++ /dev/null
-!
-interface r1-eth0
- ip address 192.168.255.1/24
-!
-ip forwarding
-!
-
+++ /dev/null
-!
-router bgp 65001
- no bgp default ipv4-unicast
- bgp default ipv6-unicast
-!
+++ /dev/null
-!
-interface r2-eth0
- ip address 192.168.255.2/24
-!
-ip forwarding
-!
+++ /dev/null
-!
-router bgp 65001
- bgp default ipv6-unicast
-!
+++ /dev/null
-!
-interface r3-eth0
- ip address 192.168.255.3/24
-!
-ip forwarding
-!
+++ /dev/null
-#!/usr/bin/env python
-
-#
-# Copyright (c) 2021 by
-# Donatas Abraitis <donatas.abraitis@gmail.com>
-#
-# Permission to use, copy, modify, and/or distribute this software
-# for any purpose with or without fee is hereby granted, provided
-# that the above copyright notice and this permission notice appear
-# in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-#
-
-"""
-Test if `bgp default ipv4-unicast` and `bgp default ipv6-unicast`
-commands work as expected.
-
-STEP 1: 'Check if neighbor 192.168.255.254 is enabled for ipv4 address-family only'
-STEP 2: 'Check if neighbor 192.168.255.254 is enabled for ipv6 address-family only'
-STEP 3: 'Check if neighbor 192.168.255.254 is enabled for ipv4 and ipv6 address-families'
-"""
-
-import os
-import sys
-import json
-import pytest
-import functools
-
-pytestmark = [pytest.mark.bgpd]
-
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-
-# pylint: disable=C0413
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
-from lib.common_config import step
-
-
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
-
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
-
-def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
- tgen.start_topology()
-
- router_list = tgen.routers()
-
- for i, (rname, router) in enumerate(router_list.items(), 1):
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
- )
-
- tgen.start_router()
-
-
-def teardown_module(mod):
- tgen = get_topogen()
- tgen.stop_topology()
-
-
-def test_bgp_default_ipv4_ipv6_unicast():
- tgen = get_topogen()
-
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
- step("Check if neighbor 192.168.255.254 is enabled for ipv4 address-family only")
-
- def _bgp_neighbor_ipv4_af_only():
- tgen.gears["r1"].vtysh_cmd(
- "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
- )
-
- output = json.loads(tgen.gears["r1"].vtysh_cmd("show bgp summary json"))
-
- if "ipv4Unicast" in output and "ipv6Unicast" not in output:
- return True
- return False
-
- assert _bgp_neighbor_ipv4_af_only() == True
-
- step("Check if neighbor 192.168.255.254 is enabled for ipv6 address-family only")
-
- def _bgp_neighbor_ipv6_af_only():
- tgen.gears["r2"].vtysh_cmd(
- "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
- )
-
- output = json.loads(tgen.gears["r2"].vtysh_cmd("show bgp summary json"))
-
- if "ipv4Unicast" not in output and "ipv6Unicast" in output:
- return True
- return False
-
- assert _bgp_neighbor_ipv6_af_only() == True
-
- step(
- "Check if neighbor 192.168.255.254 is enabled for ipv4 and ipv6 address-families"
- )
-
- def _bgp_neighbor_ipv4_and_ipv6_af():
- tgen.gears["r3"].vtysh_cmd(
- "conf t\nrouter bgp\nneighbor 192.168.255.254 remote-as external"
- )
-
- output = json.loads(tgen.gears["r3"].vtysh_cmd("show bgp summary json"))
-
- if "ipv4Unicast" in output and "ipv6Unicast" in output:
- return True
- return False
-
- assert _bgp_neighbor_ipv4_and_ipv6_af() == True
-
-
-if __name__ == "__main__":
- args = ["-s"] + sys.argv[1:]
- sys.exit(pytest.main(args))
# OF THIS SOFTWARE.
#
-#################################
-# TOPOLOGY
-#################################
-"""
-
- +-------+
- +------- | R2 |
- | +-------+
- | |
- +-------+ |
- | R1 | |
- +-------+ |
- | |
- | +-------+ +-------+
- +---------- | R3 |----------| R4 |
- +-------+ +-------+
-
-"""
-
-#################################
-# TEST SUMMARY
-#################################
-"""
-Following tests are covered to test route-map functionality:
-TC_34:
- Verify if route-maps is applied in both inbound and
- outbound direction to same neighbor/interface.
-TC_36:
- Test permit/deny statements operation in route-maps with a
- permutation and combination of permit/deny in prefix-lists
-TC_35:
- Test multiple sequence numbers in a single route-map for different
- match/set clauses.
-TC_37:
- Test add/remove route-maps with multiple set
- clauses and without any match statement.(Set only)
-TC_38:
- Test add/remove route-maps with multiple match
- clauses and without any set statement.(Match only)
-"""
-
import sys
import json
import time
create_bgp_community_lists,
interface_status,
create_route_maps,
+ create_static_routes,
create_prefix_lists,
verify_route_maps,
check_address_types,
)
from lib.topojson import build_topo_from_json, build_config_from_json
+#################################
+# TOPOLOGY
+#################################
+"""
+
+ +-------+
+ +------- | R2 |
+ | +-------+
+ | |
+ +-------+ |
+ | R1 | |
+ +-------+ |
+ | |
+ | +-------+ +-------+
+ +---------- | R3 |----------| R4 |
+ +-------+ +-------+
+
+"""
+
+#################################
+# TEST SUMMARY
+#################################
+"""
+Following tests are covered to test route-map functionality:
+TC_34:
+ Verify if route-maps is applied in both inbound and
+ outbound direction to same neighbor/interface.
+TC_36:
+ Test permit/deny statements operation in route-maps with a
+ permutation and combination of permit/deny in prefix-lists
+TC_35:
+ Test multiple sequence numbers in a single route-map for different
+ match/set clauses.
+TC_37:
+ Test add/remove route-maps with multiple set
+ clauses and without any match statement.(Set only)
+TC_38:
+ Test add/remove route-maps with multiple match
+ clauses and without any set statement.(Match only)
+"""
# Global variables
bgp_convergence = False
result = verify_rib(
tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present in rib \n Error: {}".format(tc_name, result)
+ assert result is not True, ("Testcase {} : Failed \n"
+ "routes are not present in rib \n Error: {}".format(tc_name, result))
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
result = verify_rib(
tgen, adt, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n "
- "routes are not present in rib \n Error: {}".format(tc_name, result)
+ assert result is not True, ("Testcase {} : Failed \n "
+ "routes are not present in rib \n Error: {}".format(tc_name, result))
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
}
# tgen.mininet_cli()
- result = verify_rib(
- tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
- )
if "deny" in [prefix_action, rmap_action]:
- assert result is not True, "Testcase {} : Failed \n "
- "Routes are still present \n Error: {}".format(tc_name, result)
+ result = verify_rib(
+ tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
+ )
+ assert result is not True, ("Testcase {} : Failed \n "
+ "Routes are still present \n Error: {}".format(tc_name, result))
logger.info("Expected behaviour: {}".format(result))
else:
+ result = verify_rib(
+ tgen, adt, dut, input_dict_2, protocol=protocol
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
#############################################
# Verification APIs
#############################################
-@retry(attempts=4, wait=2, return_is_str=True)
+@retry(retry_timeout=8)
def verify_router_id(tgen, topo, input_dict, expected=True):
"""
Running command "show ip bgp json" for DUT and reading router-id
return True
-@retry(attempts=50, wait=3, return_is_str=True)
+@retry(retry_timeout=150)
def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
"""
API will verify if BGP is converged with in the given time frame.
return True
-@retry(attempts=4, wait=4, return_is_str=True)
+@retry(retry_timeout=16)
def verify_bgp_community(
tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False, expected=True
):
return True
-@retry(attempts=4, wait=2, return_is_str=True)
+@retry(retry_timeout=8)
def verify_as_numbers(tgen, topo, input_dict, expected=True):
"""
This API is to verify AS numbers for given DUT by running
return True
-@retry(attempts=50, wait=3, return_is_str=True)
+@retry(retry_timeout=150)
def verify_bgp_convergence_from_running_config(tgen, dut=None, expected=True):
"""
API to verify BGP convergence b/w loopback and physical interface.
return True
-@retry(attempts=4, wait=4, return_is_str=True)
+@retry(retry_timeout=16)
def verify_bgp_attributes(
tgen,
addr_type,
return True
-@retry(attempts=4, wait=2, return_is_str=True)
+@retry(retry_timeout=8)
def verify_best_path_as_per_bgp_attribute(
tgen, addr_type, router, input_dict, attribute, expected=True
):
return True
-@retry(attempts=5, wait=2, return_is_str=True)
+@retry(retry_timeout=10)
def verify_best_path_as_per_admin_distance(
tgen, addr_type, router, input_dict, attribute, expected=True
):
return True
-@retry(attempts=5, wait=2, return_is_str=True, initial_wait=2)
+@retry(retry_timeout=10, initial_wait=2)
def verify_bgp_rib(
tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None, expected=True
):
return True
-@retry(attempts=5, wait=2, return_is_str=True)
+@retry(retry_timeout=10)
def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify verify_graceful_restart configuration of DUT and
return True
-@retry(attempts=5, wait=2, return_is_str=True)
+@retry(retry_timeout=10)
def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify r_bit in the BGP gr capability advertised
return True
-@retry(attempts=5, wait=2, return_is_str=True)
+@retry(retry_timeout=10)
def verify_eor(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify EOR
return True
-@retry(attempts=4, wait=2, return_is_str=True)
+@retry(retry_timeout=8)
def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
"""
This API is to verify f_bit in the BGP gr capability advertised
return True
-@retry(attempts=5, wait=2, return_is_str=True)
+@retry(retry_timeout=10)
def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer):
"""
This API is to verify graceful restart timers, configured and recieved
return True
-@retry(attempts=4, wait=2, return_is_str=True)
+@retry(retry_timeout=8)
def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut, expected=True):
"""
This API is to verify gr_address_family in the BGP gr capability advertised
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
-@retry(attempts=6, wait=2, return_is_str=True)
+@retry(retry_timeout=12)
def verify_attributes_for_evpn_routes(
tgen,
topo,
return False
-@retry(attempts=5, wait=2, return_is_str=True)
+@retry(retry_timeout=10)
def verify_evpn_routes(
tgen, topo, dut, input_dict, routeType=5, EthTag=0, next_hop=None, expected=True
):
#
from collections import OrderedDict
-from datetime import datetime
+from datetime import datetime, timedelta
from time import sleep
from copy import deepcopy
from subprocess import call
import ipaddress
import platform
-if sys.version_info[0] > 2:
- import io
- import configparser
-else:
- import StringIO
+try:
+ # Imports from python2
+ from StringIO import StringIO
import ConfigParser as configparser
+except ImportError:
+ # Imports from python3
+ from io import StringIO
+ import configparser
from lib.topolog import logger, logger_config
from lib.topogen import TopoRouter, get_topogen
],
}
+def is_string(value):
+    # Return True when `value` is a text string on both Python 2 (str or
+    # unicode, matched via `basestring`) and Python 3 (plain str; referencing
+    # `basestring` there raises NameError, which selects the py3 branch).
+    try:
+        return isinstance(value, basestring)
+    except NameError:
+        return isinstance(value, str)
+
if config.has_option("topogen", "verbosity"):
loglevel = config.get("topogen", "verbosity")
loglevel = loglevel.upper()
return True
-def getStrIO():
- """
- Return a StringIO object appropriate for the current python version.
- """
- if sys.version_info[0] > 2:
- return io.StringIO()
- else:
- return StringIO.StringIO()
-
-
def reset_config_on_routers(tgen, routerName=None):
"""
Resets configuration on routers to the snapshot created using input JSON
raise InvalidCLIError("Unknown error in %s", output)
f = open(dname, "r")
- delta = getStrIO()
+ delta = StringIO()
delta.write("configure terminal\n")
t_delta = f.read()
output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False)
delta.close()
- delta = getStrIO()
+ delta = StringIO()
cfg = router.run("vtysh -c 'show running'")
for line in cfg.split("\n"):
line = line.strip()
tgen = get_topogen()
router_list = tgen.routers()
- test_name = sys._getframe(2).f_code.co_name
+ test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
+
TMPDIR = os.path.join(LOGDIR, tgen.modname)
+ bundle_procs = {}
for rname, rnode in router_list.items():
- logger.info("Generating support bundle for {}".format(rname))
+ logger.info("Spawn collection of support bundle for %s", rname)
rnode.run("mkdir -p /var/log/frr")
+ bundle_procs[rname] = tgen.net[rname].popen(
+ "/usr/lib/frr/generate_support_bundle.py",
+ stdin=None,
+ stdout=SUB_PIPE,
+ stderr=SUB_PIPE,
+ )
- # Support only python3 going forward
- bundle_log = rnode.run("env python3 /usr/lib/frr/generate_support_bundle.py")
-
- logger.info(bundle_log)
-
+ for rname, rnode in router_list.items():
dst_bundle = "{}/{}/support_bundles/{}".format(TMPDIR, rname, test_name)
src_bundle = "/var/log/frr"
+
+ output, error = bundle_procs[rname].communicate()
+
+ logger.info("Saving support bundle for %s", rname)
+ if output:
+ logger.info(
+ "Output from collecting support bundle for %s:\n%s", rname, output
+ )
+ if error:
+ logger.warning(
+ "Error from collecting support bundle for %s:\n%s", rname, error
+ )
rnode.run("rm -rf {}".format(dst_bundle))
rnode.run("mkdir -p {}".format(dst_bundle))
rnode.run("mv -f {}/* {}".format(src_bundle, dst_bundle))
return True
-def retry(attempts=3, wait=2, return_is_str=True, initial_wait=0, return_is_dict=False):
+def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
     """
-    Retries function execution, if return is an errormsg or exception
-
-    * `attempts`: Number of attempts to make
-    * `wait`: Number of seconds to wait between each attempt
-    * `return_is_str`: Return val is an errormsg in case of failure
-    * `initial_wait`: Sleeps for this much seconds before executing function
-
+    Fixture: Retries function while its return value is an errormsg (str), False, or it raises an exception.
+
+    * `retry_timeout`: Retry for at least this many seconds; after waiting initial_wait seconds
+    * `initial_wait`: Sleeps for this many seconds before first executing function
+    * `expected`: if False then the return logic is inverted, except for exceptions,
+                  (i.e., a False or errmsg (str) function return ends the retry loop,
+                  and returns that False or str value)
+    * `diag_pct`: Percentage of `retry_timeout` to keep testing after negative result would have
+                  been returned in order to see if a positive result comes after. This is an
+                  important diagnostic tool, and normally should not be disabled. Calls to wrapped
+                  functions though, can override the `diag_pct` value to make it larger in case more
+                  diagnostic retrying is appropriate.
     """
     def _retry(func):
         @wraps(func)
         def func_retry(*args, **kwargs):
-            _wait = kwargs.pop("wait", wait)
-            _attempts = kwargs.pop("attempts", attempts)
-            _attempts = int(_attempts)
-            if _attempts < 0:
-                raise ValueError("attempts must be 0 or greater")
+            # We will continue to retry diag_pct of the timeout value to see if test would have passed with a
+            # longer retry timeout value.
+            saved_failure = None
+
+            retry_sleep = 2
+
+            # Allow the wrapped function's args to override the fixtures
+            _retry_timeout = kwargs.pop("retry_timeout", retry_timeout)
+            _expected = kwargs.pop("expected", expected)
+            _initial_wait = kwargs.pop("initial_wait", initial_wait)
+            _diag_pct = kwargs.pop("diag_pct", diag_pct)
+
+            start_time = datetime.now()
+            retry_until = datetime.now() + timedelta(seconds=_retry_timeout + _initial_wait)
-            if initial_wait > 0:
-                logger.info("Waiting for [%s]s as initial delay", initial_wait)
-                sleep(initial_wait)
+            # BUGFIX: use the per-call override popped above; referencing the
+            # decorator-level `initial_wait` here silently ignored a
+            # caller-supplied `initial_wait=` keyword argument.
+            if _initial_wait > 0:
+                logger.info("Waiting for [%s]s as initial delay", _initial_wait)
+                sleep(_initial_wait)
-            _return_is_str = kwargs.pop("return_is_str", return_is_str)
-            _return_is_dict = kwargs.pop("return_is_str", return_is_dict)
-            _expected = kwargs.setdefault("expected", True)
-            kwargs.pop("expected")
-            for i in range(1, _attempts + 1):
+            invert_logic = not _expected
+            while True:
+                seconds_left = (retry_until - datetime.now()).total_seconds()
                 try:
                     ret = func(*args, **kwargs)
                     logger.debug("Function returned %s", ret)
-                    if _return_is_str and isinstance(ret, bool) and _expected:
-                        return ret
-                    if (
-                        isinstance(ret, str) or isinstance(ret, unicode)
-                    ) and _expected is False:
-                        return ret
-                    if _return_is_dict and isinstance(ret, dict):
-                        return ret
-
-                    if _attempts == i:
-                        generate_support_bundle()
-                        return ret
-                except Exception as err:
-                    if _attempts == i:
-                        generate_support_bundle()
-                        logger.info("Max number of attempts (%r) reached", _attempts)
-                        raise
-                    else:
-                        logger.info("Function returned %s", err)
-                if i < _attempts:
-                    logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait))
-                    sleep(_wait)
+
+                    negative_result = ret is False or is_string(ret)
+                    if negative_result == invert_logic:
+                        # Simple case, successful result in time
+                        if not saved_failure:
+                            return ret
+
+                        # Positive result, but happened after timeout failure, very important to
+                        # note for fixing tests.
+                        logger.warning("RETRY DIAGNOSTIC: SUCCEED after FAILED with requested timeout of %.1fs; however, succeeded in %.1fs, investigate timeout timing",
+                                       _retry_timeout, (datetime.now() - start_time).total_seconds())
+                        if isinstance(saved_failure, Exception):
+                            raise saved_failure  # pylint: disable=E0702
+                        return saved_failure
+
+                except Exception as error:
+                    logger.info("Function raised exception: %s", str(error))
+                    ret = error
+
+                if seconds_left < 0 and saved_failure:
+                    logger.info("RETRY DIAGNOSTIC: Retry timeout reached, still failing")
+                    if isinstance(saved_failure, Exception):
+                        raise saved_failure  # pylint: disable=E0702
+                    return saved_failure
+
+                if seconds_left < 0:
+                    logger.info("Retry timeout of %ds reached", _retry_timeout)
+
+                    saved_failure = ret
+                    retry_extra_delta = timedelta(seconds=seconds_left + _retry_timeout * _diag_pct)
+                    retry_until = datetime.now() + retry_extra_delta
+                    seconds_left = retry_extra_delta.total_seconds()
+
+                    # Generate bundle after setting remaining diagnostic retry time
+                    generate_support_bundle()
+
+                    # If user has disabled diagnostic retries return now
+                    if not _diag_pct:
+                        if isinstance(saved_failure, Exception):
+                            raise saved_failure
+                        return saved_failure
+
+                if saved_failure:
+                    logger.info("RETRY DIAG: [failure] Sleeping %ds until next retry with %.1f retry time left - to see if timeout was too short",
+                                retry_sleep, seconds_left)
+                else:
+                    logger.info("Sleeping %ds until next retry with %.1f retry time left",
+                                retry_sleep, seconds_left)
+                sleep(retry_sleep)
         func_retry._original = func
         return func_retry
#############################################
# Verification APIs
#############################################
-@retry(attempts=6, wait=2, return_is_str=True)
+@retry(retry_timeout=12)
def verify_rib(
tgen,
addr_type,
return True
-@retry(attempts=6, wait=2, return_is_str=True)
+@retry(retry_timeout=12)
def verify_fib_routes(tgen, addr_type, dut, input_dict, next_hop=None):
"""
Data will be read from input_dict or input JSON file, API will generate
return True
-@retry(attempts=3, wait=4, return_is_str=True)
+@retry(retry_timeout=12)
def verify_route_maps(tgen, input_dict):
"""
Running "show route-map" command and verifying given route-map
return True
-@retry(attempts=4, wait=4, return_is_str=True)
+@retry(retry_timeout=16)
def verify_bgp_community(tgen, addr_type, router, network, input_dict=None):
"""
API to veiryf BGP large community is attached in route for any given
return True
-@retry(attempts=3, wait=4, return_is_str=True)
+@retry(retry_timeout=12)
def verify_evpn_vni(tgen, input_dict):
"""
API to verify evpn vni details using "show evpn vni detail json"
return False
-@retry(attempts=3, wait=4, return_is_str=True)
+@retry(retry_timeout=12)
def verify_vrf_vni(tgen, input_dict):
"""
API to verify vrf vni details using "show vrf vni json"
################################
# Verification procs
################################
-@retry(attempts=40, wait=2, return_is_str=True)
+@retry(retry_timeout=80)
def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expected=True):
"""
This API is to verify ospf neighborship by running
################################
# Verification procs
################################
-@retry(attempts=10, wait=2, return_is_str=True)
+@retry(retry_timeout=20)
def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
"""
This API is to verify ospf neighborship by running
return result
-@retry(attempts=21, wait=2, return_is_str=True)
+@retry(retry_timeout=40)
def verify_ospf_rib(
tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None, expected=True
):
return result
-@retry(attempts=10, wait=2, return_is_str=True)
+@retry(retry_timeout=20)
def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None, expected=True):
"""
This API is to verify ospf routes by running
return result
-@retry(attempts=11, wait=2, return_is_str=True)
+@retry(retry_timeout=20)
def verify_ospf_database(tgen, topo, dut, input_dict, expected=True):
"""
This API is to verify ospf lsa's by running
return result
-@retry(attempts=10, wait=2, return_is_str=True)
+@retry(retry_timeout=20)
def verify_ospf_summary(tgen, topo, dut, input_dict, expected=True):
"""
This API is to verify ospf routes by running
-@retry(attempts=10, wait=3, return_is_str=True)
+@retry(retry_timeout=30)
def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
tag=None, metric=None, fib=None):
"""
return result
-@retry(attempts=3, wait=2, return_is_str=True)
+@retry(retry_timeout=6)
def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
"""
This API is to verify ospf routes by running
return result
-@retry(attempts=11, wait=2, return_is_str=True)
+@retry(retry_timeout=20)
def verify_ospf6_database(tgen, topo, dut, input_dict):
"""
This API is to verify ospf lsa's by running
config_data = []
for lnk in input_dict[router]['links'].keys():
if "ospf6" not in input_dict[router]['links'][lnk]:
- logger.debug("Router %s: ospf6 configs is not present in"
- "input_dict, passed input_dict", router,
- input_dict)
+ logger.debug("Router %s: ospf6 config is not present in"
+ "input_dict, passed input_dict %s", router,
+ str(input_dict))
continue
ospf_data = input_dict[router]['links'][lnk]['ospf6']
data_ospf_area = ospf_data.setdefault("area", None)
#############################################
# Verification APIs
#############################################
-@retry(attempts=6, wait=2, return_is_str=True)
+@retry(retry_timeout=12)
def verify_pim_neighbors(tgen, topo, dut=None, iface=None, nbr_ip=None, expected=True):
"""
Verify all PIM neighbors are up and running, config is verified
return True
-@retry(attempts=21, wait=2, return_is_str=True)
+@retry(retry_timeout=40)
def verify_igmp_groups(tgen, dut, interface, group_addresses, expected=True):
"""
Verify IGMP groups are received from an intended interface
return True
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_upstream_iif(
tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1, expected=True
):
return True
-@retry(attempts=6, wait=2, return_is_str=True)
+@retry(retry_timeout=12)
def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, expected=True):
"""
Verify join state is updated correctly and join timer is
return True
-@retry(attempts=41, wait=2, return_is_dict=True)
+@retry(retry_timeout=80)
def verify_ip_mroutes(
tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0, expected=True
):
return True if return_uptime == False else uptime_dict
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_pim_rp_info(
tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None, expected=True
):
return True
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_pim_state(
tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None, expected=True
):
return output_dict
-@retry(attempts=21, wait=2, return_is_str=True)
+@retry(retry_timeout=40)
def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None, expected=True):
"""
Verify all PIM interface are up and running, config is verified
return True
-@retry(attempts=10, wait=2, return_is_str=True)
+@retry(retry_timeout=20)
def clear_ip_mroute_verify(tgen, dut, expected=True):
"""
Clear ip mroute by running "clear ip mroute" cli and verify
return rp_details
-@retry(attempts=6, wait=2, return_is_str=True)
+@retry(retry_timeout=12)
def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True):
"""
Verify pim rp info by running "show ip pim rp-info" cli
return errormsg
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_pim_bsr(tgen, topo, dut, bsr_ip, expected=True):
"""
Verify all PIM interface are up and running, config is verified
return True
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None, expected=True):
"""
Verify IP PIM upstream rpf, config is verified
return result
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None, expected=True):
"""
Verify ip pim join by running "show ip pim join" cli
return True
-@retry(attempts=31, wait=2, return_is_dict=True)
+@retry(retry_timeout=60)
def verify_igmp_config(tgen, input_dict, stats_return=False, expected=True):
"""
Verify igmp interface details, verifying following configs:
return True if stats_return == False else igmp_stats
-@retry(attempts=31, wait=2, return_is_str=True)
+@retry(retry_timeout=60)
def verify_pim_config(tgen, input_dict, expected=True):
"""
Verify pim interface details, verifying following configs:
return True
-@retry(attempts=21, wait=2, return_is_dict=True)
+@retry(retry_timeout=40)
def verify_multicast_traffic(tgen, input_dict, return_traffic=False, expected=True):
"""
Verify multicast traffic by running
return refCount
-@retry(attempts=21, wait=2, return_is_str=True)
+@retry(retry_timeout=40)
def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag, expected=True):
"""
Verify flag state for mroutes and make sure (*, G)/(S, G) are having
return True
-@retry(attempts=21, wait=2, return_is_str=True)
+@retry(retry_timeout=40)
def verify_igmp_interface(tgen, topo, dut, igmp_iface, interface_ip, expected=True):
"""
Verify all IGMP interface are up and running, config is verified
ip igmp
!
ip pim rp 10.254.254.1
+ip msdp timers 10 20 3
ip msdp mesh-group mg-1 source 10.254.254.1
ip msdp mesh-group mg-1 member 10.254.254.2
ip msdp mesh-group mg-1 member 10.254.254.3
ip pim
!
ip pim rp 10.254.254.2
+ip msdp timers 10 20 3
ip msdp mesh-group mg-1 source 10.254.254.2
ip msdp mesh-group mg-1 member 10.254.254.1
ip msdp mesh-group mg-1 member 10.254.254.3
ip igmp
!
ip pim rp 10.254.254.3
+ip msdp timers 10 20 3
ip msdp mesh-group mg-1 source 10.254.254.3
ip msdp mesh-group mg-1 member 10.254.254.1
ip msdp mesh-group mg-1 member 10.254.254.2
"show ip msdp peer json",
{peer: {"state": "established", "saCount": sa_count}}
)
- _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
assertmsg = '"{}" MSDP connection failure'.format(router)
assert result is None, assertmsg
--- /dev/null
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.2 remote-as 65002
+ neighbor 192.168.1.2 remote-as 65003
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+debug pim
+debug pim zebra
+!
+interface lo
+ ip pim
+ ip pim use-source 10.254.254.1
+!
+interface r1-eth0
+ ip pim
+!
+interface r1-eth1
+ ip pim
+!
+interface r1-eth2
+ ip pim
+ ip igmp
+!
+ip msdp timers 10 20 3
+ip msdp peer 192.168.0.2 source 192.168.0.1
+ip msdp peer 192.168.1.2 source 192.168.1.1
+ip pim rp 10.254.254.1
--- /dev/null
+ip forwarding
+!
+interface r1-eth0
+ ip address 192.168.0.1/24
+!
+interface r1-eth1
+ ip address 192.168.1.1/24
+!
+interface r1-eth2
+ ip address 192.168.10.1/24
+!
+interface lo
+ ip address 10.254.254.1/32
+!
--- /dev/null
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.0.1 remote-as 65001
+ neighbor 192.168.2.2 remote-as 65004
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+debug pim
+debug pim zebra
+!
+interface lo
+ ip pim
+ ip pim use-source 10.254.254.2
+!
+interface r2-eth0
+ ip pim
+!
+interface r2-eth1
+ ip pim
+!
+ip msdp timers 10 20 3
+ip msdp peer 192.168.0.1 source 192.168.0.2
+ip msdp peer 192.168.2.2 source 192.168.2.1
+ip pim rp 10.254.254.2
--- /dev/null
+ip forwarding
+!
+interface r2-eth0
+ ip address 192.168.0.2/24
+!
+interface r2-eth1
+ ip address 192.168.2.1/24
+!
+interface lo
+ ip address 10.254.254.2/32
+!
--- /dev/null
+router bgp 65003
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as 65001
+ neighbor 192.168.3.2 remote-as 65004
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+debug pim
+debug pim zebra
+!
+interface lo
+ ip pim
+ ip pim use-source 10.254.254.3
+!
+interface r3-eth0
+ ip pim
+!
+interface r3-eth1
+ ip pim
+!
+ip msdp timers 10 20 3
+ip msdp peer 192.168.1.1 source 192.168.1.2
+ip msdp peer 192.168.3.2 source 192.168.3.1
+ip pim rp 10.254.254.3
--- /dev/null
+ip forwarding
+!
+interface r3-eth0
+ ip address 192.168.1.2/24
+!
+interface r3-eth1
+ ip address 192.168.3.1/24
+!
+interface lo
+ ip address 10.254.254.3/32
+!
--- /dev/null
+router bgp 65004
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.1 remote-as 65002
+ neighbor 192.168.3.1 remote-as 65003
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
+
--- /dev/null
+debug pim
+debug pim zebra
+!
+interface lo
+ ip pim
+ ip pim use-source 10.254.254.4
+!
+interface r4-eth0
+ ip pim
+!
+interface r4-eth1
+ ip pim
+!
+interface r4-eth2
+ ip pim
+ ip igmp
+!
+ip msdp timers 10 20 3
+ip msdp peer 192.168.2.1 source 192.168.2.2
+ip msdp peer 192.168.3.1 source 192.168.3.2
+ip pim rp 10.254.254.4
--- /dev/null
+ip forwarding
+!
+interface r4-eth0
+ ip address 192.168.2.2/24
+!
+interface r4-eth1
+ ip address 192.168.3.2/24
+!
+interface r4-eth2
+ ip address 192.168.4.1/24
+!
+interface lo
+ ip address 10.254.254.4/32
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_msdp_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2021 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_msdp_topo1.py: Test the FRR PIM MSDP peer.
+"""
+
+import os
+import sys
+import json
+import socket
+import tempfile
+from functools import partial
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
+
+#
+# Test global variables:
+# They are used to handle communicating with external application.
+#
+APP_SOCK_PATH = '/tmp/topotests/apps.sock'
+HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py")
+app_listener = None
+app_clients = {}
+
+
+def listen_to_applications():
+ "Start listening socket to connect with applications."
+ # Remove old socket.
+ try:
+ os.unlink(APP_SOCK_PATH)
+ except OSError:
+ pass
+
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ sock.bind(APP_SOCK_PATH)
+ sock.listen(10)
+ global app_listener
+ app_listener = sock
+
+
+def accept_host(host):
+    "Accept connection from application running in hosts."
+    # NOTE(review): accept() has no timeout here — if the helper application
+    # fails to start, the test hangs at this call instead of failing; confirm
+    # this is acceptable or add a socket timeout.
+    global app_listener, app_clients
+    conn = app_listener.accept()
+    app_clients[host] = {
+        'fd': conn[0],
+        'address': conn[1]
+    }
+
+
+def close_applications():
+ "Signal applications to stop and close all sockets."
+ global app_listener, app_clients
+
+ # Close listening socket.
+ app_listener.close()
+
+ # Remove old socket.
+ try:
+ os.unlink(APP_SOCK_PATH)
+ except OSError:
+ pass
+
+ # Close all host connections.
+ for host in ["h1", "h2"]:
+ if app_clients.get(host) is None:
+ continue
+ app_clients[host]["fd"].close()
+
+
+class MSDPTopo1(Topo):
+ "Test topology builder"
+
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r4"])
+
+ # Create a host connected and direct at r4:
+ tgen.add_host("h1", "192.168.4.100/24", "192.168.4.1")
+ switch.add_link(tgen.gears["h1"])
+
+ # Create a host connected and direct at r1:
+ switch = tgen.add_switch("s6")
+ tgen.add_host("h2", "192.168.10.100/24", "192.168.10.1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["h2"])
+
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(MSDPTopo1, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
+ if os.path.isfile(daemon_file):
+ router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
+
+ daemon_file = "{}/{}/bgpd.conf".format(CWD, rname)
+ if os.path.isfile(daemon_file):
+ router.load_config(TopoRouter.RD_BGP, daemon_file)
+
+ daemon_file = "{}/{}/pimd.conf".format(CWD, rname)
+ if os.path.isfile(daemon_file):
+ router.load_config(TopoRouter.RD_PIM, daemon_file)
+
+ # Initialize all routers.
+ tgen.start_router()
+
+ # Start applications socket.
+ listen_to_applications()
+
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ close_applications()
+ tgen.stop_topology()
+
+
+def test_bgp_convergence():
+ "Wait for BGP protocol convergence"
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("waiting for protocols to converge")
+
+ def expect_loopback_route(router, iptype, route, proto):
+ "Wait until route is present on RIB for protocol."
+ logger.info("waiting route {} in {}".format(route, router))
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router],
+ "show {} route json".format(iptype),
+ {route: [{"protocol": proto}]},
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
+ assertmsg = '"{}" convergence failure'.format(router)
+ assert result is None, assertmsg
+
+ # Wait for R1
+ expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp")
+ expect_loopback_route("r1", "ip", "10.254.254.3/32", "bgp")
+ expect_loopback_route("r1", "ip", "10.254.254.4/32", "bgp")
+
+ # Wait for R2
+ expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp")
+ expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp")
+ expect_loopback_route("r2", "ip", "10.254.254.4/32", "bgp")
+
+ # Wait for R3
+ expect_loopback_route("r3", "ip", "10.254.254.1/32", "bgp")
+ expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp")
+ expect_loopback_route("r3", "ip", "10.254.254.4/32", "bgp")
+
+ # Wait for R4
+ expect_loopback_route("r4", "ip", "10.254.254.1/32", "bgp")
+ expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp")
+ expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp")
+
+
+def test_mroute_install():
+ "Test that multicast routes propagated and installed"
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["h1"].run("{} '{}' '{}' '{}' &".format(
+ HELPER_APP_PATH, APP_SOCK_PATH, '229.1.2.3', 'h1-eth0'))
+ accept_host("h1")
+
+ tgen.gears["h2"].run("{} --send='0.7' '{}' '{}' '{}' &".format(
+ HELPER_APP_PATH, APP_SOCK_PATH, '229.1.2.3', 'h2-eth0'))
+ accept_host("h2")
+
+ #
+ # Test R1 mroute
+ #
+ expect_1 = {
+ '229.1.2.3': {
+ '192.168.10.100': {
+ 'iif': 'r1-eth2',
+ 'flags': 'SFT',
+ 'oil': {
+ 'r1-eth0': {
+ 'source': '192.168.10.100',
+ 'group': '229.1.2.3'
+ },
+ 'r1-eth1': None
+ }
+ }
+ }
+ }
+ # Create a deep copy of `expect_1`.
+ expect_2 = json.loads(json.dumps(expect_1))
+ # The route will be either via R2 or R3.
+ expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth0'] = None
+ expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth1'] = {
+ 'source': '192.168.10.100',
+ 'group': '229.1.2.3'
+ }
+
+ def test_r1_mroute():
+ "Test r1 multicast routing table function"
+ out = tgen.gears['r1'].vtysh_cmd('show ip mroute json', isjson=True)
+ if topotest.json_cmp(out, expect_1) is None:
+ return None
+ return topotest.json_cmp(out, expect_2)
+
+ logger.info('Waiting for R1 multicast routes')
+ _, val = topotest.run_and_expect(test_r1_mroute, None, count=55, wait=2)
+ assert val is None, 'multicast route convergence failure'
+
+ #
+ # Test routers 2 and 3.
+ #
+ # NOTE: only one of the paths will get the multicast route.
+ #
+ expect_r2 = {
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "iif": "r2-eth0",
+ "flags": "S",
+ "oil": {
+ "r2-eth1": {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
+ }
+ }
+ }
+ }
+ }
+ expect_r3 = {
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "iif": "r3-eth0",
+ "flags": "S",
+ "oil": {
+ "r3-eth1": {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
+ }
+ }
+ }
+ }
+ }
+
+ def test_r2_r3_mroute():
+ "Test r2/r3 multicast routing table function"
+ r2_out = tgen.gears['r2'].vtysh_cmd('show ip mroute json', isjson=True)
+ r3_out = tgen.gears['r3'].vtysh_cmd('show ip mroute json', isjson=True)
+
+ if topotest.json_cmp(r2_out, expect_r2) is not None:
+ return topotest.json_cmp(r3_out, expect_r3)
+
+ return topotest.json_cmp(r2_out, expect_r2)
+
+ logger.info('Waiting for R2 and R3 multicast routes')
+ _, val = topotest.run_and_expect(test_r2_r3_mroute, None, count=55, wait=2)
+ assert val is None, 'multicast route convergence failure'
+
+ #
+ # Test router 4
+ #
+ expect_4 = {
+ "229.1.2.3": {
+ "*": {
+ "iif": "lo",
+ "flags": "SC",
+ "oil": {
+ "pimreg": {
+ "source": "*",
+ "group": "229.1.2.3",
+ "inboundInterface": "lo",
+ "outboundInterface": "pimreg"
+ },
+ "r4-eth2": {
+ "source": "*",
+ "group": "229.1.2.3",
+ "inboundInterface": "lo",
+ "outboundInterface": "r4-eth2"
+ }
+ }
+ },
+ "192.168.10.100": {
+ "iif": "r4-eth0",
+ "flags": "ST",
+ "oil": {
+ "r4-eth2": {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
+ "inboundInterface": "r4-eth0",
+ "outboundInterface": "r4-eth2",
+ }
+ }
+ }
+ }
+ }
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears['r4'], "show ip mroute json", expect_4,
+ )
+ logger.info('Waiting for R4 multicast routes')
+ _, val = topotest.run_and_expect(test_func, None, count=55, wait=2)
+ assert val is None, 'multicast route convergence failure'
+
+
+def test_msdp():
+ """
+ Test MSDP convergence.
+
+ MSDP non meshed groups must propagate the whole SA database (not just
+ their own) to all peers because not all peers talk with each other.
+
+ This setup leads to a potential loop that can be prevented by checking
+ the route's first AS in AS path: it must match the remote eBGP AS number.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1_expect = {
+ "192.168.0.2": {
+ "peer": "192.168.0.2",
+ "local": "192.168.0.1",
+ "state": "established"
+ },
+ "192.168.1.2": {
+ "peer": "192.168.1.2",
+ "local": "192.168.1.1",
+ "state": "established"
+ }
+ }
+ r1_sa_expect = {
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
+ "rp": "-",
+ "local": "yes",
+ "sptSetup": "-"
+ }
+ }
+ }
+ r2_expect = {
+ "192.168.0.1": {
+ "peer": "192.168.0.1",
+ "local": "192.168.0.2",
+ "state": "established"
+ },
+ "192.168.2.2": {
+ "peer": "192.168.2.2",
+ "local": "192.168.2.1",
+ "state": "established"
+ }
+ }
+ # Only R2 or R3 will get this SA.
+ r2_r3_sa_expect = {
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
+ "rp": "192.168.1.1",
+ "local": "no",
+ "sptSetup": "no",
+ }
+ }
+ }
+ r3_expect = {
+ "192.168.1.1": {
+ "peer": "192.168.1.1",
+ "local": "192.168.1.2",
+ "state": "established"
+ },
+ "192.168.3.2": {
+ "peer": "192.168.3.2",
+ "local": "192.168.3.1",
+ "state": "established"
+ }
+ }
+ r4_expect = {
+ "192.168.2.1": {
+ "peer": "192.168.2.1",
+ "local": "192.168.2.2",
+ "state": "established"
+ },
+ "192.168.3.1": {
+ "peer": "192.168.3.1",
+ "local": "192.168.3.2",
+ "state": "established"
+ }
+ }
+ r4_sa_expect = {
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
+ "rp": "192.168.1.1",
+ "local": "no",
+ "sptSetup": "yes"
+ }
+ }
+ }
+
+ for router in [('r1', r1_expect, r1_sa_expect),
+ ('r2', r2_expect, r2_r3_sa_expect),
+ ('r3', r3_expect, r2_r3_sa_expect),
+ ('r4', r4_expect, r4_sa_expect)]:
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router[0]], "show ip msdp peer json", router[1]
+ )
+ logger.info('Waiting for {} msdp peer data'.format(router[0]))
+ _, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert val is None, 'multicast route convergence failure'
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router[0]], "show ip msdp sa json", router[2]
+ )
+ logger.info('Waiting for {} msdp SA data'.format(router[0]))
+ _, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert val is None, 'multicast route convergence failure'
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
# Verify mroute not installed
step("Verify mroute not installed in l1")
result = verify_ip_mroutes(
- tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, wait=20, expected=False
+ tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, retry_timeout=20, expected=False
)
assert (
result is not True
), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
tc_name,
rp_add1,
+ rp2[group] if group in rp2 else None
)
# Verify if that rp is installed
{"dut": "r2", "src_address": source, "iif": "r2-f1-eth0", "oil": "r2-l1-eth2"},
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
]
+    # Timeout raised from the default of 80 to 120: failure logs indicate
+    # timings of 90+ seconds for success on the 2nd entry in the table above.
+    # Using 102s here roughly preserves the previous retry budget if we assume
+    # ~0.5s per vtysh "show ip mroute" run: 41 retries * (2s wait + 0.5s) ~= 102s.
for data in input_dict:
result = verify_ip_mroutes(
- tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"],
+ retry_timeout=102
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("r1: Verify show ip igmp group without any IGMP join")
dut = "r1"
interface = "r1-r0-eth0"
- result = verify_igmp_groups(tgen, dut, interface, GROUP_ADDRESS)
+ result = verify_igmp_groups(tgen, dut, interface, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: igmp group present without any IGMP join \n Error: {}".format(
step("r1: Verify RP info")
result = verify_pim_rp_info(
- tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE, expected=False
)
assert (
result is not True
)
step("r1: Verify upstream IIF interface")
- result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream IIF interface present \n Error: {}".format(tc_name, result)
)
step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is up and join timer is running \n Error: {}".format(
)
)
+ # 20
step("r1: Verify PIM state")
- result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
assert result is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
)
step("r1: Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: mroutes are still present \n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S, G) upstream join state is up and join timer is running\n Error: {}".format(
"r1 : OIL should be same and IIF should be cleared on R1 verify"
"using show ip pim state"
)
- result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"OIL is not same and IIF is not cleared on R1 \n Error: {}".format(
)
step("r1: upstream IIF should be unknown , verify using show ip pim" "upstream")
- result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream IIF is not unknown \n Error: {}".format(tc_name, result)
"r1: join state should not be joined and join timer should stop,"
"verify using show ip pim upstream"
)
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: join state is joined and timer is not stopped \n Error: {}".format(
assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
step("r1: (*, G) cleared from mroute table using show ip mroute")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*, G) are not cleared from mroute table \n Error: {}".format(
rp_address = "1.0.2.17"
iif = "r1-r2-eth1"
result = verify_pim_rp_info(
- tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE
+ tgen, TOPO, dut, GROUP_RANGE_ALL, iif, rp_address, SOURCE, expected=False
)
assert (
result is not True
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify upstream IIF interface")
- result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream IFF interface is present \n Error: {}".format(tc_name, result)
step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is joined and timer is running \n Error: {}".format(
)
step("r1: Verify PIM state")
- result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: PIM state is up\n Error: {}".format(
)
step("r1: Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: mroutes are still present\n Error: {}".format(
step("r1 : Verify upstream IIF interface")
iif = "r1-r2-eth1"
- result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_upstream_iif(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream IIF interface is present\n Error: {}".format(tc_name, result)
)
step("r1 : Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is joined and timer is running\n Error: {}".format(
)
step("r1 : Verify PIM state")
- result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS)
+ result = verify_pim_state(tgen, dut, iif, oif, GROUP_ADDRESS, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: PIM state is up\n Error: {}".format(
)
step("r1 : Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: mroutes are still present\n Error: {}".format(
step("r1 : Verify rp-info for group 225.1.1.1")
iif = "r1-r4-eth3"
- result = verify_pim_rp_info(tgen, TOPO, dut, GROUP_RANGE, oif, rp_address_2, SOURCE)
+ result = verify_pim_rp_info(
+ tgen, TOPO, dut, GROUP_RANGE, oif, rp_address_2, SOURCE, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: rp-info is present for group 225.1.1.1 \n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S, G) upstream join state is joined and join"
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
oil = "r1-r0-eth0"
logger.info("waiting for 10 sec to make sure old mroute time is higher")
sleep(10)
+ # Why do we then wait 60 seconds below before checking the routes?
uptime_before = verify_ip_mroutes(
tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=60
)
logger.info("Waiting for 5sec to get PIMd restarted and mroute" " re-learned..")
sleep(5)
+ # Why do we then wait 10 seconds below before checking the routes?
uptime_after = verify_ip_mroutes(
tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=10
)
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, group_address_list
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r2: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, group_address_list
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r2: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, group_address_list
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, group_address_list
+ tgen, dut, iif, SOURCE_ADDRESS, group_address_list, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r2: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r4: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
)
assert result is not True, "Testcase {} :Failed \n Error: {}".format(
tc_name, result
step("r2: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r4: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
step(
"Verify after shut of R1 to R3 link , verify (*,G) entries got"
- "cleared from all the node R1, R2, R3"
+ " cleared from all the node R1, R2, R3"
)
step("r1: Verify (*, G) ip mroutes")
dut = "r1"
iif = "r1-r3-eth2"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
dut = "r2"
iif = "lo"
oif = "r2-r3-eth1"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
dut = "r3"
iif = "r3-r2-eth1"
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
dut = "r1"
iif = "r1-r2-eth1"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
dut = "r1"
iif = "r1-r2-eth1"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto"
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.0",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3-link0": {
+ "ipv4": "auto",
+ "description": "DummyIntftoR3"
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.1",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto"
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r3": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.2",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r1": {},
+ "r3": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ },
+ "r0": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 4,
+ "network": "point-to-point"
+ }
+ },
+ "r1": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r2": {
+ "ipv4": "auto",
+ "ospf": {
+ "area": "0.0.0.2",
+ "hello_interval": 1,
+ "dead_interval": 4
+ }
+ },
+ "r1-link0": {
+ "ipv4": "auto",
+ "description": "DummyIntftoR1",
+ "ospf": {
+ "area": "0.0.0.3"
+ }
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.3",
+ "area": [
+ {
+ "id": "0.0.0.2",
+ "type": "nssa"
+ }
+ ],
+ "neighbors": {
+ "r0": {},
+ "r1": {},
+ "r2": {}
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
write_test_footer(tc_name)
+def test_ospf_type5_summary_tc42_p0(request):
+ """OSPF summarisation functionality."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = "ospf"
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5"
+ " routes to one route. with aggregate timer as 6 sec"
+ )
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ],
+ "aggr_timer": 6,
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Delete the configured summary")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "del_aggr_timer": True,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
+
+ step("show ip ospf summary should not have any summary address.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+
+ dut = "r1"
+ step("All 5 routes are advertised after deletion of configured summary.")
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("configure the summary again and delete static routes .")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole", "delete": True}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ step("Verify that summary route is withdrawn from R1.")
+
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Add back static routes.")
+ input_dict_static_rtes = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and only one route is sent to R1."
+ )
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show configure summaries.")
+
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Configure new static route which is matching configured summary.")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [{"network": NETWORK_11["ipv4"], "next_hop": "blackhole"}]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Delete one of the static route.")
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK_11["ipv4"], "next_hop": "blackhole", "delete": True}
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ # step("Verify that deleted static route is removed from ospf LSDB.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step(
+ "Configure redistribute connected and configure ospf external"
+ " summary address to summarise the connected routes."
+ )
+
+ dut = "r0"
+ red_connected(dut)
+ clear_ospf(tgen, dut)
+
+ ip = topo["routers"]["r0"]["links"]["r3"]["ipv4"]
+
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {"summary-address": [{"prefix": ip_net.split("/")[0], "mask": "8"}]}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured "
+ "summary address on R0 and only one route is sent to R1."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": "10.0.0.0/8"}]}}
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Shut one of the interface")
+ intf = topo["routers"]["r0"]["links"]["r3-link0"]["interface"]
+ shutdown_bringup_interface(tgen, dut, intf, False)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ # step("Verify that deleted connected route is removed from ospf LSDB.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Un do shut the interface")
+ shutdown_bringup_interface(tgen, dut, intf, True)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ # step("Verify that deleted connected route is removed from ospf LSDB.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Delete OSPF process.")
+ ospf_del = {"r0": {"ospf": {"delete": True}}}
+ result = create_router_ospf(tgen, topo, ospf_del)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+ step("Reconfigure ospf process with summary")
+ reset_config_on_routers(tgen)
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+ red_connected(dut)
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 and only one route is sent to R1."
+ )
+
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # step("verify that summary lsa is not refreshed.")
+ # show ip ospf database command is not working, waiting for DEV fix.
+
+ step("Delete the redistribute command in ospf.")
+ dut = "r0"
+ red_connected(dut, config=False)
+ red_static(dut, config=False)
+
+ step("Verify that summary route is withdrawn from the peer.")
+
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "metric": "1234",
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_type5_summary_tc45_p0(request):
+    """OSPF summarisation with Tag option"""
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    global topo
+    step("Bring up the base config as per the topology")
+    step("Configure OSPF on all the routers of the topology.")
+    reset_config_on_routers(tgen)
+
+    protocol = "ospf"
+
+    step(
+        "Configure 5 static routes from the same network on R0"
+        "5 static routes from different networks and redistribute in R0"
+    )
+    input_dict_static_rtes = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+                {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    dut = "r0"
+    red_static(dut)
+
+    step("Verify that routes are learnt on R1.")
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step(
+        "Configure External Route summary in R0 to summarise 5" " routes to one route."
+    )
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": "1234",
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that external routes are summarised to configured summary"
+        " address on R0 and only one route is sent to R1 with configured tag."
+    )
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "1234"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Verify that show ip ospf summary should show the summaries with tag.")
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 1234,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+    step("Delete the configured summary")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": "1234",
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+    dut = "r1"
+    result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib(
+        tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+        tc_name
+    )
+
+    step("show ip ospf summary should not have any summary address.")
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 1234,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+
+    step("Configure Min tag value")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8", "tag": 1}
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "1"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Verify that show ip ospf summary should show the summaries with tag.")
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 1,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+    step("Configure Max Tag Value")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": 4294967295,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "4294967295"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step(
+        "Verify that boundary values tags are used for summary route"
+        " using show ip ospf route command."
+    )
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 4294967295,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+    step("configure new static route with different tag.")
+    input_dict_static_rtes_11 = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK_11["ipv4"], "next_hop": "blackhole", "tag": "88888"}
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes_11)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("New tag has not been used by summary address.")
+
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "88888"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary, tag="88888", expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib(
+        tgen,
+        "ipv4",
+        dut,
+        input_dict_summary,
+        protocol=protocol,
+        tag="88888",
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+    step(
+        "Verify that boundary values tags are used for summary route"
+        " using show ip ospf route command."
+    )
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 88888,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Summary still present in OSPF DB".format(tc_name)
+
+    step("Delete the configured summary address")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": 4294967295,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that 6 routes are advertised to neighbour with 5 routes"
+        " without any tag, 1 route with tag."
+    )
+
+    dut = "r1"
+    result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Verify that summary address is flushed from neighbor.")
+
+    dut = "r1"
+    result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib(
+        tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+    step("Configure summary first & then configure matching static route.")
+
+    input_dict_static_rtes = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK["ipv4"], "next_hop": "blackhole", "delete": True},
+                {"network": NETWORK2["ipv4"], "next_hop": "blackhole", "delete": True},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_static_rtes = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+                {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Repeat steps 1 to 10 of summarisation in non Back bone area.")
+    reset_config_on_routers(tgen)
+
+    step("Change the area id on the interface on R0")
+    input_dict = {
+        "r0": {
+            "links": {
+                "r1": {
+                    "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+                    "ospf": {"area": "0.0.0.0"},
+                    "delete": True,
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r0": {
+            "links": {
+                "r1": {
+                    "interface": topo["routers"]["r0"]["links"]["r1"]["interface"],
+                    "ospf": {"area": "0.0.0.1"},
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    step("Change the area id on the interface ")
+    input_dict = {
+        "r1": {
+            "links": {
+                "r0": {
+                    "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+                    "ospf": {"area": "0.0.0.0"},
+                    "delete": True,
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    input_dict = {
+        "r1": {
+            "links": {
+                "r0": {
+                    "interface": topo["routers"]["r1"]["links"]["r0"]["interface"],
+                    "ospf": {"area": "0.0.0.1"},
+                }
+            }
+        }
+    }
+
+    result = create_interfaces_cfg(tgen, input_dict)
+    assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+    ospf_covergence = verify_ospf_neighbor(tgen, topo)
+    assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+        ospf_covergence
+    )
+
+    step(
+        "Configure 5 static routes from the same network on R0"
+        "5 static routes from different networks and redistribute in R0"
+    )
+    input_dict_static_rtes = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+                {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    dut = "r0"
+    red_static(dut)
+
+    step("Verify that routes are learnt on R1.")
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step(
+        "Configure External Route summary in R0 to summarise 5" " routes to one route."
+    )
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": "1234",
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that external routes are summarised to configured summary"
+        " address on R0 and only one route is sent to R1 with configured tag."
+    )
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "1234"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Verify that show ip ospf summary should show the summaries with tag.")
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 1234,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Delete the configured summary")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+    dut = "r1"
+    result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib(
+        tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+        tc_name
+    )
+
+    step("show ip ospf summary should not have any summary address.")
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 1234,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+
+    step("Configure Min tag value")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8", "tag": 1}
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "1"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Verify that show ip ospf summary should show the summaries with tag.")
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 1,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+    step("Configure Max Tag Value")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": 4294967295,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "4294967295"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step(
+        "Verify that boundary values tags are used for summary route"
+        " using show ip ospf route command."
+    )
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 4294967295,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+    step("configure new static route with different tag.")
+    input_dict_static_rtes_11 = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK_11["ipv4"], "next_hop": "blackhole", "tag": "88888"}
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes_11)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step("New tag has not been used by summary address.")
+
+    input_dict_summary = {
+        "r0": {"static_routes": [{"network": SUMMARY["ipv4"][0], "tag": "88888"}]}
+    }
+    dut = "r1"
+
+    result = verify_ospf_rib(tgen, dut, input_dict_summary, tag="88888", expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib(
+        tgen,
+        "ipv4",
+        dut,
+        input_dict_summary,
+        protocol=protocol,
+        tag="88888",
+        expected=False,
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+    step(
+        "Verify that boundary values tags are used for summary route"
+        " using show ip ospf route command."
+    )
+    input_dict = {
+        SUMMARY["ipv4"][0]: {
+            "Summary address": SUMMARY["ipv4"][0],
+            "Metric-type": "E2",
+            "Metric": 20,
+            "Tag": 88888,
+            "External route count": 5,
+        }
+    }
+    dut = "r0"
+    result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Summary still present in OSPF DB".format(tc_name)
+
+    step("Delete the configured summary address")
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {
+                        "prefix": SUMMARY["ipv4"][0].split("/")[0],
+                        "mask": "8",
+                        "tag": 4294967295,
+                        "delete": True,
+                    }
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    step(
+        "Verify that 6 routes are advertised to neighbour with 5 routes"
+        " without any tag, 1 route with tag."
+    )
+
+    dut = "r1"
+    result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+    result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Verify that summary address is flushed from neighbor.")
+
+    dut = "r1"
+    result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+        tc_name, result
+    )
+
+    result = verify_rib(
+        tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+    )
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+    step("Configure summary first & then configure matching static route.")
+
+    input_dict_static_rtes = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK["ipv4"], "next_hop": "blackhole", "delete": True},
+                {"network": NETWORK2["ipv4"], "next_hop": "blackhole", "delete": True},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    ospf_summ_r1 = {
+        "r0": {
+            "ospf": {
+                "summary-address": [
+                    {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+                ]
+            }
+        }
+    }
+    result = create_router_ospf(tgen, topo, ospf_summ_r1)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    input_dict_static_rtes = {
+        "r0": {
+            "static_routes": [
+                {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+                {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+            ]
+        }
+    }
+    result = create_static_routes(tgen, input_dict_static_rtes)
+    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+    write_test_footer(tc_name)
+
+
+def test_ospf_type5_summary_tc46_p0(request):
+ """OSPF summarisation with advertise and no advertise option"""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ step("Configure OSPF on all the routers of the topology.")
+ reset_config_on_routers(tgen)
+
+ protocol = "ospf"
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ "5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5"
+ " routes to one route with no advertise option."
+ )
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "advertise": False,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and summary route is not advertised to neighbor as"
+ " no advertise is configured.."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the " "configured summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Delete the configured summary")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Summary has 5 sec delay timer, sleep 5 secs...")
+ sleep(5)
+
+ step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
+
+ step("show ip ospf summary should not have any summary address.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 1234,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
+
+ step("Reconfigure summary with no advertise.")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "advertise": False,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary"
+ " address on R0 and summary route is not advertised to neighbor as"
+ " no advertise is configured."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the " "configured summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Change summary address from no advertise to advertise "
+ "(summary-address 10.0.0.0 255.255.0.0)"
+ )
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "advertise": False,
+ }
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdrawn from their" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes is present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_type5_summary_tc47_p0(request):
+ """OSPF summarisation with route map filtering."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = "ospf"
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ " 5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdrawn from their" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step(
+ "configure route map and add rule to permit configured static "
+ "routes, redistribute static & connected routes with the route map."
+ )
+
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "seq_id": 10,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r1 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured"
+ " summary address on R0 and only one route is sent to R1. Verify that "
+ "show ip ospf summary should show the configure summaries."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change the rule from permit to deny in configured route map.")
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "deny",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "seq_id": 10,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("summary route has 5 secs delay, sleep 5 secs")
+ sleep(5)
+ step("Verify that advertised summary route is flushed from neighbor.")
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Delete the configured route map.")
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r1 = {"r0": {"ospf": {"redistribute": [{"redist_type": "static"}]}}}
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured"
+ " summary address on R0 and only one route is sent to R1. Verify that "
+ "show ip ospf summary should show the configure summaries."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Reconfigure the route map with denying configure summary address.")
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "seq_id": 10,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": SUMMARY["ipv4"][0], "action": "deny"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that advertised summary route is not flushed from neighbor.")
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Redistribute static/connected routes without route map.")
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "seq_id": 10,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured"
+ " summary address on R0 and only one route is sent to R1. Verify that "
+ "show ip ospf summary should show the configure summaries."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step(
+ "Configure rule to deny all the routes in route map and configure"
+ " redistribute command in ospf using route map."
+ )
+
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "deny"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "seq_id": 10,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r1 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that no summary route is originated.")
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ "seq_id": 10,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Configure cli in this order - 2 static routes, a route map to "
+ "permit those routes, summary address in ospf to match the "
+ "configured static route network, redistribute the static "
+ "routes with route map"
+ )
+
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [{"network": NETWORK2["ipv4"], "next_hop": "blackhole"}]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ routemaps = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][1].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": "12.0.0.0/8"}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ "12.0.0.0/8": {
+ "Summary address": "12.0.0.0/8",
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Change route map rule for 1 of the routes to deny.")
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": NETWORK2["ipv4"][0], "action": "deny"},
+ {"seqid": 20, "network": "any", "action": "permit"},
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that originated type 5 summary lsa is not refreshed because"
+ " of the route map events."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": "12.0.0.0/8"}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("add rule in route map to deny configured summary address.")
+ # Create ip prefix list
+ pfx_list = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "12.0.0.0/8", "action": "deny"},
+ {"seqid": 20, "network": "any", "action": "permit"},
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, pfx_list)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that summary route is not denied, summary route should be"
+ " originated if matching prefixes are present."
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": "12.0.0.0/8"}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_type5_summary_tc51_p2(request):
+ """OSPF CLI Show.
+
+ verify ospf ASBR summary config and show commands behaviours.
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ step("Configure all the supported OSPF ASBR summary commands on DUT.")
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure and re configure all the commands 10 times in a loop.")
+
+ for itrate in range(0, 10):
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv4"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True,
+ },
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify the show commands")
+
+ input_dict = {
+ SUMMARY["ipv4"][2]: {
+ "Summary address": SUMMARY["ipv4"][2],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 0,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_type5_summary_tc49_p2(request):
+ """OSPF summarisation Chaos."""
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+
+ protocol = "ospf"
+
+ step(
+ "Configure 5 static routes from the same network on R0"
+ " 5 static routes from different networks and redistribute in R0"
+ )
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r0"
+ red_static(dut)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
+
+ ospf_summ_r1 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdrawn from their" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Reload the FRR router")
+ # stop/start -> restart FRR router and verify
+ stop_router(tgen, "r0")
+ start_router(tgen, "r0")
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdrawn from their" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("Kill OSPFd daemon on R0.")
+ kill_router_daemons(tgen, "r0", ["ospfd"])
+
+ step("Bring up OSPFd daemon on R0.")
+ start_router_daemons(tgen, "r0", ["ospfd"])
+
+ step("Verify OSPF neighbors are up after bringing back ospfd in R0")
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdrawn from their" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ step("restart zebrad")
+ kill_router_daemons(tgen, "r0", ["zebra"])
+
+ step("Bring up zebra daemon on R0.")
+ start_router_daemons(tgen, "r0", ["zebra"])
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][0]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][0]: {
+ "Summary address": SUMMARY["ipv4"][0],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"], "next_hop": "blackhole"}]}
+ }
+ dut = "r1"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+ write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright (c) 2020 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Summarisation Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+import json
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from mininet.topo import Topo
+from lib.topogen import Topogen, get_topogen
+import ipaddress
+from time import sleep
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ kill_router_daemons,
+ write_test_footer,
+ reset_config_on_routers,
+ stop_router,
+ start_router,
+ verify_rib,
+ create_static_routes,
+ step,
+ start_router_daemons,
+ create_route_maps,
+ shutdown_bringup_interface,
+ topo_daemons,
+ create_prefix_lists,
+ create_route_maps,
+ create_interfaces_cfg,
+)
+from lib.topolog import logger
+from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.ospf import (
+ verify_ospf_neighbor,
+ clear_ospf,
+ verify_ospf_rib,
+ create_router_ospf,
+ verify_ospf_summary,
+)
+
+# Global variables
+topo = None
+# Reading the data from JSON File for topology creation
+jsonFile = "{}/ospf_asbr_summary_type7_lsa.json".format(CWD)
+try:
+ with open(jsonFile, "r") as topoJson:
+ topo = json.load(topoJson)
+except IOError:
+ assert False, "Could not read file {}".format(jsonFile)
+
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ]
+}
+NETWORK2 = {
+ "ipv4": [
+ "12.0.20.1/32",
+ "12.0.20.2/32",
+ "12.0.20.3/32",
+ "12.0.20.4/32",
+ "12.0.20.5/32",
+ ]
+}
+NETWORK3 = {
+ "ipv4": [
+ "13.0.20.1/32",
+ "13.0.20.2/32",
+ "13.0.20.3/32",
+ "13.0.20.4/32",
+ "13.0.20.5/32",
+ ]
+}
+SUMMARY = {"ipv4": ["11.0.20.1/8", "12.0.0.0/8", "13.0.0.0/8", "11.0.0.0/8"]}
+"""
+TOPOLOGY =
+ Please view in a fixed-width font such as Courier.
+ +---+ A1 +---+
+ +R1 +------------+R2 |
+ +-+-+- +--++
+ | -- -- |
+ | -- A0 -- |
+ A0| ---- |
+ | ---- | A2
+ | -- -- |
+ | -- -- |
+ +-+-+- +-+-+
+ +R0 +-------------+R3 |
+ +---+ A3 +---+
+
+
+
+TESTCASES =
+1. OSPF summarisation with type7 LSAs.
+
+"""
+
+
+class CreateTopo(Topo):
+ """
+ Test topology builder.
+
+ * `Topo`: Topology object
+ """
+
+ def build(self, *_args, **_opts):
+ """Build function."""
+ tgen = get_topogen(self)
+
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ tgen = Topogen(CreateTopo, mod.__name__)
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+    # to start daemons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+    # API call to verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+ tgen.stop_topology()
+
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
+
+def red_static(dut, config=True):
+    """Local def for Redistribute static routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "static"}]}}}
+ else:
+ ospf_red = {
+ dut: {"ospf": {"redistribute": [{"redist_type": "static", "delete": True}]}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase : Failed \n Error: {}".format(result)
+
+
+def red_connected(dut, config=True):
+    """Local def for Redistribute connected routes inside ospf."""
+ global topo
+ tgen = get_topogen()
+ if config:
+ ospf_red = {dut: {"ospf": {"redistribute": [{"redist_type": "connected"}]}}}
+ else:
+ ospf_red = {
+ dut: {
+ "ospf": {
+ "redistribute": [{"redist_type": "connected", "del_action": True}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospf_type5_summary_tc44_p0(request):
+ """OSPF summarisation with type7 LSAs"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ step("Bring up the base config as per the topology")
+ step("Configure area 1 as NSSA Area")
+
+ reset_config_on_routers(tgen)
+
+ dut = "r0"
+ protocol = "ospf"
+
+ red_static(dut)
+ input_dict_static_rtes = {
+ "r0": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv4"], "next_hop": "blackhole"},
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that routes are learnt on R1.")
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step(
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
+
+ ospf_summ_r0 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][0].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify that external routes are summarised to configured summary "
+ "address on R0 after 5 secs of delay timer expiry and only one "
+ "route is sent to R1."
+ )
+
+ step(
+ "Configure summary & redistribute static/connected route with " "metric type 2"
+ )
+
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv4"][3]}]}}
+ dut = "r1"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_summary)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ step("Verify that show ip ospf summary should show the summaries.")
+ input_dict = {
+ SUMMARY["ipv4"][3]: {
+ "Summary address": SUMMARY["ipv4"][3],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Learn type 7 lsa from neighbours")
+
+ dut = "r1"
+ protocol = "ospf"
+
+ red_static(dut)
+ input_dict_static_rtes = {
+ "r1": {
+ "static_routes": [{"network": NETWORK3["ipv4"], "next_hop": "blackhole"}]
+ }
+ }
+ result = create_static_routes(tgen, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that routes are learnt on R0.")
+ dut = "r0"
+
+ result = verify_ospf_rib(tgen, dut, input_dict_static_rtes)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+ ospf_summ_r0 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][2].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that type7 LSAs received from neighbor are not summarised.")
+ input_dict = {
+ "13.0.0.0/8": {
+ "Summary address": "13.0.0.0/8",
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 0,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ step("Verify that already originated summary is intact.")
+ input_dict = {
+ SUMMARY["ipv4"][3]: {
+ "Summary address": SUMMARY["ipv4"][3],
+ "Metric-type": "E2",
+ "Metric": 20,
+ "Tag": 0,
+ "External route count": 5,
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+
+ dut = "r1"
+ aggr_timer = {"r1": {"ospf": {"aggr_timer": 6}}}
+ result = create_router_ospf(tgen, topo, aggr_timer)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ ospf_summ_r0 = {
+ "r0": {
+ "ospf": {
+ "summary-address": [
+ {"prefix": SUMMARY["ipv4"][2].split("/")[0], "mask": "8"}
+ ]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_summ_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "wait for 6+1 seconds as ospf aggregation start after 6 secs as "
+ "per the above aggr_timer command"
+ )
+ sleep(7)
+ dut = "r1"
+ aggr_timer = {"r1": {"ospf": {"del_aggr_timer": 6}}}
+ result = create_router_ospf(tgen, topo, aggr_timer)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
sleep(6)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(
- tgen, topo, dut=dut, expected=False, attempts=5
+ tgen, topo, dut=dut, expected=False, retry_timeout=10
)
assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
ospf_covergence
sleep(6)
dut = "r1"
ospf_covergence = verify_ospf_neighbor(
- tgen, topo, dut=dut, expected=False, attempts=3
+ tgen, topo, dut=dut, expected=False, retry_timeout=6
)
assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
ospf_covergence
sleep(6)
dut = "r2"
ospf_covergence = verify_ospf_neighbor(
- tgen, topo, dut=dut, expected=False, attempts=5
+ tgen, topo, dut=dut, expected=False, retry_timeout=10
)
assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
ospf_covergence
step("Verify that the neighbour is not FULL between R1 and R2.")
dut = "r1"
ospf_covergence = verify_ospf_neighbor(
- tgen, topo, dut=dut, expected=False, attempts=5
+ tgen, topo, dut=dut, expected=False, retry_timeout=10
)
assert ospf_covergence is not True, "setup_module :Failed \n Error:" " {}".format(
ospf_covergence
step("Verify that route is withdrawn from R2.")
dut = "r1"
result = verify_ospf_rib(
- tgen, dut, input_dict, next_hop=nh, attempts=5, expected=False
+ tgen, dut, input_dict, next_hop=nh, retry_timeout=10, expected=False
)
assert (
result is not True
input_dict,
protocol=protocol,
next_hop=nh,
- attempts=5,
+ retry_timeout=10,
expected=False,
)
assert (
step("Verify that route is withdrawn from R2.")
dut = "r1"
result = verify_ospf_rib(
- tgen, dut, input_dict, next_hop=nh, attempts=5, expected=False
+ tgen, dut, input_dict, next_hop=nh, retry_timeout=10, expected=False
)
assert (
result is not True
input_dict,
protocol=protocol,
next_hop=nh,
- attempts=5,
+ retry_timeout=10,
expected=False,
)
assert (
step("Verify that all the routes are withdrawn from R0")
dut = "r1"
result = verify_ospf_rib(
- tgen, dut, input_dict, next_hop=nh, attempts=5, expected=False
+ tgen, dut, input_dict, next_hop=nh, retry_timeout=10, expected=False
)
assert (
result is not True
input_dict,
protocol=protocol,
next_hop=nh,
- attempts=5,
+ retry_timeout=10,
expected=False,
)
assert (
dut = "r1"
protocol = "ospf"
- result = verify_ospf_rib(tgen, dut, input_dict, attempts=2, expected=False)
+ result = verify_ospf_rib(tgen, dut, input_dict, retry_timeout=4, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: OSPF routes are present \n Error: {}".format(
)
result = verify_rib(
- tgen, "ipv4", dut, input_dict, protocol=protocol, attempts=2, expected=False
+ tgen, "ipv4", dut, input_dict, protocol=protocol, retry_timeout=4, expected=False
)
assert (
result is not True
input_dict,
protocol=protocol,
next_hop=nh,
- attempts=5,
+ retry_timeout=10,
expected=False,
)
assert result is not True, (
next_hop=nh,
protocol=protocol,
fib=True,
+ retry_timeout=6,
expected=False,
- wait=2,
- attempts=3,
)
assert (
result is not True
protocol=protocol,
fib=True,
expected=False,
- wait=2,
- attempts=3,
+ retry_timeout=6,
)
assert (
result is not True
protocol=protocol,
fib=True,
expected=False,
- wait=2,
- attempts=3,
+ retry_timeout=6,
)
assert (
result is not True
protocol=protocol,
fib=True,
expected=False,
- wait=2,
- attempts=3,
+ retry_timeout=6,
)
assert (
result is not True
- ), "Testcase {} : Failed \nError: Routes " " are missing in RIB".format(tc_name)
+ ), "Testcase {} : Failed \nError: Routes " " are present in RIB".format(tc_name)
step(
"Remove the static route configured with nexthop N1 to N8, one"
protocol=protocol,
fib=True,
expected=False,
- wait=2,
- attempts=3,
+ retry_timeout=6,
)
assert (
result is not True
protocol=protocol,
fib=True,
expected=False,
- wait=2,
- attempts=3,
+ retry_timeout=6,
)
assert (
result is not True
#!/bin/sh
#
# Written by Daniil Baturin, 2018
+# Rewritten by Ondřej Surý, 2020
# This file is public domain
set -e
-cd "`dirname $0`"
-cd ..
+cd "$(dirname "$0")/.."
-if [ "`id -u`" = 0 ]; then
+#
+# Checking requirements
+#
+
+if [ "$(id -u)" = 0 ]; then
echo "Running as root - installing dependencies"
- apt-get install fakeroot debhelper devscripts
+ apt-get install fakeroot debhelper devscripts git-buildpackage lsb-release
mk-build-deps --install debian/control
exit 0
fi
git diff-index --quiet HEAD || echo "Warning: git working directory is not clean!"
-echo "Preparing the build"
-tools/tarsource.sh -V
+############################
+# Build the Debian package #
+############################
-echo "Building the Debian package"
-if test $# -eq 0; then
- dpkg-buildpackage -b -uc -us
-else
- dpkg-buildpackage "$@"
-fi
+#
+# Now we will construct an "upstream" version out of:
+# 1. version in AC_INIT
+# 2. the unix time from the last commit (HEAD)
+# (alternatively %Y%m%d%H%M%S could be used here)
+# 3. Debian version (always -1)
+#
+
+UPSTREAM_VERSION=$(sed -ne 's/AC_INIT(\[frr\],\s\[\([^]]*\)\],.*/\1/p' configure.ac | sed -e 's/-\(\(dev\|alpha\|beta\)\d*\)/~\1/')
+LAST_TIMESTAMP=$(git log --format=format:%ad --date=format:%s -1 "HEAD")
+DEBIAN_VERSION="$UPSTREAM_VERSION-$LAST_TIMESTAMP-1"
+DEBIAN_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+
+#
+# We add a Debian changelog entry, and use artificial "since commit"
+# so there's not a whole git history in the debian/changelog.
+#
+# The --snapshot option appends ~1.<shorthash> to the debian version, so for the
+# release build, this needs to be replaced with --release
+#
+
+echo "Adding new snapshot debian/changelog entry for $DEBIAN_VERSION..."
+
+gbp dch \
+ --debian-branch="$DEBIAN_BRANCH" \
+ --new-version="$DEBIAN_VERSION" \
+ --dch-opt="--force-bad-version" \
+ --since="HEAD~" \
+ --snapshot \
+ --commit \
+ --git-author
+
+echo "Building package..."
+
+#
+# git-buildpackage will use $BUILDER command to just build new binary package
+#
+
+BUILDER="dpkg-buildpackage -uc -us --build=binary --no-check-builddeps --no-pre-clean -sa"
+UPSTREAM_COMPRESSION=xz
+
+gbp buildpackage \
+ --git-export-dir="$WORKDIR" \
+ --git-builder="$BUILDER" \
+ --git-debian-branch="$DEBIAN_BRANCH" \
+ --git-force-create \
+ --git-compression=$UPSTREAM_COMPRESSION \
+ --git-no-pristine-tar \
+ --git-ignore-new
--- /dev/null
+// No need checking against NULL for XMALLOC/XCALLOC.
+// If that happens, we have more problems with memory.
+
+@@
+type T;
+T *ptr;
+@@
+
+ptr =
+(
+XCALLOC(...)
+|
+XMALLOC(...)
+)
+...
+- if (ptr == NULL)
+- return ...;
if found_add_bfd_nbr:
lines_to_del_to_del.append((ctx_keys, line))
+ """
+ Neighbor changes of route-maps need to be accounted for in that we
+ do not want to do a `no route-map...` `route-map ....` when changing
+    a route-map. This is bad mojo in that we will send/receive
+ data we don't want.
+ Additionally we need to ensure that if we have different afi/safi
+ variants that they actually match and if we are going from a very
+ old style command such that the neighbor command is under the
+ `router bgp ..` node that we need to handle that appropriately
+ """
+ re_nbr_rm = re.search("neighbor(.*)route-map(.*)(in|out)$", line)
+ if re_nbr_rm:
+ adjust_for_bgp_node = 0
+ neighbor_name = re_nbr_rm.group(1)
+ rm_name_del = re_nbr_rm.group(2)
+ dir = re_nbr_rm.group(3)
+ search = "neighbor%sroute-map(.*)%s" % (neighbor_name, dir)
+ save_line = "EMPTY"
+ for (ctx_keys_al, add_line) in lines_to_add:
+ if ctx_keys_al[0].startswith("router bgp"):
+ if add_line:
+ rm_match = re.search(search, add_line)
+ if rm_match:
+ rm_name_add = rm_match.group(1)
+ if rm_name_add == rm_name_del:
+ continue
+ if len(ctx_keys_al) == 1:
+ save_line = line
+ adjust_for_bgp_node = 1
+ else:
+ if (
+ len(ctx_keys) > 1
+ and len(ctx_keys_al) > 1
+ and ctx_keys[1] == ctx_keys_al[1]
+ ):
+ lines_to_del_to_del.append((ctx_keys_al, line))
+
+ if adjust_for_bgp_node == 1:
+ for (ctx_keys_dl, dl_line) in lines_to_del:
+ if (
+ ctx_keys_dl[0].startswith("router bgp")
+ and len(ctx_keys_dl) > 1
+ and ctx_keys_dl[1] == "address-family ipv4 unicast"
+ ):
+ if save_line == dl_line:
+ lines_to_del_to_del.append((ctx_keys_dl, save_line))
+
"""
We changed how we display the neighbor interface command. Older
versions of frr would display the following:
#!/usr/bin/env python3
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
########################################################
### Python Script to generate the FRR support bundle ###
########################################################
+import argparse
+import logging
import os
import subprocess
-import datetime
-
-ETC_DIR = "/etc/frr/"
-LOG_DIR = "/var/log/frr/"
-SUCCESS = 1
-FAIL = 0
-
-inputFile = ETC_DIR + "support_bundle_commands.conf"
-
-# Create the output file name
-def createOutputFile(procName):
- fileName = procName + "_support_bundle.log"
- oldFile = LOG_DIR + fileName
- cpFileCmd = "cp " + oldFile + " " + oldFile + ".prev"
- rmFileCmd = "rm -rf " + oldFile
- print("Making backup of " + oldFile)
- os.system(cpFileCmd)
- print("Removing " + oldFile)
- os.system(rmFileCmd)
- return fileName
-
-
-# Open the output file for this process
-def openOutputFile(fileName):
- crt_file_cmd = LOG_DIR + fileName
- print(crt_file_cmd)
- try:
- outputFile = open(crt_file_cmd, "w")
- return outputFile
- except IOError:
- return ()
-
-
-# Close the output file for this process
-def closeOutputFile(f):
- try:
- f.close()
- return SUCCESS
- except IOError:
- return FAIL
-
-
-# Execute the command over vtysh and store in the
-# output file
-def executeCommand(cmd, outputFile):
- cmd_exec_str = 'vtysh -c "' + cmd + '" '
+import tempfile
+
+def open_with_backup(path):
+ if os.path.exists(path):
+ print("Making backup of " + path)
+ subprocess.check_call("mv {0} {0}.prev".format(path))
+ return open(path, "w")
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-c", "--config", default="/etc/frr/support_bundle_commands.conf", help="input config")
+ parser.add_argument("-l", "--log-dir", default="/var/log/frr", help="directory for logfiles")
+ args = parser.parse_args()
+
+ collecting = False # file format has sentinels (seem superfluous)
+ proc_cmds = {}
+ proc = None
+ temp = None
+
+ # Collect all the commands for each daemon
try:
- cmd_output = subprocess.check_output(cmd_exec_str, shell=True)
- try:
- dateTime = datetime.datetime.now()
- outputFile.write(">>[" + str(dateTime) + "]" + cmd + "\n")
- outputFile.write(str(cmd_output))
- outputFile.write(
- "########################################################\n"
- )
- outputFile.write("\n")
- except Exception as e:
- print("Writing to output file Failed: ", e)
- except subprocess.CalledProcessError as e:
- dateTime = datetime.datetime.now()
- outputFile.write(">>[" + str(dateTime) + "]" + cmd + "\n")
- outputFile.write(e.output)
- outputFile.write("########################################################\n")
- outputFile.write("\n")
- print("Error:" + e.output)
-
-
-# Process the support bundle configuration file
-# and call appropriate functions
-def processConfFile():
-
- lines = list()
- outputFile = None
-
- try:
- with open(inputFile, "r") as supportBundleConfFile:
- for l in supportBundleConfFile:
- lines.append(l.rstrip())
- except IOError:
- print("conf file {} not present".format(inputFile))
- return
-
- for line in lines:
- if len(line) == 0 or line[0] == "#":
- continue
-
- cmd_line = line.split(":")
- if cmd_line[0] == "PROC_NAME":
- outputFileName = createOutputFile(cmd_line[1])
- if outputFileName:
- print(outputFileName, "created for", cmd_line[1])
- elif cmd_line[0] == "CMD_LIST_START":
- outputFile = openOutputFile(outputFileName)
- if outputFile:
- print(outputFileName, "opened")
- else:
- print(outputFileName, "open failed")
- return FAIL
- elif cmd_line[0] == "CMD_LIST_END":
- if closeOutputFile(outputFile):
- print(outputFileName, "closed")
+ for line in open(args.config):
+ line = line.rstrip()
+ if len(line) == 0 or line[0] == "#":
+ continue
+
+ cmd_line = line.split(":")
+ if cmd_line[0] == "PROC_NAME":
+ proc = cmd_line[1]
+ temp = tempfile.NamedTemporaryFile("w+")
+ collecting = False
+ elif cmd_line[0] == "CMD_LIST_START":
+ collecting = True
+ elif cmd_line[0] == "CMD_LIST_END":
+ collecting = False
+ temp.flush()
+ proc_cmds[proc] = open(temp.name)
+ temp.close()
+ elif collecting:
+ temp.write(line + "\n")
else:
- print(outputFileName, "close failed")
- else:
- print("Execute:", cmd_line[0])
- executeCommand(cmd_line[0], outputFile)
-
+ print("Ignoring unexpected input " + line.rstrip())
+ except IOError as error:
+ logging.fatal("Cannot read config file: %s: %s", args.config, str(error))
+ return
-# Main Function
-processConfFile()
+ # Spawn a vtysh to fetch each set of commands
+ procs = []
+ for proc in proc_cmds:
+ ofn = os.path.join(args.log_dir, proc + "_support_bundle.log")
+ p = subprocess.Popen(
+ ["/usr/bin/env", "vtysh", "-t"],
+ stdin=proc_cmds[proc],
+ stdout=open_with_backup(ofn),
+ stderr=subprocess.STDOUT,
+ )
+ procs.append(p)
+
+ for p in procs:
+ p.wait()
+
+if __name__ == "__main__":
+ main()
+++ /dev/null
-#!/bin/bash
-# 2018 by David Lamparter, placed in the Public Domain
-
-help() {
- cat <<EOF
-FRR tarball/dsc helper, intended to run from a git checkout
-
-Usage:
- ./tarsource.sh [-dDn] [-i GITPATH] [-o OUTDIR] [-S KEYID]
- [-C COMMIT] [-e EXTRAVERSION] [-z gz|xz]
-
-options:
- -i GITPATH path to git working tree or bare repository.
- - default: parent directory containing this script
- -o OUTDIR path to place the generated output files in.
- - default: current directory
- -C COMMIT build tarball for specified git commit
- - default: current HEAD
- -e EXTRAVERSION override automatic package extraversion
- - default "-YYYYMMDD-NN-gGGGGGGGGGGGG", but the script
- autodetects if a release tag is checked out
- -z gz|xz compression format to use
- - default: xz
- -S KEYID sign the output with gpg key
- -d use dirty git tree with local changes
- -D generate Debian .dsc and .debian.tar.xz too
- (note: output files are moved to parent directory)
- -l remove Debian auto-build changelog entry
- (always done for releases)
- -V write version information to config.version and exit
- -n allow executing from non-git source (NOT RECOMMENDED)
- -h show this help text
-
-Note(1) that this script tries very hard to generate a deterministic,
-reproducible tarball by eliminating timestamps and similar things. However,
-since the tarball includes autoconf/automake files, the versions of these
-tools need to be _exactly_ identical to get the same tarball.
-
-Note(2) the debian ".orig" tarball is always identical to the "plain" tarball
-generated without the -D option.
-
-Note(3) if you want the tool to identify github PRs, you need to edit your
-.git/config to fetch PRs from github like this:
-
- [remote "origin"]
- url = git@github.com:frrouting/frr.git
- fetch = +refs/heads/*:refs/remotes/origin/*
-ADD: fetch = +refs/pull/*/head:refs/remotes/origin/pull/*
-EOF
-}
-
-set -e
-
-options=`getopt -o 'hi:o:C:S:e:z:DdnlV' -l help -- "$@"`
-debian=false
-dirty=false
-nongit=false
-zip=xz
-adjchangelog=false
-writeversion=false
-extraset=false
-set - $options
-while test $# -gt 0; do
- arg="$1"; shift; optarg=$1
- case "$arg" in
- -h|--help) help; exit 0;;
- -d) dirty=true;;
- -D) debian=true;;
- -n) nongit=true;;
- -i) eval src=$optarg; shift;;
- -C) eval commit=$optarg; shift;;
- -o) eval outdir=$optarg; shift;;
- -e) eval extraver=$optarg; extraset=true; shift;;
- -z) eval zip=$optarg; shift;;
- -S) eval keyid=$optarg; shift;;
- -l) adjchangelog=true;;
- -V) writeversion=true;;
- --) break;;
- *) echo something went wrong with getopt >&2
- exit 1
- ;;
- esac
-done
-
-cwd="`pwd`"
-outdir="${outdir:-$cwd}"
-
-if test -e "$outdir" -a \! -d "$outdir"; then
- echo "output $outdir must be a directory" >&2
- exit 1
-elif test \! -d "$outdir"; then
- mkdir -p "$outdir"
-fi
-
-cd "$outdir"
-outdir="`pwd`"
-cd "$cwd"
-cd "`dirname $0`/.."
-selfdir="`pwd`"
-src="${src:-$selfdir}"
-
-if $writeversion; then
- if $nongit; then
- echo "The -V option cannot be used without a git tree" >&2
- exit 1
- fi
- dirty=true
-fi
-
-case "$zip" in
-gz) ziptarget=dist-gzip; ziptool="gzip -n -9"; unzip="gzip -k -c";;
-xz) ziptarget=dist-xz; ziptool="xz -z -e"; unzip="xz -d -k -c";;
-*) echo "unknown compression format $zip" >&2
- exit 1
-esac
-
-# always overwrite file ownership in tars
-taropt="--owner=root --group=root"
-
-onexit() {
- rv="$?"
- set +e
- test -n "$tmpdir" -a -d "$tmpdir" && rm -rf "$tmpdir"
-
- if test "$rv" -ne 0; then
- echo -e "\n\033[31;1mfailed\n" >&2
- if test "$dirty" = true; then
- echo please try running the script without the -d option.>&2
- fi
- fi
- exit $rv
-}
-trap onexit EXIT
-tmpdir="`mktemp -d -t frrtar.XXXXXX`"
-
-if test -e "$src/.git"; then
- commit="`git -C \"$src\" rev-parse \"${commit:-HEAD}\"`"
-
- if $dirty; then
- cd "$src"
- echo -e "\033[31;1mgit: using dirty worktree in $src\033[m" >&2
- else
- echo -e "\033[33;1mgit: preparing a clean clone of $src\033[m"
- branch="${tmpdir##*/}"
- cd "$tmpdir"
-
- git -C "$src" branch "$branch" "$commit"
- git clone --single-branch -s -b "$branch" "$src" source
- git -C "$src" branch -D "$branch"
- cd source
- fi
-
- # if we're creating a tarball from git, force the timestamps inside
- # the tar to match the commit date - this makes the tarball itself
- # reproducible
- gitts="`TZ=UTC git show -s --format=%cd --date=local $commit`"
- gitts="`TZ=UTC date -d "$gitts" '+%Y-%m-%dT%H:%M:%SZ'`"
- taropt="--mtime=$gitts $taropt"
-
- # check if we're on a release tag
- gittag="`git -C \"$src\" describe --tags --match 'frr-*' --first-parent --long $commit`"
- gittag="${gittag%-g*}"
- gittag="${gittag%-*}"
-
- # if there have been changes to packaging or tests, it's still the
- # same release
- changes="`git diff --name-only "$gittag" $commit | \
- egrep -v '\.git|^m4/|^config|^README|^alpine/|^debian/|^pkgsrc/|^ports/|^redhat/|^snapcraft/|^solaris/|^tests/|^tools/|^gdb/|^docker/|^\.' | \
- wc -l`"
- if test "$changes" -eq 0; then
- adjchangelog=true
- echo "detected release build for tag $gittag" >&2
- $extraset || extraver=""
- elif ! $adjchangelog; then
- gitdate="`TZ=UTC date -d "$gitts" '+%Y%m%d'`"
- gitrev="`git rev-parse --short $commit`"
- dayseq="`git rev-list --since \"${gitts%T*} 00:00:00 +0000\" $commit | wc -l`"
- dayseq="`printf '%02d' $(( $dayseq - 1 ))`"
-
- $extraset || extraver="-$gitdate-$dayseq-g$gitrev"
-
- git -C "$src" remote -v | grep fetch | sed -e 's% (fetch)$%%' \
- | egrep -i '\b(git@github\.com:frrouting/frr\.git|https://github\.com/FRRouting/frr\.git)$' \
- | while read remote; do
- remote="${remote%% *}"
-
- git -C "$src" var -l | egrep "^remote.$remote.fetch=" \
- | while read fetch; do
- fetch="${fetch#*=}"
- from="${fetch%:*}"
- to="${fetch#*:}"
- if test "$from" = "+refs/pull/*/head"; then
- name="`git -C \"$src\" name-rev --name-only --refs \"$to\" $commit`"
- test "$name" = "undefined" && continue
- realname="${name%~*}"
- realname="${realname%%^*}"
- realname="${realname%%@*}"
- if test "$realname" = "$name"; then
- echo "${name##*/}" > "$tmpdir/.gitpr"
- break
- fi
- fi
- done || true
- test -n "$gitpr" && break
- done || true
- test $extraset = false -a -f "$tmpdir/.gitpr" && extraver="-PR`cat \"$tmpdir/.gitpr\"`$extraver"
- fi
-
- debsrc="git ls-files debian/"
-else
- if $nongit; then
- echo -e "\033[31;1mWARNING: this script should be executed from a git tree\033[m" >&2
- else
- echo -e "\033[31;1mERROR: this script should be executed from a git tree\033[m" >&2
- exit 1
- fi
- debsrc="echo debian"
-fi
-
-if $writeversion; then
- pkgver="`egrep ^AC_INIT configure.ac`"
- pkgver="${pkgver#*,}"
- pkgver="${pkgver%,*}"
- pkgver="`echo $pkgver`" # strip whitespace
- pkgver="${pkgver#[}"
- pkgver="${pkgver%]}"
-
- echo -e "\033[32;1mwriting version ID \033[36;1mfrr-$pkgver$extraver\033[m"
-
- cat > config.version <<EOF
-# config.version override by tarsource.sh
-EXTRAVERSION="$extraver"
-DIST_PACKAGE_VERSION="$pkgver$extraver"
-gitts="$gitts"
-taropt="$taropt"
-EOF
- sed -e "s%@VERSION@%$pkgver$extraver%" \
- < changelog-auto.in \
- > changelog-auto
- exit 0
-fi
-
-echo -e "\033[33;1mpreparing source tree\033[m"
-
-# config.version will also overwrite gitts and taropt when tarsource.sh
-# was used to write the config.version file before - but configure will
-# overwrite config.version down below!
-if test -f config.version; then
- # never executed for clean git build
- . ./config.version
- if $nongit; then
- $extraset || extraver="$EXTRAVERSION"
- fi
-fi
-if test \! -f configure; then
- # always executed for clean git build
- ./bootstrap.sh
-fi
-if test "$EXTRAVERSION" != "$extraver" -o \! -f config.status; then
- # always executed for clean git build
- # options don't matter really - we just want to make a dist tarball
- ./configure --with-pkg-extra-version=$extraver
-fi
-
-. ./config.version
-PACKAGE_VERSION="$DIST_PACKAGE_VERSION"
-
-echo -e "\033[33;1mpacking up \033[36;1mfrr-$PACKAGE_VERSION\033[m"
-
-make GZIP_ENV="-n9" am__tar="tar -chof - $taropt \"\$\$tardir\"" $ziptarget
-mv frr-${PACKAGE_VERSION}.tar.$zip "$outdir" || true
-lsfiles="frr-${PACKAGE_VERSION}.tar.$zip"
-
-if $debian; then
- mkdir -p "$tmpdir/debian/source"
- cat debian/changelog > "$tmpdir/debian/changelog"
- if $adjchangelog; then
- if grep -q 'autoconf changelog entry' debian/changelog; then
- tail -n +9 debian/changelog > "$tmpdir/debian/changelog"
- fi
- fi
- echo '3.0 (quilt)' > "$tmpdir/debian/source/format"
- DEBVER="`dpkg-parsechangelog -l\"$tmpdir/debian/changelog\" -SVersion`"
-
- eval $debsrc | tar -cho $taropt \
- --exclude-vcs --exclude debian/source/format \
- --exclude debian/changelog \
- --exclude debian/changelog-auto \
- --exclude debian/changelog-auto.in \
- --exclude debian/subdir.am \
- -T - -f ../frr_${DEBVER}.debian.tar
- # add specially prepared files from above
- tar -uf ../frr_${DEBVER}.debian.tar $taropt -C "$tmpdir" debian/source/format debian/changelog
-
- test -f ../frr_${DEBVER}.debian.tar.$zip && rm -f ../frr_${DEBVER}.debian.tar.$zip
- $ziptool ../frr_${DEBVER}.debian.tar
-
- # pack up debian files proper
- ln -s "$outdir/frr-${PACKAGE_VERSION}.tar.$zip" ../frr_${PACKAGE_VERSION}.orig.tar.$zip
- dpkg-source -l"$tmpdir/debian/changelog" \
- --format='3.0 (custom)' --target-format='3.0 (quilt)' \
- -b . frr_${PACKAGE_VERSION}.orig.tar.$zip frr_${DEBVER}.debian.tar.$zip
-
- dpkg-genchanges -sa -S > ../frr_${DEBVER}_source.changes
-
- test -n "$keyid" && debsign ../frr_${DEBVER}_source.changes -k"$keyid"
-
- mv ../frr_${DEBVER}_source.changes "$outdir" || true
- mv ../frr_${DEBVER}.dsc "$outdir" || true
- mv ../frr_${DEBVER}.debian.tar.$zip "$outdir" || true
- if test -h ../frr_${PACKAGE_VERSION}.orig.tar.$zip; then
- rm ../frr_${PACKAGE_VERSION}.orig.tar.$zip || true
- fi
- ln -s frr-${PACKAGE_VERSION}.tar.$zip "$outdir/frr_${PACKAGE_VERSION}.orig.tar.$zip" || true
-
- cd "$outdir"
-
- lsfiles="$lsfiles \
- frr_${DEBVER}.dsc \
- frr_${DEBVER}.debian.tar.$zip \
- frr_${PACKAGE_VERSION}.orig.tar.$zip \
- frr_${DEBVER}_source.changes"
-fi
-
-cd "$outdir"
-if test -n "$keyid"; then
- $unzip frr-${PACKAGE_VERSION}.tar.$zip > frr-${PACKAGE_VERSION}.tar
- test -f frr-${PACKAGE_VERSION}.tar.asc && rm frr-${PACKAGE_VERSION}.tar.asc
- if gpg -a --detach-sign -u "$keyid" frr-${PACKAGE_VERSION}.tar; then
- lsfiles="$lsfiles frr-${PACKAGE_VERSION}.tar.asc"
- fi
- rm frr-${PACKAGE_VERSION}.tar
-fi
-
-echo -e "\n\033[32;1mdone: \033[36;1mfrr-$PACKAGE_VERSION\033[m\n"
-ls -l $lsfiles
/* VTY shell pager name. */
char *vtysh_pager_name = NULL;
+/* VTY should add timestamp */
+bool vtysh_add_timestamp;
+
/* VTY shell client structure */
struct vtysh_client {
int fd;
}
}
+ if (vtysh_add_timestamp && strncmp(line, "exit", 4)) {
+ char ts[48];
+
+ (void)quagga_timestamp(3, ts, sizeof(ts));
+ vty_out(vty, "%% %s\n\n", ts);
+ }
+
saved_ret = ret = cmd_execute(vty, line, &cmd, 1);
saved_node = vty->node;
return show_per_daemon(vty, argv, argc, "Thread statistics for %s:\n");
}
-#ifndef EXCLUDE_CPU_TIME
DEFUN (vtysh_show_thread,
vtysh_show_thread_cmd,
"show thread cpu [FILTER]",
{
return show_per_daemon(vty, argv, argc, "Thread statistics for %s:\n");
}
-#endif
DEFUN (vtysh_show_work_queues,
vtysh_show_work_queues_cmd,
install_element(VIEW_NODE, &vtysh_show_modules_cmd);
install_element(VIEW_NODE, &vtysh_show_work_queues_cmd);
install_element(VIEW_NODE, &vtysh_show_work_queues_daemon_cmd);
-#ifndef EXCLUDE_CPU_TIME
install_element(VIEW_NODE, &vtysh_show_thread_cmd);
-#endif
install_element(VIEW_NODE, &vtysh_show_poll_cmd);
/* Logging */
extern int user_mode;
+extern bool vtysh_add_timestamp;
+
#endif /* VTYSH_H */
int vtysh_read_config(const char *config_default_dir, bool dry_run)
{
FILE *confp = NULL;
+ bool save;
int ret;
confp = fopen(config_default_dir, "r");
return CMD_ERR_NO_FILE;
}
+ save = vtysh_add_timestamp;
+ vtysh_add_timestamp = false;
+
ret = vtysh_read_file(confp, dry_run);
fclose(confp);
+ vtysh_add_timestamp = save;
+
return (ret);
}
{"writeconfig", no_argument, NULL, 'w'},
{"pathspace", required_argument, NULL, 'N'},
{"user", no_argument, NULL, 'u'},
+ {"timestamp", no_argument, NULL, 't'},
{0}};
/* Read a string, and return a pointer to it. Returns NULL on EOF. */
int opt;
int dryrun = 0;
int boot_flag = 0;
+ bool ts_flag = false;
const char *daemon_name = NULL;
const char *inputfile = NULL;
struct cmd_rec {
/* Option handling. */
while (1) {
- opt = getopt_long(argc, argv, "be:c:d:nf:H:mEhCwN:u", longopts,
+ opt = getopt_long(argc, argv, "be:c:d:nf:H:mEhCwN:ut", longopts,
0);
if (opt == EOF)
case 'u':
user_mode = 1;
break;
+ case 't':
+ ts_flag = true;
+ break;
case 'w':
writeconfig = 1;
break;
if (!user_mode)
vtysh_execute("enable");
+ vtysh_add_timestamp = ts_flag;
+
while (cmd != NULL) {
char *eol;
if (!user_mode)
vtysh_execute("enable");
+ vtysh_add_timestamp = ts_flag;
+
/* Preparation for longjmp() in sigtstp(). */
sigsetjmp(jmpbuf, 1);
jmpflag = 1;
return -1;
}
-#if defined HAVE_SYSTEMD
char buffer[512];
snprintf(buffer, sizeof(buffer), "restarting %s", restart->name);
systemd_send_status(buffer);
-#endif
/* Note: time_elapsed test must come before the force test, since we
need
restart->pid = 0;
}
-#if defined HAVE_SYSTEMD
systemd_send_status("FRR Operational");
-#endif
+
/* Calculate the new restart interval. */
if (update_interval) {
if (delay.tv_sec > 2 * gs.max_restart_interval)
fp = fopen(started, "w");
if (fp)
fclose(fp);
-#if defined HAVE_SYSTEMD
- systemd_send_started(master, 0);
+
+ systemd_send_started(master);
systemd_send_status("FRR Operational");
-#endif
sent = 1;
}
"RP keep alive Timer in seconds.";
}
}
+
+ grouping msdp-timers {
+ leaf hold-time {
+ type uint32 {
+ range 3..600;
+ }
+ units seconds;
+ default 75;
+ description
+ "Hold period is started at the MSDP peer connection establishment
+ and is reset every new message. When the period expires the
+ connection is closed.
+
+ This value needs to be greater than `keep-alive-period`.";
+ }
+
+ leaf keep-alive {
+ type uint32 {
+ range 2..600;
+ }
+ units seconds;
+ default 60;
+ description
+ "To maintain a connection established it is necessary to send
+ keep alive messages in a certain frequency and this allows its
+ configuration.
+
+ This value needs to be lesser than `hold-time-period`.";
+ }
+
+ leaf connection-retry {
+ type uint32 {
+ range 1..600;
+ }
+ units seconds;
+ default 30;
+ description
+ "This period is used in the MSDP peer with the highest IP value
+ in the pair and configures the interval between TCP connection
+ attempts.";
+ }
+ }
+
grouping per-af-global-pim-config-attributes {
description
"A grouping defining per address family pim global attributes";
"Enable ssmpingd operation.";
}
+ /* Global timers configuration. */
+ container msdp {
+ description "Global MSDP configuration.";
+ uses msdp-timers;
+ }
+
list msdp-mesh-groups {
key "name";
description
}
leaf source-ip {
+ mandatory true;
type inet:ip-address;
description
"MSDP source IP address.";
if (!if_is_loopback(ifp) && ifc->address->family == AF_INET &&
!IS_ZEBRA_IF_VRF(ifp)) {
- if (ifc->address->prefixlen == 32)
+ if (ifc->address->prefixlen == IPV4_MAX_BITLEN)
SET_FLAG(ifc->flags, ZEBRA_IFA_UNNUMBERED);
else
UNSET_FLAG(ifc->flags, ZEBRA_IFA_UNNUMBERED);
void connected_up(struct interface *ifp, struct connected *ifc)
{
afi_t afi;
- struct prefix p = {0};
+ struct prefix p;
struct nexthop nh = {
.type = NEXTHOP_TYPE_IFINDEX,
.ifindex = ifp->ifindex,
/* Ensure 'down' flag is cleared */
UNSET_FLAG(ifc->conf, ZEBRA_IFC_DOWN);
- PREFIX_COPY(&p, CONNECTED_PREFIX(ifc));
+ prefix_copy(&p, CONNECTED_PREFIX(ifc));
/* Apply mask to the network. */
apply_mask(&p);
* resolve to the same network and mask
*/
for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, c)) {
- struct prefix cp = {0};
+ struct prefix cp;
- PREFIX_COPY(&cp, CONNECTED_PREFIX(c));
+ prefix_copy(&cp, CONNECTED_PREFIX(c));
apply_mask(&cp);
if (prefix_same(&cp, &p) &&
p = prefix_ipv4_new();
p->family = AF_INET;
p->prefix = *addr;
- p->prefixlen = CHECK_FLAG(flags, ZEBRA_IFA_PEER) ? IPV4_MAX_PREFIXLEN
- : prefixlen;
+ p->prefixlen =
+ CHECK_FLAG(flags, ZEBRA_IFA_PEER) ? IPV4_MAX_BITLEN : prefixlen;
ifc->address = (struct prefix *)p;
/* If there is a peer address. */
}
/* no destination address was supplied */
- if (!dest && (prefixlen == IPV4_MAX_PREFIXLEN)
- && if_is_pointopoint(ifp))
+ if (!dest && (prefixlen == IPV4_MAX_BITLEN) && if_is_pointopoint(ifp))
zlog_debug(
"PtP interface %s with addr %pI4/%d needs a peer address",
ifp->name, addr, prefixlen);
return;
}
- PREFIX_COPY(&p, CONNECTED_PREFIX(ifc));
+ prefix_copy(&p, CONNECTED_PREFIX(ifc));
/* Apply mask to the network. */
apply_mask(&p);
for (ALL_LIST_ELEMENTS_RO(ifp->connected, cnode, c)) {
struct prefix cp;
- PREFIX_COPY(&cp, CONNECTED_PREFIX(c));
+ prefix_copy(&cp, CONNECTED_PREFIX(c));
apply_mask(&cp);
if (prefix_same(&p, &cp) &&
memset(&p, 0, sizeof(struct prefix));
p.family = AF_INET;
p.u.prefix4 = *addr;
- p.prefixlen = CHECK_FLAG(flags, ZEBRA_IFA_PEER) ? IPV4_MAX_PREFIXLEN
- : prefixlen;
+ p.prefixlen =
+ CHECK_FLAG(flags, ZEBRA_IFA_PEER) ? IPV4_MAX_BITLEN : prefixlen;
if (dest) {
memset(&d, 0, sizeof(struct prefix));
}
if (peer_str) {
- if (lp.prefixlen != 32) {
+ if (lp.prefixlen != IPV4_MAX_BITLEN) {
vty_out(vty,
"%% Local prefix length for P-t-P address must be /32\n");
return CMD_WARNING_CONFIG_FAILED;
}
if (peer_str) {
- if (lp.prefixlen != 32) {
+ if (lp.prefixlen != IPV4_MAX_BITLEN) {
vty_out(vty,
"%% Local prefix length for P-t-P address must be /32\n");
return CMD_WARNING_CONFIG_FAILED;
p.family = AF_INET;
p.u.prefix4 = dest.sin.sin_addr;
if (flags & RTF_HOST)
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
else
p.prefixlen = ip_masklen(mask.sin.sin_addr);
p.family = AF_INET6;
p.u.prefix6 = dest.sin6.sin6_addr;
if (flags & RTF_HOST)
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
else
p.prefixlen = ip6_masklen(mask.sin6.sin6_addr);
work_queue_free_and_null(&zrouter.lsp_process_q);
vrf_terminate();
- rtadv_terminate();
ns_walk_func(zebra_ns_early_shutdown, NULL, NULL);
zebra_ns_notify_close();
#define RNODE_NEXT_RE(rn, re) RE_DEST_NEXT_ROUTE(rib_dest_from_rnode(rn), re)
#if defined(HAVE_RTADV)
+PREDECL_SORTLIST_UNIQ(adv_if_list);
/* Structure which hold status of router advertisement. */
struct rtadv {
int sock;
- int adv_if_count;
- int adv_msec_if_count;
+ struct adv_if_list_head adv_if;
+ struct adv_if_list_head adv_msec_if;
struct thread *ra_read;
struct thread *ra_timer;
};
+
+/* adv list node */
+struct adv_if {
+ char name[INTERFACE_NAMSIZ];
+ struct adv_if_list_item list_item;
+};
+
+static int adv_if_cmp(const struct adv_if *a, const struct adv_if *b)
+{
+ return if_cmp_name_func(a->name, b->name);
+}
+
+DECLARE_SORTLIST_UNIQ(adv_if_list, struct adv_if, list_item, adv_if_cmp);
#endif /* HAVE_RTADV */
/*
bool fromkernel);
extern struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
- union g_addr *addr,
+ const union g_addr *addr,
struct route_node **rn_out);
extern struct route_entry *rib_match_ipv4_multicast(vrf_id_t vrf_id,
struct in_addr addr,
case AFI_IP:
p->u.prefix4.s_addr = INADDR_ANY;
p->family = AF_INET;
- p->prefixlen = 32;
+ p->prefixlen = IPV4_MAX_BITLEN;
if (zvrf->rid_user_assigned.u.prefix4.s_addr != INADDR_ANY)
p->u.prefix4.s_addr =
zvrf->rid_user_assigned.u.prefix4.s_addr;
case AFI_IP6:
p->u.prefix6 = in6addr_any;
p->family = AF_INET6;
- p->prefixlen = 128;
+ p->prefixlen = IPV6_MAX_BITLEN;
if (!router_id_v6_is_any(&zvrf->rid6_user_assigned))
addr = &zvrf->rid6_user_assigned.u.prefix6;
else if (!list_isempty(zvrf->rid6_lo_sorted_list)) {
if (!inet_pton(AF_INET, argv[idx]->arg, &rid.u.prefix4))
return CMD_WARNING_CONFIG_FAILED;
- rid.prefixlen = 32;
+ rid.prefixlen = IPV4_MAX_BITLEN;
rid.family = AF_INET;
argv_find(argv, argc, "NAME", &idx);
if (!inet_pton(AF_INET6, argv[idx]->arg, &rid.u.prefix6))
return CMD_WARNING_CONFIG_FAILED;
- rid.prefixlen = 128;
+ rid.prefixlen = IPV6_MAX_BITLEN;
rid.family = AF_INET6;
argv_find(argv, argc, "NAME", &idx);
if (!inet_pton(AF_INET, argv[idx]->arg, &rid.u.prefix4))
return CMD_WARNING_CONFIG_FAILED;
- rid.prefixlen = 32;
+ rid.prefixlen = IPV4_MAX_BITLEN;
rid.family = AF_INET;
router_id_set(AFI_IP, &rid, zvrf);
if (!inet_pton(AF_INET6, argv[idx]->arg, &rid.u.prefix6))
return CMD_WARNING_CONFIG_FAILED;
- rid.prefixlen = 128;
+ rid.prefixlen = IPV6_MAX_BITLEN;
rid.family = AF_INET6;
router_id_set(AFI_IP6, &rid, zvrf);
zvrf->rid6_lo_sorted_list->cmp = router_id_v6_cmp;
zvrf->rid_user_assigned.family = AF_INET;
- zvrf->rid_user_assigned.prefixlen = 32;
+ zvrf->rid_user_assigned.prefixlen = IPV4_MAX_BITLEN;
zvrf->rid6_user_assigned.family = AF_INET6;
- zvrf->rid6_user_assigned.prefixlen = 128;
+ zvrf->rid6_user_assigned.prefixlen = IPV6_MAX_BITLEN;
}
#endif
DEFINE_MTYPE_STATIC(ZEBRA, RTADV_PREFIX, "Router Advertisement Prefix");
+DEFINE_MTYPE_STATIC(ZEBRA, ADV_IF, "Advertised Interface");
#ifdef OPEN_BSD
#include <netinet/icmp6.h>
static int if_join_all_router(int, struct interface *);
static int if_leave_all_router(int, struct interface *);
-static int rtadv_get_socket(struct zebra_vrf *zvrf)
+static struct zebra_vrf *rtadv_interface_get_zvrf(const struct interface *ifp)
{
- if (zvrf->rtadv.sock > 0)
- return zvrf->rtadv.sock;
- return zrouter.rtadv_sock;
+ /* We use the default vrf for rtadv handling except in netns */
+ if (!vrf_is_backend_netns())
+ return vrf_info_lookup(VRF_DEFAULT);
+
+ return vrf_info_lookup(ifp->vrf_id);
}
static int rtadv_increment_received(struct zebra_vrf *zvrf, ifindex_t *ifindex)
int period;
zvrf->rtadv.ra_timer = NULL;
- if (zvrf->rtadv.adv_msec_if_count == 0) {
+ if (adv_if_list_count(&zvrf->rtadv.adv_msec_if) == 0) {
period = 1000; /* 1 s */
rtadv_event(zvrf, RTADV_TIMER, 1 /* 1 s */);
} else {
ifp->ifindex);
}
- rtadv_send_packet(rtadv_get_socket(zvrf),
- ifp, RA_ENABLE);
+ rtadv_send_packet(zvrf->rtadv.sock, ifp,
+ RA_ENABLE);
} else {
zif->rtadv.AdvIntervalTimer -= period;
if (zif->rtadv.AdvIntervalTimer <= 0) {
zif->rtadv
.MaxRtrAdvInterval;
rtadv_send_packet(
- rtadv_get_socket(zvrf),
- ifp, RA_ENABLE);
+ zvrf->rtadv.sock, ifp,
+ RA_ENABLE);
}
}
}
static void rtadv_process_solicit(struct interface *ifp)
{
- struct zebra_vrf *zvrf = vrf_info_lookup(ifp->vrf_id);
+ struct zebra_vrf *zvrf;
struct zebra_if *zif;
+ zvrf = rtadv_interface_get_zvrf(ifp);
assert(zvrf);
zif = ifp->info;
if ((zif->rtadv.UseFastRexmit)
|| (zif->rtadv.AdvIntervalTimer <=
(zif->rtadv.MaxRtrAdvInterval - MIN_DELAY_BETWEEN_RAS))) {
- rtadv_send_packet(rtadv_get_socket(zvrf), ifp, RA_ENABLE);
+ rtadv_send_packet(zvrf->rtadv.sock, ifp, RA_ENABLE);
zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval;
} else
zif->rtadv.AdvIntervalTimer = MIN_DELAY_BETWEEN_RAS;
/* Create entry for neighbor if not known. */
p.family = AF_INET6;
IPV6_ADDR_COPY(&p.u.prefix6, &addr->sin6_addr);
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
if (!nbr_connected_check(ifp, &p))
nbr_connected_add_ipv6(ifp, &addr->sin6_addr);
zvrf->rtadv.ra_read = NULL;
/* Register myself. */
- rtadv_event(zvrf, RTADV_READ, sock);
+ rtadv_event(zvrf, RTADV_READ, 0);
len = rtadv_recv_packet(zvrf, sock, buf, sizeof(buf), &from, &ifindex,
&hoplimit);
return sock;
}
+static struct adv_if *adv_if_new(const char *name)
+{
+ struct adv_if *new;
+
+ new = XCALLOC(MTYPE_ADV_IF, sizeof(struct adv_if));
+
+ strlcpy(new->name, name, sizeof(new->name));
+
+ return new;
+}
+
+static void adv_if_free(struct adv_if *adv_if)
+{
+ XFREE(MTYPE_ADV_IF, adv_if);
+}
+
+static bool adv_if_is_empty_internal(const struct adv_if_list_head *adv_if_head)
+{
+ return adv_if_list_count(adv_if_head) ? false : true;
+}
+
+static struct adv_if *adv_if_add_internal(struct adv_if_list_head *adv_if_head,
+ const char *name)
+{
+ struct adv_if adv_if_lookup = {};
+ struct adv_if *adv_if = NULL;
+
+ strlcpy(adv_if_lookup.name, name, sizeof(adv_if_lookup.name));
+ adv_if = adv_if_list_find(adv_if_head, &adv_if_lookup);
+
+ if (adv_if != NULL)
+ return adv_if;
+
+ adv_if = adv_if_new(adv_if_lookup.name);
+ adv_if_list_add(adv_if_head, adv_if);
+
+ return NULL;
+}
+
+static struct adv_if *adv_if_del_internal(struct adv_if_list_head *adv_if_head,
+ const char *name)
+{
+ struct adv_if adv_if_lookup = {};
+ struct adv_if *adv_if = NULL;
+
+ strlcpy(adv_if_lookup.name, name, sizeof(adv_if_lookup.name));
+ adv_if = adv_if_list_find(adv_if_head, &adv_if_lookup);
+
+ if (adv_if == NULL)
+ return NULL;
+
+ adv_if_list_del(adv_if_head, adv_if);
+
+ return adv_if;
+}
+
+static void adv_if_clean_internal(struct adv_if_list_head *adv_if_head)
+{
+ struct adv_if *node = NULL;
+
+ if (!adv_if_is_empty_internal(adv_if_head)) {
+ frr_each_safe (adv_if_list, adv_if_head, node) {
+ adv_if_list_del(adv_if_head, node);
+ adv_if_free(node);
+ }
+ }
+
+ adv_if_list_fini(adv_if_head);
+}
+
+
+/*
+ * Add to list. On Success, return NULL, otherwise return already existing
+ * adv_if.
+ */
+static struct adv_if *adv_if_add(struct zebra_vrf *zvrf, const char *name)
+{
+ struct adv_if *adv_if = NULL;
+
+ adv_if = adv_if_add_internal(&zvrf->rtadv.adv_if, name);
+
+ if (adv_if != NULL)
+ return adv_if;
+
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ struct vrf *vrf = zvrf->vrf;
+
+ zlog_debug("%s: %s:%u IF %s count: %zu", __func__,
+ VRF_LOGNAME(vrf), zvrf_id(zvrf), name,
+ adv_if_list_count(&zvrf->rtadv.adv_if));
+ }
+
+ return NULL;
+}
+
+/*
+ * Del from list. On Success, return the adv_if, otherwise return NULL. Caller
+ * frees.
+ */
+static struct adv_if *adv_if_del(struct zebra_vrf *zvrf, const char *name)
+{
+ struct adv_if *adv_if = NULL;
+
+ adv_if = adv_if_del_internal(&zvrf->rtadv.adv_if, name);
+
+ if (adv_if == NULL)
+ return NULL;
+
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ struct vrf *vrf = zvrf->vrf;
+
+ zlog_debug("%s: %s:%u IF %s count: %zu", __func__,
+ VRF_LOGNAME(vrf), zvrf_id(zvrf), name,
+ adv_if_list_count(&zvrf->rtadv.adv_if));
+ }
+
+ return adv_if;
+}
+
+/*
+ * Add to list. On Success, return NULL, otherwise return already existing
+ * adv_if.
+ */
+static struct adv_if *adv_msec_if_add(struct zebra_vrf *zvrf, const char *name)
+{
+ struct adv_if *adv_if = NULL;
+
+ adv_if = adv_if_add_internal(&zvrf->rtadv.adv_msec_if, name);
+
+ if (adv_if != NULL)
+ return adv_if;
+
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ struct vrf *vrf = zvrf->vrf;
+
+ zlog_debug("%s: %s:%u IF %s count: %zu", __func__,
+ VRF_LOGNAME(vrf), zvrf_id(zvrf), name,
+ adv_if_list_count(&zvrf->rtadv.adv_msec_if));
+ }
+
+ return NULL;
+}
+
+/*
+ * Del from list. On Success, return the adv_if, otherwise return NULL. Caller
+ * frees.
+ */
+static struct adv_if *adv_msec_if_del(struct zebra_vrf *zvrf, const char *name)
+{
+ struct adv_if *adv_if = NULL;
+
+ adv_if = adv_if_del_internal(&zvrf->rtadv.adv_msec_if, name);
+
+ if (adv_if == NULL)
+ return NULL;
+
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ struct vrf *vrf = zvrf->vrf;
+
+ zlog_debug("%s: %s:%u IF %s count: %zu", __func__,
+ VRF_LOGNAME(vrf), zvrf_id(zvrf), name,
+ adv_if_list_count(&zvrf->rtadv.adv_msec_if));
+ }
+
+ return adv_if;
+}
+
+/* Clean adv_if list, called on vrf terminate */
+static void adv_if_clean(struct zebra_vrf *zvrf)
+{
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ struct vrf *vrf = zvrf->vrf;
+
+ zlog_debug("%s: %s:%u count: %zu -> 0", __func__,
+ VRF_LOGNAME(vrf), zvrf_id(zvrf),
+ adv_if_list_count(&zvrf->rtadv.adv_if));
+ }
+
+ adv_if_clean_internal(&zvrf->rtadv.adv_if);
+}
+
+/* Clean adv_msec_if list, called on vrf terminate */
+static void adv_msec_if_clean(struct zebra_vrf *zvrf)
+{
+ if (IS_ZEBRA_DEBUG_EVENT) {
+ struct vrf *vrf = zvrf->vrf;
+
+ zlog_debug("%s: %s:%u count: %zu -> 0", __func__,
+ VRF_LOGNAME(vrf), zvrf_id(zvrf),
+ adv_if_list_count(&zvrf->rtadv.adv_msec_if));
+ }
+
+ adv_if_clean_internal(&zvrf->rtadv.adv_msec_if);
+}
+
static struct rtadv_prefix *rtadv_prefix_new(void)
{
return XCALLOC(MTYPE_RTADV_PREFIX, sizeof(struct rtadv_prefix));
{
struct zebra_if *zif;
struct zebra_vrf *zvrf;
+ struct adv_if *adv_if = NULL;
zif = ifp->info;
- zvrf = vrf_info_lookup(ifp->vrf_id);
+
+ zvrf = rtadv_interface_get_zvrf(ifp);
if (status == RA_SUPPRESS) {
/* RA is currently enabled */
if (zif->rtadv.AdvSendAdvertisements) {
- rtadv_send_packet(rtadv_get_socket(zvrf), ifp,
- RA_SUPPRESS);
+ rtadv_send_packet(zvrf->rtadv.sock, ifp, RA_SUPPRESS);
zif->rtadv.AdvSendAdvertisements = 0;
zif->rtadv.AdvIntervalTimer = 0;
- zvrf->rtadv.adv_if_count--;
- if_leave_all_router(rtadv_get_socket(zvrf), ifp);
+ adv_if = adv_if_del(zvrf, ifp->name);
+ if (adv_if == NULL)
+ return; /* Nothing to delete */
+
+ adv_if_free(adv_if);
+
+ if_leave_all_router(zvrf->rtadv.sock, ifp);
- if (zvrf->rtadv.adv_if_count == 0)
+ if (adv_if_list_count(&zvrf->rtadv.adv_if) == 0)
rtadv_event(zvrf, RTADV_STOP, 0);
}
} else {
if (!zif->rtadv.AdvSendAdvertisements) {
zif->rtadv.AdvSendAdvertisements = 1;
zif->rtadv.AdvIntervalTimer = 0;
- zvrf->rtadv.adv_if_count++;
-
if ((zif->rtadv.MaxRtrAdvInterval >= 1000)
&& zif->rtadv.UseFastRexmit) {
/*
RTADV_NUM_FAST_REXMITS;
}
- if_join_all_router(rtadv_get_socket(zvrf), ifp);
+ adv_if = adv_if_add(zvrf, ifp->name);
+ if (adv_if != NULL)
+			return; /* Already added */
+
+ if_join_all_router(zvrf->rtadv.sock, ifp);
- if (zvrf->rtadv.adv_if_count == 1)
- rtadv_event(zvrf, RTADV_START,
- rtadv_get_socket(zvrf));
+ if (adv_if_list_count(&zvrf->rtadv.adv_if) == 1)
+ rtadv_event(zvrf, RTADV_START, 0);
}
}
}
zebra_route_string(client->proto));
return;
}
- if (ifp->vrf_id != zvrf_id(zvrf)) {
+ if (vrf_is_backend_netns() && ifp->vrf_id != zvrf_id(zvrf)) {
struct vrf *vrf = zvrf->vrf;
zlog_debug(
struct zebra_vrf *zvrf;
zif = ifp->info;
- zvrf = vrf_info_lookup(ifp->vrf_id);
+ zvrf = rtadv_interface_get_zvrf(ifp);
if (zif->rtadv.AdvSendAdvertisements)
- rtadv_send_packet(rtadv_get_socket(zvrf), ifp, RA_SUPPRESS);
+ rtadv_send_packet(zvrf->rtadv.sock, ifp, RA_SUPPRESS);
}
/*
zebra_interface_radv_set(client, hdr, msg, zvrf, 1);
}
+static void show_zvrf_rtadv_adv_if_helper(struct vty *vty,
+ struct adv_if_list_head *adv_if_head)
+{
+ struct adv_if *node = NULL;
+
+ if (!adv_if_is_empty_internal(adv_if_head)) {
+ frr_each (adv_if_list, adv_if_head, node) {
+ vty_out(vty, " %s\n", node->name);
+ }
+ }
+
+ vty_out(vty, "\n");
+}
+
+static void show_zvrf_rtadv_helper(struct vty *vty, struct zebra_vrf *zvrf)
+{
+ vty_out(vty, "VRF: %s\n", zvrf_name(zvrf));
+ vty_out(vty, " Interfaces:\n");
+ show_zvrf_rtadv_adv_if_helper(vty, &zvrf->rtadv.adv_if);
+
+ vty_out(vty, " Interfaces(msec):\n");
+ show_zvrf_rtadv_adv_if_helper(vty, &zvrf->rtadv.adv_msec_if);
+}
+
+DEFPY(show_ipv6_nd_ra_if, show_ipv6_nd_ra_if_cmd,
+ "show ipv6 nd ra-interfaces [vrf<NAME$vrf_name|all$vrf_all>]",
+ SHOW_STR IP6_STR
+ "Neighbor discovery\n"
+ "Route Advertisement Interfaces\n" VRF_FULL_CMD_HELP_STR)
+{
+ struct zebra_vrf *zvrf = NULL;
+
+ if (!vrf_is_backend_netns() && (vrf_name || vrf_all)) {
+ vty_out(vty,
+ "%% VRF subcommand only applicable for netns-based vrfs.\n");
+ return CMD_WARNING;
+ }
+
+ if (vrf_all) {
+ struct vrf *vrf;
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ struct zebra_vrf *zvrf;
+
+ zvrf = vrf->info;
+ if (!zvrf)
+ continue;
+
+ show_zvrf_rtadv_helper(vty, zvrf);
+ }
+
+ return CMD_SUCCESS;
+ }
+
+ if (vrf_name)
+ zvrf = zebra_vrf_lookup_by_name(vrf_name);
+ else
+ zvrf = zebra_vrf_lookup_by_name(VRF_DEFAULT_NAME);
+
+ if (!zvrf) {
+ vty_out(vty, "%% VRF '%s' specified does not exist\n",
+ vrf_name);
+ return CMD_WARNING;
+ }
+
+ show_zvrf_rtadv_helper(vty, zvrf);
+
+ return CMD_SUCCESS;
+}
+
DEFUN (ipv6_nd_ra_fast_retrans,
ipv6_nd_ra_fast_retrans_cmd,
"ipv6 nd ra-fast-retrans",
unsigned interval;
struct zebra_if *zif = ifp->info;
struct zebra_vrf *zvrf;
+ struct adv_if *adv_if;
- zvrf = vrf_info_lookup(ifp->vrf_id);
+ zvrf = rtadv_interface_get_zvrf(ifp);
interval = strtoul(argv[idx_number]->arg, NULL, 10);
if ((zif->rtadv.AdvDefaultLifetime != -1
return CMD_WARNING_CONFIG_FAILED;
}
- if (zif->rtadv.MaxRtrAdvInterval % 1000)
- zvrf->rtadv.adv_msec_if_count--;
+ if (zif->rtadv.MaxRtrAdvInterval % 1000) {
+ adv_if = adv_msec_if_del(zvrf, ifp->name);
+ if (adv_if != NULL)
+ adv_if_free(adv_if);
+ }
if (interval % 1000)
- zvrf->rtadv.adv_msec_if_count++;
+ (void)adv_msec_if_add(zvrf, ifp->name);
SET_FLAG(zif->rtadv.ra_configured, VTY_RA_INTERVAL_CONFIGURED);
zif->rtadv.MaxRtrAdvInterval = interval;
unsigned interval;
struct zebra_if *zif = ifp->info;
struct zebra_vrf *zvrf;
+ struct adv_if *adv_if;
- zvrf = vrf_info_lookup(ifp->vrf_id);
+ zvrf = rtadv_interface_get_zvrf(ifp);
interval = strtoul(argv[idx_number]->arg, NULL, 10);
if ((zif->rtadv.AdvDefaultLifetime != -1
return CMD_WARNING_CONFIG_FAILED;
}
- if (zif->rtadv.MaxRtrAdvInterval % 1000)
- zvrf->rtadv.adv_msec_if_count--;
+ if (zif->rtadv.MaxRtrAdvInterval % 1000) {
+ adv_if = adv_msec_if_del(zvrf, ifp->name);
+ if (adv_if != NULL)
+ adv_if_free(adv_if);
+ }
/* convert to milliseconds */
interval = interval * 1000;
VTY_DECLVAR_CONTEXT(interface, ifp);
struct zebra_if *zif = ifp->info;
struct zebra_vrf *zvrf = NULL;
+ struct adv_if *adv_if;
- zvrf = vrf_info_lookup(ifp->vrf_id);
+ zvrf = rtadv_interface_get_zvrf(ifp);
- if (zif->rtadv.MaxRtrAdvInterval % 1000)
- zvrf->rtadv.adv_msec_if_count--;
+ if (zif->rtadv.MaxRtrAdvInterval % 1000) {
+ adv_if = adv_msec_if_del(zvrf, ifp->name);
+ if (adv_if != NULL)
+ adv_if_free(adv_if);
+ }
UNSET_FLAG(zif->rtadv.ra_configured, VTY_RA_INTERVAL_CONFIGURED);
static void rtadv_event(struct zebra_vrf *zvrf, enum rtadv_event event, int val)
{
- struct rtadv *rtadv = &zvrf->rtadv;
+ struct rtadv *rtadv;
if (IS_ZEBRA_DEBUG_EVENT) {
struct vrf *vrf = zvrf->vrf;
VRF_LOGNAME(vrf), event, val);
}
+ rtadv = &zvrf->rtadv;
+
switch (event) {
case RTADV_START:
- thread_add_read(zrouter.master, rtadv_read, zvrf, val,
+ thread_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
&rtadv->ra_read);
thread_add_event(zrouter.master, rtadv_timer, zvrf, 0,
&rtadv->ra_timer);
&rtadv->ra_timer);
break;
case RTADV_READ:
- thread_add_read(zrouter.master, rtadv_read, zvrf, val,
+ thread_add_read(zrouter.master, rtadv_read, zvrf, rtadv->sock,
&rtadv->ra_read);
break;
default:
return;
}
-void rtadv_init(struct zebra_vrf *zvrf)
+void rtadv_vrf_init(struct zebra_vrf *zvrf)
{
- if (vrf_is_backend_netns()) {
- zvrf->rtadv.sock = rtadv_make_socket(zvrf->zns->ns_id);
- zrouter.rtadv_sock = -1;
- } else {
- zvrf->rtadv.sock = -1;
- if (zrouter.rtadv_sock < 0)
- zrouter.rtadv_sock =
- rtadv_make_socket(zvrf->zns->ns_id);
- }
+ if (!vrf_is_backend_netns() && (zvrf_id(zvrf) != VRF_DEFAULT))
+ return;
+
+ zvrf->rtadv.sock = rtadv_make_socket(zvrf->zns->ns_id);
}
void rtadv_vrf_terminate(struct zebra_vrf *zvrf)
{
+ if (!vrf_is_backend_netns() && (zvrf_id(zvrf) != VRF_DEFAULT))
+ return;
+
rtadv_event(zvrf, RTADV_STOP, 0);
if (zvrf->rtadv.sock >= 0) {
close(zvrf->rtadv.sock);
zvrf->rtadv.sock = -1;
}
- zvrf->rtadv.adv_if_count = 0;
- zvrf->rtadv.adv_msec_if_count = 0;
-}
-
-void rtadv_terminate(void)
-{
- if (zrouter.rtadv_sock >= 0) {
- close(zrouter.rtadv_sock);
- zrouter.rtadv_sock = -1;
- }
+ adv_if_clean(zvrf);
+ adv_msec_if_clean(zvrf);
}
void rtadv_cmd_init(void)
hook_register(zebra_if_extra_info, nd_dump_vty);
hook_register(zebra_if_config_wr, rtadv_config_write);
+ install_element(VIEW_NODE, &show_ipv6_nd_ra_if_cmd);
+
install_element(INTERFACE_NODE, &ipv6_nd_ra_fast_retrans_cmd);
install_element(INTERFACE_NODE, &no_ipv6_nd_ra_fast_retrans_cmd);
install_element(INTERFACE_NODE, &ipv6_nd_ra_retrans_interval_cmd);
}
#else
-void rtadv_init(struct zebra_vrf *zvrf)
-{
- /* Empty.*/;
-}
-void rtadv_terminate(void)
+void rtadv_vrf_init(struct zebra_vrf *zvrf)
{
/* Empty.*/;
}
+
void rtadv_cmd_init(void)
{
/* Empty.*/;
RA_SUPPRESS,
};
-extern void rtadv_init(struct zebra_vrf *zvrf);
+extern void rtadv_vrf_init(struct zebra_vrf *zvrf);
extern void rtadv_vrf_terminate(struct zebra_vrf *zvrf);
-extern void rtadv_terminate(void);
extern void rtadv_stop_ra(struct interface *ifp);
extern void rtadv_stop_ra_all(void);
extern void rtadv_cmd_init(void);
p.family = AF_INET6;
IPV6_ADDR_COPY(&p.u.prefix6, address);
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
ifc = listnode_head(ifp->nbr_connected);
if (!ifc) {
p.family = AF_INET6;
IPV6_ADDR_COPY(&p.u.prefix6, address);
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
ifc = nbr_connected_check(ifp, &p);
if (!ifc)
int af;
int status;
uint32_t flags;
+ uint32_t nhg_id;
union g_addr dest;
mpls_label_t local_label;
mpls_label_t remote_label;
- /* Nexthops */
- struct nexthop_group nhg;
+ /* Nexthops that are valid and installed */
+ struct nexthop_group fib_nhg;
+
+ /* Primary and backup nexthop sets, copied from the resolving route. */
+ struct nexthop_group primary_nhg;
+ struct nexthop_group backup_nhg;
union pw_protocol_fields fields;
};
case DPLANE_OP_PW_INSTALL:
case DPLANE_OP_PW_UNINSTALL:
/* Free allocated nexthops */
- if (ctx->u.pw.nhg.nexthop) {
+ if (ctx->u.pw.fib_nhg.nexthop) {
/* This deals with recursive nexthops too */
- nexthops_free(ctx->u.pw.nhg.nexthop);
+ nexthops_free(ctx->u.pw.fib_nhg.nexthop);
+
+ ctx->u.pw.fib_nhg.nexthop = NULL;
+ }
+ if (ctx->u.pw.primary_nhg.nexthop) {
+ nexthops_free(ctx->u.pw.primary_nhg.nexthop);
+
+ ctx->u.pw.primary_nhg.nexthop = NULL;
+ }
+ if (ctx->u.pw.backup_nhg.nexthop) {
+ nexthops_free(ctx->u.pw.backup_nhg.nexthop);
- ctx->u.pw.nhg.nexthop = NULL;
+ ctx->u.pw.backup_nhg.nexthop = NULL;
}
break;
{
DPLANE_CTX_VALID(ctx);
- return &(ctx->u.pw.nhg);
+ return &(ctx->u.pw.fib_nhg);
+}
+
+const struct nexthop_group *
+dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return &(ctx->u.pw.primary_nhg);
+}
+
+const struct nexthop_group *
+dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx)
+{
+ DPLANE_CTX_VALID(ctx);
+
+ return &(ctx->u.pw.backup_nhg);
}
/* Accessors for interface information */
enum dplane_op_e op,
struct zebra_pw *pw)
{
+ int ret = EINVAL;
struct prefix p;
afi_t afi;
struct route_table *table;
struct route_node *rn;
struct route_entry *re;
const struct nexthop_group *nhg;
+ struct nexthop *nh, *newnh, *last_nh;
if (IS_ZEBRA_DEBUG_DPLANE_DETAIL)
zlog_debug("init dplane ctx %s: pw '%s', loc %u, rem %u",
*/
memcpy(&p.u, &pw->nexthop, sizeof(pw->nexthop));
p.family = pw->af;
- p.prefixlen = ((pw->af == AF_INET) ?
- IPV4_MAX_PREFIXLEN : IPV6_MAX_PREFIXLEN);
+ p.prefixlen = ((pw->af == AF_INET) ? IPV4_MAX_BITLEN : IPV6_MAX_BITLEN);
afi = (pw->af == AF_INET) ? AFI_IP : AFI_IP6;
table = zebra_vrf_table(afi, SAFI_UNICAST, pw->vrf_id);
- if (table) {
- rn = route_node_match(table, &p);
- if (rn) {
- RNODE_FOREACH_RE(rn, re) {
- if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
- break;
+ if (table == NULL)
+ goto done;
+
+ rn = route_node_match(table, &p);
+ if (rn == NULL)
+ goto done;
+
+ re = NULL;
+ RNODE_FOREACH_RE(rn, re) {
+ if (CHECK_FLAG(re->flags, ZEBRA_FLAG_SELECTED))
+ break;
+ }
+
+ if (re) {
+ /* We'll capture a 'fib' list of nexthops that meet our
+ * criteria: installed, and labelled.
+ */
+ nhg = rib_get_fib_nhg(re);
+ last_nh = NULL;
+
+ if (nhg && nhg->nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nh)) {
+ if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
+ || CHECK_FLAG(nh->flags,
+ NEXTHOP_FLAG_RECURSIVE)
+ || nh->nh_label == NULL)
+ continue;
+
+ newnh = nexthop_dup(nh, NULL);
+
+ if (last_nh)
+ NEXTHOP_APPEND(last_nh, newnh);
+ else
+ ctx->u.pw.fib_nhg.nexthop = newnh;
+ last_nh = newnh;
}
+ }
- if (re) {
- nhg = rib_get_fib_nhg(re);
- if (nhg && nhg->nexthop)
- copy_nexthops(&(ctx->u.pw.nhg.nexthop),
- nhg->nexthop, NULL);
-
- /* Include any installed backup nexthops */
- nhg = rib_get_fib_backup_nhg(re);
- if (nhg && nhg->nexthop)
- copy_nexthops(&(ctx->u.pw.nhg.nexthop),
- nhg->nexthop, NULL);
+ /* Include any installed backup nexthops also. */
+ nhg = rib_get_fib_backup_nhg(re);
+ if (nhg && nhg->nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nh)) {
+ if (!CHECK_FLAG(nh->flags, NEXTHOP_FLAG_ACTIVE)
+ || CHECK_FLAG(nh->flags,
+ NEXTHOP_FLAG_RECURSIVE)
+ || nh->nh_label == NULL)
+ continue;
+
+ newnh = nexthop_dup(nh, NULL);
+
+ if (last_nh)
+ NEXTHOP_APPEND(last_nh, newnh);
+ else
+ ctx->u.pw.fib_nhg.nexthop = newnh;
+ last_nh = newnh;
}
- route_unlock_node(rn);
+ }
+
+ /* Copy primary nexthops; recursive info is included too */
+ assert(re->nhe != NULL); /* SA warning */
+ copy_nexthops(&(ctx->u.pw.primary_nhg.nexthop),
+ re->nhe->nhg.nexthop, NULL);
+ ctx->u.pw.nhg_id = re->nhe->id;
+
+ /* Copy backup nexthop info, if present */
+ if (re->nhe->backup_info && re->nhe->backup_info->nhe) {
+ copy_nexthops(&(ctx->u.pw.backup_nhg.nexthop),
+ re->nhe->backup_info->nhe->nhg.nexthop,
+ NULL);
}
}
+ route_unlock_node(rn);
- return AOK;
+ ret = AOK;
+
+done:
+ return ret;
}
/**
const struct zebra_dplane_ctx *ctx);
const struct nexthop_group *dplane_ctx_get_pw_nhg(
const struct zebra_dplane_ctx *ctx);
+const struct nexthop_group *
+dplane_ctx_get_pw_primary_nhg(const struct zebra_dplane_ctx *ctx);
+const struct nexthop_group *
+dplane_ctx_get_pw_backup_nhg(const struct zebra_dplane_ctx *ctx);
/* Accessors for interface information */
uint32_t dplane_ctx_get_intf_metric(const struct zebra_dplane_ctx *ctx);
DEFINE_MTYPE_STATIC(ZEBRA, NHLFE, "MPLS nexthop object");
int mpls_enabled;
+bool mpls_pw_reach_strict; /* Strict reachability checking */
/* static function declarations */
/* Lookup nexthop in IPv4 routing table. */
memset(&p, 0, sizeof(struct prefix_ipv4));
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = nexthop->gate.ipv4;
rn = route_node_match(table, (struct prefix *)&p);
/* Lookup nexthop in IPv6 routing table. */
memset(&p, 0, sizeof(struct prefix_ipv6));
p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
p.prefix = nexthop->gate.ipv6;
rn = route_node_match(table, (struct prefix *)&p);
void zebra_mpls_init(void)
{
mpls_enabled = 0;
+ mpls_pw_reach_strict = false;
if (mpls_kernel_init() < 0) {
flog_warn(EC_ZEBRA_MPLS_SUPPORT_DISABLED,
/* Global variables. */
extern int mpls_enabled;
+extern bool mpls_pw_reach_strict; /* Strict pseudowire reachability checking */
#ifdef __cplusplus
}
kr_state.rtseq = 1;
+ /* Strict pseudowire reachability checking required for obsd */
+ mpls_pw_reach_strict = true;
+
return 0;
}
return 1;
}
- if (top &&
- ((top->family == AF_INET && top->prefixlen == 32
- && nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr)
- || (top->family == AF_INET6 && top->prefixlen == 128
- && memcmp(&nexthop->gate.ipv6, &top->u.prefix6, 16) == 0))) {
+ if (top
+ && ((top->family == AF_INET && top->prefixlen == IPV4_MAX_BITLEN
+ && nexthop->gate.ipv4.s_addr == top->u.prefix4.s_addr)
+ || (top->family == AF_INET6 && top->prefixlen == IPV6_MAX_BITLEN
+ && memcmp(&nexthop->gate.ipv6, &top->u.prefix6,
+ IPV6_MAX_BYTELEN)
+ == 0))) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
" :%s: Attempting to install a max prefixlength route through itself",
switch (afi) {
case AFI_IP:
p.family = AF_INET;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
p.u.prefix4 = *ipv4;
break;
case AFI_IP6:
p.family = AF_INET6;
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
p.u.prefix6 = nexthop->gate.ipv6;
break;
default:
* host route.
*/
if (prefix_same(&rn->p, top))
- if (((afi == AFI_IP) && (rn->p.prefixlen != 32))
- || ((afi == AFI_IP6) && (rn->p.prefixlen != 128))) {
+ if (((afi == AFI_IP)
+ && (rn->p.prefixlen != IPV4_MAX_BITLEN))
+ || ((afi == AFI_IP6)
+ && (rn->p.prefixlen != IPV6_MAX_BITLEN))) {
if (IS_ZEBRA_DEBUG_RIB_DETAILED)
zlog_debug(
" %s: Matched against ourself and prefix length is not max bit length",
const struct prefix *p = pu.p;
char buf[PREFIX2STR_BUFFER];
- if ((p->family == AF_INET && p->prefixlen == IPV4_MAX_PREFIXLEN) ||
- (p->family == AF_INET6 && p->prefixlen == IPV6_MAX_PREFIXLEN)) {
+ if ((p->family == AF_INET && p->prefixlen == IPV4_MAX_BITLEN)
+ || (p->family == AF_INET6 && p->prefixlen == IPV6_MAX_BITLEN)) {
snprintf(str, size, "%s", inet_ntop(p->family, &p->u.prefix,
buf, PREFIX2STR_BUFFER));
return str;
static void zebra_pw_install(struct zebra_pw *);
static void zebra_pw_uninstall(struct zebra_pw *);
static int zebra_pw_install_retry(struct thread *);
-static int zebra_pw_check_reachability(struct zebra_pw *);
+static int zebra_pw_check_reachability(const struct zebra_pw *);
static void zebra_pw_update_status(struct zebra_pw *, int);
static inline int zebra_pw_compare(const struct zebra_pw *a,
zsend_pw_update(pw->client, pw);
}
-static int zebra_pw_check_reachability(struct zebra_pw *pw)
+static int zebra_pw_check_reachability_strict(const struct zebra_pw *pw,
+ struct route_entry *re)
+{
+ const struct nexthop *nexthop;
+ const struct nexthop_group *nhg;
+ bool found_p = false;
+ bool fail_p = false;
+
+ /* TODO: consider GRE/L2TPv3 tunnels in addition to MPLS LSPs */
+
+ /* All active nexthops must be labelled; look at
+ * primary and backup fib lists, in case there's been
+ * a backup nexthop activation.
+ */
+ nhg = rib_get_fib_nhg(re);
+ if (nhg && nhg->nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
+ if (nexthop->nh_label != NULL)
+ found_p = true;
+ else {
+ fail_p = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (fail_p)
+ goto done;
+
+ nhg = rib_get_fib_backup_nhg(re);
+ if (nhg && nhg->nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE)) {
+ if (nexthop->nh_label != NULL)
+ found_p = true;
+ else {
+ fail_p = true;
+ break;
+ }
+ }
+ }
+ }
+
+done:
+
+ if (fail_p || !found_p) {
+ if (IS_ZEBRA_DEBUG_PW)
+ zlog_debug("%s: unlabeled route for %s",
+ __func__, pw->ifname);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int zebra_pw_check_reachability(const struct zebra_pw *pw)
{
struct route_entry *re;
- struct nexthop *nexthop;
+ const struct nexthop *nexthop;
+ const struct nexthop_group *nhg;
+ bool found_p = false;
/* TODO: consider GRE/L2TPv3 tunnels in addition to MPLS LSPs */
- /* find route to the remote end of the pseudowire */
+ /* Find route to the remote end of the pseudowire */
re = rib_match(family2afi(pw->af), SAFI_UNICAST, pw->vrf_id,
&pw->nexthop, NULL);
if (!re) {
return -1;
}
- /*
- * Need to ensure that there's a label binding for all nexthops.
- * Otherwise, ECMP for this route could render the pseudowire unusable.
+ /* Stricter checking for some OSes (OBSD, e.g.) */
+ if (mpls_pw_reach_strict)
+ return zebra_pw_check_reachability_strict(pw, re);
+
+ /* There must be at least one installed labelled nexthop;
+ * look at primary and backup fib lists, in case there's been
+ * a backup nexthop activation.
*/
- for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) {
- if (!nexthop->nh_label) {
- if (IS_ZEBRA_DEBUG_PW)
- zlog_debug("%s: unlabeled route for %s",
- __func__, pw->ifname);
- return -1;
+ nhg = rib_get_fib_nhg(re);
+ if (nhg && nhg->nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) &&
+ nexthop->nh_label != NULL) {
+ found_p = true;
+ break;
+ }
+ }
+ }
+
+ if (found_p)
+ return 0;
+
+ nhg = rib_get_fib_backup_nhg(re);
+ if (nhg && nhg->nexthop) {
+ for (ALL_NEXTHOPS_PTR(nhg, nexthop)) {
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ continue;
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE) &&
+ nexthop->nh_label != NULL) {
+ found_p = true;
+ break;
+ }
}
}
+ if (!found_p) {
+ if (IS_ZEBRA_DEBUG_PW)
+ zlog_debug("%s: unlabeled route for %s",
+ __func__, pw->ifname);
+ return -1;
+ }
+
return 0;
}
}
struct route_entry *rib_match(afi_t afi, safi_t safi, vrf_id_t vrf_id,
- union g_addr *addr, struct route_node **rn_out)
+ const union g_addr *addr,
+ struct route_node **rn_out)
{
struct prefix p;
struct route_table *table;
p.family = afi;
if (afi == AFI_IP) {
p.u.prefix4 = addr->ipv4;
- p.prefixlen = IPV4_MAX_PREFIXLEN;
+ p.prefixlen = IPV4_MAX_BITLEN;
} else {
p.u.prefix6 = addr->ipv6;
- p.prefixlen = IPV6_MAX_PREFIXLEN;
+ p.prefixlen = IPV6_MAX_BITLEN;
}
rn = route_node_match(table, &p);
zrouter.packets_to_process = ZEBRA_ZAPI_PACKETS_TO_PROCESS;
- zrouter.rtadv_sock = -1;
-
zebra_vxlan_init();
zebra_mlag_init();
struct hash *iptable_hash;
- /* used if vrf backend is not network namespace */
- int rtadv_sock;
-
/* A sequence number used for tracking routes */
_Atomic uint32_t sequence_num;
else
zvrf->zns = zebra_ns_lookup(NS_DEFAULT);
#if defined(HAVE_RTADV)
- rtadv_init(zvrf);
+ rtadv_vrf_init(zvrf);
#endif
/* Inform clients that the VRF is now active. This is an
json_object_string_add(json_route, "prefix",
srcdest_rnode2str(rn, buf, sizeof(buf)));
+ json_object_int_add(json_route, "prefixLen", rn->p.prefixlen);
json_object_string_add(json_route, "protocol",
zebra_route_string(re->type));
prefix2str(&rn->p, buf, sizeof(buf));
json_object_object_add(json, buf, json_prefix);
- vty_out(vty, "%s\n", json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_PRETTY));
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_PRETTY
+ | JSON_C_TO_STRING_NOSLASHESCAPE));
json_object_free(json);
}
}
if (use_json) {
- vty_out(vty, "%s\n", json_object_to_json_string_ext(json,
- JSON_C_TO_STRING_PRETTY));
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json,
+ JSON_C_TO_STRING_PRETTY
+ | JSON_C_TO_STRING_NOSLASHESCAPE));
json_object_free(json);
}
}