if (BGP_DEBUG(flowspec, FLOWSPEC)) {
char return_string[BGP_FLOWSPEC_NLRI_STRING_MAX];
- char local_string[BGP_FLOWSPEC_NLRI_STRING_MAX];
+ char local_string[BGP_FLOWSPEC_NLRI_STRING_MAX * 2];
char ec_string[BGP_FLOWSPEC_NLRI_STRING_MAX];
char *s = NULL;
p.u.prefix_flowspec.prefixlen,
return_string,
NLRI_STRING_FORMAT_MIN, NULL);
- snprintf(ec_string, BGP_FLOWSPEC_NLRI_STRING_MAX,
+ snprintf(ec_string, sizeof(ec_string),
"EC{none}");
if (attr && attr->ecommunity) {
s = ecommunity_ecom2str(attr->ecommunity,
ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
- snprintf(ec_string,
- BGP_FLOWSPEC_NLRI_STRING_MAX,
+ snprintf(ec_string, sizeof(ec_string),
"EC{%s}",
s == NULL ? "none" : s);
if (s)
ecommunity_strfree(&s);
}
- snprintf(local_string, BGP_FLOWSPEC_NLRI_STRING_MAX,
+ snprintf(local_string, sizeof(local_string),
"FS Rx %s %s %s %s", withdraw ?
"Withdraw":"Update",
afi2str(afi), return_string,
new->extra->parent = bgp_info_lock(parent);
bgp_lock_node((struct bgp_node *)((struct bgp_info *)parent)->net);
if (bgp_orig)
- new->extra->bgp_orig = bgp_orig;
+ new->extra->bgp_orig = bgp_lock(bgp_orig);
if (nexthop_orig)
new->extra->nexthop_orig = *nexthop_orig;
{
struct prefix p;
struct prefix *addr;
- struct bgp_node *rn;
+ struct bgp_node *rn = NULL;
struct bgp_connected_ref *bc;
addr = ifc->address;
p = *(CONNECTED_PREFIX(ifc));
+ apply_mask(&p);
if (addr->family == AF_INET) {
- apply_mask_ipv4((struct prefix_ipv4 *)&p);
-
if (prefix_ipv4_any((struct prefix_ipv4 *)&p))
return;
bgp_address_del(bgp, addr);
rn = bgp_node_lookup(bgp->connected_table[AFI_IP], &p);
- if (!rn)
- return;
-
- bc = rn->info;
- bc->refcnt--;
- if (bc->refcnt == 0) {
- XFREE(MTYPE_BGP_CONN, bc);
- rn->info = NULL;
- }
- bgp_unlock_node(rn);
- bgp_unlock_node(rn);
} else if (addr->family == AF_INET6) {
- apply_mask_ipv6((struct prefix_ipv6 *)&p);
-
if (IN6_IS_ADDR_UNSPECIFIED(&p.u.prefix6))
return;
rn = bgp_node_lookup(bgp->connected_table[AFI_IP6],
(struct prefix *)&p);
- if (!rn)
- return;
+ }
- bc = rn->info;
- bc->refcnt--;
- if (bc->refcnt == 0) {
- XFREE(MTYPE_BGP_CONN, bc);
- rn->info = NULL;
- }
- bgp_unlock_node(rn);
- bgp_unlock_node(rn);
+ if (!rn)
+ return;
+
+ bc = rn->info;
+ bc->refcnt--;
+ if (bc->refcnt == 0) {
+ XFREE(MTYPE_BGP_CONN, bc);
+ rn->info = NULL;
+ }
+ bgp_unlock_node(rn);
+ bgp_unlock_node(rn);
+}
+
+/*
+ * route_table cleanup hook, installed on bgp->connected_table just
+ * before bgp_table_unlock() frees it: invoked for each remaining node,
+ * it drops one reference on the node's bgp_connected_ref and frees the
+ * ref when the count reaches zero (rn->info is cleared so the table
+ * teardown does not see a dangling pointer).
+ */
+static void bgp_connected_cleanup(struct route_table *table,
+ struct route_node *rn)
+{
+ struct bgp_connected_ref *bc;
+
+ bc = rn->info;
+ if (!bc)
+ return;
+
+ bc->refcnt--;
+ if (bc->refcnt == 0) {
+ XFREE(MTYPE_BGP_CONN, bc);
+ rn->info = NULL;
	}
}
bgp_table_unlock(bgp->nexthop_cache_table[afi]);
bgp->nexthop_cache_table[afi] = NULL;
+ bgp->connected_table[afi]->route_table->cleanup =
+ bgp_connected_cleanup;
bgp_table_unlock(bgp->connected_table[afi]);
bgp->connected_table[afi] = NULL;
static void bgp_info_free(struct bgp_info *binfo)
{
/* unlink reference to parent, if any. */
- if (binfo->extra && binfo->extra->parent) {
- bgp_info_unlock((struct bgp_info *)binfo->extra->parent);
- bgp_unlock_node((struct bgp_node *)((struct bgp_info *)binfo
- ->extra->parent)->net);
- binfo->extra->parent = NULL;
+ if (binfo->extra) {
+ if (binfo->extra->parent) {
+ bgp_unlock_node(
+ (struct bgp_node *)((struct bgp_info *)binfo
+ ->extra->parent)
+ ->net);
+ bgp_info_unlock(
+ (struct bgp_info *)binfo->extra->parent);
+ binfo->extra->parent = NULL;
+ }
+
+ if (binfo->extra->bgp_orig)
+ bgp_unlock(binfo->extra->bgp_orig);
}
if (binfo->attr)
XFREE(MTYPE_BGP_AGGREGATE, aggregate);
}
+/*
+ * Compare an installed aggregate route against freshly computed
+ * attributes: return 1 when 'ri' already carries the same AS path and
+ * community (so no re-install is needed), 0 otherwise.  A NULL
+ * 'aspath' argument is compared as the empty AS path.
+ */
+static int bgp_aggregate_info_same(struct bgp_info *ri, struct aspath *aspath,
+ struct community *comm)
+{
+ /* Lazily-created empty AS path, cached across calls (never freed). */
+ static struct aspath *ae = NULL;
+
+ if (!ae)
+ ae = aspath_empty();
+
+ if (!ri)
+ return 0;
+
+ if (!aspath_cmp(ri->attr->aspath, (aspath) ? aspath : ae))
+ return 0;
+
+ if (!community_cmp(ri->attr->community, comm))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Install or withdraw the self-originated aggregate route for prefix
+ * 'p' in the BGP RIB.  When aggregate->count > 0 the aggregate entry
+ * is (re)installed with the supplied origin/aspath/community unless
+ * its attributes are unchanged; when the count is zero any existing
+ * self-originated aggregate entry is withdrawn.  On the
+ * unchanged/early-return path 'aspath' and 'community' are freed
+ * here.
+ */
+static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi,
+ struct prefix *p, uint8_t origin,
+ struct aspath *aspath,
+ struct community *community,
+ uint8_t atomic_aggregate,
+ struct bgp_aggregate *aggregate)
+{
+ struct bgp_node *rn;
+ struct bgp_table *table;
+ struct bgp_info *ri, *new;
+
+ table = bgp->rib[afi][safi];
+
+ /* bgp_node_get() creates the node if needed and holds a lock;
+ * released via bgp_unlock_node() on every exit path. */
+ rn = bgp_node_get(table, p);
+
+ /* Locate our own previously-installed aggregate entry, if any. */
+ for (ri = rn->info; ri; ri = ri->next)
+ if (ri->peer == bgp->peer_self && ri->type == ZEBRA_ROUTE_BGP
+ && ri->sub_type == BGP_ROUTE_AGGREGATE)
+ break;
+
+ if (aggregate->count > 0) {
+ /*
+ * If the aggregate information has not changed
+ * no need to re-install it again.
+ *
+ * NOTE(review): the comparison is done against rn->info
+ * (the first info on the node), not against the
+ * self-originated entry 'ri' located above -- confirm
+ * this is intended.
+ */
+ if (bgp_aggregate_info_same(rn->info, aspath, community)) {
+ bgp_unlock_node(rn);
+
+ if (aspath)
+ aspath_free(aspath);
+ if (community)
+ community_free(community);
+
+ return;
+ }
+
+ /*
+ * Mark the old as unusable
+ */
+ if (ri)
+ bgp_info_delete(rn, ri);
+
+ new = info_make(
+ ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self,
+ bgp_attr_aggregate_intern(bgp, origin, aspath,
+ community, aggregate->as_set,
+ atomic_aggregate),
+ rn);
+ SET_FLAG(new->flags, BGP_INFO_VALID);
+
+ bgp_info_add(rn, new);
+ bgp_process(bgp, rn, afi, safi);
+ } else {
+ /*
+ * NOTE(review): this rescan duplicates the lookup done
+ * at the top of the function; 'ri' already points at
+ * the self-originated aggregate entry (if any).
+ */
+ for (ri = rn->info; ri; ri = ri->next)
+ if (ri->peer == bgp->peer_self
+ && ri->type == ZEBRA_ROUTE_BGP
+ && ri->sub_type == BGP_ROUTE_AGGREGATE)
+ break;
+
+ /* Withdraw static BGP route from routing table. */
+ if (ri) {
+ bgp_info_delete(rn, ri);
+ bgp_process(bgp, rn, afi, safi);
+ }
+ }
+
+ bgp_unlock_node(rn);
+}
+
/* Update an aggregate as routes are added/removed from the BGP table */
static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
struct bgp_info *rinew, afi_t afi, safi_t safi,
struct aspath *asmerge = NULL;
struct community *community = NULL;
struct community *commerge = NULL;
-#if defined(AGGREGATE_NEXTHOP_CHECK)
- struct in_addr nexthop;
- uint32_t med = 0;
-#endif
struct bgp_info *ri;
- struct bgp_info *new;
- int first = 1;
unsigned long match = 0;
uint8_t atomic_aggregate = 0;
- /* Record adding route's nexthop and med. */
- if (rinew) {
-#if defined(AGGREGATE_NEXTHOP_CHECK)
- nexthop = rinew->attr->nexthop;
- med = rinew->attr->med;
-#endif
- }
-
/* ORIGIN attribute: If at least one route among routes that are
aggregated has ORIGIN with the value INCOMPLETE, then the
aggregated route must have the ORIGIN attribute with the value
top = bgp_node_get(table, p);
for (rn = bgp_node_get(table, p); rn;
- rn = bgp_route_next_until(rn, top))
- if (rn->p.prefixlen > p->prefixlen) {
- match = 0;
-
- for (ri = rn->info; ri; ri = ri->next) {
- if (BGP_INFO_HOLDDOWN(ri))
- continue;
+ rn = bgp_route_next_until(rn, top)) {
+ if (rn->p.prefixlen <= p->prefixlen)
+ continue;
- if (del && ri == del)
- continue;
+ match = 0;
- if (!rinew && first) {
-#if defined(AGGREGATE_NEXTHOP_CHECK)
- nexthop = ri->attr->nexthop;
- med = ri->attr->med;
-#endif
- first = 0;
- }
+ for (ri = rn->info; ri; ri = ri->next) {
+ if (BGP_INFO_HOLDDOWN(ri))
+ continue;
-#ifdef AGGREGATE_NEXTHOP_CHECK
- if (!IPV4_ADDR_SAME(&ri->attr->nexthop,
- &nexthop)
- || ri->attr->med != med) {
- if (aspath)
- aspath_free(aspath);
- if (community)
- community_free(community);
- bgp_unlock_node(rn);
- bgp_unlock_node(top);
- return;
- }
-#endif /* AGGREGATE_NEXTHOP_CHECK */
+ if (del && ri == del)
+ continue;
- if (ri->attr->flag
- & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))
- atomic_aggregate = 1;
+ if (ri->attr->flag
+ & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))
+ atomic_aggregate = 1;
- if (ri->sub_type != BGP_ROUTE_AGGREGATE) {
- if (aggregate->summary_only) {
- (bgp_info_extra_get(ri))
- ->suppress++;
- bgp_info_set_flag(
- rn, ri,
- BGP_INFO_ATTR_CHANGED);
- match++;
- }
+ if (ri->sub_type == BGP_ROUTE_AGGREGATE)
+ continue;
- aggregate->count++;
-
- if (origin < ri->attr->origin)
- origin = ri->attr->origin;
-
- if (aggregate->as_set) {
- if (aspath) {
- asmerge = aspath_aggregate(
- aspath,
- ri->attr->aspath);
- aspath_free(aspath);
- aspath = asmerge;
- } else
- aspath = aspath_dup(
- ri->attr->aspath);
-
- if (ri->attr->community) {
- if (community) {
- commerge = community_merge(
- community,
- ri->attr->community);
- community = community_uniq_sort(
- commerge);
- community_free(
- commerge);
- } else
- community = community_dup(
- ri->attr->community);
- }
- }
- }
+ /*
+ * summary-only aggregate route suppress
+ * aggregated route announcements.
+ */
+ if (aggregate->summary_only) {
+ (bgp_info_extra_get(ri))->suppress++;
+ bgp_info_set_flag(rn, ri,
+ BGP_INFO_ATTR_CHANGED);
+ match++;
}
- if (match)
- bgp_process(bgp, rn, afi, safi);
+
+ aggregate->count++;
+
+ /*
+ * If at least one route among routes that are
+ * aggregated has ORIGIN with the value INCOMPLETE,
+ * then the aggregated route MUST have the ORIGIN
+ * attribute with the value INCOMPLETE. Otherwise, if
+ * at least one route among routes that are aggregated
+ * has ORIGIN with the value EGP, then the aggregated
+ * route MUST have the ORIGIN attribute with the value
+ * EGP.
+ */
+ if (origin < ri->attr->origin)
+ origin = ri->attr->origin;
+
+ if (!aggregate->as_set)
+ continue;
+
+ /*
+ * as-set aggregate route generate origin, as path,
+ * and community aggregation.
+ */
+ if (aspath) {
+ asmerge = aspath_aggregate(aspath,
+ ri->attr->aspath);
+ aspath_free(aspath);
+ aspath = asmerge;
+ } else
+ aspath = aspath_dup(ri->attr->aspath);
+
+ if (!ri->attr->community)
+ continue;
+
+ if (community) {
+ commerge = community_merge(community,
+ ri->attr->community);
+ community = community_uniq_sort(commerge);
+ community_free(commerge);
+ } else
+ community = community_dup(ri->attr->community);
}
+ if (match)
+ bgp_process(bgp, rn, afi, safi);
+ }
bgp_unlock_node(top);
if (rinew) {
}
}
- if (aggregate->count > 0) {
- rn = bgp_node_get(table, p);
- new = info_make(
- ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self,
- bgp_attr_aggregate_intern(bgp, origin, aspath,
- community, aggregate->as_set,
- atomic_aggregate),
- rn);
- SET_FLAG(new->flags, BGP_INFO_VALID);
+ bgp_aggregate_install(bgp, afi, safi, p, origin, aspath, community,
+ atomic_aggregate, aggregate);
- bgp_info_add(rn, new);
- bgp_unlock_node(rn);
- bgp_process(bgp, rn, afi, safi);
- } else {
+ if (aggregate->count == 0) {
if (aspath)
aspath_free(aspath);
if (community)
}
}
-void bgp_aggregate_delete(struct bgp *, struct prefix *, afi_t, safi_t,
- struct bgp_aggregate *);
+/*
+ * Undo the bookkeeping done when routes were folded into the aggregate
+ * for prefix 'p': walk every more-specific route under the aggregate,
+ * decrement the aggregate's contributor count and, for summary-only
+ * aggregates, drop each contributor's suppress refcount so routes
+ * whose count reaches zero are re-announced via bgp_process().
+ * Withdrawal of the aggregate entry itself is handled separately (see
+ * bgp_aggregate_install()).
+ */
+static void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
+ safi_t safi, struct bgp_aggregate *aggregate)
+{
+ struct bgp_table *table;
+ struct bgp_node *top;
+ struct bgp_node *rn;
+ struct bgp_info *ri;
+ unsigned long match;
+
+ table = bgp->rib[afi][safi];
+
+ /* Walk all routes more specific than the aggregate prefix. */
+ top = bgp_node_get(table, p);
+ for (rn = bgp_node_get(table, p); rn;
+ rn = bgp_route_next_until(rn, top)) {
+ if (rn->p.prefixlen <= p->prefixlen)
+ continue;
+ match = 0;
+
+ for (ri = rn->info; ri; ri = ri->next) {
+ if (BGP_INFO_HOLDDOWN(ri))
+ continue;
+
+ /* Aggregate entries never contribute to themselves. */
+ if (ri->sub_type == BGP_ROUTE_AGGREGATE)
+ continue;
+
+ if (aggregate->summary_only && ri->extra) {
+ ri->extra->suppress--;
+
+ if (ri->extra->suppress == 0) {
+ bgp_info_set_flag(
+ rn, ri, BGP_INFO_ATTR_CHANGED);
+ match++;
+ }
+ }
+ aggregate->count--;
+ }
+
+ /* If this node was suppressed, process the change. */
+ if (match)
+ bgp_process(bgp, rn, afi, safi);
+ }
+ bgp_unlock_node(top);
+}
void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p,
struct bgp_info *ri, afi_t afi, safi_t safi)
struct bgp_aggregate *aggregate;
struct bgp_table *table;
- /* MPLS-VPN aggregation is not yet supported. */
- if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
- || (safi == SAFI_EVPN)
- || (safi == SAFI_FLOWSPEC))
- return;
-
table = bgp->aggregate[afi][safi];
/* No aggregates configured. */
struct bgp_aggregate *aggregate;
struct bgp_table *table;
- /* MPLS-VPN aggregation is not yet supported. */
- if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
- || (safi == SAFI_EVPN)
- || (safi == SAFI_FLOWSPEC))
- return;
-
table = bgp->aggregate[afi][safi];
/* No aggregates configured. */
bgp_unlock_node(child);
}
-/* Called via bgp_aggregate_set when the user configures aggregate-address */
-static void bgp_aggregate_add(struct bgp *bgp, struct prefix *p, afi_t afi,
- safi_t safi, struct bgp_aggregate *aggregate)
-{
- struct bgp_table *table;
- struct bgp_node *top;
- struct bgp_node *rn;
- struct bgp_info *new;
- struct bgp_info *ri;
- unsigned long match;
- uint8_t origin = BGP_ORIGIN_IGP;
- struct aspath *aspath = NULL;
- struct aspath *asmerge = NULL;
- struct community *community = NULL;
- struct community *commerge = NULL;
- uint8_t atomic_aggregate = 0;
-
- table = bgp->rib[afi][safi];
-
- /* Sanity check. */
- if (afi == AFI_IP && p->prefixlen == IPV4_MAX_BITLEN)
- return;
- if (afi == AFI_IP6 && p->prefixlen == IPV6_MAX_BITLEN)
- return;
-
- /* If routes exists below this node, generate aggregate routes. */
- top = bgp_node_get(table, p);
- for (rn = bgp_node_get(table, p); rn;
- rn = bgp_route_next_until(rn, top)) {
- if (rn->p.prefixlen <= p->prefixlen)
- continue;
-
- match = 0;
-
- for (ri = rn->info; ri; ri = ri->next) {
- if (BGP_INFO_HOLDDOWN(ri))
- continue;
-
- if (ri->attr->flag
- & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))
- atomic_aggregate = 1;
-
- if (ri->sub_type == BGP_ROUTE_AGGREGATE)
- continue;
-
- /* summary-only aggregate route suppress
- * aggregated route announcement. */
- if (aggregate->summary_only) {
- (bgp_info_extra_get(ri))->suppress++;
- bgp_info_set_flag(rn, ri,
- BGP_INFO_ATTR_CHANGED);
- match++;
- }
-
- /* If at least one route among routes that are
- * aggregated has ORIGIN with the value INCOMPLETE,
- * then the aggregated route MUST have the ORIGIN
- * attribute with the value INCOMPLETE. Otherwise, if
- * at least one route among routes that are aggregated
- * has ORIGIN with the value EGP, then the aggregated
- * route MUST have the ORIGIN attribute with the value
- * EGP.
- */
- if (origin < ri->attr->origin)
- origin = ri->attr->origin;
-
- /* as-set aggregate route generate origin, as path,
- * community aggregation. */
- if (aggregate->as_set) {
- if (aspath) {
- asmerge = aspath_aggregate(
- aspath, ri->attr->aspath);
- aspath_free(aspath);
- aspath = asmerge;
- } else
- aspath = aspath_dup(ri->attr->aspath);
-
- if (ri->attr->community) {
- if (community) {
- commerge = community_merge(
- community,
- ri->attr->community);
- community = community_uniq_sort(
- commerge);
- community_free(commerge);
- } else
- community = community_dup(
- ri->attr->community);
- }
- }
- aggregate->count++;
- }
-
- /* If this node is suppressed, process the change. */
- if (match)
- bgp_process(bgp, rn, afi, safi);
- }
- bgp_unlock_node(top);
-
- /* Add aggregate route to BGP table. */
- if (aggregate->count) {
- rn = bgp_node_get(table, p);
- new = info_make(
- ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self,
- bgp_attr_aggregate_intern(bgp, origin, aspath,
- community, aggregate->as_set,
- atomic_aggregate),
- rn);
- SET_FLAG(new->flags, BGP_INFO_VALID);
-
- bgp_info_add(rn, new);
- bgp_unlock_node(rn);
-
- /* Process change. */
- bgp_process(bgp, rn, afi, safi);
- } else {
- if (aspath)
- aspath_free(aspath);
- if (community)
- community_free(community);
- }
-}
-
-void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
- safi_t safi, struct bgp_aggregate *aggregate)
-{
- struct bgp_table *table;
- struct bgp_node *top;
- struct bgp_node *rn;
- struct bgp_info *ri;
- unsigned long match;
-
- table = bgp->rib[afi][safi];
-
- if (afi == AFI_IP && p->prefixlen == IPV4_MAX_BITLEN)
- return;
- if (afi == AFI_IP6 && p->prefixlen == IPV6_MAX_BITLEN)
- return;
-
- /* If routes exists below this node, generate aggregate routes. */
- top = bgp_node_get(table, p);
- for (rn = bgp_node_get(table, p); rn;
- rn = bgp_route_next_until(rn, top)) {
- if (rn->p.prefixlen <= p->prefixlen)
- continue;
- match = 0;
-
- for (ri = rn->info; ri; ri = ri->next) {
- if (BGP_INFO_HOLDDOWN(ri))
- continue;
-
- if (ri->sub_type == BGP_ROUTE_AGGREGATE)
- continue;
-
- if (aggregate->summary_only && ri->extra) {
- ri->extra->suppress--;
-
- if (ri->extra->suppress == 0) {
- bgp_info_set_flag(
- rn, ri, BGP_INFO_ATTR_CHANGED);
- match++;
- }
- }
- aggregate->count--;
- }
-
- /* If this node was suppressed, process the change. */
- if (match)
- bgp_process(bgp, rn, afi, safi);
- }
- bgp_unlock_node(top);
-
- /* Delete aggregate route from BGP table. */
- rn = bgp_node_get(table, p);
-
- for (ri = rn->info; ri; ri = ri->next)
- if (ri->peer == bgp->peer_self && ri->type == ZEBRA_ROUTE_BGP
- && ri->sub_type == BGP_ROUTE_AGGREGATE)
- break;
-
- /* Withdraw static BGP route from routing table. */
- if (ri) {
- bgp_info_delete(rn, ri);
- bgp_process(bgp, rn, afi, safi);
- }
-
- /* Unlock bgp_node_lookup. */
- bgp_unlock_node(rn);
-}
-
/* Aggregate route attribute. */
#define AGGREGATE_SUMMARY_ONLY 1
#define AGGREGATE_AS_SET 1
struct bgp_node *rn;
struct bgp_aggregate *aggregate;
- if (safi == SAFI_FLOWSPEC)
- return CMD_WARNING_CONFIG_FAILED;
-
/* Convert string to prefix structure. */
ret = str2prefix(prefix_str, &p);
if (!ret) {
}
aggregate = rn->info;
- if (aggregate->safi == SAFI_UNICAST)
- bgp_aggregate_delete(bgp, &p, afi, SAFI_UNICAST, aggregate);
- if (aggregate->safi == SAFI_LABELED_UNICAST)
- bgp_aggregate_delete(bgp, &p, afi, SAFI_LABELED_UNICAST,
- aggregate);
- if (aggregate->safi == SAFI_MULTICAST)
- bgp_aggregate_delete(bgp, &p, afi, SAFI_MULTICAST, aggregate);
+ bgp_aggregate_delete(bgp, &p, afi, safi, aggregate);
+ bgp_aggregate_install(bgp, afi, safi, &p, 0, NULL, NULL, 0, aggregate);
/* Unlock aggregate address configuration. */
rn->info = NULL;
struct bgp_node *rn;
struct bgp_aggregate *aggregate;
- if (safi == SAFI_FLOWSPEC)
- return CMD_WARNING_CONFIG_FAILED;
-
/* Convert string to prefix structure. */
ret = str2prefix(prefix_str, &p);
if (!ret) {
}
apply_mask(&p);
+ if ((afi == AFI_IP && p.prefixlen == IPV4_MAX_BITLEN) ||
+ (afi == AFI_IP6 && p.prefixlen == IPV6_MAX_BITLEN)) {
+ vty_out(vty, "Specified prefix: %s will not result in any useful aggregation, disallowing\n",
+ prefix_str);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
/* Old configuration check. */
rn = bgp_node_get(bgp->aggregate[afi][safi], &p);
rn->info = aggregate;
/* Aggregate address insert into BGP routing table. */
- if (safi == SAFI_UNICAST)
- bgp_aggregate_add(bgp, &p, afi, SAFI_UNICAST, aggregate);
- if (safi == SAFI_LABELED_UNICAST)
- bgp_aggregate_add(bgp, &p, afi, SAFI_LABELED_UNICAST,
- aggregate);
- if (safi == SAFI_MULTICAST)
- bgp_aggregate_add(bgp, &p, afi, SAFI_MULTICAST, aggregate);
+ bgp_aggregate_route(bgp, &p, NULL, afi, safi, NULL, aggregate);
return CMD_SUCCESS;
}
|| (safi == SAFI_EVPN
&& BGP_ATTR_NEXTHOP_AFI_IP6(attr))
|| (BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
- char buf_a[BUFSIZ];
- char buf_b[BUFSIZ];
+ char buf_a[512];
+ char buf_b[512];
char buf_c[BUFSIZ];
if (attr->mp_nexthop_len
== BGP_ATTR_NHLEN_IPV6_GLOBAL) {
struct prefix *p;
struct prefix_rd *prd;
struct bgp_static *bgp_static;
- char buf[PREFIX_STRLEN];
+ char buf[PREFIX_STRLEN * 2];
char buf2[SU_ADDRSTRLEN];
char rdbuf[RD_ADDRSTRLEN];
BGP_UPTIME_LEN, 0, NULL));
if (peer->status == Established)
- if (peer->afc_recv[afi][pfx_rcd_safi])
+ if (peer->afc_recv[afi][safi])
vty_out(vty, " %12ld",
peer->pcount[afi]
[pfx_rcd_safi]);
return 0;
}
+/*
+ * List ->del callback for the vpn_policy import_vrf/export_vrf name
+ * lists: frees the heap-allocated VRF name string held by a list node,
+ * so deleting the list on shutdown releases the strings as well as the
+ * nodes themselves.
+ */
+static void bgp_vrf_string_name_delete(void *data)
+{
+ char *vname = data;
+
+ XFREE(MTYPE_TMP, vname);
+}
+
/* BGP instance creation by `router bgp' commands. */
static struct bgp *bgp_create(as_t *as, const char *name,
enum bgp_instance_type inst_type)
MPLS_LABEL_NONE;
bgp->vpn_policy[afi].import_vrf = list_new();
+ bgp->vpn_policy[afi].import_vrf->del =
+ bgp_vrf_string_name_delete;
bgp->vpn_policy[afi].export_vrf = list_new();
+ bgp->vpn_policy[afi].export_vrf->del =
+ bgp_vrf_string_name_delete;
}
if (name) {
bgp->name = XSTRDUP(MTYPE_BGP, name);
rmap = &bgp->table_map[afi][safi];
if (rmap->name)
XFREE(MTYPE_ROUTE_MAP_NAME, rmap->name);
-
- /*
- * Yes this is per AFI, but
- * the list_delete_and_null nulls the pointer
- * and we'll not leak anything on going down
- * and the if test will fail on the second safi.
- */
- if (bgp->vpn_policy[afi].import_vrf)
- list_delete_and_null(&bgp->vpn_policy[afi].import_vrf);
- if (bgp->vpn_policy[afi].export_vrf)
- list_delete_and_null(&bgp->vpn_policy[afi].export_vrf);
}
bgp_scan_finish(bgp);
bgp_evpn_cleanup(bgp);
bgp_pbr_cleanup(bgp);
+
+ for (afi = AFI_IP; afi < AFI_MAX; afi++) {
+ vpn_policy_direction_t dir;
+
+ if (bgp->vpn_policy[afi].import_vrf)
+ list_delete_and_null(&bgp->vpn_policy[afi].import_vrf);
+ if (bgp->vpn_policy[afi].export_vrf)
+ list_delete_and_null(&bgp->vpn_policy[afi].export_vrf);
+
+ dir = BGP_VPN_POLICY_DIR_FROMVPN;
+ if (bgp->vpn_policy[afi].rtlist[dir])
+ ecommunity_free(&bgp->vpn_policy[afi].rtlist[dir]);
+ dir = BGP_VPN_POLICY_DIR_TOVPN;
+ if (bgp->vpn_policy[afi].rtlist[dir])
+ ecommunity_free(&bgp->vpn_policy[afi].rtlist[dir]);
+ }
+
if (bgp->name)
XFREE(MTYPE_BGP, bgp->name);
if (bgp->name_pretty)
dnl how to fix it but no real progress on implementation
dnl when they fix it, remove this
AC_DEFINE(IPV6_MINHOPCOUNT, 73, Linux ipv6 Min Hop Count)
-
- AC_CHECK_DECLS([IFLA_INFO_SLAVE_KIND], [], [], [#include <linux/if_link.h>])
;;
openbsd*)
AC_MSG_RESULT([OpenBSD])
char frr_protoname[256] = "NONE";
char frr_protonameinst[256] = "NONE";
-char config_default[256];
+char config_default[512];
char frr_zclientpath[256];
-static char pidfile_default[256];
+static char pidfile_default[512];
static char vtypath_default[256];
bool debug_memstats_at_exit = 0;
DECLARE_KOOH(frr_fini, (), ())
extern void frr_fini(void);
-extern char config_default[256];
+extern char config_default[512];
extern char frr_zclientpath[256];
extern const char frr_sysconfdir[];
extern const char frr_vtydir[];
size_t err_len)
{
void *handle = NULL;
- char name[PATH_MAX], fullpath[PATH_MAX], *args;
+ char name[PATH_MAX], fullpath[PATH_MAX * 2], *args;
struct frrmod_runtime *rtinfo, **rtinfop;
const struct frrmod_info *info;
struct cmd_node vrf_node = {VRF_NODE, "%s(config-vrf)# ", 1};
-DEFUN_NOSH (vrf_netns,
- vrf_netns_cmd,
- "netns NAME",
- "Attach VRF to a Namespace\n"
- "The file name in " NS_RUN_DIR ", or a full pathname\n")
+DEFUN (vrf_netns,
+ vrf_netns_cmd,
+ "netns NAME",
+ "Attach VRF to a Namespace\n"
+ "The file name in " NS_RUN_DIR ", or a full pathname\n")
{
int idx_name = 1, ret;
char *pathname = ns_netns_pathname(vty, argv[idx_name]->arg);
/* Neighbor structure */
struct ospf6_neighbor {
/* Neighbor Router ID String */
- char name[32];
+ char name[36];
/* OSPFv3 Interface this neighbor belongs to */
struct ospf6_interface *ospf6_if;
vty_out(vty, "\n");
vty_out(vty, "Upstream Join Timer: %d secs\n", qpim_t_periodic);
vty_out(vty, "Join/Prune Holdtime: %d secs\n", PIM_JP_HOLDTIME);
- vty_out(vty, "PIM ECMP: %s\n", qpim_ecmp_enable ? "Enable" : "Disable");
+ vty_out(vty, "PIM ECMP: %s\n", pim->ecmp_enable ? "Enable" : "Disable");
vty_out(vty, "PIM ECMP Rebalance: %s\n",
- qpim_ecmp_rebalance_enable ? "Enable" : "Disable");
+ pim->ecmp_rebalance_enable ? "Enable" : "Disable");
vty_out(vty, "\n");
"Enable PIM ECMP \n")
{
PIM_DECLVAR_CONTEXT(vrf, pim);
- qpim_ecmp_enable = 1;
+ pim->ecmp_enable = true;
return CMD_SUCCESS;
}
"Disable PIM ECMP \n")
{
PIM_DECLVAR_CONTEXT(vrf, pim);
- qpim_ecmp_enable = 0;
+ pim->ecmp_enable = false;
return CMD_SUCCESS;
}
"Enable PIM ECMP Rebalance\n")
{
PIM_DECLVAR_CONTEXT(vrf, pim);
- qpim_ecmp_enable = 1;
- qpim_ecmp_rebalance_enable = 1;
+ pim->ecmp_enable = true;
+ pim->ecmp_rebalance_enable = true;
return CMD_SUCCESS;
}
"Disable PIM ECMP Rebalance\n")
{
PIM_DECLVAR_CONTEXT(vrf, pim);
- qpim_ecmp_rebalance_enable = 0;
+ pim->ecmp_rebalance_enable = false;
return CMD_SUCCESS;
}
pim->keep_alive_time = PIM_KEEPALIVE_PERIOD;
pim->rp_keep_alive_time = PIM_RP_KEEPALIVE_PERIOD;
+ pim->ecmp_enable = false;
+ pim->ecmp_rebalance_enable = false;
pim->vrf_id = vrf->vrf_id;
pim->vrf = vrf;
unsigned int keep_alive_time;
unsigned int rp_keep_alive_time;
+ bool ecmp_enable;
+ bool ecmp_rebalance_enable;
+
/* If we need to rescan all our upstreams */
struct thread *rpf_cache_refresher;
int64_t rpf_cache_refresh_requests;
metric is less than nexthop update.
*/
- if (qpim_ecmp_rebalance_enable == 0) {
+ if (pim->ecmp_rebalance_enable == 0) {
uint8_t curr_route_valid = 0;
// Check if current nexthop is present in new updated
// Nexthop list.
}
}
}
- if (qpim_ecmp_enable) {
+ if (pim->ecmp_enable) {
// PIM ECMP flag is enable then choose ECMP path.
hash_val = pim_compute_ecmp_hash(src, grp);
mod_val = hash_val % pnc->nexthop_num;
"%s: (%s,%s)(%s) selected nhop interface %s addr %s mod_val %u iter %d ecmp %d",
__PRETTY_FUNCTION__, buf2, buf3,
pim->vrf->name, ifp->name, buf, mod_val,
- nh_iter, qpim_ecmp_enable);
+ nh_iter, pim->ecmp_enable);
}
}
nh_iter++;
nexthop = nexthop_from_zapi_nexthop(&nhr.nexthops[i]);
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IFINDEX:
case NEXTHOP_TYPE_IPV4_IFINDEX:
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_BLACKHOLE:
break;
+ case NEXTHOP_TYPE_IFINDEX:
+ /*
+ * Connected route (i.e. no nexthop), use
+ * RPF address from nexthop cache (i.e.
+ * destination) as PIM nexthop.
+ */
+ nexthop->type = NEXTHOP_TYPE_IPV4;
+ nexthop->gate.ipv4 =
+ pnc->rpf.rpf_addr.u.prefix4;
+ break;
case NEXTHOP_TYPE_IPV6_IFINDEX:
ifp1 = if_lookup_by_index(nexthop->ifindex,
pim->vrf_id);
}
// If PIM ECMP enable then choose ECMP path.
- if (qpim_ecmp_enable) {
+ if (pim->ecmp_enable) {
hash_val = pim_compute_ecmp_hash(src, grp);
mod_val = hash_val % num_ifindex;
if (PIM_DEBUG_PIM_NHT_DETAIL)
}
// If PIM ECMP enable then choose ECMP path.
- if (qpim_ecmp_enable) {
+ if (pim->ecmp_enable) {
hash_val = pim_compute_ecmp_hash(src, grp);
mod_val = hash_val % num_ifindex;
if (PIM_DEBUG_PIM_NHT_DETAIL)
spaces);
++writes;
}
- if (qpim_ecmp_rebalance_enable) {
+ if (pim->ecmp_rebalance_enable) {
vty_out(vty, "%sip pim ecmp rebalance\n", spaces);
++writes;
- } else if (qpim_ecmp_enable) {
+ } else if (pim->ecmp_enable) {
vty_out(vty, "%sip pim ecmp\n", spaces);
++writes;
}
}
switch (nexthop_type) {
case NEXTHOP_TYPE_IFINDEX:
+ nexthop_tab[num_ifindex].ifindex = stream_getl(s);
+ /*
+ * Connected route (i.e. no nexthop), use
+ * address passed in as PIM nexthop. This will
+ * allow us to work in cases where we are
+ * trying to find a route for this box.
+ */
+ nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET;
+ nexthop_tab[num_ifindex].nexthop_addr.prefixlen =
+ IPV4_MAX_BITLEN;
+ nexthop_tab[num_ifindex].nexthop_addr.u.prefix4 =
+ addr;
+ ++num_ifindex;
+ break;
case NEXTHOP_TYPE_IPV4_IFINDEX:
case NEXTHOP_TYPE_IPV4:
nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET;
- if (nexthop_type == NEXTHOP_TYPE_IPV4_IFINDEX
- || nexthop_type == NEXTHOP_TYPE_IPV4) {
- nexthop_tab[num_ifindex]
- .nexthop_addr.u.prefix4.s_addr =
- stream_get_ipv4(s);
- } else {
- nexthop_tab[num_ifindex]
- .nexthop_addr.u.prefix4.s_addr =
- PIM_NET_INADDR_ANY;
- }
+ nexthop_tab[num_ifindex].nexthop_addr.u.prefix4.s_addr =
+ stream_get_ipv4(s);
nexthop_tab[num_ifindex].ifindex = stream_getl(s);
- nexthop_tab[num_ifindex].protocol_distance = distance;
- nexthop_tab[num_ifindex].route_metric = metric;
++num_ifindex;
break;
case NEXTHOP_TYPE_IPV6_IFINDEX:
}
++num_ifindex;
break;
- default:
- /* do nothing */
- {
- char addr_str[INET_ADDRSTRLEN];
- pim_inet4_dump("<addr?>", addr, addr_str,
- sizeof(addr_str));
- zlog_warn(
- "%s: found non-ifindex nexthop type=%d for address %s(%s)",
- __PRETTY_FUNCTION__, nexthop_type,
- addr_str, pim->vrf->name);
- }
- break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_BLACKHOLE:
+ /* ignore */
+ continue;
}
+ nexthop_tab[num_ifindex].protocol_distance = distance;
+ nexthop_tab[num_ifindex].route_metric = metric;
}
return num_ifindex;
struct pim_assert_metric qpim_infinite_assert_metric;
long qpim_rpf_cache_refresh_delay_msec = 50;
int qpim_packet_process = PIM_DEFAULT_PACKET_PROCESS;
-uint8_t qpim_ecmp_enable = 0;
-uint8_t qpim_ecmp_rebalance_enable = 0;
struct pim_instance *pimg = NULL;
int32_t qpim_register_suppress_time = PIM_REGISTER_SUPPRESSION_TIME_DEFAULT;
import pytest
import platform
+import socket
-if platform.uname()[0] == 'SunOS':
+
+##
+# Detect a libc whose inet_ntop() compresses a single zero group
+# ('1:2:3:4:0:6:7:8' rendered with '::'), as seen on musl.  RFC 5952
+# says '::' must not shorten just one 16-bit group, and the fuzz test
+# below relies on exact text round-tripping, so it is skipped when the
+# address does not survive a pton/ntop round trip.
+##
+def inet_ntop_broken():
+ addr = '1:2:3:4:0:6:7:8'
+ return socket.inet_ntop(socket.AF_INET6,
+ socket.inet_pton(socket.AF_INET6, addr)) != addr
+
+
+if platform.uname()[0] == 'SunOS' or inet_ntop_broken():
class TestFuzzIsisTLV:
- @pytest.mark.skipif(True, reason='Test unsupported on SunOS')
+ @pytest.mark.skipif(True, reason='Test unsupported')
def test_exit_cleanly(self):
pass
else:
* the usual vtysh's stdin interface. This is the function being registered with
* readline() api's.
*/
-static int vtysh_rl_describe(void)
+static int vtysh_rl_describe(int a, int b)
{
int ret;
#define FRR_CONFIG_NAME "frr.conf"
/* Configuration file name and directory. */
-static char vtysh_config[MAXPATHLEN];
-char frr_config[MAXPATHLEN];
+static char vtysh_config[MAXPATHLEN * 3];
+char frr_config[MAXPATHLEN * 3];
char vtydir[MAXPATHLEN];
static char history_file[MAXPATHLEN];
if (linkinfo[IFLA_INFO_KIND])
kind = RTA_DATA(linkinfo[IFLA_INFO_KIND]);
-#if HAVE_DECL_IFLA_INFO_SLAVE_KIND
if (linkinfo[IFLA_INFO_SLAVE_KIND])
slave_kind = RTA_DATA(linkinfo[IFLA_INFO_SLAVE_KIND]);
-#endif
netlink_determine_zebra_iftype(kind, &zif_type);
}
if (linkinfo[IFLA_INFO_KIND])
kind = RTA_DATA(linkinfo[IFLA_INFO_KIND]);
-#if HAVE_DECL_IFLA_INFO_SLAVE_KIND
if (linkinfo[IFLA_INFO_SLAVE_KIND])
slave_kind = RTA_DATA(linkinfo[IFLA_INFO_SLAVE_KIND]);
-#endif
netlink_determine_zebra_iftype(kind, &zif_type);
}
proto_str = zebra_route_string(proto);
/* lookup the client to relay the msg to */
- zserv = zebra_find_client(proto, instance);
+ zserv = zserv_find_client(proto, instance);
if (!zserv) {
zlog_err(
"Error relaying LM response: can't find client %s, instance %u",
obuf = stream_new(ZEBRA_MAX_PACKET_SIZ);
- hook_register(zapi_client_close, release_daemon_label_chunks);
+ hook_register(zserv_client_close, release_daemon_label_chunks);
}
/**
#include "logicalrouter.h"
#include "libfrr.h"
#include "routemap.h"
+#include "frr_pthread.h"
#include "zebra/rib.h"
#include "zebra/zserv.h"
/* Needed for BSD routing socket. */
pid = getpid();
- /* This must be done only after locking pidfile (bug #403). */
- zebra_zserv_socket_init(zserv_path);
+ /* Intialize pthread library */
+ frr_pthread_init();
+
+ /* Start Zebra API server */
+ zserv_start(zserv_path);
/* Init label manager */
label_manager_init(lblmgr_path);
*/
-enum southbound_results {
- SOUTHBOUND_INSTALL_SUCCESS,
- SOUTHBOUND_INSTALL_FAILURE,
- SOUTHBOUND_DELETE_SUCCESS,
- SOUTHBOUND_DELETE_FAILURE,
+enum dp_results {
+ DP_INSTALL_SUCCESS,
+ DP_INSTALL_FAILURE,
+ DP_DELETE_SUCCESS,
+ DP_DELETE_FAILURE,
+};
+
+enum dp_req_result {
+ DP_REQUEST_QUEUED,
+ DP_REQUEST_SUCCESS,
+ DP_REQUEST_FAILURE,
};
/*
* semantics so we will end up with a delete than
* a re-add.
*/
-extern void kernel_route_rib(struct route_node *rn, struct prefix *p,
- struct prefix *src_p, struct route_entry *old,
- struct route_entry *new);
+extern enum dp_req_result kernel_route_rib(struct route_node *rn,
+ struct prefix *p,
+ struct prefix *src_p,
+ struct route_entry *old,
+ struct route_entry *new);
/*
* So route install/failure may not be immediately known
*/
extern void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
struct route_entry *re,
- enum southbound_results res);
+ enum dp_results res);
extern int kernel_address_add_ipv4(struct interface *, struct connected *);
extern int kernel_address_delete_ipv4(struct interface *, struct connected *);
extern int kernel_interface_set_master(struct interface *master,
struct interface *slave);
-extern void kernel_add_lsp(zebra_lsp_t *lsp);
-extern void kernel_upd_lsp(zebra_lsp_t *lsp);
-extern void kernel_del_lsp(zebra_lsp_t *lsp);
+extern enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp);
+extern enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp);
+extern enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp);
/*
* Add the ability to pass back up the lsp install/delete
* the install/failure to set/unset flags and to notify
* as needed.
*/
-extern void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum southbound_results res);
+extern void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum dp_results res);
extern int mpls_kernel_init(void);
return suc;
}
-void kernel_route_rib(struct route_node *rn, struct prefix *p,
- struct prefix *src_p, struct route_entry *old,
- struct route_entry *new)
+enum dp_req_result kernel_route_rib(struct route_node *rn,
+ struct prefix *p,
+ struct prefix *src_p,
+ struct route_entry *old,
+ struct route_entry *new)
{
int ret = 0;
new, 0);
}
kernel_route_rib_pass_fail(rn, p, new,
- (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
- return;
+ (!ret) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
+ return DP_REQUEST_SUCCESS;
}
if (old) {
ret = netlink_route_multipath(RTM_DELROUTE, p, src_p, old, 0);
kernel_route_rib_pass_fail(rn, p, old,
- (!ret) ? SOUTHBOUND_DELETE_SUCCESS
- : SOUTHBOUND_DELETE_FAILURE);
+ (!ret) ? DP_DELETE_SUCCESS
+ : DP_DELETE_FAILURE);
}
+
+ return DP_REQUEST_SUCCESS;
}
int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
return 0;
}
-void kernel_route_rib(struct route_node *rn, struct prefix *p,
- struct prefix *src_p, struct route_entry *old,
- struct route_entry *new)
+enum dp_req_result kernel_route_rib(struct route_node *rn,
+ struct prefix *p,
+ struct prefix *src_p,
+ struct route_entry *old,
+ struct route_entry *new)
{
int route = 0;
if (new) {
kernel_route_rib_pass_fail(
rn, p, new,
- (!route) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ (!route) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
} else {
kernel_route_rib_pass_fail(rn, p, old,
(!route)
- ? SOUTHBOUND_DELETE_SUCCESS
- : SOUTHBOUND_DELETE_FAILURE);
+ ? DP_DELETE_SUCCESS
+ : DP_DELETE_FAILURE);
}
+
+ return DP_REQUEST_SUCCESS;
}
int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
* goes in the rule to denote relative ordering; it may or may not be the
* same as the rule's user-defined sequence number.
*/
-void kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
+enum dp_req_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
{
int ret = 0;
ret = netlink_rule_update(RTM_NEWRULE, rule);
kernel_pbr_rule_add_del_status(rule,
- (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ (!ret) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
/*
* Uninstall specified rule for a specific interface.
*/
-void kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
+enum dp_req_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
{
int ret = 0;
ret = netlink_rule_update(RTM_DELRULE, rule);
kernel_pbr_rule_add_del_status(rule,
- (!ret) ? SOUTHBOUND_DELETE_SUCCESS
- : SOUTHBOUND_DELETE_FAILURE);
+ (!ret) ? DP_DELETE_SUCCESS
+ : DP_DELETE_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
/*
#include "zebra/rule_netlink.h"
#include "zebra/zebra_pbr.h"
-void kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
+enum dp_req_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
{
+ zlog_err("%s not Implemented for this platform", __PRETTY_FUNCTION__);
+ return DP_REQUEST_FAILURE;
}
-void kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
+
+enum dp_req_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
{
+ zlog_err("%s not Implemented for this platform", __PRETTY_FUNCTION__);
+ return DP_REQUEST_FAILURE;
}
#endif
return;
tbl_mgr.lc_list = list_new();
tbl_mgr.lc_list->del = delete_table_chunk;
- hook_register(zapi_client_close, release_daemon_table_chunks);
+ hook_register(zserv_client_close, release_daemon_table_chunks);
}
/**
zserv_encode_interface(s, ifp);
client->ifadd_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* Interface deletion from zebra daemon. */
zserv_encode_interface(s, ifp);
client->ifdel_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
int zsend_vrf_add(struct zserv *client, struct zebra_vrf *zvrf)
zserv_encode_vrf(s, zvrf);
client->vrfadd_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* VRF deletion from zebra daemon. */
zserv_encode_vrf(s, zvrf);
client->vrfdel_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
int zsend_interface_link_params(struct zserv *client, struct interface *ifp)
/* Write packet size. */
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* Interface address is added/deleted. Send ZEBRA_INTERFACE_ADDRESS_ADD or
stream_putw_at(s, 0, stream_get_endp(s));
client->connected_rt_add_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
static int zsend_interface_nbr_address(int cmd, struct zserv *client,
/* Write packet size. */
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* Interface address addition. */
stream_putw_at(s, 0, stream_get_endp(s));
client->if_vrfchg_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* Add new nbr connected IPv6 address */
else
client->ifdown_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
int zsend_redistribute_route(int cmd, struct zserv *client, struct prefix *p,
zebra_route_string(api.type), api.vrf_id,
buf_prefix);
}
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
struct stream *s;
uint8_t blen;
- client = zebra_find_client(re->type, re->instance);
+ client = zserv_find_client(re->type, re->instance);
if (!client || !client->notify_owner) {
if (IS_ZEBRA_DEBUG_PACKET) {
char buff[PREFIX_STRLEN];
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
+ zserv_send_message(client, s);
}
void zsend_ipset_notify_owner(struct zebra_pbr_ipset *ipset,
stream_put(s, ipset->ipset_name, ZEBRA_IPSET_NAME_SIZE);
stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
+ zserv_send_message(client, s);
}
void zsend_ipset_entry_notify_owner(struct zebra_pbr_ipset_entry *ipset,
stream_put(s, ipset->backpointer->ipset_name, ZEBRA_IPSET_NAME_SIZE);
stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
+ zserv_send_message(client, s);
}
void zsend_iptable_notify_owner(struct zebra_pbr_iptable *iptable,
stream_putl(s, iptable->unique);
stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
+ zserv_send_message(client, s);
}
/* Router-id is updated. Send ZEBRA_ROUTER_ID_ADD to client. */
/* Write packet size. */
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
/* Put length at the first point of the stream. */
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* Send response to a get label chunk request to client */
/* Write packet size. */
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
static int zsend_table_manager_connect_response(struct zserv *client,
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* Inbound message handling ------------------------------------------------ */
stream_putl(s, multipath_num);
stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
+ zserv_send_message(client, s);
}
/* Tie up route-type and client->sock */
[ZEBRA_IPTABLE_DELETE] = zread_iptable,
};
-void zserv_handle_commands(struct zserv *client, struct zmsghdr *hdr,
- struct stream *msg, struct zebra_vrf *zvrf)
+#if defined(HANDLE_ZAPI_FUZZING)
+extern struct zebra_privs_t zserv_privs;
+
+static void zserv_write_incoming(struct stream *orig, uint16_t command)
{
- if (hdr->command > array_size(zserv_handlers)
- || zserv_handlers[hdr->command] == NULL)
- zlog_info("Zebra received unknown command %d", hdr->command);
- else
- zserv_handlers[hdr->command](client, hdr, msg, zvrf);
+ char fname[MAXPATHLEN];
+ struct stream *copy;
+ int fd = -1;
+
+ copy = stream_dup(orig);
+ stream_set_getp(copy, 0);
+
+ zserv_privs.change(ZPRIVS_RAISE);
+ snprintf(fname, MAXPATHLEN, "%s/%u", DAEMON_VTY_DIR, command);
+ fd = open(fname, O_CREAT | O_WRONLY | O_EXCL, 0644);
+ stream_flush(copy, fd);
+ close(fd);
+ zserv_privs.change(ZPRIVS_LOWER);
+ stream_free(copy);
+}
+#endif
+
+void zserv_handle_commands(struct zserv *client, struct stream *msg)
+{
+ struct zmsghdr hdr;
+ struct zebra_vrf *zvrf;
- stream_free(msg);
+ zapi_parse_header(msg, &hdr);
+
+#if defined(HANDLE_ZAPI_FUZZING)
+ zserv_write_incoming(msg, hdr.command);
+#endif
+
+ hdr.length -= ZEBRA_HEADER_SIZE;
+
+ /* lookup vrf */
+ zvrf = zebra_vrf_lookup_by_id(hdr.vrf_id);
+ if (!zvrf) {
+ if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
+ zlog_warn("ZAPI message specifies unknown VRF: %d",
+ hdr.vrf_id);
+ return;
+ }
+
+ if (hdr.command > array_size(zserv_handlers)
+ || zserv_handlers[hdr.command] == NULL)
+ zlog_info("Zebra received unknown command %d", hdr.command);
+ else
+ zserv_handlers[hdr.command](client, &hdr, msg, zvrf);
}
* client
* the client datastructure
*
- * hdr
- * the message header
- *
* msg
- * the message contents, without the header
- *
- * zvrf
- * the vrf
+ * the message
*/
-extern void zserv_handle_commands(struct zserv *client, struct zmsghdr *hdr,
- struct stream *msg, struct zebra_vrf *zvrf);
+extern void zserv_handle_commands(struct zserv *client, struct stream *msg);
extern int zsend_vrf_add(struct zserv *zclient, struct zebra_vrf *zvrf);
extern int zsend_vrf_delete(struct zserv *zclient, struct zebra_vrf *zvrf);
stream_put_prefix(s, &rn->p);
stream_putl(s, fec->label);
stream_putw_at(s, 0, stream_get_endp(s));
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
lsp = (zebra_lsp_t *)backet->data;
if (CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED))
- kernel_del_lsp(lsp);
+ (void)kernel_del_lsp(lsp);
}
/*
if (newbest) {
UNSET_FLAG(lsp->flags, LSP_FLAG_CHANGED);
- kernel_add_lsp(lsp);
-
- zvrf->lsp_installs++;
+ switch (kernel_add_lsp(lsp)) {
+ case DP_REQUEST_QUEUED:
+ zlog_err("No current DataPlane interfaces can return this, please fix");
+ break;
+ case DP_REQUEST_FAILURE:
+ break;
+ case DP_REQUEST_SUCCESS:
+ zvrf->lsp_installs++;
+ break;
+ }
}
} else {
/* Installed, may need an update and/or delete. */
if (!newbest) {
- kernel_del_lsp(lsp);
-
- zvrf->lsp_removals++;
+ switch (kernel_del_lsp(lsp)) {
+ case DP_REQUEST_QUEUED:
+ zlog_err("No current DataPlane interfaces can return this, please fix");
+ break;
+ case DP_REQUEST_FAILURE:
+ break;
+ case DP_REQUEST_SUCCESS:
+ zvrf->lsp_removals++;
+ break;
+ }
} else if (CHECK_FLAG(lsp->flags, LSP_FLAG_CHANGED)) {
zebra_nhlfe_t *nhlfe;
struct nexthop *nexthop;
}
}
- kernel_upd_lsp(lsp);
-
- zvrf->lsp_installs++;
+ switch (kernel_upd_lsp(lsp)) {
+ case DP_REQUEST_QUEUED:
+ zlog_err("No current DataPlane interfaces can return this, please fix");
+ break;
+ case DP_REQUEST_FAILURE:
+ break;
+ case DP_REQUEST_SUCCESS:
+ zvrf->lsp_installs++;
+ break;
+ }
}
}
/* Public functions */
-void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum southbound_results res)
+void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum dp_results res)
{
struct nexthop *nexthop;
zebra_nhlfe_t *nhlfe;
return;
switch (res) {
- case SOUTHBOUND_INSTALL_FAILURE:
+ case DP_INSTALL_FAILURE:
UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
clear_nhlfe_installed(lsp);
zlog_warn("LSP Install Failure: %u", lsp->ile.in_label);
break;
- case SOUTHBOUND_INSTALL_SUCCESS:
+ case DP_INSTALL_SUCCESS:
SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
nexthop = nhlfe->nexthop;
SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
}
break;
- case SOUTHBOUND_DELETE_SUCCESS:
+ case DP_DELETE_SUCCESS:
UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
clear_nhlfe_installed(lsp);
break;
- case SOUTHBOUND_DELETE_FAILURE:
+ case DP_DELETE_FAILURE:
zlog_warn("LSP Deletion Failure: %u", lsp->ile.in_label);
break;
}
if (!mpls_processq_init(&zebrad))
mpls_enabled = 1;
- hook_register(zapi_client_close, zebra_mpls_cleanup_fecs_for_client);
+ hook_register(zserv_client_close, zebra_mpls_cleanup_fecs_for_client);
}
/*
* Install Label Forwarding entry into the kernel.
*/
-void kernel_add_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp)
{
int ret;
if (!lsp || !lsp->best_nhlfe) { // unexpected
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+ return DP_REQUEST_FAILURE;
}
ret = netlink_mpls_multipath(RTM_NEWROUTE, lsp);
kernel_lsp_pass_fail(lsp,
- (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ (!ret) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
/*
* through the metric field (before kernel-MPLS). This shouldn't be an issue
* any longer, so REPLACE can be reintroduced.
*/
-void kernel_upd_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp)
{
int ret;
if (!lsp || !lsp->best_nhlfe) { // unexpected
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+ return DP_REQUEST_FAILURE;
}
ret = netlink_mpls_multipath(RTM_NEWROUTE, lsp);
kernel_lsp_pass_fail(lsp,
- (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ (!ret) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
/*
* Delete Label Forwarding entry from the kernel.
*/
-void kernel_del_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp)
{
int ret;
if (!lsp) { // unexpected
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+ return DP_REQUEST_FAILURE;
}
if (!CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED)) {
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+ return DP_REQUEST_FAILURE;
}
ret = netlink_mpls_multipath(RTM_DELROUTE, lsp);
kernel_lsp_pass_fail(lsp,
- (!ret) ? SOUTHBOUND_DELETE_SUCCESS
- : SOUTHBOUND_DELETE_FAILURE);
+ (!ret) ? DP_DELETE_SUCCESS
+ : DP_DELETE_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
int mpls_kernel_init(void)
#if !defined(HAVE_NETLINK) && !defined(OPEN_BSD)
-void kernel_add_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp)
{
- return;
+ return DP_REQUEST_SUCCESS;
}
-void kernel_upd_lsp(zebra_lsp_t *lsp)
+
+enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp)
{
- return;
+ return DP_REQUEST_SUCCESS;
}
-void kernel_del_lsp(zebra_lsp_t *lsp)
+
+enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp)
{
- return;
+ return DP_REQUEST_SUCCESS;
}
+
int mpls_kernel_init(void)
{
return -1;
return (0);
}
-void kernel_add_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp)
{
int ret;
if (!lsp || !lsp->best_nhlfe) { // unexpected
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+ return DP_REQUEST_FAILURE;
}
ret = kernel_lsp_cmd(RTM_ADD, lsp);
kernel_lsp_pass_fail(lsp,
- (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ (!ret) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
-void kernel_upd_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp)
{
int ret;
if (!lsp || !lsp->best_nhlfe) { // unexpected
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+ return DP_REQUEST_FAILURE;
}
ret = kernel_lsp_cmd(RTM_CHANGE, lsp);
kernel_lsp_pass_fail(lsp,
- (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
- return;
+ (!ret) ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
+ return DP_REQUEST_SUCCESS;
}
-void kernel_del_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp)
{
int ret;
if (!lsp) { // unexpected
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+ return DP_REQUEST_FAILURE;
}
if (!CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED)) {
- kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
- return;
+ kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+ return DP_REQUEST_FAILURE;
}
ret = kernel_lsp_cmd(RTM_DELETE, lsp);
kernel_lsp_pass_fail(lsp,
- (!ret) ? SOUTHBOUND_DELETE_SUCCESS
- : SOUTHBOUND_DELETE_FAILURE);
+ (!ret) ? DP_DELETE_SUCCESS
+ : DP_DELETE_FAILURE);
+
+ return DP_REQUEST_SUCCESS;
}
static int kmpw_install(struct zebra_pw *pw)
stream_putl(s, suc);
stream_putw_at(s, 0, stream_get_endp(s));
- zebra_server_send_message(client, s);
+ zserv_send_message(client, s);
}
#include "zebra/rt.h"
#include "zebra/zapi_msg.h"
#include "zebra/zebra_memory.h"
+#include "zebra/zserv.h"
/* definitions */
DEFINE_MTYPE_STATIC(ZEBRA, PBR_IPTABLE_IFNAME, "PBR interface list")
rule = (struct zebra_pbr_rule *)arg;
- kernel_del_pbr_rule(rule);
+ (void)kernel_del_pbr_rule(rule);
XFREE(MTYPE_TMP, rule);
}
pbr_rule_lookup_unique(zns, rule->rule.unique, rule->ifp);
(void)hash_get(zns->rules_hash, rule, pbr_rule_alloc_intern);
- kernel_add_pbr_rule(rule);
+ (void)kernel_add_pbr_rule(rule);
/*
* Rule Replace semantics, if we have an old, install the
* new rule, look above, and then delete the old
struct zebra_pbr_rule *lookup;
lookup = hash_lookup(zns->rules_hash, rule);
- kernel_del_pbr_rule(rule);
+ (void)kernel_del_pbr_rule(rule);
if (lookup) {
hash_release(zns->rules_hash, lookup);
int *sock = data;
if (rule->sock == *sock) {
- kernel_del_pbr_rule(rule);
+ (void)kernel_del_pbr_rule(rule);
hash_release(zns->rules_hash, rule);
XFREE(MTYPE_TMP, rule);
}
void zebra_pbr_init(void)
{
- hook_register(zapi_client_close, zebra_pbr_client_close_cleanup);
+ hook_register(zserv_client_close, zebra_pbr_client_close_cleanup);
}
static void *pbr_ipset_alloc_intern(void *arg)
ret = hook_call(zebra_pbr_ipset_wrap_script_update,
zns, 1, ipset);
kernel_pbr_ipset_add_del_status(ipset,
- ret ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ ret ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
}
void zebra_pbr_destroy_ipset(struct zebra_ns *zns,
ret = hook_call(zebra_pbr_ipset_entry_wrap_script_update,
zns, 1, ipset);
kernel_pbr_ipset_entry_add_del_status(ipset,
- ret ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ ret ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
}
void zebra_pbr_del_ipset_entry(struct zebra_ns *zns,
pbr_iptable_alloc_intern);
ret = hook_call(zebra_pbr_iptable_wrap_script_update, zns, 1, iptable);
kernel_pbr_iptable_add_del_status(iptable,
- ret ? SOUTHBOUND_INSTALL_SUCCESS
- : SOUTHBOUND_INSTALL_FAILURE);
+ ret ? DP_INSTALL_SUCCESS
+ : DP_INSTALL_FAILURE);
}
void zebra_pbr_del_iptable(struct zebra_ns *zns,
* Handle success or failure of rule (un)install in the kernel.
*/
void kernel_pbr_rule_add_del_status(struct zebra_pbr_rule *rule,
- enum southbound_results res)
+ enum dp_results res)
{
switch (res) {
- case SOUTHBOUND_INSTALL_SUCCESS:
+ case DP_INSTALL_SUCCESS:
zsend_rule_notify_owner(rule, ZAPI_RULE_INSTALLED);
break;
- case SOUTHBOUND_INSTALL_FAILURE:
+ case DP_INSTALL_FAILURE:
zsend_rule_notify_owner(rule, ZAPI_RULE_FAIL_INSTALL);
break;
- case SOUTHBOUND_DELETE_SUCCESS:
+ case DP_DELETE_SUCCESS:
zsend_rule_notify_owner(rule, ZAPI_RULE_REMOVED);
break;
- case SOUTHBOUND_DELETE_FAILURE:
+ case DP_DELETE_FAILURE:
zsend_rule_notify_owner(rule, ZAPI_RULE_FAIL_REMOVE);
break;
}
* Handle success or failure of ipset (un)install in the kernel.
*/
void kernel_pbr_ipset_add_del_status(struct zebra_pbr_ipset *ipset,
- enum southbound_results res)
+ enum dp_results res)
{
switch (res) {
- case SOUTHBOUND_INSTALL_SUCCESS:
+ case DP_INSTALL_SUCCESS:
zsend_ipset_notify_owner(ipset, ZAPI_IPSET_INSTALLED);
break;
- case SOUTHBOUND_INSTALL_FAILURE:
+ case DP_INSTALL_FAILURE:
zsend_ipset_notify_owner(ipset, ZAPI_IPSET_FAIL_INSTALL);
break;
- case SOUTHBOUND_DELETE_SUCCESS:
+ case DP_DELETE_SUCCESS:
zsend_ipset_notify_owner(ipset, ZAPI_IPSET_REMOVED);
break;
- case SOUTHBOUND_DELETE_FAILURE:
+ case DP_DELETE_FAILURE:
zsend_ipset_notify_owner(ipset, ZAPI_IPSET_FAIL_REMOVE);
break;
}
*/
void kernel_pbr_ipset_entry_add_del_status(
struct zebra_pbr_ipset_entry *ipset,
- enum southbound_results res)
+ enum dp_results res)
{
switch (res) {
- case SOUTHBOUND_INSTALL_SUCCESS:
+ case DP_INSTALL_SUCCESS:
zsend_ipset_entry_notify_owner(ipset,
ZAPI_IPSET_ENTRY_INSTALLED);
break;
- case SOUTHBOUND_INSTALL_FAILURE:
+ case DP_INSTALL_FAILURE:
zsend_ipset_entry_notify_owner(ipset,
ZAPI_IPSET_ENTRY_FAIL_INSTALL);
break;
- case SOUTHBOUND_DELETE_SUCCESS:
+ case DP_DELETE_SUCCESS:
zsend_ipset_entry_notify_owner(ipset,
ZAPI_IPSET_ENTRY_REMOVED);
break;
- case SOUTHBOUND_DELETE_FAILURE:
+ case DP_DELETE_FAILURE:
zsend_ipset_entry_notify_owner(ipset,
ZAPI_IPSET_ENTRY_FAIL_REMOVE);
break;
* Handle success or failure of ipset (un)install in the kernel.
*/
void kernel_pbr_iptable_add_del_status(struct zebra_pbr_iptable *iptable,
- enum southbound_results res)
+ enum dp_results res)
{
switch (res) {
- case SOUTHBOUND_INSTALL_SUCCESS:
+ case DP_INSTALL_SUCCESS:
zsend_iptable_notify_owner(iptable, ZAPI_IPTABLE_INSTALLED);
break;
- case SOUTHBOUND_INSTALL_FAILURE:
+ case DP_INSTALL_FAILURE:
zsend_iptable_notify_owner(iptable, ZAPI_IPTABLE_FAIL_INSTALL);
break;
- case SOUTHBOUND_DELETE_SUCCESS:
+ case DP_DELETE_SUCCESS:
zsend_iptable_notify_owner(iptable,
ZAPI_IPTABLE_REMOVED);
break;
- case SOUTHBOUND_DELETE_FAILURE:
+ case DP_DELETE_FAILURE:
zsend_iptable_notify_owner(iptable,
ZAPI_IPTABLE_FAIL_REMOVE);
break;
* forwarding plane may not coincide, hence the API requires a separate
* rule priority - maps to preference/FRA_PRIORITY on Linux.
*/
-extern void kernel_add_pbr_rule(struct zebra_pbr_rule *rule);
+extern enum dp_req_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule);
/*
* Uninstall specified rule for a specific interface.
*/
-extern void kernel_del_pbr_rule(struct zebra_pbr_rule *rule);
+extern enum dp_req_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule);
/*
* Get to know existing PBR rules in the kernel - typically called at startup.
*/
extern void kernel_read_pbr_rules(struct zebra_ns *zns);
-enum southbound_results;
+enum dp_results;
/*
* Handle success or failure of rule (un)install in the kernel.
*/
extern void kernel_pbr_rule_add_del_status(struct zebra_pbr_rule *rule,
- enum southbound_results res);
+ enum dp_results res);
/*
* Handle success or failure of ipset kinds (un)install in the kernel.
*/
extern void kernel_pbr_ipset_add_del_status(struct zebra_pbr_ipset *ipset,
- enum southbound_results res);
+ enum dp_results res);
extern void kernel_pbr_ipset_entry_add_del_status(
struct zebra_pbr_ipset_entry *ipset,
- enum southbound_results res);
+ enum dp_results res);
extern void kernel_pbr_iptable_add_del_status(struct zebra_pbr_iptable *iptable,
- enum southbound_results res);
+ enum dp_results res);
/*
* Handle rule delete notification from kernel.
ptm_cb.ptm_sock = -1;
- hook_register(zapi_client_close, zebra_ptm_bfd_client_deregister);
+ hook_register(zserv_client_close, zebra_ptm_bfd_client_deregister);
}
void zebra_ptm_finish(void)
stream_putw_at(s, 0, stream_get_endp(s));
client->if_bfd_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
void zebra_interface_bfd_update(struct interface *ifp, struct prefix *dp,
stream_putw_at(s, 0, stream_get_endp(s));
client->bfd_peer_replay_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
void zebra_bfd_peer_replay_req(void)
RB_INIT(zebra_pw_head, &zvrf->pseudowires);
RB_INIT(zebra_static_pw_head, &zvrf->static_pseudowires);
- hook_register(zapi_client_close, zebra_pw_client_close);
+ hook_register(zserv_client_close, zebra_pw_client_close);
}
void zebra_pw_exit(struct zebra_vrf *zvrf)
void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
struct route_entry *re,
- enum southbound_results res)
+ enum dp_results res)
{
struct nexthop *nexthop;
char buf[PREFIX_STRLEN];
dest = rib_dest_from_rnode(rn);
switch (res) {
- case SOUTHBOUND_INSTALL_SUCCESS:
+ case DP_INSTALL_SUCCESS:
dest->selected_fib = re;
for (ALL_NEXTHOPS(re->ng, nexthop)) {
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
}
zsend_route_notify_owner(re, p, ZAPI_ROUTE_INSTALLED);
break;
- case SOUTHBOUND_INSTALL_FAILURE:
+ case DP_INSTALL_FAILURE:
/*
* I am not sure this is the right thing to do here
* but the code always set selected_fib before
zlog_warn("%u:%s: Route install failed", re->vrf_id,
prefix2str(p, buf, sizeof(buf)));
break;
- case SOUTHBOUND_DELETE_SUCCESS:
+ case DP_DELETE_SUCCESS:
/*
* The case where selected_fib is not re is
* when we have received a system route
zsend_route_notify_owner(re, p, ZAPI_ROUTE_REMOVED);
break;
- case SOUTHBOUND_DELETE_FAILURE:
+ case DP_DELETE_FAILURE:
/*
* Should we set this to NULL if the
* delete fails?
* the kernel.
*/
hook_call(rib_update, rn, "installing in kernel");
- kernel_route_rib(rn, p, src_p, old, re);
- zvrf->installs++;
+ switch (kernel_route_rib(rn, p, src_p, old, re)) {
+ case DP_REQUEST_QUEUED:
+ zlog_err("No current known DataPlane interfaces can return this, please fix");
+ break;
+ case DP_REQUEST_FAILURE:
+ zlog_err("No current known Rib Install Failure cases, please fix");
+ break;
+ case DP_REQUEST_SUCCESS:
+ zvrf->installs++;
+ break;
+ }
return;
}
* the kernel.
*/
hook_call(rib_update, rn, "uninstalling from kernel");
- kernel_route_rib(rn, p, src_p, re, NULL);
- if (zvrf)
- zvrf->removals++;
+ switch (kernel_route_rib(rn, p, src_p, re, NULL)) {
+ case DP_REQUEST_QUEUED:
+ zlog_err("No current known DataPlane interfaces can return this, please fix");
+ break;
+ case DP_REQUEST_FAILURE:
+ zlog_err("No current known RIB Install Failure cases, please fix");
+ break;
+ case DP_REQUEST_SUCCESS:
+ if (zvrf)
+ zvrf->removals++;
+ break;
+ }
return;
}
void zebra_rnh_init(void)
{
- hook_register(zapi_client_close, zebra_client_cleanup_rnh);
+ hook_register(zserv_client_close, zebra_client_cleanup_rnh);
}
static inline struct route_table *get_rnh_table(vrf_id_t vrfid, int family,
client->nh_last_upd_time = monotime(NULL);
client->last_write_cmd = cmd;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
static void print_nh(struct nexthop *nexthop, struct vty *vty)
{
uint32_t packets = strtoul(argv[2]->arg, NULL, 10);
- zebrad.packets_to_process = packets;
+ atomic_store_explicit(&zebrad.packets_to_process, packets,
+ memory_order_relaxed);
return CMD_SUCCESS;
}
"Zapi Protocol\n"
"Number of packets to process before relinquishing thread\n")
{
- zebrad.packets_to_process = ZEBRA_ZAPI_PACKETS_TO_PROCESS;
+ atomic_store_explicit(&zebrad.packets_to_process,
+ ZEBRA_ZAPI_PACKETS_TO_PROCESS,
+ memory_order_relaxed);
return CMD_SUCCESS;
}
static zebra_neigh_t *zvni_neigh_add(zebra_vni_t *zvni, struct ipaddr *ip,
struct ethaddr *mac);
static int zvni_neigh_del(zebra_vni_t *zvni, zebra_neigh_t *n);
-static int zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg);
static void zvni_neigh_del_from_vtep(zebra_vni_t *zvni, int uninstall,
struct in_addr *r_vtep_ip);
static void zvni_neigh_del_all(zebra_vni_t *zvni, int uninstall, int upd_client,
static void *zvni_mac_alloc(void *p);
static zebra_mac_t *zvni_mac_add(zebra_vni_t *zvni, struct ethaddr *macaddr);
static int zvni_mac_del(zebra_vni_t *zvni, zebra_mac_t *mac);
-static int zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg);
static void zvni_mac_del_from_vtep(zebra_vni_t *zvni, int uninstall,
struct in_addr *r_vtep_ip);
static void zvni_mac_del_all(zebra_vni_t *zvni, int uninstall, int upd_client,
struct zserv *client = NULL;
struct stream *s = NULL;
- client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+ client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
/* BGP may not be running. */
if (!client)
return 0;
else
client->macipdel_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
/*
* Free neighbor hash entry (callback)
*/
-static int zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg)
+static void zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg)
{
struct neigh_walk_ctx *wctx = arg;
zebra_neigh_t *n = backet->data;
if (wctx->uninstall)
zvni_neigh_uninstall(wctx->zvni, n);
- return zvni_neigh_del(wctx->zvni, n);
+ zvni_neigh_del(wctx->zvni, n);
}
- return 0;
+ return;
}
/*
wctx.flags = DEL_REMOTE_NEIGH_FROM_VTEP;
wctx.r_vtep_ip = *r_vtep_ip;
- hash_iterate(zvni->neigh_table,
- (void (*)(struct hash_backet *,
- void *))zvni_neigh_del_hash_entry,
- &wctx);
+ hash_iterate(zvni->neigh_table, zvni_neigh_del_hash_entry, &wctx);
}
/*
wctx.upd_client = upd_client;
wctx.flags = flags;
- hash_iterate(zvni->neigh_table,
- (void (*)(struct hash_backet *,
- void *))zvni_neigh_del_hash_entry,
- &wctx);
+ hash_iterate(zvni->neigh_table, zvni_neigh_del_hash_entry, &wctx);
}
/*
/*
* Free MAC hash entry (callback)
*/
-static int zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg)
+static void zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg)
{
struct mac_walk_ctx *wctx = arg;
zebra_mac_t *mac = backet->data;
if (wctx->uninstall)
zvni_mac_uninstall(wctx->zvni, mac, 0);
- return zvni_mac_del(wctx->zvni, mac);
+ zvni_mac_del(wctx->zvni, mac);
}
- return 0;
+ return;
}
/*
wctx.flags = DEL_REMOTE_MAC_FROM_VTEP;
wctx.r_vtep_ip = *r_vtep_ip;
- hash_iterate(zvni->mac_table, (void (*)(struct hash_backet *,
- void *))zvni_mac_del_hash_entry,
- &wctx);
+ hash_iterate(zvni->mac_table, zvni_mac_del_hash_entry, &wctx);
}
/*
wctx.upd_client = upd_client;
wctx.flags = flags;
- hash_iterate(zvni->mac_table, (void (*)(struct hash_backet *,
- void *))zvni_mac_del_hash_entry,
- &wctx);
+ hash_iterate(zvni->mac_table, zvni_mac_del_hash_entry, &wctx);
}
/*
struct zserv *client;
struct stream *s;
- client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+ client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
/* BGP may not be running. */
if (!client)
return 0;
zebra_route_string(client->proto));
client->vniadd_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
struct zserv *client;
struct stream *s;
- client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+ client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
/* BGP may not be running. */
if (!client)
return 0;
zebra_route_string(client->proto));
client->vnidel_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
struct ethaddr rmac;
char buf[ETHER_ADDR_STRLEN];
- client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+ client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
/* BGP may not be running. */
if (!client)
return 0;
zebra_route_string(client->proto));
client->l3vniadd_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/*
struct stream *s = NULL;
struct zserv *client = NULL;
- client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+ client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
/* BGP may not be running. */
if (!client)
return 0;
zebra_route_string(client->proto));
client->l3vnidel_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
static void zebra_vxlan_process_l3vni_oper_up(zebra_l3vni_t *zl3vni)
struct stream *s = NULL;
char buf[PREFIX_STRLEN];
- client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+ client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
/* BGP may not be running. */
if (!client)
return 0;
else
client->prefixdel_cnt++;
- return zebra_server_send_message(client, s);
+ return zserv_send_message(client, s);
}
/* re-add remote rmac if needed */
#include "lib/vty.h" /* for vty_out, vty (ptr only) */
#include "lib/zassert.h" /* for assert */
#include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
+#include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */
+#include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */
#include "zebra/debug.h" /* for various debugging macros */
#include "zebra/rib.h" /* for rib_score_proto */
#include "zebra/zserv.h" /* for zserv */
/* clang-format on */
-/* Event list of zebra. */
-enum event { ZEBRA_READ, ZEBRA_WRITE };
/* privileges */
extern struct zebra_privs_t zserv_privs;
-/* post event into client */
-static void zebra_event(struct zserv *client, enum event event);
-
-
-/* Public interface --------------------------------------------------------- */
-
-int zebra_server_send_message(struct zserv *client, struct stream *msg)
-{
- stream_fifo_push(client->obuf_fifo, msg);
- zebra_event(client, ZEBRA_WRITE);
- return 0;
-}
-
-/* Lifecycle ---------------------------------------------------------------- */
-
-/* Hooks for client connect / disconnect */
-DEFINE_HOOK(zapi_client_connect, (struct zserv *client), (client));
-DEFINE_KOOH(zapi_client_close, (struct zserv *client), (client));
-
-/* free zebra client information. */
-static void zebra_client_free(struct zserv *client)
-{
- hook_call(zapi_client_close, client);
-
- /* Close file descriptor. */
- if (client->sock) {
- unsigned long nroutes;
-
- close(client->sock);
- nroutes = rib_score_proto(client->proto, client->instance);
- zlog_notice(
- "client %d disconnected. %lu %s routes removed from the rib",
- client->sock, nroutes,
- zebra_route_string(client->proto));
- client->sock = -1;
- }
-
- /* Free stream buffers. */
- if (client->ibuf_work)
- stream_free(client->ibuf_work);
- if (client->obuf_work)
- stream_free(client->obuf_work);
- if (client->ibuf_fifo)
- stream_fifo_free(client->ibuf_fifo);
- if (client->obuf_fifo)
- stream_fifo_free(client->obuf_fifo);
- if (client->wb)
- buffer_free(client->wb);
-
- /* Release threads. */
- if (client->t_read)
- thread_cancel(client->t_read);
- if (client->t_write)
- thread_cancel(client->t_write);
- if (client->t_suicide)
- thread_cancel(client->t_suicide);
-
- /* Free bitmaps. */
- for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
- for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
- vrf_bitmap_free(client->redist[afi][i]);
-
- vrf_bitmap_free(client->redist_default);
- vrf_bitmap_free(client->ifinfo);
- vrf_bitmap_free(client->ridinfo);
-
- XFREE(MTYPE_TMP, client);
-}
/*
- * Called from client thread to terminate itself.
+ * Client thread events.
+ *
+ * These are used almost exclusively by client threads to drive their own event
+ * loops. The only exception is in zebra_client_create(), which pushes an
+ * initial ZSERV_CLIENT_READ event to start the API handler loop.
*/
-static void zebra_client_close(struct zserv *client)
-{
- listnode_delete(zebrad.client_list, client);
- zebra_client_free(client);
-}
-
-/* Make new client. */
-static void zebra_client_create(int sock)
-{
- struct zserv *client;
- int i;
- afi_t afi;
-
- client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));
-
- /* Make client input/output buffer. */
- client->sock = sock;
- client->ibuf_fifo = stream_fifo_new();
- client->obuf_fifo = stream_fifo_new();
- client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
- client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
- client->wb = buffer_new(0);
-
- /* Set table number. */
- client->rtm_table = zebrad.rtm_table_default;
-
- client->connect_time = monotime(NULL);
- /* Initialize flags */
- for (afi = AFI_IP; afi < AFI_MAX; afi++)
- for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
- client->redist[afi][i] = vrf_bitmap_init();
- client->redist_default = vrf_bitmap_init();
- client->ifinfo = vrf_bitmap_init();
- client->ridinfo = vrf_bitmap_init();
-
- /* by default, it's not a synchronous client */
- client->is_synchronous = 0;
+enum zserv_client_event {
+ /* Schedule a socket read */
+ ZSERV_CLIENT_READ,
+ /* Schedule a buffer write */
+ ZSERV_CLIENT_WRITE,
+};
- /* Add this client to linked list. */
- listnode_add(zebrad.client_list, client);
-
- zebra_vrf_update_all(client);
+/*
+ * Main thread events.
+ *
+ * These are used by client threads to notify the main thread about various
+ * events and to make processing requests.
+ */
+enum zserv_event {
+ /* Schedule listen job on Zebra API socket */
+ ZSERV_ACCEPT,
+ /* The calling client has packets on its input buffer */
+ ZSERV_PROCESS_MESSAGES,
+ /* The calling client wishes to be killed */
+ ZSERV_HANDLE_CLOSE,
+};
- hook_call(zapi_client_connect, client);
+/*
+ * Zebra server event driver for all client threads.
+ *
+ * This is essentially a wrapper around thread_add_event() that centralizes
+ * those scheduling calls into one place.
+ *
+ * All calls to this function schedule an event on the pthread running the
+ * provided client.
+ *
+ * client
+ * the client in question, and thread target
+ *
+ * event
+ * the event to notify them about
+ */
+static void zserv_client_event(struct zserv *client,
+ enum zserv_client_event event);
- /* start read loop */
- zebra_event(client, ZEBRA_READ);
-}
+/*
+ * Zebra server event driver for the main thread.
+ *
+ * This is essentially a wrapper around thread_add_event() that centralizes
+ * those scheduling calls into one place.
+ *
+ * All calls to this function schedule an event on Zebra's main pthread.
+ *
+ * client
+ * the client in question
+ *
+ * event
+ * the event to notify the main thread about
+ */
+static void zserv_event(struct zserv *client, enum zserv_event event);
-static int zserv_delayed_close(struct thread *thread)
-{
- struct zserv *client = THREAD_ARG(thread);
- client->t_suicide = NULL;
- zebra_client_close(client);
- return 0;
-}
+/* Client thread lifecycle -------------------------------------------------- */
/*
* Log zapi message to zlog.
zlog_hexdump(msg->data, STREAM_READABLE(msg));
}
-static int zserv_flush_data(struct thread *thread)
+/*
+ * Gracefully shut down a client connection.
+ *
+ * Cancel any pending tasks for the client's thread. Then schedule a task on the
+ * main thread to shut down the calling thread.
+ *
+ * Must be called from the client pthread, never the main thread.
+ */
+static void zserv_client_close(struct zserv *client)
{
-	struct zserv *client = THREAD_ARG(thread);
-
-	client->t_write = NULL;
-	if (client->t_suicide) {
-		zebra_client_close(client);
-		return -1;
-	}
-	switch (buffer_flush_available(client->wb, client->sock)) {
-	case BUFFER_ERROR:
-		zlog_warn(
-			"%s: buffer_flush_available failed on zserv client fd %d, closing",
-			__func__, client->sock);
-		zebra_client_close(client);
-		client = NULL;
-		break;
-	case BUFFER_PENDING:
-		client->t_write = NULL;
-		thread_add_write(zebrad.master, zserv_flush_data, client,
-				 client->sock, &client->t_write);
-		break;
-	case BUFFER_EMPTY:
-		break;
-	}
-
-	if (client)
-		client->last_write_time = monotime(NULL);
-	return 0;
+	/* signal the client pthread's event loop to stop running */
+	atomic_store_explicit(&client->pthread->running, false,
+			      memory_order_seq_cst);
+	/* cancel any I/O tasks still queued on the client pthread */
+	THREAD_OFF(client->t_read);
+	THREAD_OFF(client->t_write);
+	/* ask the main thread to reap this client (see zserv_handle_client_close) */
+	zserv_event(client, ZSERV_HANDLE_CLOSE);
}
/*
- * Write a single packet.
+ * Write all pending messages to client socket.
+ *
+ * This function first attempts to flush any buffered data. If unsuccessful,
+ * the function reschedules itself and returns. If successful, it pops all
+ * available messages from the output queue and continues to write data
+ * directly to the socket until the socket would block. If the socket never
+ * blocks and all data is written, the function returns without rescheduling
+ * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
+ * buffered and the function reschedules itself.
+ *
+ * The utility of the buffer is that it allows us to vastly reduce lock
+ * contention by allowing us to pop *all* messages off the output queue at once
+ * instead of locking and unlocking each time we want to pop a single message
+ * off the queue. The same thing could arguably be accomplished faster by
+ * allowing the main thread to write directly into the buffer instead of
+ * enqueuing packets onto an intermediary queue, but the intermediary queue
+ * allows us to expose information about input and output queues to the user in
+ * terms of number of packets rather than size of data.
*/
static int zserv_write(struct thread *thread)
{
	struct zserv *client = THREAD_ARG(thread);
	struct stream *msg;
-	int writerv;
-
-	if (client->t_suicide)
-		return -1;
-
-	if (client->is_synchronous)
-		return 0;
-
-	msg = stream_fifo_pop(client->obuf_fifo);
-	stream_set_getp(msg, 0);
-	client->last_write_cmd = stream_getw_from(msg, 6);
+	/* Zero-initialized: if the output queue turns out to be empty,
+	 * cache->tail is NULL below and wcmd would otherwise be stored to
+	 * last_write_cmd while indeterminate (undefined behavior). */
+	uint32_t wcmd = 0;
+	struct stream_fifo *cache;

-	writerv = buffer_write(client->wb, client->sock, STREAM_DATA(msg),
-			       stream_get_endp(msg));
-
-	stream_free(msg);
-
-	switch (writerv) {
+	/* If we have any data pending, try to flush it first */
+	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
-		zlog_warn(
-			"%s: buffer_write failed to zserv client fd %d, closing",
-			__func__, client->sock);
-		/*
-		 * Schedule a delayed close since many of the functions that
-		 * call this one do not check the return code. They do not
-		 * allow for the possibility that an I/O error may have caused
-		 * the client to be deleted.
-		 */
-		client->t_suicide = NULL;
-		thread_add_event(zebrad.master, zserv_delayed_close, client, 0,
-				 &client->t_suicide);
-		return -1;
-	case BUFFER_EMPTY:
-		THREAD_OFF(client->t_write);
-		break;
+		goto zwrite_fail;
	case BUFFER_PENDING:
-		thread_add_write(zebrad.master, zserv_flush_data, client,
-				 client->sock, &client->t_write);
+		atomic_store_explicit(&client->last_write_time,
+				      (uint32_t)monotime(NULL),
+				      memory_order_relaxed);
+		zserv_client_event(client, ZSERV_CLIENT_WRITE);
+		return 0;
+	case BUFFER_EMPTY:
		break;
	}

-	if (client->obuf_fifo->count)
-		zebra_event(client, ZEBRA_WRITE);
-
-	client->last_write_time = monotime(NULL);
-	return 0;
-}
-
-#if defined(HANDLE_ZAPI_FUZZING)
-static void zserv_write_incoming(struct stream *orig, uint16_t command)
-{
-	char fname[MAXPATHLEN];
-	struct stream *copy;
-	int fd = -1;
+	cache = stream_fifo_new();

-	copy = stream_dup(orig);
-	stream_set_getp(copy, 0);
+	/* drain the shared output queue in one critical section */
+	pthread_mutex_lock(&client->obuf_mtx);
+	{
+		while (stream_fifo_head(client->obuf_fifo))
+			stream_fifo_push(cache,
+					 stream_fifo_pop(client->obuf_fifo));
+	}
+	pthread_mutex_unlock(&client->obuf_mtx);

-	zserv_privs.change(ZPRIVS_RAISE);
-	snprintf(fname, MAXPATHLEN, "%s/%u", DAEMON_VTY_DIR, command);
-	fd = open(fname, O_CREAT | O_WRONLY | O_EXCL, 0644);
-	stream_flush(copy, fd);
-	close(fd);
-	zserv_privs.change(ZPRIVS_LOWER);
-	stream_free(copy);
-}
-#endif
+	/* peek the newest message; its command code is recorded in the
+	 * session statistics below */
+	if (cache->tail) {
+		msg = cache->tail;
+		stream_set_getp(msg, 0);
+		wcmd = stream_getw_from(msg, 6);
+	}

-static int zserv_process_messages(struct thread *thread)
-{
-	struct zserv *client = THREAD_ARG(thread);
-	struct zebra_vrf *zvrf;
-	struct zmsghdr hdr;
-	struct stream *msg;
-	bool hdrvalid;
+	while (stream_fifo_head(cache)) {
+		msg = stream_fifo_pop(cache);
+		buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
+		stream_free(msg);
+	}

-	do {
-		msg = stream_fifo_pop(client->ibuf_fifo);
+	stream_fifo_free(cache);

-		/* break if out of messages */
-		if (!msg)
-			continue;
+	/* If we have any data pending, try to flush it first */
+	switch (buffer_flush_all(client->wb, client->sock)) {
+	case BUFFER_ERROR:
+		goto zwrite_fail;
+	case BUFFER_PENDING:
+		atomic_store_explicit(&client->last_write_time,
+				      (uint32_t)monotime(NULL),
+				      memory_order_relaxed);
+		zserv_client_event(client, ZSERV_CLIENT_WRITE);
+		return 0;
+	case BUFFER_EMPTY:
+		break;
+	}

-		/* read & check header */
-		hdrvalid = zapi_parse_header(msg, &hdr);
-		if (!hdrvalid && IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV) {
-			const char *emsg = "Message has corrupt header";
-			zserv_log_message(emsg, msg, NULL);
-		}
-		if (!hdrvalid)
-			continue;
-
-		hdr.length -= ZEBRA_HEADER_SIZE;
-		/* lookup vrf */
-		zvrf = zebra_vrf_lookup_by_id(hdr.vrf_id);
-		if (!zvrf && IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV) {
-			const char *emsg = "Message specifies unknown VRF";
-			zserv_log_message(emsg, msg, &hdr);
-		}
-		if (!zvrf)
-			continue;
+	atomic_store_explicit(&client->last_write_cmd, wcmd,
+			      memory_order_relaxed);

-		/* process commands */
-		zserv_handle_commands(client, &hdr, msg, zvrf);
+	atomic_store_explicit(&client->last_write_time,
+			      (uint32_t)monotime(NULL), memory_order_relaxed);

-	} while (msg);
+	return 0;

+zwrite_fail:
+	zlog_warn("%s: could not write to %s [fd = %d], closing.", __func__,
+		  zebra_route_string(client->proto), client->sock);
+	zserv_client_close(client);
	return 0;
}
-/* Handler of zebra service request. */
+/*
+ * Read and process data from a client socket.
+ *
+ * The responsibilities here are to read raw data from the client socket,
+ * validate the header, encapsulate it into a single stream object, push it
+ * onto the input queue and then notify the main thread that there is new data
+ * available.
+ *
+ * This function first looks for any data in the client structure's working
+ * input buffer. If data is present, it is assumed that reading stopped in a
+ * previous invocation of this task and needs to be resumed to finish a message.
+ * Otherwise, the socket data stream is assumed to be at the beginning of a new
+ * ZAPI message (specifically at the header). The header is read and validated.
+ * If the header passed validation then the length field found in the header is
+ * used to compute the total length of the message. That much data is read (but
+ * not inspected), appended to the header, placed into a stream and pushed onto
+ * the client's input queue. A task is then scheduled on the main thread to
+ * process the client's input queue. Finally, if all of this was successful,
+ * this task reschedules itself.
+ *
+ * Any failure in any of these actions is handled by terminating the client.
+ */
static int zserv_read(struct thread *thread)
{
+	struct zserv *client = THREAD_ARG(thread);
	int sock;
-	struct zserv *client;
	size_t already;
-#if defined(HANDLE_ZAPI_FUZZING)
-	int packets = 1;
-#else
-	int packets = zebrad.packets_to_process;
-#endif
-	/* Get thread data. Reset reading thread because I'm running. */
-	sock = THREAD_FD(thread);
-	client = THREAD_ARG(thread);
+	struct stream_fifo *cache;
+	/* packet processing budget for this invocation */
+	uint32_t p2p_orig;

-	if (client->t_suicide) {
-		zebra_client_close(client);
-		return -1;
-	}
+	uint32_t p2p;
+	struct zmsghdr hdr;

-	while (packets) {
-		struct zmsghdr hdr;
+	p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
+					memory_order_relaxed);
+	cache = stream_fifo_new();
+	p2p = p2p_orig;
+	sock = THREAD_FD(thread);
+
+	while (p2p) {
		ssize_t nb;
		bool hdrvalid;
		char errmsg[256];
				"Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
				__func__, sock, hdr.length,
				(unsigned long)STREAM_SIZE(client->ibuf_work));
+			zserv_log_message(errmsg, client->ibuf_work, &hdr);
			goto zread_fail;
		}
	}
}
-#if defined(HANDLE_ZAPI_FUZZING)
-		zserv_write_incoming(client->ibuf_work, command);
-#endif
-
		/* Debug packet information. */
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("zebra message comes from socket [%d]",
		if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
			zserv_log_message(NULL, client->ibuf_work, &hdr);

-		client->last_read_time = monotime(NULL);
-		client->last_read_cmd = hdr.command;
-
		stream_set_getp(client->ibuf_work, 0);
		struct stream *msg = stream_dup(client->ibuf_work);

-		stream_fifo_push(client->ibuf_fifo, msg);
+		stream_fifo_push(cache, msg);
+		stream_reset(client->ibuf_work);
+		p2p--;
+	}

-		if (client->t_suicide)
-			goto zread_fail;
+	/* stats are updated and packets published only if we read anything */
+	if (p2p < p2p_orig) {
+		/* update session statistics */
+		atomic_store_explicit(&client->last_read_time, monotime(NULL),
+				      memory_order_relaxed);
+		atomic_store_explicit(&client->last_read_cmd, hdr.command,
+				      memory_order_relaxed);
+
+		/* publish read packets on client's input queue */
+		pthread_mutex_lock(&client->ibuf_mtx);
+		{
+			while (cache->head)
+				stream_fifo_push(client->ibuf_fifo,
+						 stream_fifo_pop(cache));
+		}
+		pthread_mutex_unlock(&client->ibuf_mtx);
+
+		/* Schedule job to process those packets */
+		zserv_event(client, ZSERV_PROCESS_MESSAGES);

-		--packets;
-		stream_reset(client->ibuf_work);
	}

	if (IS_ZEBRA_DEBUG_PACKET)
-		zlog_debug("Read %d packets",
-			   zebrad.packets_to_process - packets);
-
-	/* Schedule job to process those packets */
-	thread_add_event(zebrad.master, &zserv_process_messages, client, 0,
-			 NULL);
+		zlog_debug("Read %d packets", p2p_orig - p2p);

	/* Reschedule ourselves */
-	zebra_event(client, ZEBRA_READ);
+	zserv_client_event(client, ZSERV_CLIENT_READ);
+
+	stream_fifo_free(cache);

	return 0;

zread_fail:
-	zebra_client_close(client);
+	stream_fifo_free(cache);
+	zserv_client_close(client);
	return -1;
}
-static void zebra_event(struct zserv *client, enum event event)
+static void zserv_client_event(struct zserv *client,
+ enum zserv_client_event event)
{
switch (event) {
- case ZEBRA_READ:
- thread_add_read(zebrad.master, zserv_read, client, client->sock,
- &client->t_read);
+ case ZSERV_CLIENT_READ:
+ thread_add_read(client->pthread->master, zserv_read, client,
+ client->sock, &client->t_read);
break;
- case ZEBRA_WRITE:
- thread_add_write(zebrad.master, zserv_write, client,
+ case ZSERV_CLIENT_WRITE:
+ thread_add_write(client->pthread->master, zserv_write, client,
client->sock, &client->t_write);
break;
}
}
-/* Accept code of zebra server socket. */
-static int zebra_accept(struct thread *thread)
+/* Main thread lifecycle ---------------------------------------------------- */
+
+/*
+ * Read and process messages from a client.
+ *
+ * This task runs on the main pthread. It is scheduled by client pthreads when
+ * they have new messages available on their input queues. The client is passed
+ * as the task argument.
+ *
+ * Each message is popped off the client's input queue and the action associated
+ * with the message is executed. This proceeds until there are no more messages,
+ * an error occurs, or the processing limit is reached.
+ *
+ * The client's I/O thread can push at most zebrad.packets_to_process messages
+ * onto the input buffer before notifying us there are packets to read. As long
+ * as we always process zebrad.packets_to_process messages here, then we can
+ * rely on the read thread to handle queuing this task enough times to process
+ * everything on the input queue.
+ */
+static int zserv_process_messages(struct thread *thread)
+{
+	struct zserv *client = THREAD_ARG(thread);
+	struct stream *msg;
+	struct stream_fifo *cache = stream_fifo_new();
+
+	uint32_t p2p = zebrad.packets_to_process;
+
+	/* move up to p2p messages off the shared input queue under lock */
+	pthread_mutex_lock(&client->ibuf_mtx);
+	{
+		uint32_t i;
+		for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
+		     ++i) {
+			msg = stream_fifo_pop(client->ibuf_fifo);
+			stream_fifo_push(cache, msg);
+		}
+
+		/* don't leave a dangling reference to the last popped message */
+		msg = NULL;
+	}
+	pthread_mutex_unlock(&client->ibuf_mtx);
+
+	/* execute the batch without holding the queue lock */
+	while (stream_fifo_head(cache)) {
+		msg = stream_fifo_pop(cache);
+		zserv_handle_commands(client, msg);
+		stream_free(msg);
+	}
+
+	stream_fifo_free(cache);
+
+	return 0;
+}
+
+/*
+ * Send a message to a connected client, to be written out by the client's
+ * I/O pthread.
+ *
+ * client
+ *    the client to send the message to
+ *
+ * msg
+ *    the message; ownership passes to the client's output queue
+ *
+ * Returns 0 (also when the message is suppressed for synchronous clients).
+ */
+int zserv_send_message(struct zserv *client, struct stream *msg)
+{
+	/*
+	 * This is a somewhat poorly named variable added with Zebra's portion
+	 * of the label manager. That component does not use the regular
+	 * zserv/zapi_msg interface for handling its messages, as the client
+	 * itself runs in-process. Instead it uses synchronous writes on the
+	 * zserv client's socket directly in the zread* handlers for its
+	 * message types. Furthermore, it cannot handle the usual messages
+	 * Zebra sends (such as those for interface changes) and so has added
+	 * this flag and check here as a hack to suppress all messages that it
+	 * does not explicitly know about.
+	 *
+	 * In any case this needs to be cleaned up at some point.
+	 *
+	 * See also:
+	 *	zread_label_manager_request
+	 *	zsend_label_manager_connect_response
+	 *	zsend_assign_label_chunk_response
+	 *	...
+	 */
+	if (client->is_synchronous)
+		return 0;
+
+	pthread_mutex_lock(&client->obuf_mtx);
+	{
+		stream_fifo_push(client->obuf_fifo, msg);
+	}
+	pthread_mutex_unlock(&client->obuf_mtx);
+
+	/* wake the client's I/O pthread to write it out */
+	zserv_client_event(client, ZSERV_CLIENT_WRITE);
+
+	return 0;
+}
+
+
+/* Hooks for client connect / disconnect */
+DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
+DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
+
+/*
+ * Deinitialize zebra client.
+ *
+ * - Deregister and deinitialize related internal resources
+ * - Gracefully close socket
+ * - Free associated resources
+ * - Free client structure
+ *
+ * This does *not* take any action on the struct thread * fields. These are
+ * managed by the owning pthread and any tasks associated with them must have
+ * been stopped prior to invoking this function.
+ */
+static void zserv_client_free(struct zserv *client)
+{
+	hook_call(zserv_client_close, client);
+
+	/* Close file descriptor. */
+	/* NOTE(review): sock is tested as a boolean; this assumes a live
+	 * client never has fd 0 and that this function runs at most once per
+	 * client (sock is reset to -1, which is truthy) — confirm callers */
+	if (client->sock) {
+		unsigned long nroutes;
+
+		close(client->sock);
+		/* purge this client's routes from the RIB */
+		nroutes = rib_score_proto(client->proto, client->instance);
+		zlog_notice(
+			"client %d disconnected. %lu %s routes removed from the rib",
+			client->sock, nroutes,
+			zebra_route_string(client->proto));
+		client->sock = -1;
+	}
+
+	/* Free stream buffers. */
+	if (client->ibuf_work)
+		stream_free(client->ibuf_work);
+	if (client->obuf_work)
+		stream_free(client->obuf_work);
+	if (client->ibuf_fifo)
+		stream_fifo_free(client->ibuf_fifo);
+	if (client->obuf_fifo)
+		stream_fifo_free(client->obuf_fifo);
+	if (client->wb)
+		buffer_free(client->wb);
+
+	/* Free buffer mutexes */
+	pthread_mutex_destroy(&client->obuf_mtx);
+	pthread_mutex_destroy(&client->ibuf_mtx);
+
+	/* Free bitmaps. */
+	for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
+		for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
+			vrf_bitmap_free(client->redist[afi][i]);
+
+	vrf_bitmap_free(client->redist_default);
+	vrf_bitmap_free(client->ifinfo);
+	vrf_bitmap_free(client->ridinfo);
+
+	XFREE(MTYPE_TMP, client);
+}
+
+/*
+ * Finish closing a client.
+ *
+ * This task is scheduled by a ZAPI client pthread on the main pthread when it
+ * wants to stop itself. When this executes, the client connection should
+ * already have been closed. This task's responsibility is to gracefully
+ * terminate the client thread, update relevant internal datastructures and
+ * free any resources allocated by the main thread.
+ */
+static int zserv_handle_client_close(struct thread *thread)
+{
+	struct zserv *client = THREAD_ARG(thread);
+
+	/*
+	 * Ensure these have been nulled. This does not equate to the
+	 * associated task(s) being scheduled or unscheduled on the client
+	 * pthread's threadmaster.
+	 */
+	assert(!client->t_read);
+	assert(!client->t_write);
+
+	/* synchronously stop thread */
+	frr_pthread_stop(client->pthread, NULL);
+
+	/* destroy frr_pthread */
+	frr_pthread_destroy(client->pthread);
+	client->pthread = NULL;
+
+	/* remove from the daemon's client list and release all resources */
+	listnode_delete(zebrad.client_list, client);
+	zserv_client_free(client);
+	return 0;
+}
+
+/*
+ * Create a new client.
+ *
+ * This is called when a new connection is accept()'d on the ZAPI socket. It
+ * initializes new client structure, notifies any subscribers of the connection
+ * event and spawns the client's thread.
+ *
+ * sock
+ * client's socket file descriptor
+ */
+static void zserv_client_create(int sock)
+{
+	struct zserv *client;
+	int i;
+	afi_t afi;
+
+	client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));
+
+	/* Make client input/output buffer. */
+	client->sock = sock;
+	client->ibuf_fifo = stream_fifo_new();
+	client->obuf_fifo = stream_fifo_new();
+	client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
+	client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
+	pthread_mutex_init(&client->ibuf_mtx, NULL);
+	pthread_mutex_init(&client->obuf_mtx, NULL);
+	client->wb = buffer_new(0);
+
+	/* Set table number. */
+	client->rtm_table = zebrad.rtm_table_default;
+
+	atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
+			      memory_order_relaxed);
+
+	/* Initialize flags */
+	for (afi = AFI_IP; afi < AFI_MAX; afi++)
+		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
+			client->redist[afi][i] = vrf_bitmap_init();
+	client->redist_default = vrf_bitmap_init();
+	client->ifinfo = vrf_bitmap_init();
+	client->ridinfo = vrf_bitmap_init();
+
+	/* by default, it's not a synchronous client */
+	client->is_synchronous = 0;
+
+	/* Add this client to linked list. */
+	listnode_add(zebrad.client_list, client);
+
+	/* each client gets its own pthread to handle its socket I/O */
+	struct frr_pthread_attr zclient_pthr_attrs = {
+		.id = frr_pthread_get_id(),
+		.start = frr_pthread_attr_default.start,
+		.stop = frr_pthread_attr_default.stop
+	};
+	client->pthread =
+		frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread");
+
+	zebra_vrf_update_all(client);
+
+	/* start read loop */
+	/* NOTE(review): queued before frr_pthread_run(); assumes scheduling
+	 * onto a not-yet-running threadmaster is safe — confirm */
+	zserv_client_event(client, ZSERV_CLIENT_READ);
+
+	/* call callbacks */
+	hook_call(zserv_client_connect, client);
+
+	/* start pthread */
+	frr_pthread_run(client->pthread, NULL);
+}
+
+/*
+ * Accept socket connection.
+ */
+static int zserv_accept(struct thread *thread)
{
int accept_sock;
int client_sock;
accept_sock = THREAD_FD(thread);
/* Reregister myself. */
- thread_add_read(zebrad.master, zebra_accept, NULL, accept_sock, NULL);
+ zserv_event(NULL, ZSERV_ACCEPT);
len = sizeof(struct sockaddr_in);
client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);
set_nonblocking(client_sock);
/* Create new zebra client. */
- zebra_client_create(client_sock);
+ zserv_client_create(client_sock);
return 0;
}
-/* Make zebra server socket, wiping any existing one (see bug #403). */
-void zebra_zserv_socket_init(char *path)
+void zserv_start(char *path)
{
int ret;
- int sock;
mode_t old_mask;
struct sockaddr_storage sa;
socklen_t sa_len;
old_mask = umask(0077);
/* Make UNIX domain socket. */
- sock = socket(sa.ss_family, SOCK_STREAM, 0);
- if (sock < 0) {
+ zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
+ if (zebrad.sock < 0) {
zlog_warn("Can't create zserv socket: %s",
safe_strerror(errno));
zlog_warn(
}
if (sa.ss_family != AF_UNIX) {
- sockopt_reuseaddr(sock);
- sockopt_reuseport(sock);
+ sockopt_reuseaddr(zebrad.sock);
+ sockopt_reuseport(zebrad.sock);
} else {
struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
if (suna->sun_path[0])
}
zserv_privs.change(ZPRIVS_RAISE);
- setsockopt_so_recvbuf(sock, 1048576);
- setsockopt_so_sendbuf(sock, 1048576);
+ setsockopt_so_recvbuf(zebrad.sock, 1048576);
+ setsockopt_so_sendbuf(zebrad.sock, 1048576);
zserv_privs.change(ZPRIVS_LOWER);
if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_RAISE))
zlog_err("Can't raise privileges");
- ret = bind(sock, (struct sockaddr *)&sa, sa_len);
+ ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
if (ret < 0) {
zlog_warn("Can't bind zserv socket on %s: %s", path,
safe_strerror(errno));
zlog_warn(
"zebra can't provide full functionality due to above error");
- close(sock);
+ close(zebrad.sock);
+ zebrad.sock = -1;
return;
}
if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_LOWER))
zlog_err("Can't lower privileges");
- ret = listen(sock, 5);
+ ret = listen(zebrad.sock, 5);
if (ret < 0) {
zlog_warn("Can't listen to zserv socket %s: %s", path,
safe_strerror(errno));
zlog_warn(
"zebra can't provide full functionality due to above error");
- close(sock);
+ close(zebrad.sock);
+ zebrad.sock = -1;
return;
}
umask(old_mask);
- thread_add_read(zebrad.master, zebra_accept, NULL, sock, NULL);
+ zserv_event(NULL, ZSERV_ACCEPT);
+}
+
+/*
+ * Schedule an event on zebra's main pthread (see declaration comment above
+ * for the event semantics).
+ */
+void zserv_event(struct zserv *client, enum zserv_event event)
+{
+	switch (event) {
+	case ZSERV_ACCEPT:
+		thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
+				NULL);
+		break;
+	case ZSERV_PROCESS_MESSAGES:
+		thread_add_event(zebrad.master, zserv_process_messages, client,
+				 0, NULL);
+		break;
+	case ZSERV_HANDLE_CLOSE:
+		thread_add_event(zebrad.master, zserv_handle_client_close,
+				 client, 0, NULL);
+		/* break added for consistency with the other cases and to
+		 * guard against silent fallthrough if a case is added later */
+		break;
+	}
+}
+
+/* General purpose ---------------------------------------------------------- */
+
#define ZEBRA_TIME_BUF 32
static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
{
{
char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
+ time_t connect_time, last_read_time, last_write_time;
+ uint16_t last_read_cmd, last_write_cmd;
vty_out(vty, "Client: %s", zebra_route_string(client->proto));
if (client->instance)
vty_out(vty, "FD: %d \n", client->sock);
vty_out(vty, "Route Table ID: %d \n", client->rtm_table);
+ connect_time = (time_t) atomic_load_explicit(&client->connect_time,
+ memory_order_relaxed);
+
vty_out(vty, "Connect Time: %s \n",
- zserv_time_buf(&client->connect_time, cbuf, ZEBRA_TIME_BUF));
+ zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
if (client->nh_reg_time) {
vty_out(vty, "Nexthop Registry Time: %s \n",
zserv_time_buf(&client->nh_reg_time, nhbuf,
} else
vty_out(vty, "Not registered for Nexthop Updates\n");
+ last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
+ memory_order_relaxed);
+ last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
+ memory_order_relaxed);
+
+ last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
+ memory_order_relaxed);
+ last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
+ memory_order_relaxed);
+
vty_out(vty, "Last Msg Rx Time: %s \n",
- zserv_time_buf(&client->last_read_time, rbuf, ZEBRA_TIME_BUF));
+ zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
vty_out(vty, "Last Msg Tx Time: %s \n",
- zserv_time_buf(&client->last_write_time, wbuf, ZEBRA_TIME_BUF));
- if (client->last_read_time)
+ zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
+ if (last_read_cmd)
vty_out(vty, "Last Rcvd Cmd: %s \n",
- zserv_command_string(client->last_read_cmd));
- if (client->last_write_time)
+ zserv_command_string(last_read_cmd));
+ if (last_write_cmd)
vty_out(vty, "Last Sent Cmd: %s \n",
- zserv_command_string(client->last_write_cmd));
+ zserv_command_string(last_write_cmd));
vty_out(vty, "\n");
vty_out(vty, "Type Add Update Del \n");
{
char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
char wbuf[ZEBRA_TIME_BUF];
+ time_t connect_time, last_read_time, last_write_time;
+
+ connect_time = (time_t)atomic_load_explicit(&client->connect_time,
+ memory_order_relaxed);
+ last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
+ memory_order_relaxed);
+ last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
+ memory_order_relaxed);
vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
zebra_route_string(client->proto),
- zserv_time_buf(&client->connect_time, cbuf, ZEBRA_TIME_BUF),
- zserv_time_buf(&client->last_read_time, rbuf, ZEBRA_TIME_BUF),
- zserv_time_buf(&client->last_write_time, wbuf, ZEBRA_TIME_BUF),
+ zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
+ zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
+ zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
client->v4_route_add_cnt + client->v4_route_upd8_cnt,
client->v4_route_del_cnt,
client->v6_route_add_cnt + client->v6_route_upd8_cnt,
client->v6_route_del_cnt);
}
-struct zserv *zebra_find_client(uint8_t proto, unsigned short instance)
+struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
{
struct listnode *node, *nnode;
struct zserv *client;
struct thread t;
zebra_client_create(-1);
- client = zebrad.client_list->head->data;
+
+ frr_pthread_stop(client->pthread, NULL);
+ frr_pthread_destroy(client->pthread);
+ client->pthread = NULL;
+
t.arg = client;
fd = open(input, O_RDONLY | O_NONBLOCK);
t.u.fd = fd;
- zebra_client_read(&t);
+ zserv_read(&t);
close(fd);
}
{
/* Client list init. */
zebrad.client_list = list_new();
- zebrad.client_list->del = (void (*)(void *))zebra_client_free;
+ zebrad.client_list->del = (void (*)(void *)) zserv_client_free;
+
+ /* Misc init. */
+ zebrad.sock = -1;
install_element(ENABLE_NODE, &show_zebra_client_cmd);
install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
/* Client structure. */
struct zserv {
+ /* Client pthread */
+ struct frr_pthread *pthread;
+
/* Client file descriptor. */
int sock;
/* Input/output buffer to the client. */
+ pthread_mutex_t ibuf_mtx;
struct stream_fifo *ibuf_fifo;
+ pthread_mutex_t obuf_mtx;
struct stream_fifo *obuf_fifo;
/* Private I/O buffers */
struct thread *t_read;
struct thread *t_write;
- /* Thread for delayed close. */
- struct thread *t_suicide;
-
/* default routing table this client munges */
int rtm_table;
uint32_t prefixadd_cnt;
uint32_t prefixdel_cnt;
- time_t connect_time;
- time_t last_read_time;
- time_t last_write_time;
time_t nh_reg_time;
time_t nh_dereg_time;
time_t nh_last_upd_time;
- int last_read_cmd;
- int last_write_cmd;
+	/*
+	 * Session information.
+	 *
+	 * These fields are updated independently of one another, so they are
+	 * not guaranteed to be mutually consistent. For instance,
+	 * last_read_cmd may already reflect a message received after the one
+	 * whose arrival time is recorded in last_read_time.
+	 */
+
+ /* monotime of client creation */
+ _Atomic uint32_t connect_time;
+ /* monotime of last message received */
+ _Atomic uint32_t last_read_time;
+ /* monotime of last message sent */
+ _Atomic uint32_t last_write_time;
+ /* command code of last message read */
+ _Atomic uint16_t last_read_cmd;
+ /* command code of last message written */
+ _Atomic uint16_t last_write_cmd;
};
#define ZAPI_HANDLER_ARGS \
struct zebra_vrf *zvrf
/* Hooks for client connect / disconnect */
-DECLARE_HOOK(zapi_client_connect, (struct zserv *client), (client));
-DECLARE_KOOH(zapi_client_close, (struct zserv *client), (client));
+DECLARE_HOOK(zserv_client_connect, (struct zserv *client), (client));
+DECLARE_KOOH(zserv_client_close, (struct zserv *client), (client));
/* Zebra instance */
struct zebra_t {
struct thread_master *master;
struct list *client_list;
+ /* Socket */
+ int sock;
+
/* default table */
uint32_t rtm_table_default;
/* LSP work queue */
struct work_queue *lsp_process_q;
-#define ZEBRA_ZAPI_PACKETS_TO_PROCESS 10
- uint32_t packets_to_process;
+#define ZEBRA_ZAPI_PACKETS_TO_PROCESS 1000
+ _Atomic uint32_t packets_to_process;
};
extern struct zebra_t zebrad;
extern unsigned int multipath_num;
-/* Prototypes. */
+/*
+ * Initialize Zebra API server.
+ *
+ * Installs CLI commands and creates the client list.
+ */
extern void zserv_init(void);
-extern void zebra_zserv_socket_init(char *path);
-extern int zebra_server_send_message(struct zserv *client, struct stream *msg);
-extern struct zserv *zebra_find_client(uint8_t proto, unsigned short instance);
+/*
+ * Start Zebra API server.
+ *
+ * Allocates resources, creates the server socket and begins listening on the
+ * socket.
+ *
+ * path
+ * where to place the Unix domain socket
+ */
+extern void zserv_start(char *path);
+
+/*
+ * Send a message to a connected Zebra API client.
+ *
+ * client
+ * the client to send to
+ *
+ * msg
+ * the message to send
+ */
+extern int zserv_send_message(struct zserv *client, struct stream *msg);
+
+/*
+ * Retrieve a client by its protocol and instance number.
+ *
+ * proto
+ * protocol number
+ *
+ * instance
+ * instance number
+ *
+ * Returns:
+ * The Zebra API client.
+ */
+extern struct zserv *zserv_find_client(uint8_t proto, unsigned short instance);
#if defined(HANDLE_ZAPI_FUZZING)
extern void zserv_read_file(char *input);