]> git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #2298 from qlyoung/pipe-actions-vtysh
authorRuss White <russ@riw.us>
Fri, 8 Jun 2018 11:39:36 +0000 (07:39 -0400)
committerGitHub <noreply@github.com>
Fri, 8 Jun 2018 11:39:36 +0000 (07:39 -0400)
*: add support for `|` actions

49 files changed:
bgpd/bgp_flowspec.c
bgpd/bgp_mplsvpn.c
bgpd/bgp_nexthop.c
bgpd/bgp_route.c
bgpd/bgp_vty.c
bgpd/bgpd.c
configure.ac
lib/libfrr.c
lib/libfrr.h
lib/module.c
lib/vrf.c
ospf6d/ospf6_neighbor.h
pimd/pim_cmd.c
pimd/pim_instance.c
pimd/pim_instance.h
pimd/pim_nht.c
pimd/pim_vty.c
pimd/pim_zlookup.c
pimd/pimd.c
tests/isisd/test_fuzz_isis_tlv.py
vtysh/vtysh.c
vtysh/vtysh_main.c
zebra/if_netlink.c
zebra/label_manager.c
zebra/main.c
zebra/rt.h
zebra/rt_netlink.c
zebra/rt_socket.c
zebra/rule_netlink.c
zebra/rule_socket.c
zebra/table_manager.c
zebra/zapi_msg.c
zebra/zapi_msg.h
zebra/zebra_mpls.c
zebra/zebra_mpls_netlink.c
zebra/zebra_mpls_null.c
zebra/zebra_mpls_openbsd.c
zebra/zebra_mroute.c
zebra/zebra_pbr.c
zebra/zebra_pbr.h
zebra/zebra_ptm.c
zebra/zebra_ptm_redistribute.c
zebra/zebra_pw.c
zebra/zebra_rib.c
zebra/zebra_rnh.c
zebra/zebra_vty.c
zebra/zebra_vxlan.c
zebra/zserv.c
zebra/zserv.h

index 6eb1e39884d386233a7ecd6e2f1a762fae213dd5..884c5aa51a182aab9ae5360a027b7bb4b810ba63 100644 (file)
@@ -148,7 +148,7 @@ int bgp_nlri_parse_flowspec(struct peer *peer, struct attr *attr,
 
                if (BGP_DEBUG(flowspec, FLOWSPEC)) {
                        char return_string[BGP_FLOWSPEC_NLRI_STRING_MAX];
-                       char local_string[BGP_FLOWSPEC_NLRI_STRING_MAX];
+                       char local_string[BGP_FLOWSPEC_NLRI_STRING_MAX * 2];
                        char ec_string[BGP_FLOWSPEC_NLRI_STRING_MAX];
                        char *s = NULL;
 
@@ -157,20 +157,19 @@ int bgp_nlri_parse_flowspec(struct peer *peer, struct attr *attr,
                                               p.u.prefix_flowspec.prefixlen,
                                               return_string,
                                               NLRI_STRING_FORMAT_MIN, NULL);
-                       snprintf(ec_string, BGP_FLOWSPEC_NLRI_STRING_MAX,
+                       snprintf(ec_string, sizeof(ec_string),
                                 "EC{none}");
                        if (attr && attr->ecommunity) {
                                s = ecommunity_ecom2str(attr->ecommunity,
                                                ECOMMUNITY_FORMAT_ROUTE_MAP, 0);
-                               snprintf(ec_string,
-                                        BGP_FLOWSPEC_NLRI_STRING_MAX,
+                               snprintf(ec_string, sizeof(ec_string),
                                         "EC{%s}",
                                        s == NULL ? "none" : s);
 
                                if (s)
                                        ecommunity_strfree(&s);
                        }
-                       snprintf(local_string, BGP_FLOWSPEC_NLRI_STRING_MAX,
+                       snprintf(local_string, sizeof(local_string),
                                 "FS Rx %s %s %s %s", withdraw ?
                                 "Withdraw":"Update",
                                 afi2str(afi), return_string,
index b58e9da6f41042aec42f763b6042b80be3be12e5..eef711aa59bf907b1be46e9345a9b250756753ec 100644 (file)
@@ -573,7 +573,7 @@ leak_update(
        new->extra->parent = bgp_info_lock(parent);
        bgp_lock_node((struct bgp_node *)((struct bgp_info *)parent)->net);
        if (bgp_orig)
-               new->extra->bgp_orig = bgp_orig;
+               new->extra->bgp_orig = bgp_lock(bgp_orig);
        if (nexthop_orig)
                new->extra->nexthop_orig = *nexthop_orig;
 
index 3700778c73d9527fac8a039a3b2224025b079b63..fd8d894878a55aa277a2cfbf177bcba85efbd2a9 100644 (file)
@@ -326,35 +326,21 @@ void bgp_connected_delete(struct bgp *bgp, struct connected *ifc)
 {
        struct prefix p;
        struct prefix *addr;
-       struct bgp_node *rn;
+       struct bgp_node *rn = NULL;
        struct bgp_connected_ref *bc;
 
        addr = ifc->address;
 
        p = *(CONNECTED_PREFIX(ifc));
+       apply_mask(&p);
        if (addr->family == AF_INET) {
-               apply_mask_ipv4((struct prefix_ipv4 *)&p);
-
                if (prefix_ipv4_any((struct prefix_ipv4 *)&p))
                        return;
 
                bgp_address_del(bgp, addr);
 
                rn = bgp_node_lookup(bgp->connected_table[AFI_IP], &p);
-               if (!rn)
-                       return;
-
-               bc = rn->info;
-               bc->refcnt--;
-               if (bc->refcnt == 0) {
-                       XFREE(MTYPE_BGP_CONN, bc);
-                       rn->info = NULL;
-               }
-               bgp_unlock_node(rn);
-               bgp_unlock_node(rn);
        } else if (addr->family == AF_INET6) {
-               apply_mask_ipv6((struct prefix_ipv6 *)&p);
-
                if (IN6_IS_ADDR_UNSPECIFIED(&p.u.prefix6))
                        return;
 
@@ -363,17 +349,34 @@ void bgp_connected_delete(struct bgp *bgp, struct connected *ifc)
 
                rn = bgp_node_lookup(bgp->connected_table[AFI_IP6],
                                     (struct prefix *)&p);
-               if (!rn)
-                       return;
+       }
 
-               bc = rn->info;
-               bc->refcnt--;
-               if (bc->refcnt == 0) {
-                       XFREE(MTYPE_BGP_CONN, bc);
-                       rn->info = NULL;
-               }
-               bgp_unlock_node(rn);
-               bgp_unlock_node(rn);
+       if (!rn)
+               return;
+
+       bc = rn->info;
+       bc->refcnt--;
+       if (bc->refcnt == 0) {
+               XFREE(MTYPE_BGP_CONN, bc);
+               rn->info = NULL;
+       }
+       bgp_unlock_node(rn);
+       bgp_unlock_node(rn);
+}
+
+static void bgp_connected_cleanup(struct route_table *table,
+                                 struct route_node *rn)
+{
+       struct bgp_connected_ref *bc;
+
+       bc = rn->info;
+       if (!bc)
+               return;
+
+       bc->refcnt--;
+       if (bc->refcnt == 0) {
+               XFREE(MTYPE_BGP_CONN, bc);
+               rn->info = NULL;
        }
 }
 
@@ -656,6 +659,8 @@ void bgp_scan_finish(struct bgp *bgp)
                bgp_table_unlock(bgp->nexthop_cache_table[afi]);
                bgp->nexthop_cache_table[afi] = NULL;
 
+               bgp->connected_table[afi]->route_table->cleanup =
+                       bgp_connected_cleanup;
                bgp_table_unlock(bgp->connected_table[afi]);
                bgp->connected_table[afi] = NULL;
 
index 591af0f8cfe64384ed6b403c19821c7a20755c54..f84deede0d81aed5cbc442bb1da36c37f16e8402 100644 (file)
@@ -206,11 +206,19 @@ struct bgp_info *bgp_info_new(void)
 static void bgp_info_free(struct bgp_info *binfo)
 {
        /* unlink reference to parent, if any. */
-       if (binfo->extra && binfo->extra->parent) {
-               bgp_info_unlock((struct bgp_info *)binfo->extra->parent);
-               bgp_unlock_node((struct bgp_node *)((struct bgp_info *)binfo
-                                                   ->extra->parent)->net);
-               binfo->extra->parent = NULL;
+       if (binfo->extra) {
+               if (binfo->extra->parent) {
+                       bgp_unlock_node(
+                               (struct bgp_node *)((struct bgp_info *)binfo
+                                                           ->extra->parent)
+                                       ->net);
+                       bgp_info_unlock(
+                               (struct bgp_info *)binfo->extra->parent);
+                       binfo->extra->parent = NULL;
+               }
+
+               if (binfo->extra->bgp_orig)
+                       bgp_unlock(binfo->extra->bgp_orig);
        }
 
        if (binfo->attr)
@@ -5433,6 +5441,95 @@ static void bgp_aggregate_free(struct bgp_aggregate *aggregate)
        XFREE(MTYPE_BGP_AGGREGATE, aggregate);
 }
 
+static int bgp_aggregate_info_same(struct bgp_info *ri, struct aspath *aspath,
+                                  struct community *comm)
+{
+       static struct aspath *ae = NULL;
+
+       if (!ae)
+               ae = aspath_empty();
+
+       if (!ri)
+               return 0;
+
+       if (!aspath_cmp(ri->attr->aspath, (aspath) ? aspath : ae))
+               return 0;
+
+       if (!community_cmp(ri->attr->community, comm))
+               return 0;
+
+       return 1;
+}
+
+static void bgp_aggregate_install(struct bgp *bgp, afi_t afi, safi_t safi,
+                                 struct prefix *p, uint8_t origin,
+                                 struct aspath *aspath,
+                                 struct community *community,
+                                 uint8_t atomic_aggregate,
+                                 struct bgp_aggregate *aggregate)
+{
+       struct bgp_node *rn;
+       struct bgp_table *table;
+       struct bgp_info *ri, *new;
+
+       table = bgp->rib[afi][safi];
+
+       rn = bgp_node_get(table, p);
+
+       for (ri = rn->info; ri; ri = ri->next)
+               if (ri->peer == bgp->peer_self && ri->type == ZEBRA_ROUTE_BGP
+                   && ri->sub_type == BGP_ROUTE_AGGREGATE)
+                       break;
+
+       if (aggregate->count > 0) {
+               /*
+                * If the aggregate information has not changed
+                * no need to re-install it again.
+                */
+               if (bgp_aggregate_info_same(rn->info, aspath, community)) {
+                       bgp_unlock_node(rn);
+
+                       if (aspath)
+                               aspath_free(aspath);
+                       if (community)
+                               community_free(community);
+
+                       return;
+               }
+
+               /*
+                * Mark the old as unusable
+                */
+               if (ri)
+                       bgp_info_delete(rn, ri);
+
+               new = info_make(
+                       ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self,
+                       bgp_attr_aggregate_intern(bgp, origin, aspath,
+                                                 community, aggregate->as_set,
+                                                 atomic_aggregate),
+                       rn);
+               SET_FLAG(new->flags, BGP_INFO_VALID);
+
+               bgp_info_add(rn, new);
+               bgp_process(bgp, rn, afi, safi);
+       } else {
+               for (ri = rn->info; ri; ri = ri->next)
+                       if (ri->peer == bgp->peer_self
+                           && ri->type == ZEBRA_ROUTE_BGP
+                           && ri->sub_type == BGP_ROUTE_AGGREGATE)
+                               break;
+
+               /* Withdraw static BGP route from routing table. */
+               if (ri) {
+                       bgp_info_delete(rn, ri);
+                       bgp_process(bgp, rn, afi, safi);
+               }
+       }
+
+       bgp_unlock_node(rn);
+}
+
 /* Update an aggregate as routes are added/removed from the BGP table */
 static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
                                struct bgp_info *rinew, afi_t afi, safi_t safi,
@@ -5447,24 +5544,10 @@ static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
        struct aspath *asmerge = NULL;
        struct community *community = NULL;
        struct community *commerge = NULL;
-#if defined(AGGREGATE_NEXTHOP_CHECK)
-       struct in_addr nexthop;
-       uint32_t med = 0;
-#endif
        struct bgp_info *ri;
-       struct bgp_info *new;
-       int first = 1;
        unsigned long match = 0;
        uint8_t atomic_aggregate = 0;
 
-       /* Record adding route's nexthop and med. */
-       if (rinew) {
-#if defined(AGGREGATE_NEXTHOP_CHECK)
-               nexthop = rinew->attr->nexthop;
-               med = rinew->attr->med;
-#endif
-       }
-
        /* ORIGIN attribute: If at least one route among routes that are
           aggregated has ORIGIN with the value INCOMPLETE, then the
           aggregated route must have the ORIGIN attribute with the value
@@ -5479,88 +5562,81 @@ static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
 
        top = bgp_node_get(table, p);
        for (rn = bgp_node_get(table, p); rn;
-            rn = bgp_route_next_until(rn, top))
-               if (rn->p.prefixlen > p->prefixlen) {
-                       match = 0;
-
-                       for (ri = rn->info; ri; ri = ri->next) {
-                               if (BGP_INFO_HOLDDOWN(ri))
-                                       continue;
+            rn = bgp_route_next_until(rn, top)) {
+               if (rn->p.prefixlen <= p->prefixlen)
+                       continue;
 
-                               if (del && ri == del)
-                                       continue;
+               match = 0;
 
-                               if (!rinew && first) {
-#if defined(AGGREGATE_NEXTHOP_CHECK)
-                                       nexthop = ri->attr->nexthop;
-                                       med = ri->attr->med;
-#endif
-                                       first = 0;
-                               }
+               for (ri = rn->info; ri; ri = ri->next) {
+                       if (BGP_INFO_HOLDDOWN(ri))
+                               continue;
 
-#ifdef AGGREGATE_NEXTHOP_CHECK
-                               if (!IPV4_ADDR_SAME(&ri->attr->nexthop,
-                                                   &nexthop)
-                                   || ri->attr->med != med) {
-                                       if (aspath)
-                                               aspath_free(aspath);
-                                       if (community)
-                                               community_free(community);
-                                       bgp_unlock_node(rn);
-                                       bgp_unlock_node(top);
-                                       return;
-                               }
-#endif /* AGGREGATE_NEXTHOP_CHECK */
+                       if (del && ri == del)
+                               continue;
 
-                               if (ri->attr->flag
-                                   & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))
-                                       atomic_aggregate = 1;
+                       if (ri->attr->flag
+                           & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))
+                               atomic_aggregate = 1;
 
-                               if (ri->sub_type != BGP_ROUTE_AGGREGATE) {
-                                       if (aggregate->summary_only) {
-                                               (bgp_info_extra_get(ri))
-                                                       ->suppress++;
-                                               bgp_info_set_flag(
-                                                       rn, ri,
-                                                       BGP_INFO_ATTR_CHANGED);
-                                               match++;
-                                       }
+                       if (ri->sub_type == BGP_ROUTE_AGGREGATE)
+                               continue;
 
-                                       aggregate->count++;
-
-                                       if (origin < ri->attr->origin)
-                                               origin = ri->attr->origin;
-
-                                       if (aggregate->as_set) {
-                                               if (aspath) {
-                                                       asmerge = aspath_aggregate(
-                                                               aspath,
-                                                               ri->attr->aspath);
-                                                       aspath_free(aspath);
-                                                       aspath = asmerge;
-                                               } else
-                                                       aspath = aspath_dup(
-                                                               ri->attr->aspath);
-
-                                               if (ri->attr->community) {
-                                                       if (community) {
-                                                               commerge = community_merge(
-                                                                       community,
-                                                                       ri->attr->community);
-                                                               community = community_uniq_sort(
-                                                                       commerge);
-                                                               community_free(
-                                                                       commerge);
-                                                       } else
-                                                               community = community_dup(
-                                                                       ri->attr->community);
-                                               }
-                                       }
-                               }
+                       /*
+                        * summary-only aggregate route suppress
+                        * aggregated route announcements.
+                        */
+                       if (aggregate->summary_only) {
+                               (bgp_info_extra_get(ri))->suppress++;
+                               bgp_info_set_flag(rn, ri,
+                                                 BGP_INFO_ATTR_CHANGED);
+                               match++;
                        }
-                       if (match)
-                               bgp_process(bgp, rn, afi, safi);
+
+                       aggregate->count++;
+
+                       /*
+                        * If at least one route among routes that are
+                        * aggregated has ORIGIN with the value INCOMPLETE,
+                        * then the aggregated route MUST have the ORIGIN
+                        * attribute with the value INCOMPLETE.  Otherwise, if
+                        * at least one route among routes that are aggregated
+                        * has ORIGIN with the value EGP, then the aggregated
+                        * route MUST have the ORIGIN attribute with the value
+                        * EGP.
+                        */
+                       if (origin < ri->attr->origin)
+                               origin = ri->attr->origin;
+
+                       if (!aggregate->as_set)
+                               continue;
+
+                       /*
+                        * as-set aggregate route generate origin, as path,
+                        * and community aggregation.
+                        */
+                       if (aspath) {
+                               asmerge = aspath_aggregate(aspath,
+                                                          ri->attr->aspath);
+                               aspath_free(aspath);
+                               aspath = asmerge;
+                       } else
+                               aspath = aspath_dup(ri->attr->aspath);
+
+                       if (!ri->attr->community)
+                               continue;
+
+                       if (community) {
+                               commerge = community_merge(community,
+                                                          ri->attr->community);
+                               community = community_uniq_sort(commerge);
+                               community_free(commerge);
+                       } else
+                               community = community_dup(ri->attr->community);
                }
+               if (match)
+                       bgp_process(bgp, rn, afi, safi);
+       }
        bgp_unlock_node(top);
 
        if (rinew) {
@@ -5596,20 +5672,10 @@ static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
                }
        }
 
-       if (aggregate->count > 0) {
-               rn = bgp_node_get(table, p);
-               new = info_make(
-                       ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self,
-                       bgp_attr_aggregate_intern(bgp, origin, aspath,
-                                                 community, aggregate->as_set,
-                                                 atomic_aggregate),
-                       rn);
-               SET_FLAG(new->flags, BGP_INFO_VALID);
+       bgp_aggregate_install(bgp, afi, safi, p, origin, aspath, community,
+                             atomic_aggregate, aggregate);
 
-               bgp_info_add(rn, new);
-               bgp_unlock_node(rn);
-               bgp_process(bgp, rn, afi, safi);
-       } else {
+       if (aggregate->count == 0) {
                if (aspath)
                        aspath_free(aspath);
                if (community)
@@ -5617,8 +5683,50 @@ static void bgp_aggregate_route(struct bgp *bgp, struct prefix *p,
        }
 }
 
-void bgp_aggregate_delete(struct bgp *, struct prefix *, afi_t, safi_t,
-                         struct bgp_aggregate *);
+static void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
+                                safi_t safi, struct bgp_aggregate *aggregate)
+{
+       struct bgp_table *table;
+       struct bgp_node *top;
+       struct bgp_node *rn;
+       struct bgp_info *ri;
+       unsigned long match;
+
+       table = bgp->rib[afi][safi];
+
+       /* If routes exists below this node, generate aggregate routes. */
+       top = bgp_node_get(table, p);
+       for (rn = bgp_node_get(table, p); rn;
+            rn = bgp_route_next_until(rn, top)) {
+               if (rn->p.prefixlen <= p->prefixlen)
+                       continue;
+               match = 0;
+
+               for (ri = rn->info; ri; ri = ri->next) {
+                       if (BGP_INFO_HOLDDOWN(ri))
+                               continue;
+
+                       if (ri->sub_type == BGP_ROUTE_AGGREGATE)
+                               continue;
+
+                       if (aggregate->summary_only && ri->extra) {
+                               ri->extra->suppress--;
+
+                               if (ri->extra->suppress == 0) {
+                                       bgp_info_set_flag(
+                                               rn, ri, BGP_INFO_ATTR_CHANGED);
+                                       match++;
+                               }
+                       }
+                       aggregate->count--;
+               }
+
+               /* If this node was suppressed, process the change. */
+               if (match)
+                       bgp_process(bgp, rn, afi, safi);
+       }
+       bgp_unlock_node(top);
+}
 
 void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p,
                             struct bgp_info *ri, afi_t afi, safi_t safi)
@@ -5628,12 +5736,6 @@ void bgp_aggregate_increment(struct bgp *bgp, struct prefix *p,
        struct bgp_aggregate *aggregate;
        struct bgp_table *table;
 
-       /* MPLS-VPN aggregation is not yet supported. */
-       if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
-           || (safi == SAFI_EVPN)
-           || (safi == SAFI_FLOWSPEC))
-               return;
-
        table = bgp->aggregate[afi][safi];
 
        /* No aggregates configured. */
@@ -5667,12 +5769,6 @@ void bgp_aggregate_decrement(struct bgp *bgp, struct prefix *p,
        struct bgp_aggregate *aggregate;
        struct bgp_table *table;
 
-       /* MPLS-VPN aggregation is not yet supported. */
-       if ((safi == SAFI_MPLS_VPN) || (safi == SAFI_ENCAP)
-           || (safi == SAFI_EVPN)
-           || (safi == SAFI_FLOWSPEC))
-               return;
-
        table = bgp->aggregate[afi][safi];
 
        /* No aggregates configured. */
@@ -5695,196 +5791,6 @@ void bgp_aggregate_decrement(struct bgp *bgp, struct prefix *p,
        bgp_unlock_node(child);
 }
 
-/* Called via bgp_aggregate_set when the user configures aggregate-address */
-static void bgp_aggregate_add(struct bgp *bgp, struct prefix *p, afi_t afi,
-                             safi_t safi, struct bgp_aggregate *aggregate)
-{
-       struct bgp_table *table;
-       struct bgp_node *top;
-       struct bgp_node *rn;
-       struct bgp_info *new;
-       struct bgp_info *ri;
-       unsigned long match;
-       uint8_t origin = BGP_ORIGIN_IGP;
-       struct aspath *aspath = NULL;
-       struct aspath *asmerge = NULL;
-       struct community *community = NULL;
-       struct community *commerge = NULL;
-       uint8_t atomic_aggregate = 0;
-
-       table = bgp->rib[afi][safi];
-
-       /* Sanity check. */
-       if (afi == AFI_IP && p->prefixlen == IPV4_MAX_BITLEN)
-               return;
-       if (afi == AFI_IP6 && p->prefixlen == IPV6_MAX_BITLEN)
-               return;
-
-       /* If routes exists below this node, generate aggregate routes. */
-       top = bgp_node_get(table, p);
-       for (rn = bgp_node_get(table, p); rn;
-            rn = bgp_route_next_until(rn, top)) {
-               if (rn->p.prefixlen <= p->prefixlen)
-                       continue;
-
-               match = 0;
-
-               for (ri = rn->info; ri; ri = ri->next) {
-                       if (BGP_INFO_HOLDDOWN(ri))
-                               continue;
-
-                       if (ri->attr->flag
-                           & ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE))
-                               atomic_aggregate = 1;
-
-                       if (ri->sub_type == BGP_ROUTE_AGGREGATE)
-                               continue;
-
-                       /* summary-only aggregate route suppress
-                        * aggregated route announcement.  */
-                       if (aggregate->summary_only) {
-                               (bgp_info_extra_get(ri))->suppress++;
-                               bgp_info_set_flag(rn, ri,
-                                                 BGP_INFO_ATTR_CHANGED);
-                               match++;
-                       }
-
-                       /* If at least one route among routes that are
-                        * aggregated has ORIGIN with the value INCOMPLETE,
-                        * then the aggregated route MUST have the ORIGIN
-                        * attribute with the value INCOMPLETE.  Otherwise, if
-                        * at least one route among routes that are aggregated
-                        * has ORIGIN with the value EGP, then the aggregated
-                        * route MUST have the ORIGIN attribute with the value
-                        * EGP.
-                        */
-                       if (origin < ri->attr->origin)
-                               origin = ri->attr->origin;
-
-                       /* as-set aggregate route generate origin, as path,
-                        * community aggregation.  */
-                       if (aggregate->as_set) {
-                               if (aspath) {
-                                       asmerge = aspath_aggregate(
-                                               aspath, ri->attr->aspath);
-                                       aspath_free(aspath);
-                                       aspath = asmerge;
-                               } else
-                                       aspath = aspath_dup(ri->attr->aspath);
-
-                               if (ri->attr->community) {
-                                       if (community) {
-                                               commerge = community_merge(
-                                                       community,
-                                                       ri->attr->community);
-                                               community = community_uniq_sort(
-                                                       commerge);
-                                               community_free(commerge);
-                                       } else
-                                               community = community_dup(
-                                                       ri->attr->community);
-                               }
-                       }
-                       aggregate->count++;
-               }
-
-               /* If this node is suppressed, process the change. */
-               if (match)
-                       bgp_process(bgp, rn, afi, safi);
-       }
-       bgp_unlock_node(top);
-
-       /* Add aggregate route to BGP table. */
-       if (aggregate->count) {
-               rn = bgp_node_get(table, p);
-               new = info_make(
-                       ZEBRA_ROUTE_BGP, BGP_ROUTE_AGGREGATE, 0, bgp->peer_self,
-                       bgp_attr_aggregate_intern(bgp, origin, aspath,
-                                                 community, aggregate->as_set,
-                                                 atomic_aggregate),
-                       rn);
-               SET_FLAG(new->flags, BGP_INFO_VALID);
-
-               bgp_info_add(rn, new);
-               bgp_unlock_node(rn);
-
-               /* Process change. */
-               bgp_process(bgp, rn, afi, safi);
-       } else {
-               if (aspath)
-                       aspath_free(aspath);
-               if (community)
-                       community_free(community);
-       }
-}
-
-void bgp_aggregate_delete(struct bgp *bgp, struct prefix *p, afi_t afi,
-                         safi_t safi, struct bgp_aggregate *aggregate)
-{
-       struct bgp_table *table;
-       struct bgp_node *top;
-       struct bgp_node *rn;
-       struct bgp_info *ri;
-       unsigned long match;
-
-       table = bgp->rib[afi][safi];
-
-       if (afi == AFI_IP && p->prefixlen == IPV4_MAX_BITLEN)
-               return;
-       if (afi == AFI_IP6 && p->prefixlen == IPV6_MAX_BITLEN)
-               return;
-
-       /* If routes exists below this node, generate aggregate routes. */
-       top = bgp_node_get(table, p);
-       for (rn = bgp_node_get(table, p); rn;
-            rn = bgp_route_next_until(rn, top)) {
-               if (rn->p.prefixlen <= p->prefixlen)
-                       continue;
-               match = 0;
-
-               for (ri = rn->info; ri; ri = ri->next) {
-                       if (BGP_INFO_HOLDDOWN(ri))
-                               continue;
-
-                       if (ri->sub_type == BGP_ROUTE_AGGREGATE)
-                               continue;
-
-                       if (aggregate->summary_only && ri->extra) {
-                               ri->extra->suppress--;
-
-                               if (ri->extra->suppress == 0) {
-                                       bgp_info_set_flag(
-                                               rn, ri, BGP_INFO_ATTR_CHANGED);
-                                       match++;
-                               }
-                       }
-                       aggregate->count--;
-               }
-
-               /* If this node was suppressed, process the change. */
-               if (match)
-                       bgp_process(bgp, rn, afi, safi);
-       }
-       bgp_unlock_node(top);
-
-       /* Delete aggregate route from BGP table. */
-       rn = bgp_node_get(table, p);
-
-       for (ri = rn->info; ri; ri = ri->next)
-               if (ri->peer == bgp->peer_self && ri->type == ZEBRA_ROUTE_BGP
-                   && ri->sub_type == BGP_ROUTE_AGGREGATE)
-                       break;
-
-       /* Withdraw static BGP route from routing table. */
-       if (ri) {
-               bgp_info_delete(rn, ri);
-               bgp_process(bgp, rn, afi, safi);
-       }
-
-       /* Unlock bgp_node_lookup. */
-       bgp_unlock_node(rn);
-}
-
 /* Aggregate route attribute. */
 #define AGGREGATE_SUMMARY_ONLY 1
 #define AGGREGATE_AS_SET       1
@@ -5898,9 +5804,6 @@ static int bgp_aggregate_unset(struct vty *vty, const char *prefix_str,
        struct bgp_node *rn;
        struct bgp_aggregate *aggregate;
 
-       if (safi == SAFI_FLOWSPEC)
-               return CMD_WARNING_CONFIG_FAILED;
-
        /* Convert string to prefix structure. */
        ret = str2prefix(prefix_str, &p);
        if (!ret) {
@@ -5918,13 +5821,8 @@ static int bgp_aggregate_unset(struct vty *vty, const char *prefix_str,
        }
 
        aggregate = rn->info;
-       if (aggregate->safi == SAFI_UNICAST)
-               bgp_aggregate_delete(bgp, &p, afi, SAFI_UNICAST, aggregate);
-       if (aggregate->safi == SAFI_LABELED_UNICAST)
-               bgp_aggregate_delete(bgp, &p, afi, SAFI_LABELED_UNICAST,
-                                    aggregate);
-       if (aggregate->safi == SAFI_MULTICAST)
-               bgp_aggregate_delete(bgp, &p, afi, SAFI_MULTICAST, aggregate);
+       bgp_aggregate_delete(bgp, &p, afi, safi, aggregate);
+       bgp_aggregate_install(bgp, afi, safi, &p, 0, NULL, NULL, 0, aggregate);
 
        /* Unlock aggregate address configuration. */
        rn->info = NULL;
@@ -5944,9 +5842,6 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
        struct bgp_node *rn;
        struct bgp_aggregate *aggregate;
 
-       if (safi == SAFI_FLOWSPEC)
-               return CMD_WARNING_CONFIG_FAILED;
-
        /* Convert string to prefix structure. */
        ret = str2prefix(prefix_str, &p);
        if (!ret) {
@@ -5955,6 +5850,13 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
        }
        apply_mask(&p);
 
+       if ((afi == AFI_IP && p.prefixlen == IPV4_MAX_BITLEN) ||
+           (afi == AFI_IP6 && p.prefixlen == IPV6_MAX_BITLEN)) {
+               vty_out(vty, "Specified prefix: %s will not result in any useful aggregation, disallowing\n",
+                       prefix_str);
+               return CMD_WARNING_CONFIG_FAILED;
+       }
+
        /* Old configuration check. */
        rn = bgp_node_get(bgp->aggregate[afi][safi], &p);
 
@@ -5977,13 +5879,7 @@ static int bgp_aggregate_set(struct vty *vty, const char *prefix_str, afi_t afi,
        rn->info = aggregate;
 
        /* Aggregate address insert into BGP routing table. */
-       if (safi == SAFI_UNICAST)
-               bgp_aggregate_add(bgp, &p, afi, SAFI_UNICAST, aggregate);
-       if (safi == SAFI_LABELED_UNICAST)
-               bgp_aggregate_add(bgp, &p, afi, SAFI_LABELED_UNICAST,
-                                 aggregate);
-       if (safi == SAFI_MULTICAST)
-               bgp_aggregate_add(bgp, &p, afi, SAFI_MULTICAST, aggregate);
+       bgp_aggregate_route(bgp, &p, NULL, afi, safi, NULL, aggregate);
 
        return CMD_SUCCESS;
 }
@@ -7055,8 +6951,8 @@ void route_vty_out_tag(struct vty *vty, struct prefix *p,
                           || (safi == SAFI_EVPN
                               && BGP_ATTR_NEXTHOP_AFI_IP6(attr))
                           || (BGP_ATTR_NEXTHOP_AFI_IP6(attr))) {
-                       char buf_a[BUFSIZ];
-                       char buf_b[BUFSIZ];
+                       char buf_a[512];
+                       char buf_b[512];
                        char buf_c[BUFSIZ];
                        if (attr->mp_nexthop_len
                            == BGP_ATTR_NHLEN_IPV6_GLOBAL) {
@@ -11413,7 +11309,7 @@ static void bgp_config_write_network_evpn(struct vty *vty, struct bgp *bgp,
        struct prefix *p;
        struct prefix_rd *prd;
        struct bgp_static *bgp_static;
-       char buf[PREFIX_STRLEN];
+       char buf[PREFIX_STRLEN * 2];
        char buf2[SU_ADDRSTRLEN];
        char rdbuf[RD_ADDRSTRLEN];
 
index c275fdb7be2655f6aa5fc683b2ef02b47275d77d..d4ebd54e46902638a605ff4d219c76db77b526b1 100644 (file)
@@ -7979,7 +7979,7 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
                                            BGP_UPTIME_LEN, 0, NULL));
 
                        if (peer->status == Established)
-                               if (peer->afc_recv[afi][pfx_rcd_safi])
+                               if (peer->afc_recv[afi][safi])
                                        vty_out(vty, " %12ld",
                                                peer->pcount[afi]
                                                            [pfx_rcd_safi]);
index 0c7b642438184e4274c90a4bc7f4f1041b66a0f7..5d858ae14fa1a3385160d230ebb65c5c43010fed 100644 (file)
@@ -2890,6 +2890,18 @@ static int bgp_startup_timer_expire(struct thread *thread)
        return 0;
 }
 
+/*
+ * On shutdown we call the cleanup function, which
+ * frees the linked list nodes; also free up the
+ * data each node points at.
+ */
+static void bgp_vrf_string_name_delete(void *data)
+{
+       char *vname = data;
+
+       XFREE(MTYPE_TMP, vname);
+}
+
 /* BGP instance creation by `router bgp' commands. */
 static struct bgp *bgp_create(as_t *as, const char *name,
                              enum bgp_instance_type inst_type)
@@ -3000,7 +3012,11 @@ static struct bgp *bgp_create(as_t *as, const char *name,
                        MPLS_LABEL_NONE;
 
                bgp->vpn_policy[afi].import_vrf = list_new();
+               bgp->vpn_policy[afi].import_vrf->del =
+                       bgp_vrf_string_name_delete;
                bgp->vpn_policy[afi].export_vrf = list_new();
+               bgp->vpn_policy[afi].export_vrf->del =
+                       bgp_vrf_string_name_delete;
        }
        if (name) {
                bgp->name = XSTRDUP(MTYPE_BGP, name);
@@ -3412,17 +3428,6 @@ void bgp_free(struct bgp *bgp)
                rmap = &bgp->table_map[afi][safi];
                if (rmap->name)
                        XFREE(MTYPE_ROUTE_MAP_NAME, rmap->name);
-
-               /*
-                * Yes this is per AFI, but
-                * the list_delete_and_null nulls the pointer
-                * and we'll not leak anything on going down
-                * and the if test will fail on the second safi.
-                */
-               if (bgp->vpn_policy[afi].import_vrf)
-                       list_delete_and_null(&bgp->vpn_policy[afi].import_vrf);
-               if (bgp->vpn_policy[afi].export_vrf)
-                       list_delete_and_null(&bgp->vpn_policy[afi].export_vrf);
        }
 
        bgp_scan_finish(bgp);
@@ -3434,6 +3439,23 @@ void bgp_free(struct bgp *bgp)
 
        bgp_evpn_cleanup(bgp);
        bgp_pbr_cleanup(bgp);
+
+       for (afi = AFI_IP; afi < AFI_MAX; afi++) {
+               vpn_policy_direction_t dir;
+
+               if (bgp->vpn_policy[afi].import_vrf)
+                       list_delete_and_null(&bgp->vpn_policy[afi].import_vrf);
+               if (bgp->vpn_policy[afi].export_vrf)
+                       list_delete_and_null(&bgp->vpn_policy[afi].export_vrf);
+
+               dir = BGP_VPN_POLICY_DIR_FROMVPN;
+               if (bgp->vpn_policy[afi].rtlist[dir])
+                       ecommunity_free(&bgp->vpn_policy[afi].rtlist[dir]);
+               dir = BGP_VPN_POLICY_DIR_TOVPN;
+               if (bgp->vpn_policy[afi].rtlist[dir])
+                       ecommunity_free(&bgp->vpn_policy[afi].rtlist[dir]);
+       }
+
        if (bgp->name)
                XFREE(MTYPE_BGP, bgp->name);
        if (bgp->name_pretty)
index 7c7de19e1f8683c5d1278e7eeca1430d4812f90c..35997a87ec612063e2b6d3752aa64f1204cb423d 100755 (executable)
@@ -920,8 +920,6 @@ case "$host_os" in
     dnl how to fix it but no real progress on implementation
     dnl when they fix it, remove this
     AC_DEFINE(IPV6_MINHOPCOUNT, 73, Linux ipv6 Min Hop Count)
-
-    AC_CHECK_DECLS([IFLA_INFO_SLAVE_KIND], [], [], [#include <linux/if_link.h>])
     ;;
   openbsd*)
     AC_MSG_RESULT([OpenBSD])
index 99311836340993adf117da00a7367014c9254d25..88203fbeb6bb57e424d5a1a2a5bfbd7db0d78fd2 100644 (file)
@@ -47,9 +47,9 @@ const char frr_moduledir[] = MODULE_PATH;
 char frr_protoname[256] = "NONE";
 char frr_protonameinst[256] = "NONE";
 
-char config_default[256];
+char config_default[512];
 char frr_zclientpath[256];
-static char pidfile_default[256];
+static char pidfile_default[512];
 static char vtypath_default[256];
 
 bool debug_memstats_at_exit = 0;
index fe6c46670a3c6b312cb83fcd59b535a0d7f3c623..7ffa780bfb9a75d8d6316a0b73d564d9399650d4 100644 (file)
@@ -113,7 +113,7 @@ extern void frr_early_fini(void);
 DECLARE_KOOH(frr_fini, (), ())
 extern void frr_fini(void);
 
-extern char config_default[256];
+extern char config_default[512];
 extern char frr_zclientpath[256];
 extern const char frr_sysconfdir[];
 extern const char frr_vtydir[];
index 3f13307d82c266b7f2ddf2162f46ebf8abd5c9a0..0c853640035a9c34d7aca1e1056e597513b51bc3 100644 (file)
@@ -75,7 +75,7 @@ struct frrmod_runtime *frrmod_load(const char *spec, const char *dir, char *err,
                                   size_t err_len)
 {
        void *handle = NULL;
-       char name[PATH_MAX], fullpath[PATH_MAX], *args;
+       char name[PATH_MAX], fullpath[PATH_MAX * 2], *args;
        struct frrmod_runtime *rtinfo, **rtinfop;
        const struct frrmod_info *info;
 
index e1176d152603a742cd80260daf227583e127caed..797b25d01dcbe0871557a47d2c3e75544d470857 100644 (file)
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -710,11 +710,11 @@ DEFUN_NOSH (no_vrf,
 
 struct cmd_node vrf_node = {VRF_NODE, "%s(config-vrf)# ", 1};
 
-DEFUN_NOSH (vrf_netns,
-           vrf_netns_cmd,
-           "netns NAME",
-           "Attach VRF to a Namespace\n"
-           "The file name in " NS_RUN_DIR ", or a full pathname\n")
+DEFUN (vrf_netns,
+       vrf_netns_cmd,
+       "netns NAME",
+       "Attach VRF to a Namespace\n"
+       "The file name in " NS_RUN_DIR ", or a full pathname\n")
 {
        int idx_name = 1, ret;
        char *pathname = ns_netns_pathname(vty, argv[idx_name]->arg);
index 0c4926edbb4bc9d0614f9a9a5e6cb74f943d96fe..840683cc2f37fab76c2e4d31ba9f339f7535b429 100644 (file)
@@ -35,7 +35,7 @@ extern unsigned char conf_debug_ospf6_neighbor;
 /* Neighbor structure */
 struct ospf6_neighbor {
        /* Neighbor Router ID String */
-       char name[32];
+       char name[36];
 
        /* OSPFv3 Interface this neighbor belongs to */
        struct ospf6_interface *ospf6_if;
index 81191eb96c6371064fbf3b2d9b7b1a202cac8df1..55222ecddbd81ab973ca2702eefdc6a77fbc570e 100644 (file)
@@ -4428,9 +4428,9 @@ static void pim_cmd_show_ip_multicast_helper(struct pim_instance *pim,
        vty_out(vty, "\n");
        vty_out(vty, "Upstream Join Timer: %d secs\n", qpim_t_periodic);
        vty_out(vty, "Join/Prune Holdtime: %d secs\n", PIM_JP_HOLDTIME);
-       vty_out(vty, "PIM ECMP: %s\n", qpim_ecmp_enable ? "Enable" : "Disable");
+       vty_out(vty, "PIM ECMP: %s\n", pim->ecmp_enable ? "Enable" : "Disable");
        vty_out(vty, "PIM ECMP Rebalance: %s\n",
-               qpim_ecmp_rebalance_enable ? "Enable" : "Disable");
+               pim->ecmp_rebalance_enable ? "Enable" : "Disable");
 
        vty_out(vty, "\n");
 
@@ -5734,7 +5734,7 @@ DEFUN (ip_pim_ecmp,
        "Enable PIM ECMP \n")
 {
        PIM_DECLVAR_CONTEXT(vrf, pim);
-       qpim_ecmp_enable = 1;
+       pim->ecmp_enable = true;
 
        return CMD_SUCCESS;
 }
@@ -5748,7 +5748,7 @@ DEFUN (no_ip_pim_ecmp,
        "Disable PIM ECMP \n")
 {
        PIM_DECLVAR_CONTEXT(vrf, pim);
-       qpim_ecmp_enable = 0;
+       pim->ecmp_enable = false;
 
        return CMD_SUCCESS;
 }
@@ -5762,8 +5762,8 @@ DEFUN (ip_pim_ecmp_rebalance,
        "Enable PIM ECMP Rebalance\n")
 {
        PIM_DECLVAR_CONTEXT(vrf, pim);
-       qpim_ecmp_enable = 1;
-       qpim_ecmp_rebalance_enable = 1;
+       pim->ecmp_enable = true;
+       pim->ecmp_rebalance_enable = true;
 
        return CMD_SUCCESS;
 }
@@ -5778,7 +5778,7 @@ DEFUN (no_ip_pim_ecmp_rebalance,
        "Disable PIM ECMP Rebalance\n")
 {
        PIM_DECLVAR_CONTEXT(vrf, pim);
-       qpim_ecmp_rebalance_enable = 0;
+       pim->ecmp_rebalance_enable = false;
 
        return CMD_SUCCESS;
 }
index 7e5bb34e31ee391f97cec7139f8a6c93e09eb509..cb70ee79046f888aaf41d468e414bcf53d02546a 100644 (file)
@@ -77,6 +77,8 @@ static struct pim_instance *pim_instance_init(struct vrf *vrf)
        pim->keep_alive_time = PIM_KEEPALIVE_PERIOD;
        pim->rp_keep_alive_time = PIM_RP_KEEPALIVE_PERIOD;
 
+       pim->ecmp_enable = false;
+       pim->ecmp_rebalance_enable = false;
 
        pim->vrf_id = vrf->vrf_id;
        pim->vrf = vrf;
index 75f011513fabf270052910ef01694151f6724968..b447075e9a934c5f6c5683b18a1c04df1bb45634 100644 (file)
@@ -95,6 +95,9 @@ struct pim_instance {
        unsigned int keep_alive_time;
        unsigned int rp_keep_alive_time;
 
+       bool ecmp_enable;
+       bool ecmp_rebalance_enable;
+
        /* If we need to rescan all our upstreams */
        struct thread *rpf_cache_refresher;
        int64_t rpf_cache_refresh_requests;
index 5ca57c0406b1122d0a14735348e6a94dc87759dc..3cbd11a9ae2a44be70fc1cc5f285e89ed0976bdb 100644 (file)
@@ -449,7 +449,7 @@ int pim_ecmp_nexthop_search(struct pim_instance *pim,
                   metric is less than nexthop update.
                 */
 
-               if (qpim_ecmp_rebalance_enable == 0) {
+               if (pim->ecmp_rebalance_enable == 0) {
                        uint8_t curr_route_valid = 0;
                        // Check if current nexthop is present in new updated
                        // Nexthop list.
@@ -499,7 +499,7 @@ int pim_ecmp_nexthop_search(struct pim_instance *pim,
                        }
                }
        }
-       if (qpim_ecmp_enable) {
+       if (pim->ecmp_enable) {
                // PIM ECMP flag is enable then choose ECMP path.
                hash_val = pim_compute_ecmp_hash(src, grp);
                mod_val = hash_val % pnc->nexthop_num;
@@ -586,7 +586,7 @@ int pim_ecmp_nexthop_search(struct pim_instance *pim,
                                        "%s: (%s,%s)(%s) selected nhop interface %s addr %s mod_val %u iter %d ecmp %d",
                                        __PRETTY_FUNCTION__, buf2, buf3,
                                        pim->vrf->name, ifp->name, buf, mod_val,
-                                       nh_iter, qpim_ecmp_enable);
+                                       nh_iter, pim->ecmp_enable);
                        }
                }
                nh_iter++;
@@ -657,11 +657,20 @@ int pim_parse_nexthop_update(int command, struct zclient *zclient,
                        nexthop = nexthop_from_zapi_nexthop(&nhr.nexthops[i]);
                        switch (nexthop->type) {
                        case NEXTHOP_TYPE_IPV4:
-                       case NEXTHOP_TYPE_IFINDEX:
                        case NEXTHOP_TYPE_IPV4_IFINDEX:
                        case NEXTHOP_TYPE_IPV6:
                        case NEXTHOP_TYPE_BLACKHOLE:
                                break;
+                       case NEXTHOP_TYPE_IFINDEX:
+                               /*
+                                * Connected route (i.e. no nexthop), use
+                                * RPF address from nexthop cache (i.e.
+                                * destination) as PIM nexthop.
+                                */
+                               nexthop->type = NEXTHOP_TYPE_IPV4;
+                               nexthop->gate.ipv4 =
+                                       pnc->rpf.rpf_addr.u.prefix4;
+                               break;
                        case NEXTHOP_TYPE_IPV6_IFINDEX:
                                ifp1 = if_lookup_by_index(nexthop->ifindex,
                                                          pim->vrf_id);
@@ -808,7 +817,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim,
        }
 
        // If PIM ECMP enable then choose ECMP path.
-       if (qpim_ecmp_enable) {
+       if (pim->ecmp_enable) {
                hash_val = pim_compute_ecmp_hash(src, grp);
                mod_val = hash_val % num_ifindex;
                if (PIM_DEBUG_PIM_NHT_DETAIL)
@@ -942,7 +951,7 @@ int pim_ecmp_fib_lookup_if_vif_index(struct pim_instance *pim,
        }
 
        // If PIM ECMP enable then choose ECMP path.
-       if (qpim_ecmp_enable) {
+       if (pim->ecmp_enable) {
                hash_val = pim_compute_ecmp_hash(src, grp);
                mod_val = hash_val % num_ifindex;
                if (PIM_DEBUG_PIM_NHT_DETAIL)
index 688bc42c3dddf3c8682bf4e7b6c8920bba00278b..862b2cc1483d987e7834d41a3d0b92232cf08e69 100644 (file)
@@ -214,10 +214,10 @@ int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty)
                                spaces);
                ++writes;
        }
-       if (qpim_ecmp_rebalance_enable) {
+       if (pim->ecmp_rebalance_enable) {
                vty_out(vty, "%sip pim ecmp rebalance\n", spaces);
                ++writes;
-       } else if (qpim_ecmp_enable) {
+       } else if (pim->ecmp_enable) {
                vty_out(vty, "%sip pim ecmp\n", spaces);
                ++writes;
        }
index 37e2bfbbfd7d7eb5f39a39989791b13a8d29d08c..9295b231cb0236ac97cc7428f1d20c32b150d8d2 100644 (file)
@@ -217,22 +217,26 @@ static int zclient_read_nexthop(struct pim_instance *pim,
                }
                switch (nexthop_type) {
                case NEXTHOP_TYPE_IFINDEX:
+                       nexthop_tab[num_ifindex].ifindex = stream_getl(s);
+                       /*
+                        * Connected route (i.e. no nexthop), use
+                        * address passed in as PIM nexthop.  This will
+                        * allow us to work in cases where we are
+                        * trying to find a route for this box.
+                        */
+                       nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET;
+                       nexthop_tab[num_ifindex].nexthop_addr.prefixlen =
+                               IPV4_MAX_BITLEN;
+                       nexthop_tab[num_ifindex].nexthop_addr.u.prefix4 =
+                               addr;
+                       ++num_ifindex;
+                       break;
                case NEXTHOP_TYPE_IPV4_IFINDEX:
                case NEXTHOP_TYPE_IPV4:
                        nexthop_tab[num_ifindex].nexthop_addr.family = AF_INET;
-                       if (nexthop_type == NEXTHOP_TYPE_IPV4_IFINDEX
-                           || nexthop_type == NEXTHOP_TYPE_IPV4) {
-                               nexthop_tab[num_ifindex]
-                                       .nexthop_addr.u.prefix4.s_addr =
-                                       stream_get_ipv4(s);
-                       } else {
-                               nexthop_tab[num_ifindex]
-                                       .nexthop_addr.u.prefix4.s_addr =
-                                       PIM_NET_INADDR_ANY;
-                       }
+                       nexthop_tab[num_ifindex].nexthop_addr.u.prefix4.s_addr =
+                               stream_get_ipv4(s);
                        nexthop_tab[num_ifindex].ifindex = stream_getl(s);
-                       nexthop_tab[num_ifindex].protocol_distance = distance;
-                       nexthop_tab[num_ifindex].route_metric = metric;
                        ++num_ifindex;
                        break;
                case NEXTHOP_TYPE_IPV6_IFINDEX:
@@ -272,19 +276,13 @@ static int zclient_read_nexthop(struct pim_instance *pim,
                        }
                        ++num_ifindex;
                        break;
-               default:
-                       /* do nothing */
-                       {
-                               char addr_str[INET_ADDRSTRLEN];
-                               pim_inet4_dump("<addr?>", addr, addr_str,
-                                              sizeof(addr_str));
-                               zlog_warn(
-                                       "%s: found non-ifindex nexthop type=%d for address %s(%s)",
-                                       __PRETTY_FUNCTION__, nexthop_type,
-                                       addr_str, pim->vrf->name);
-                       }
-                       break;
+               case NEXTHOP_TYPE_IPV6:
+               case NEXTHOP_TYPE_BLACKHOLE:
+                       /* ignore */
+                       continue;
                }
+               nexthop_tab[num_ifindex].protocol_distance = distance;
+               nexthop_tab[num_ifindex].route_metric = metric;
        }
 
        return num_ifindex;
index 551f6047d7aecc2b67420cf87514cb694c03a7cd..5f871026266ae423d21f3fa00153b80524b5df2a 100644 (file)
@@ -53,8 +53,6 @@ int qpim_t_periodic =
 struct pim_assert_metric qpim_infinite_assert_metric;
 long qpim_rpf_cache_refresh_delay_msec = 50;
 int qpim_packet_process = PIM_DEFAULT_PACKET_PROCESS;
-uint8_t qpim_ecmp_enable = 0;
-uint8_t qpim_ecmp_rebalance_enable = 0;
 struct pim_instance *pimg = NULL;
 
 int32_t qpim_register_suppress_time = PIM_REGISTER_SUPPRESSION_TIME_DEFAULT;
index bf700bfee23519366df2171a50364e4fcf86cb77..d96e3c4feee4540e778e4543d6705f76ffcb27c6 100644 (file)
@@ -2,10 +2,21 @@ import frrtest
 
 import pytest
 import platform
+import socket
 
-if platform.uname()[0] == 'SunOS':
+
+##
+# on musl, ntop compresses a single :0: -> :: which is against RFC
+##
+def inet_ntop_broken():
+    addr = '1:2:3:4:0:6:7:8'
+    return socket.inet_ntop(socket.AF_INET6,
+                            socket.inet_pton(socket.AF_INET6, addr)) != addr
+
+
+if platform.uname()[0] == 'SunOS' or inet_ntop_broken():
     class TestFuzzIsisTLV:
-        @pytest.mark.skipif(True, reason='Test unsupported on SunOS')
+        @pytest.mark.skipif(True, reason='Test unsupported')
         def test_exit_cleanly(self):
             pass
 else:
index d28c879d5760a14491c07345a6cab015ca69e948..dd680cb9f43fc743d45e491b03668a45a9f6c670 100644 (file)
@@ -1016,7 +1016,7 @@ static int vtysh_process_questionmark(const char *input, int input_len)
  * the usual vtysh's stdin interface. This is the function being registered with
  * readline() api's.
  */
-static int vtysh_rl_describe(void)
+static int vtysh_rl_describe(int a, int b)
 {
        int ret;
 
index ff5e6bb7842062b59aec257a444641800a62e7c4..ad7d072d3daa7430983aac4de233a9a0dd82bd6a 100644 (file)
@@ -52,8 +52,8 @@ static gid_t elevgid, realgid;
 #define FRR_CONFIG_NAME "frr.conf"
 
 /* Configuration file name and directory. */
-static char vtysh_config[MAXPATHLEN];
-char frr_config[MAXPATHLEN];
+static char vtysh_config[MAXPATHLEN * 3];
+char frr_config[MAXPATHLEN * 3];
 char vtydir[MAXPATHLEN];
 static char history_file[MAXPATHLEN];
 
index f153cc3510997f2226f28dee62b5aa11264a551f..e6d324ab6a64d0697e44943f31c7bdd55e620bcb 100644 (file)
@@ -619,10 +619,8 @@ static int netlink_interface(struct nlmsghdr *h, ns_id_t ns_id, int startup)
                if (linkinfo[IFLA_INFO_KIND])
                        kind = RTA_DATA(linkinfo[IFLA_INFO_KIND]);
 
-#if HAVE_DECL_IFLA_INFO_SLAVE_KIND
                if (linkinfo[IFLA_INFO_SLAVE_KIND])
                        slave_kind = RTA_DATA(linkinfo[IFLA_INFO_SLAVE_KIND]);
-#endif
 
                netlink_determine_zebra_iftype(kind, &zif_type);
        }
@@ -1137,10 +1135,8 @@ int netlink_link_change(struct nlmsghdr *h, ns_id_t ns_id, int startup)
                if (linkinfo[IFLA_INFO_KIND])
                        kind = RTA_DATA(linkinfo[IFLA_INFO_KIND]);
 
-#if HAVE_DECL_IFLA_INFO_SLAVE_KIND
                if (linkinfo[IFLA_INFO_SLAVE_KIND])
                        slave_kind = RTA_DATA(linkinfo[IFLA_INFO_SLAVE_KIND]);
-#endif
 
                netlink_determine_zebra_iftype(kind, &zif_type);
        }
index f3fa3ba94efd618e9730c46527f8c2f64f14390f..b24a4b68dc0d7b2e97fb3c4baa9e81013ac9e9c3 100644 (file)
@@ -99,7 +99,7 @@ static int relay_response_back(void)
        proto_str = zebra_route_string(proto);
 
        /* lookup the client to relay the msg to */
-       zserv = zebra_find_client(proto, instance);
+       zserv = zserv_find_client(proto, instance);
        if (!zserv) {
                zlog_err(
                        "Error relaying LM response: can't find client %s, instance %u",
@@ -350,7 +350,7 @@ void label_manager_init(char *lm_zserv_path)
 
        obuf = stream_new(ZEBRA_MAX_PACKET_SIZ);
 
-       hook_register(zapi_client_close, release_daemon_label_chunks);
+       hook_register(zserv_client_close, release_daemon_label_chunks);
 }
 
 /**
index 9c721f0a7ee9683c1752ac4c0cecd9c69d069910..c5246999fa47ebe5dcd457f44b724716259e7459 100644 (file)
@@ -37,6 +37,7 @@
 #include "logicalrouter.h"
 #include "libfrr.h"
 #include "routemap.h"
+#include "frr_pthread.h"
 
 #include "zebra/rib.h"
 #include "zebra/zserv.h"
@@ -378,8 +379,11 @@ int main(int argc, char **argv)
        /* Needed for BSD routing socket. */
        pid = getpid();
 
-       /* This must be done only after locking pidfile (bug #403). */
-       zebra_zserv_socket_init(zserv_path);
+       /* Initialize pthread library */
+       frr_pthread_init();
+
+       /* Start Zebra API server */
+       zserv_start(zserv_path);
 
        /* Init label manager */
        label_manager_init(lblmgr_path);
index fd298fd23669432d9800babcf6bddd34ae477c44..ad1fe9a1f57704432b8cc55329419cdd9279b0a3 100644 (file)
  */
 
 
-enum southbound_results {
-       SOUTHBOUND_INSTALL_SUCCESS,
-       SOUTHBOUND_INSTALL_FAILURE,
-       SOUTHBOUND_DELETE_SUCCESS,
-       SOUTHBOUND_DELETE_FAILURE,
+enum dp_results {
+       DP_INSTALL_SUCCESS,
+       DP_INSTALL_FAILURE,
+       DP_DELETE_SUCCESS,
+       DP_DELETE_FAILURE,
+};
+
+enum dp_req_result {
+       DP_REQUEST_QUEUED,
+       DP_REQUEST_SUCCESS,
+       DP_REQUEST_FAILURE,
 };
 
 /*
@@ -60,9 +66,11 @@ enum southbound_results {
  * semantics so we will end up with a delete than
  * a re-add.
  */
-extern void kernel_route_rib(struct route_node *rn, struct prefix *p,
-                            struct prefix *src_p, struct route_entry *old,
-                            struct route_entry *new);
+extern enum dp_req_result kernel_route_rib(struct route_node *rn,
+                                          struct prefix *p,
+                                          struct prefix *src_p,
+                                          struct route_entry *old,
+                                          struct route_entry *new);
 
 /*
  * So route install/failure may not be immediately known
@@ -71,7 +79,7 @@ extern void kernel_route_rib(struct route_node *rn, struct prefix *p,
  */
 extern void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
                                       struct route_entry *re,
-                                      enum southbound_results res);
+                                      enum dp_results res);
 
 extern int kernel_address_add_ipv4(struct interface *, struct connected *);
 extern int kernel_address_delete_ipv4(struct interface *, struct connected *);
@@ -82,9 +90,9 @@ extern int kernel_neigh_update(int cmd, int ifindex, uint32_t addr, char *lla,
 extern int kernel_interface_set_master(struct interface *master,
                                       struct interface *slave);
 
-extern void kernel_add_lsp(zebra_lsp_t *lsp);
-extern void kernel_upd_lsp(zebra_lsp_t *lsp);
-extern void kernel_del_lsp(zebra_lsp_t *lsp);
+extern enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp);
+extern enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp);
+extern enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp);
 
 /*
  * Add the ability to pass back up the lsp install/delete
@@ -95,7 +103,7 @@ extern void kernel_del_lsp(zebra_lsp_t *lsp);
  * the install/failure to set/unset flags and to notify
  * as needed.
  */
-extern void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum southbound_results res);
+extern void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum dp_results res);
 
 extern int mpls_kernel_init(void);
 
index e9b3e59d0e6581dc3ebdc65cef3584d16c285172..a5f288f54117f155eb1d8b652c0f9dd27e4d4528 100644 (file)
@@ -1689,9 +1689,11 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in)
        return suc;
 }
 
-void kernel_route_rib(struct route_node *rn, struct prefix *p,
-                     struct prefix *src_p, struct route_entry *old,
-                     struct route_entry *new)
+enum dp_req_result kernel_route_rib(struct route_node *rn,
+                                   struct prefix *p,
+                                   struct prefix *src_p,
+                                   struct route_entry *old,
+                                   struct route_entry *new)
 {
        int ret = 0;
 
@@ -1721,18 +1723,20 @@ void kernel_route_rib(struct route_node *rn, struct prefix *p,
                                                      new, 0);
                }
                kernel_route_rib_pass_fail(rn, p, new,
-                                          (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
-                                                 : SOUTHBOUND_INSTALL_FAILURE);
-               return;
+                                          (!ret) ? DP_INSTALL_SUCCESS
+                                                 : DP_INSTALL_FAILURE);
+               return DP_REQUEST_SUCCESS;
        }
 
        if (old) {
                ret = netlink_route_multipath(RTM_DELROUTE, p, src_p, old, 0);
 
                kernel_route_rib_pass_fail(rn, p, old,
-                                          (!ret) ? SOUTHBOUND_DELETE_SUCCESS
-                                                 : SOUTHBOUND_DELETE_FAILURE);
+                                          (!ret) ? DP_DELETE_SUCCESS
+                                                 : DP_DELETE_FAILURE);
        }
+
+       return DP_REQUEST_SUCCESS;
 }
 
 int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
index f9cd1cbab0ff2097fbcab68e4b1db8cef9111bae..3380f225c0a35b7dec80f7e0d8782752d803c104 100644 (file)
@@ -385,9 +385,11 @@ static int kernel_rtm(int cmd, struct prefix *p, struct route_entry *re)
        return 0;
 }
 
-void kernel_route_rib(struct route_node *rn, struct prefix *p,
-                     struct prefix *src_p, struct route_entry *old,
-                     struct route_entry *new)
+enum dp_req_result kernel_route_rib(struct route_node *rn,
+                                   struct prefix *p,
+                                   struct prefix *src_p,
+                                   struct route_entry *old,
+                                   struct route_entry *new)
 {
        int route = 0;
 
@@ -411,14 +413,16 @@ void kernel_route_rib(struct route_node *rn, struct prefix *p,
        if (new) {
                kernel_route_rib_pass_fail(
                        rn, p, new,
-                       (!route) ? SOUTHBOUND_INSTALL_SUCCESS
-                                : SOUTHBOUND_INSTALL_FAILURE);
+                       (!route) ? DP_INSTALL_SUCCESS
+                                : DP_INSTALL_FAILURE);
        } else {
                kernel_route_rib_pass_fail(rn, p, old,
                                           (!route)
-                                                  ? SOUTHBOUND_DELETE_SUCCESS
-                                                  : SOUTHBOUND_DELETE_FAILURE);
+                                                  ? DP_DELETE_SUCCESS
+                                                  : DP_DELETE_FAILURE);
        }
+
+       return DP_REQUEST_SUCCESS;
 }
 
 int kernel_neigh_update(int add, int ifindex, uint32_t addr, char *lla,
index f0ed8f2f5d2a57d0e15af1c4ba61aa858c5da54d..bcffdf47221e9980a4eb7d18f076322112db7d0e 100644 (file)
@@ -142,27 +142,31 @@ static int netlink_rule_update(int cmd, struct zebra_pbr_rule *rule)
  * goes in the rule to denote relative ordering; it may or may not be the
  * same as the rule's user-defined sequence number.
  */
-void kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
+enum dp_req_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
 {
        int ret = 0;
 
        ret = netlink_rule_update(RTM_NEWRULE, rule);
        kernel_pbr_rule_add_del_status(rule,
-                                      (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
-                                             : SOUTHBOUND_INSTALL_FAILURE);
+                                      (!ret) ? DP_INSTALL_SUCCESS
+                                             : DP_INSTALL_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
 /*
  * Uninstall specified rule for a specific interface.
  */
-void kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
+enum dp_req_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
 {
        int ret = 0;
 
        ret = netlink_rule_update(RTM_DELRULE, rule);
        kernel_pbr_rule_add_del_status(rule,
-                                      (!ret) ? SOUTHBOUND_DELETE_SUCCESS
-                                             : SOUTHBOUND_DELETE_FAILURE);
+                                      (!ret) ? DP_DELETE_SUCCESS
+                                             : DP_DELETE_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
 /*
index 46c53f9e02247730288fac16f8561dfed02d39eb..ecd642d80786b76d6aa75d7dab36e152d2114d8e 100644 (file)
 #include "zebra/rule_netlink.h"
 #include "zebra/zebra_pbr.h"
 
-void kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
+enum dp_req_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule)
 {
+       zlog_err("%s not Implemented for this platform", __PRETTY_FUNCTION__);
+       return DP_REQUEST_FAILURE;
 }
-void kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
+
+enum dp_req_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule)
 {
+       zlog_err("%s not Implemented for this platform", __PRETTY_FUNCTION__);
+       return DP_REQUEST_FAILURE;
 }
 
 #endif
index cb8c384436977bb44228e598f2d5c710f0973ad2..5bcc2c40d6d085a6f5568d89fe565d2ee56f604d 100644 (file)
@@ -78,7 +78,7 @@ void table_manager_enable(ns_id_t ns_id)
                return;
        tbl_mgr.lc_list = list_new();
        tbl_mgr.lc_list->del = delete_table_chunk;
-       hook_register(zapi_client_close, release_daemon_table_chunks);
+       hook_register(zserv_client_close, release_daemon_table_chunks);
 }
 
 /**
index 943329b1962bc0d3b6121d07f36394e94dff0ffd..b17bbc95c26ceab0b2fa6f6bebeb002a797420f8 100644 (file)
@@ -162,7 +162,7 @@ int zsend_interface_add(struct zserv *client, struct interface *ifp)
        zserv_encode_interface(s, ifp);
 
        client->ifadd_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* Interface deletion from zebra daemon. */
@@ -174,7 +174,7 @@ int zsend_interface_delete(struct zserv *client, struct interface *ifp)
        zserv_encode_interface(s, ifp);
 
        client->ifdel_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 int zsend_vrf_add(struct zserv *client, struct zebra_vrf *zvrf)
@@ -185,7 +185,7 @@ int zsend_vrf_add(struct zserv *client, struct zebra_vrf *zvrf)
        zserv_encode_vrf(s, zvrf);
 
        client->vrfadd_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* VRF deletion from zebra daemon. */
@@ -198,7 +198,7 @@ int zsend_vrf_delete(struct zserv *client, struct zebra_vrf *zvrf)
        zserv_encode_vrf(s, zvrf);
 
        client->vrfdel_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 int zsend_interface_link_params(struct zserv *client, struct interface *ifp)
@@ -230,7 +230,7 @@ int zsend_interface_link_params(struct zserv *client, struct interface *ifp)
        /* Write packet size. */
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* Interface address is added/deleted. Send ZEBRA_INTERFACE_ADDRESS_ADD or
@@ -309,7 +309,7 @@ int zsend_interface_address(int cmd, struct zserv *client,
        stream_putw_at(s, 0, stream_get_endp(s));
 
        client->connected_rt_add_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 static int zsend_interface_nbr_address(int cmd, struct zserv *client,
@@ -340,7 +340,7 @@ static int zsend_interface_nbr_address(int cmd, struct zserv *client,
        /* Write packet size. */
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* Interface address addition. */
@@ -438,7 +438,7 @@ int zsend_interface_vrf_update(struct zserv *client, struct interface *ifp,
        stream_putw_at(s, 0, stream_get_endp(s));
 
        client->if_vrfchg_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* Add new nbr connected IPv6 address */
@@ -511,7 +511,7 @@ int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp)
        else
                client->ifdown_cnt++;
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 int zsend_redistribute_route(int cmd, struct zserv *client, struct prefix *p,
@@ -602,7 +602,7 @@ int zsend_redistribute_route(int cmd, struct zserv *client, struct prefix *p,
                           zebra_route_string(api.type), api.vrf_id,
                           buf_prefix);
        }
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -655,7 +655,7 @@ static int zsend_ipv4_nexthop_lookup_mrib(struct zserv *client,
 
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
@@ -665,7 +665,7 @@ int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
        struct stream *s;
        uint8_t blen;
 
-       client = zebra_find_client(re->type, re->instance);
+       client = zserv_find_client(re->type, re->instance);
        if (!client || !client->notify_owner) {
                if (IS_ZEBRA_DEBUG_PACKET) {
                        char buff[PREFIX_STRLEN];
@@ -703,7 +703,7 @@ int zsend_route_notify_owner(struct route_entry *re, struct prefix *p,
 
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
@@ -739,7 +739,7 @@ void zsend_rule_notify_owner(struct zebra_pbr_rule *rule,
 
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       zebra_server_send_message(client, s);
+       zserv_send_message(client, s);
 }
 
 void zsend_ipset_notify_owner(struct zebra_pbr_ipset *ipset,
@@ -769,7 +769,7 @@ void zsend_ipset_notify_owner(struct zebra_pbr_ipset *ipset,
        stream_put(s, ipset->ipset_name, ZEBRA_IPSET_NAME_SIZE);
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       zebra_server_send_message(client, s);
+       zserv_send_message(client, s);
 }
 
 void zsend_ipset_entry_notify_owner(struct zebra_pbr_ipset_entry *ipset,
@@ -799,7 +799,7 @@ void zsend_ipset_entry_notify_owner(struct zebra_pbr_ipset_entry *ipset,
        stream_put(s, ipset->backpointer->ipset_name, ZEBRA_IPSET_NAME_SIZE);
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       zebra_server_send_message(client, s);
+       zserv_send_message(client, s);
 }
 
 void zsend_iptable_notify_owner(struct zebra_pbr_iptable *iptable,
@@ -828,7 +828,7 @@ void zsend_iptable_notify_owner(struct zebra_pbr_iptable *iptable,
        stream_putl(s, iptable->unique);
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       zebra_server_send_message(client, s);
+       zserv_send_message(client, s);
 }
 
 /* Router-id is updated. Send ZEBRA_ROUTER_ID_ADD to client. */
@@ -855,7 +855,7 @@ int zsend_router_id_update(struct zserv *client, struct prefix *p,
        /* Write packet size. */
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -873,7 +873,7 @@ int zsend_pw_update(struct zserv *client, struct zebra_pw *pw)
        /* Put length at the first point of the stream. */
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* Send response to a get label chunk request to client */
@@ -952,7 +952,7 @@ static int zsend_assign_table_chunk_response(struct zserv *client,
        /* Write packet size. */
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 static int zsend_table_manager_connect_response(struct zserv *client,
@@ -968,7 +968,7 @@ static int zsend_table_manager_connect_response(struct zserv *client,
 
        stream_putw_at(s, 0, stream_get_endp(s));
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* Inbound message handling ------------------------------------------------ */
@@ -2194,7 +2194,7 @@ static void zsend_capabilities(struct zserv *client, struct zebra_vrf *zvrf)
        stream_putl(s, multipath_num);
 
        stream_putw_at(s, 0, stream_get_endp(s));
-       zebra_server_send_message(client, s);
+       zserv_send_message(client, s);
 }
 
 /* Tie up route-type and client->sock */
@@ -3017,14 +3017,53 @@ void (*zserv_handlers[])(ZAPI_HANDLER_ARGS) = {
        [ZEBRA_IPTABLE_DELETE] = zread_iptable,
 };
 
-void zserv_handle_commands(struct zserv *client, struct zmsghdr *hdr,
-                          struct stream *msg, struct zebra_vrf *zvrf)
+#if defined(HANDLE_ZAPI_FUZZING)
+extern struct zebra_privs_t zserv_privs;
+
+static void zserv_write_incoming(struct stream *orig, uint16_t command)
 {
-       if (hdr->command > array_size(zserv_handlers)
-           || zserv_handlers[hdr->command] == NULL)
-               zlog_info("Zebra received unknown command %d", hdr->command);
-       else
-               zserv_handlers[hdr->command](client, hdr, msg, zvrf);
+       char fname[MAXPATHLEN];
+       struct stream *copy;
+       int fd = -1;
+
+       copy = stream_dup(orig);
+       stream_set_getp(copy, 0);
+
+       zserv_privs.change(ZPRIVS_RAISE);
+       snprintf(fname, MAXPATHLEN, "%s/%u", DAEMON_VTY_DIR, command);
+       fd = open(fname, O_CREAT | O_WRONLY | O_EXCL, 0644);
+       stream_flush(copy, fd);
+       close(fd);
+       zserv_privs.change(ZPRIVS_LOWER);
+       stream_free(copy);
+}
+#endif
+
+void zserv_handle_commands(struct zserv *client, struct stream *msg)
+{
+       struct zmsghdr hdr;
+       struct zebra_vrf *zvrf;
 
-       stream_free(msg);
+       zapi_parse_header(msg, &hdr);
+
+#if defined(HANDLE_ZAPI_FUZZING)
+       zserv_write_incoming(msg, hdr.command);
+#endif
+
+       hdr.length -= ZEBRA_HEADER_SIZE;
+
+       /* lookup vrf */
+       zvrf = zebra_vrf_lookup_by_id(hdr.vrf_id);
+       if (!zvrf) {
+               if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
+                       zlog_warn("ZAPI message specifies unknown VRF: %d",
+                                 hdr.vrf_id);
+               return;
+       }
+
+       if (hdr.command > array_size(zserv_handlers)
+           || zserv_handlers[hdr.command] == NULL)
+               zlog_info("Zebra received unknown command %d", hdr.command);
+       else
+               zserv_handlers[hdr.command](client, &hdr, msg, zvrf);
 }
index 1658c9852d388daff40f4bb6a503306275eb65e0..f27897580a98d16e17a23092b91c3ccc15f93684 100644 (file)
  * client
  *    the client datastructure
  *
- * hdr
- *    the message header
- *
  * msg
- *    the message contents, without the header
- *
- * zvrf
- *    the vrf
+ *    the message
  */
-extern void zserv_handle_commands(struct zserv *client, struct zmsghdr *hdr,
-                                 struct stream *msg, struct zebra_vrf *zvrf);
+extern void zserv_handle_commands(struct zserv *client, struct stream *msg);
 
 extern int zsend_vrf_add(struct zserv *zclient, struct zebra_vrf *zvrf);
 extern int zsend_vrf_delete(struct zserv *zclient, struct zebra_vrf *zvrf);
index 3ad640653f92a972568ebbc69d98707c22b71895..fe0837a63a75b6767de74b34bd086b6990fa5a17 100644 (file)
@@ -463,7 +463,7 @@ static int fec_send(zebra_fec_t *fec, struct zserv *client)
        stream_put_prefix(s, &rn->p);
        stream_putl(s, fec->label);
        stream_putw_at(s, 0, stream_get_endp(s));
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -860,7 +860,7 @@ static void lsp_uninstall_from_kernel(struct hash_backet *backet, void *ctxt)
 
        lsp = (zebra_lsp_t *)backet->data;
        if (CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED))
-               kernel_del_lsp(lsp);
+               (void)kernel_del_lsp(lsp);
 }
 
 /*
@@ -914,17 +914,31 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data)
                if (newbest) {
 
                        UNSET_FLAG(lsp->flags, LSP_FLAG_CHANGED);
-                       kernel_add_lsp(lsp);
-
-                       zvrf->lsp_installs++;
+                       switch (kernel_add_lsp(lsp)) {
+                       case DP_REQUEST_QUEUED:
+                               zlog_err("No current DataPlane interfaces can return this, please fix");
+                               break;
+                       case DP_REQUEST_FAILURE:
+                               break;
+                       case DP_REQUEST_SUCCESS:
+                               zvrf->lsp_installs++;
+                               break;
+                       }
                }
        } else {
                /* Installed, may need an update and/or delete. */
                if (!newbest) {
 
-                       kernel_del_lsp(lsp);
-
-                       zvrf->lsp_removals++;
+                       switch (kernel_del_lsp(lsp)) {
+                       case DP_REQUEST_QUEUED:
+                               zlog_err("No current DataPlane interfaces can return this, please fix");
+                               break;
+                       case DP_REQUEST_FAILURE:
+                               break;
+                       case DP_REQUEST_SUCCESS:
+                               zvrf->lsp_removals++;
+                               break;
+                       }
                } else if (CHECK_FLAG(lsp->flags, LSP_FLAG_CHANGED)) {
                        zebra_nhlfe_t *nhlfe;
                        struct nexthop *nexthop;
@@ -953,9 +967,16 @@ static wq_item_status lsp_process(struct work_queue *wq, void *data)
                                }
                        }
 
-                       kernel_upd_lsp(lsp);
-
-                       zvrf->lsp_installs++;
+                       switch (kernel_upd_lsp(lsp)) {
+                       case DP_REQUEST_QUEUED:
+                               zlog_err("No current DataPlane interfaces can return this, please fix");
+                               break;
+                       case DP_REQUEST_FAILURE:
+                               break;
+                       case DP_REQUEST_SUCCESS:
+                               zvrf->lsp_installs++;
+                               break;
+                       }
                }
        }
 
@@ -1687,7 +1708,7 @@ static int mpls_processq_init(struct zebra_t *zebra)
 
 /* Public functions */
 
-void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum southbound_results res)
+void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum dp_results res)
 {
        struct nexthop *nexthop;
        zebra_nhlfe_t *nhlfe;
@@ -1696,12 +1717,12 @@ void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum southbound_results res)
                return;
 
        switch (res) {
-       case SOUTHBOUND_INSTALL_FAILURE:
+       case DP_INSTALL_FAILURE:
                UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
                clear_nhlfe_installed(lsp);
                zlog_warn("LSP Install Failure: %u", lsp->ile.in_label);
                break;
-       case SOUTHBOUND_INSTALL_SUCCESS:
+       case DP_INSTALL_SUCCESS:
                SET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
                for (nhlfe = lsp->nhlfe_list; nhlfe; nhlfe = nhlfe->next) {
                        nexthop = nhlfe->nexthop;
@@ -1712,11 +1733,11 @@ void kernel_lsp_pass_fail(zebra_lsp_t *lsp, enum southbound_results res)
                        SET_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB);
                }
                break;
-       case SOUTHBOUND_DELETE_SUCCESS:
+       case DP_DELETE_SUCCESS:
                UNSET_FLAG(lsp->flags, LSP_FLAG_INSTALLED);
                clear_nhlfe_installed(lsp);
                break;
-       case SOUTHBOUND_DELETE_FAILURE:
+       case DP_DELETE_FAILURE:
                zlog_warn("LSP Deletion Failure: %u", lsp->ile.in_label);
                break;
        }
@@ -2916,5 +2937,5 @@ void zebra_mpls_init(void)
        if (!mpls_processq_init(&zebrad))
                mpls_enabled = 1;
 
-       hook_register(zapi_client_close, zebra_mpls_cleanup_fecs_for_client);
+       hook_register(zserv_client_close, zebra_mpls_cleanup_fecs_for_client);
 }
index d7c231c37e0ff7fcf66d73fce11d6c3e7fa90f6f..245a7717845576692800f7c094cb3fa068b270c1 100644 (file)
 /*
  * Install Label Forwarding entry into the kernel.
  */
-void kernel_add_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp)
 {
        int ret;
 
        if (!lsp || !lsp->best_nhlfe) { // unexpected
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        ret = netlink_mpls_multipath(RTM_NEWROUTE, lsp);
 
        kernel_lsp_pass_fail(lsp,
-                            (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
-                                   : SOUTHBOUND_INSTALL_FAILURE);
+                            (!ret) ? DP_INSTALL_SUCCESS
+                                   : DP_INSTALL_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
 /*
@@ -56,44 +58,48 @@ void kernel_add_lsp(zebra_lsp_t *lsp)
  * through the metric field (before kernel-MPLS). This shouldn't be an issue
  * any longer, so REPLACE can be reintroduced.
  */
-void kernel_upd_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp)
 {
        int ret;
 
        if (!lsp || !lsp->best_nhlfe) { // unexpected
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        ret = netlink_mpls_multipath(RTM_NEWROUTE, lsp);
 
        kernel_lsp_pass_fail(lsp,
-                            (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
-                                   : SOUTHBOUND_INSTALL_FAILURE);
+                            (!ret) ? DP_INSTALL_SUCCESS
+                                   : DP_INSTALL_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
 /*
  * Delete Label Forwarding entry from the kernel.
  */
-void kernel_del_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp)
 {
        int ret;
 
        if (!lsp) { // unexpected
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        if (!CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED)) {
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        ret = netlink_mpls_multipath(RTM_DELROUTE, lsp);
 
        kernel_lsp_pass_fail(lsp,
-                            (!ret) ? SOUTHBOUND_DELETE_SUCCESS
-                                   : SOUTHBOUND_DELETE_FAILURE);
+                            (!ret) ? DP_DELETE_SUCCESS
+                                   : DP_DELETE_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
 int mpls_kernel_init(void)
index 6b5318325d2c64f83aa2b798650f7e392ede3322..d1371d3343b9fed90a0e9f1c82b0368b24eae715 100644 (file)
 
 #if !defined(HAVE_NETLINK) && !defined(OPEN_BSD)
 
-void kernel_add_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp)
 {
-       return;
+       return DP_REQUEST_SUCCESS;
 }
-void kernel_upd_lsp(zebra_lsp_t *lsp)
+
+enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp)
 {
-       return;
+       return DP_REQUEST_SUCCESS;
 }
-void kernel_del_lsp(zebra_lsp_t *lsp)
+
+enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp)
 {
-       return;
+       return DP_REQUEST_SUCCESS;
 }
+
 int mpls_kernel_init(void)
 {
        return -1;
index 2d75353c71ff4b0901661067c099d36c59f9e44c..412fe7d3dd3f440b4cc9667b3aefa1f523838efe 100644 (file)
@@ -285,58 +285,62 @@ static int kernel_lsp_cmd(int action, zebra_lsp_t *lsp)
        return (0);
 }
 
-void kernel_add_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_add_lsp(zebra_lsp_t *lsp)
 {
        int ret;
 
        if (!lsp || !lsp->best_nhlfe) { // unexpected
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        ret = kernel_lsp_cmd(RTM_ADD, lsp);
 
        kernel_lsp_pass_fail(lsp,
-                            (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
-                                   : SOUTHBOUND_INSTALL_FAILURE);
+                            (!ret) ? DP_INSTALL_SUCCESS
+                                   : DP_INSTALL_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
-void kernel_upd_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_upd_lsp(zebra_lsp_t *lsp)
 {
        int ret;
 
        if (!lsp || !lsp->best_nhlfe) { // unexpected
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_INSTALL_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_INSTALL_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        ret = kernel_lsp_cmd(RTM_CHANGE, lsp);
 
        kernel_lsp_pass_fail(lsp,
-                            (!ret) ? SOUTHBOUND_INSTALL_SUCCESS
-                                   : SOUTHBOUND_INSTALL_FAILURE);
-       return;
+                            (!ret) ? DP_INSTALL_SUCCESS
+                                   : DP_INSTALL_FAILURE);
+       return DP_REQUEST_SUCCESS;
 }
 
-void kernel_del_lsp(zebra_lsp_t *lsp)
+enum dp_req_result kernel_del_lsp(zebra_lsp_t *lsp)
 {
        int ret;
 
        if (!lsp) { // unexpected
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        if (!CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED)) {
-               kernel_lsp_pass_fail(lsp, SOUTHBOUND_DELETE_FAILURE);
-               return;
+               kernel_lsp_pass_fail(lsp, DP_DELETE_FAILURE);
+               return DP_REQUEST_FAILURE;
        }
 
        ret = kernel_lsp_cmd(RTM_DELETE, lsp);
 
        kernel_lsp_pass_fail(lsp,
-                            (!ret) ? SOUTHBOUND_DELETE_SUCCESS
-                                   : SOUTHBOUND_DELETE_FAILURE);
+                            (!ret) ? DP_DELETE_SUCCESS
+                                   : DP_DELETE_FAILURE);
+
+       return DP_REQUEST_SUCCESS;
 }
 
 static int kmpw_install(struct zebra_pw *pw)
index 042bd3769e774c1fcaea995aeb46ddf764bd9325..3af3cd5bb2fc72ef91efd76cc52c76ed34a9cb05 100644 (file)
@@ -67,5 +67,5 @@ stream_failure:
        stream_putl(s, suc);
 
        stream_putw_at(s, 0, stream_get_endp(s));
-       zebra_server_send_message(client, s);
+       zserv_send_message(client, s);
 }
index 6a42aaecb464db307dc2bc65e31b374a5f56965d..d511c8c6ec1479e4eb9d2262dad25b8107944afc 100644 (file)
@@ -30,6 +30,7 @@
 #include "zebra/rt.h"
 #include "zebra/zapi_msg.h"
 #include "zebra/zebra_memory.h"
+#include "zebra/zserv.h"
 
 /* definitions */
 DEFINE_MTYPE_STATIC(ZEBRA, PBR_IPTABLE_IFNAME, "PBR interface list")
@@ -78,7 +79,7 @@ void zebra_pbr_rules_free(void *arg)
 
        rule = (struct zebra_pbr_rule *)arg;
 
-       kernel_del_pbr_rule(rule);
+       (void)kernel_del_pbr_rule(rule);
        XFREE(MTYPE_TMP, rule);
 }
 
@@ -368,7 +369,7 @@ void zebra_pbr_add_rule(struct zebra_ns *zns, struct zebra_pbr_rule *rule)
                pbr_rule_lookup_unique(zns, rule->rule.unique, rule->ifp);
 
        (void)hash_get(zns->rules_hash, rule, pbr_rule_alloc_intern);
-       kernel_add_pbr_rule(rule);
+       (void)kernel_add_pbr_rule(rule);
        /*
         * Rule Replace semantics, if we have an old, install the
         * new rule, look above, and then delete the old
@@ -382,7 +383,7 @@ void zebra_pbr_del_rule(struct zebra_ns *zns, struct zebra_pbr_rule *rule)
        struct zebra_pbr_rule *lookup;
 
        lookup = hash_lookup(zns->rules_hash, rule);
-       kernel_del_pbr_rule(rule);
+       (void)kernel_del_pbr_rule(rule);
 
        if (lookup) {
                hash_release(zns->rules_hash, lookup);
@@ -399,7 +400,7 @@ static void zebra_pbr_cleanup_rules(struct hash_backet *b, void *data)
        int *sock = data;
 
        if (rule->sock == *sock) {
-               kernel_del_pbr_rule(rule);
+               (void)kernel_del_pbr_rule(rule);
                hash_release(zns->rules_hash, rule);
                XFREE(MTYPE_TMP, rule);
        }
@@ -463,7 +464,7 @@ static int zebra_pbr_client_close_cleanup(struct zserv *client)
 
 void zebra_pbr_init(void)
 {
-       hook_register(zapi_client_close, zebra_pbr_client_close_cleanup);
+       hook_register(zserv_client_close, zebra_pbr_client_close_cleanup);
 }
 
 static void *pbr_ipset_alloc_intern(void *arg)
@@ -489,8 +490,8 @@ void zebra_pbr_create_ipset(struct zebra_ns *zns,
        ret = hook_call(zebra_pbr_ipset_wrap_script_update,
                  zns, 1, ipset);
        kernel_pbr_ipset_add_del_status(ipset,
-                                       ret ? SOUTHBOUND_INSTALL_SUCCESS
-                                       : SOUTHBOUND_INSTALL_FAILURE);
+                                       ret ? DP_INSTALL_SUCCESS
+                                       : DP_INSTALL_FAILURE);
 }
 
 void zebra_pbr_destroy_ipset(struct zebra_ns *zns,
@@ -573,8 +574,8 @@ void zebra_pbr_add_ipset_entry(struct zebra_ns *zns,
        ret = hook_call(zebra_pbr_ipset_entry_wrap_script_update,
                  zns, 1, ipset);
        kernel_pbr_ipset_entry_add_del_status(ipset,
-                                       ret ? SOUTHBOUND_INSTALL_SUCCESS
-                                       : SOUTHBOUND_INSTALL_FAILURE);
+                                       ret ? DP_INSTALL_SUCCESS
+                                       : DP_INSTALL_FAILURE);
 }
 
 void zebra_pbr_del_ipset_entry(struct zebra_ns *zns,
@@ -616,8 +617,8 @@ void zebra_pbr_add_iptable(struct zebra_ns *zns,
                       pbr_iptable_alloc_intern);
        ret = hook_call(zebra_pbr_iptable_wrap_script_update, zns, 1, iptable);
        kernel_pbr_iptable_add_del_status(iptable,
-                                         ret ? SOUTHBOUND_INSTALL_SUCCESS
-                                         : SOUTHBOUND_INSTALL_FAILURE);
+                                         ret ? DP_INSTALL_SUCCESS
+                                         : DP_INSTALL_FAILURE);
 }
 
 void zebra_pbr_del_iptable(struct zebra_ns *zns,
@@ -648,19 +649,19 @@ void zebra_pbr_del_iptable(struct zebra_ns *zns,
  * Handle success or failure of rule (un)install in the kernel.
  */
 void kernel_pbr_rule_add_del_status(struct zebra_pbr_rule *rule,
-                                   enum southbound_results res)
+                                   enum dp_results res)
 {
        switch (res) {
-       case SOUTHBOUND_INSTALL_SUCCESS:
+       case DP_INSTALL_SUCCESS:
                zsend_rule_notify_owner(rule, ZAPI_RULE_INSTALLED);
                break;
-       case SOUTHBOUND_INSTALL_FAILURE:
+       case DP_INSTALL_FAILURE:
                zsend_rule_notify_owner(rule, ZAPI_RULE_FAIL_INSTALL);
                break;
-       case SOUTHBOUND_DELETE_SUCCESS:
+       case DP_DELETE_SUCCESS:
                zsend_rule_notify_owner(rule, ZAPI_RULE_REMOVED);
                break;
-       case SOUTHBOUND_DELETE_FAILURE:
+       case DP_DELETE_FAILURE:
                zsend_rule_notify_owner(rule, ZAPI_RULE_FAIL_REMOVE);
                break;
        }
@@ -670,19 +671,19 @@ void kernel_pbr_rule_add_del_status(struct zebra_pbr_rule *rule,
  * Handle success or failure of ipset (un)install in the kernel.
  */
 void kernel_pbr_ipset_add_del_status(struct zebra_pbr_ipset *ipset,
-                                   enum southbound_results res)
+                                   enum dp_results res)
 {
        switch (res) {
-       case SOUTHBOUND_INSTALL_SUCCESS:
+       case DP_INSTALL_SUCCESS:
                zsend_ipset_notify_owner(ipset, ZAPI_IPSET_INSTALLED);
                break;
-       case SOUTHBOUND_INSTALL_FAILURE:
+       case DP_INSTALL_FAILURE:
                zsend_ipset_notify_owner(ipset, ZAPI_IPSET_FAIL_INSTALL);
                break;
-       case SOUTHBOUND_DELETE_SUCCESS:
+       case DP_DELETE_SUCCESS:
                zsend_ipset_notify_owner(ipset, ZAPI_IPSET_REMOVED);
                break;
-       case SOUTHBOUND_DELETE_FAILURE:
+       case DP_DELETE_FAILURE:
                zsend_ipset_notify_owner(ipset, ZAPI_IPSET_FAIL_REMOVE);
                break;
        }
@@ -693,22 +694,22 @@ void kernel_pbr_ipset_add_del_status(struct zebra_pbr_ipset *ipset,
  */
 void kernel_pbr_ipset_entry_add_del_status(
                        struct zebra_pbr_ipset_entry *ipset,
-                       enum southbound_results res)
+                       enum dp_results res)
 {
        switch (res) {
-       case SOUTHBOUND_INSTALL_SUCCESS:
+       case DP_INSTALL_SUCCESS:
                zsend_ipset_entry_notify_owner(ipset,
                                               ZAPI_IPSET_ENTRY_INSTALLED);
                break;
-       case SOUTHBOUND_INSTALL_FAILURE:
+       case DP_INSTALL_FAILURE:
                zsend_ipset_entry_notify_owner(ipset,
                                               ZAPI_IPSET_ENTRY_FAIL_INSTALL);
                break;
-       case SOUTHBOUND_DELETE_SUCCESS:
+       case DP_DELETE_SUCCESS:
                zsend_ipset_entry_notify_owner(ipset,
                                               ZAPI_IPSET_ENTRY_REMOVED);
                break;
-       case SOUTHBOUND_DELETE_FAILURE:
+       case DP_DELETE_FAILURE:
                zsend_ipset_entry_notify_owner(ipset,
                                               ZAPI_IPSET_ENTRY_FAIL_REMOVE);
                break;
@@ -719,20 +720,20 @@ void kernel_pbr_ipset_entry_add_del_status(
  * Handle success or failure of ipset (un)install in the kernel.
  */
 void kernel_pbr_iptable_add_del_status(struct zebra_pbr_iptable *iptable,
-                                      enum southbound_results res)
+                                      enum dp_results res)
 {
        switch (res) {
-       case SOUTHBOUND_INSTALL_SUCCESS:
+       case DP_INSTALL_SUCCESS:
                zsend_iptable_notify_owner(iptable, ZAPI_IPTABLE_INSTALLED);
                break;
-       case SOUTHBOUND_INSTALL_FAILURE:
+       case DP_INSTALL_FAILURE:
                zsend_iptable_notify_owner(iptable, ZAPI_IPTABLE_FAIL_INSTALL);
                break;
-       case SOUTHBOUND_DELETE_SUCCESS:
+       case DP_DELETE_SUCCESS:
                zsend_iptable_notify_owner(iptable,
                                           ZAPI_IPTABLE_REMOVED);
                break;
-       case SOUTHBOUND_DELETE_FAILURE:
+       case DP_DELETE_FAILURE:
                zsend_iptable_notify_owner(iptable,
                                           ZAPI_IPTABLE_FAIL_REMOVE);
                break;
index 31fc553581f8911f2f804bf8a72feb3d0c68fb5f..6cbafd6daaed1dd771c41395d6e8fb40344c3e69 100644 (file)
@@ -162,37 +162,37 @@ void zebra_pbr_del_iptable(struct zebra_ns *zns,
  * forwarding plane may not coincide, hence the API requires a separate
  * rule priority - maps to preference/FRA_PRIORITY on Linux.
  */
-extern void kernel_add_pbr_rule(struct zebra_pbr_rule *rule);
+extern enum dp_req_result kernel_add_pbr_rule(struct zebra_pbr_rule *rule);
 
 /*
  * Uninstall specified rule for a specific interface.
  */
-extern void kernel_del_pbr_rule(struct zebra_pbr_rule *rule);
+extern enum dp_req_result kernel_del_pbr_rule(struct zebra_pbr_rule *rule);
 
 /*
  * Get to know existing PBR rules in the kernel - typically called at startup.
  */
 extern void kernel_read_pbr_rules(struct zebra_ns *zns);
 
-enum southbound_results;
+enum dp_results;
 /*
  * Handle success or failure of rule (un)install in the kernel.
  */
 extern void kernel_pbr_rule_add_del_status(struct zebra_pbr_rule *rule,
-                                          enum southbound_results res);
+                                          enum dp_results res);
 
 /*
  * Handle success or failure of ipset kinds (un)install in the kernel.
  */
 extern void kernel_pbr_ipset_add_del_status(struct zebra_pbr_ipset *ipset,
-                                          enum southbound_results res);
+                                          enum dp_results res);
 
 extern void kernel_pbr_ipset_entry_add_del_status(
                                struct zebra_pbr_ipset_entry *ipset,
-                               enum southbound_results res);
+                               enum dp_results res);
 
 extern void kernel_pbr_iptable_add_del_status(struct zebra_pbr_iptable *iptable,
-                             enum southbound_results res);
+                             enum dp_results res);
 
 /*
  * Handle rule delete notification from kernel.
index d20f93f521a8bdf65513b6058aa64c013dea8dbc..8c23bf34cf1670c080003fafe9dc4ece86d73fb9 100644 (file)
@@ -126,7 +126,7 @@ void zebra_ptm_init(void)
 
        ptm_cb.ptm_sock = -1;
 
-       hook_register(zapi_client_close, zebra_ptm_bfd_client_deregister);
+       hook_register(zserv_client_close, zebra_ptm_bfd_client_deregister);
 }
 
 void zebra_ptm_finish(void)
index 74771476987f4b9418a2d6ca0f36bb33eef25804..815f61d157a083b4dd42789ea1e35053d6125aaf 100644 (file)
@@ -66,7 +66,7 @@ static int zsend_interface_bfd_update(int cmd, struct zserv *client,
        stream_putw_at(s, 0, stream_get_endp(s));
 
        client->if_bfd_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 void zebra_interface_bfd_update(struct interface *ifp, struct prefix *dp,
@@ -101,7 +101,7 @@ static int zsend_bfd_peer_replay(int cmd, struct zserv *client)
        stream_putw_at(s, 0, stream_get_endp(s));
 
        client->bfd_peer_replay_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 void zebra_bfd_peer_replay_req(void)
index 28e09fe1934470826c2898bf76c2c5df4bb5cb08..bf76f7e86b7d32758764e8110fc17e2e230f79ad 100644 (file)
@@ -292,7 +292,7 @@ void zebra_pw_init(struct zebra_vrf *zvrf)
        RB_INIT(zebra_pw_head, &zvrf->pseudowires);
        RB_INIT(zebra_static_pw_head, &zvrf->static_pseudowires);
 
-       hook_register(zapi_client_close, zebra_pw_client_close);
+       hook_register(zserv_client_close, zebra_pw_client_close);
 }
 
 void zebra_pw_exit(struct zebra_vrf *zvrf)
index dec4ed06a97e5bd937d3cb61a087a975df35bef1..879e7e831769b877523da38a3563a9b816eaed08 100644 (file)
@@ -1014,7 +1014,7 @@ int zebra_rib_labeled_unicast(struct route_entry *re)
 
 void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
                                struct route_entry *re,
-                               enum southbound_results res)
+                               enum dp_results res)
 {
        struct nexthop *nexthop;
        char buf[PREFIX_STRLEN];
@@ -1023,7 +1023,7 @@ void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
        dest = rib_dest_from_rnode(rn);
 
        switch (res) {
-       case SOUTHBOUND_INSTALL_SUCCESS:
+       case DP_INSTALL_SUCCESS:
                dest->selected_fib = re;
                for (ALL_NEXTHOPS(re->ng, nexthop)) {
                        if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
@@ -1036,7 +1036,7 @@ void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
                }
                zsend_route_notify_owner(re, p, ZAPI_ROUTE_INSTALLED);
                break;
-       case SOUTHBOUND_INSTALL_FAILURE:
+       case DP_INSTALL_FAILURE:
                /*
                 * I am not sure this is the right thing to do here
                 * but the code always set selected_fib before
@@ -1048,7 +1048,7 @@ void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
                zlog_warn("%u:%s: Route install failed", re->vrf_id,
                          prefix2str(p, buf, sizeof(buf)));
                break;
-       case SOUTHBOUND_DELETE_SUCCESS:
+       case DP_DELETE_SUCCESS:
                /*
                 * The case where selected_fib is not re is
                 * when we have received a system route
@@ -1063,7 +1063,7 @@ void kernel_route_rib_pass_fail(struct route_node *rn, struct prefix *p,
 
                zsend_route_notify_owner(re, p, ZAPI_ROUTE_REMOVED);
                break;
-       case SOUTHBOUND_DELETE_FAILURE:
+       case DP_DELETE_FAILURE:
                /*
                 * Should we set this to NULL if the
                 * delete fails?
@@ -1123,8 +1123,17 @@ void rib_install_kernel(struct route_node *rn, struct route_entry *re,
         * the kernel.
         */
        hook_call(rib_update, rn, "installing in kernel");
-       kernel_route_rib(rn, p, src_p, old, re);
-       zvrf->installs++;
+       switch (kernel_route_rib(rn, p, src_p, old, re)) {
+       case DP_REQUEST_QUEUED:
+               zlog_err("No current known DataPlane interfaces can return this, please fix");
+               break;
+       case DP_REQUEST_FAILURE:
+               zlog_err("No current known Rib Install Failure cases, please fix");
+               break;
+       case DP_REQUEST_SUCCESS:
+               zvrf->installs++;
+               break;
+       }
 
        return;
 }
@@ -1150,9 +1159,18 @@ void rib_uninstall_kernel(struct route_node *rn, struct route_entry *re)
         * the kernel.
         */
        hook_call(rib_update, rn, "uninstalling from kernel");
-       kernel_route_rib(rn, p, src_p, re, NULL);
-       if (zvrf)
-               zvrf->removals++;
+       switch (kernel_route_rib(rn, p, src_p, re, NULL)) {
+       case DP_REQUEST_QUEUED:
+               zlog_err("No current known DataPlane interfaces can return this, please fix");
+               break;
+       case DP_REQUEST_FAILURE:
+               zlog_err("No current known RIB Install Failure cases, please fix");
+               break;
+       case DP_REQUEST_SUCCESS:
+               if (zvrf)
+                       zvrf->removals++;
+               break;
+       }
 
        return;
 }
index 90c39bcc6f01e319f79da7b6121f8fdcf043587a..d482e0ab3da3ec1d796c304e3eaddf9eb031022d 100644 (file)
@@ -73,7 +73,7 @@ int zebra_rnh_ipv6_default_route = 0;
 
 void zebra_rnh_init(void)
 {
-       hook_register(zapi_client_close, zebra_client_cleanup_rnh);
+       hook_register(zserv_client_close, zebra_client_cleanup_rnh);
 }
 
 static inline struct route_table *get_rnh_table(vrf_id_t vrfid, int family,
@@ -1106,7 +1106,7 @@ static int send_client(struct rnh *rnh, struct zserv *client, rnh_type_t type,
 
        client->nh_last_upd_time = monotime(NULL);
        client->last_write_cmd = cmd;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 static void print_nh(struct nexthop *nexthop, struct vty *vty)
index f7548f618f532264d2660108c43fc6c756c4b85a..eb11941a3a5f5843aa394f0dcfcf350ac2473d78 100644 (file)
@@ -3544,7 +3544,8 @@ DEFUN_HIDDEN (zebra_packet_process,
 {
        uint32_t packets = strtoul(argv[2]->arg, NULL, 10);
 
-       zebrad.packets_to_process = packets;
+       atomic_store_explicit(&zebrad.packets_to_process, packets,
+                             memory_order_relaxed);
 
        return CMD_SUCCESS;
 }
@@ -3557,7 +3558,9 @@ DEFUN_HIDDEN (no_zebra_packet_process,
              "Zapi Protocol\n"
              "Number of packets to process before relinquishing thread\n")
 {
-       zebrad.packets_to_process = ZEBRA_ZAPI_PACKETS_TO_PROCESS;
+       atomic_store_explicit(&zebrad.packets_to_process,
+                             ZEBRA_ZAPI_PACKETS_TO_PROCESS,
+                             memory_order_relaxed);
 
        return CMD_SUCCESS;
 }
index 74c1f3f1781cc47a309ed8b25f2ddd2eefd78f7e..98fa7ccf677a46d0e325cf075271d408ec6eb094 100644 (file)
@@ -82,7 +82,6 @@ static void *zvni_neigh_alloc(void *p);
 static zebra_neigh_t *zvni_neigh_add(zebra_vni_t *zvni, struct ipaddr *ip,
                                     struct ethaddr *mac);
 static int zvni_neigh_del(zebra_vni_t *zvni, zebra_neigh_t *n);
-static int zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg);
 static void zvni_neigh_del_from_vtep(zebra_vni_t *zvni, int uninstall,
                                     struct in_addr *r_vtep_ip);
 static void zvni_neigh_del_all(zebra_vni_t *zvni, int uninstall, int upd_client,
@@ -138,7 +137,6 @@ static int mac_cmp(const void *p1, const void *p2);
 static void *zvni_mac_alloc(void *p);
 static zebra_mac_t *zvni_mac_add(zebra_vni_t *zvni, struct ethaddr *macaddr);
 static int zvni_mac_del(zebra_vni_t *zvni, zebra_mac_t *mac);
-static int zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg);
 static void zvni_mac_del_from_vtep(zebra_vni_t *zvni, int uninstall,
                                   struct in_addr *r_vtep_ip);
 static void zvni_mac_del_all(zebra_vni_t *zvni, int uninstall, int upd_client,
@@ -1195,7 +1193,7 @@ static int zvni_macip_send_msg_to_client(vni_t vni, struct ethaddr *macaddr,
        struct zserv *client = NULL;
        struct stream *s = NULL;
 
-       client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+       client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
        /* BGP may not be running. */
        if (!client)
                return 0;
@@ -1237,7 +1235,7 @@ static int zvni_macip_send_msg_to_client(vni_t vni, struct ethaddr *macaddr,
        else
                client->macipdel_cnt++;
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -1335,7 +1333,7 @@ static int zvni_neigh_del(zebra_vni_t *zvni, zebra_neigh_t *n)
 /*
  * Free neighbor hash entry (callback)
  */
-static int zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg)
+static void zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg)
 {
        struct neigh_walk_ctx *wctx = arg;
        zebra_neigh_t *n = backet->data;
@@ -1353,10 +1351,10 @@ static int zvni_neigh_del_hash_entry(struct hash_backet *backet, void *arg)
                if (wctx->uninstall)
                        zvni_neigh_uninstall(wctx->zvni, n);
 
-               return zvni_neigh_del(wctx->zvni, n);
+               zvni_neigh_del(wctx->zvni, n);
        }
 
-       return 0;
+       return;
 }
 
 /*
@@ -1376,10 +1374,7 @@ static void zvni_neigh_del_from_vtep(zebra_vni_t *zvni, int uninstall,
        wctx.flags = DEL_REMOTE_NEIGH_FROM_VTEP;
        wctx.r_vtep_ip = *r_vtep_ip;
 
-       hash_iterate(zvni->neigh_table,
-                    (void (*)(struct hash_backet *,
-                              void *))zvni_neigh_del_hash_entry,
-                    &wctx);
+       hash_iterate(zvni->neigh_table, zvni_neigh_del_hash_entry, &wctx);
 }
 
 /*
@@ -1399,10 +1394,7 @@ static void zvni_neigh_del_all(zebra_vni_t *zvni, int uninstall, int upd_client,
        wctx.upd_client = upd_client;
        wctx.flags = flags;
 
-       hash_iterate(zvni->neigh_table,
-                    (void (*)(struct hash_backet *,
-                              void *))zvni_neigh_del_hash_entry,
-                    &wctx);
+       hash_iterate(zvni->neigh_table, zvni_neigh_del_hash_entry, &wctx);
 }
 
 /*
@@ -2230,7 +2222,7 @@ static int zvni_mac_del(zebra_vni_t *zvni, zebra_mac_t *mac)
 /*
  * Free MAC hash entry (callback)
  */
-static int zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg)
+static void zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg)
 {
        struct mac_walk_ctx *wctx = arg;
        zebra_mac_t *mac = backet->data;
@@ -2250,10 +2242,10 @@ static int zvni_mac_del_hash_entry(struct hash_backet *backet, void *arg)
                if (wctx->uninstall)
                        zvni_mac_uninstall(wctx->zvni, mac, 0);
 
-               return zvni_mac_del(wctx->zvni, mac);
+               zvni_mac_del(wctx->zvni, mac);
        }
 
-       return 0;
+       return;
 }
 
 /*
@@ -2273,9 +2265,7 @@ static void zvni_mac_del_from_vtep(zebra_vni_t *zvni, int uninstall,
        wctx.flags = DEL_REMOTE_MAC_FROM_VTEP;
        wctx.r_vtep_ip = *r_vtep_ip;
 
-       hash_iterate(zvni->mac_table, (void (*)(struct hash_backet *,
-                                               void *))zvni_mac_del_hash_entry,
-                    &wctx);
+       hash_iterate(zvni->mac_table, zvni_mac_del_hash_entry, &wctx);
 }
 
 /*
@@ -2295,9 +2285,7 @@ static void zvni_mac_del_all(zebra_vni_t *zvni, int uninstall, int upd_client,
        wctx.upd_client = upd_client;
        wctx.flags = flags;
 
-       hash_iterate(zvni->mac_table, (void (*)(struct hash_backet *,
-                                               void *))zvni_mac_del_hash_entry,
-                    &wctx);
+       hash_iterate(zvni->mac_table, zvni_mac_del_hash_entry, &wctx);
 }
 
 /*
@@ -2779,7 +2767,7 @@ static int zvni_send_add_to_client(zebra_vni_t *zvni)
        struct zserv *client;
        struct stream *s;
 
-       client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+       client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
        /* BGP may not be running. */
        if (!client)
                return 0;
@@ -2801,7 +2789,7 @@ static int zvni_send_add_to_client(zebra_vni_t *zvni)
                           zebra_route_string(client->proto));
 
        client->vniadd_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -2812,7 +2800,7 @@ static int zvni_send_del_to_client(vni_t vni)
        struct zserv *client;
        struct stream *s;
 
-       client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+       client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
        /* BGP may not be running. */
        if (!client)
                return 0;
@@ -2831,7 +2819,7 @@ static int zvni_send_del_to_client(vni_t vni)
                           zebra_route_string(client->proto));
 
        client->vnidel_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -3747,7 +3735,7 @@ static int zl3vni_send_add_to_client(zebra_l3vni_t *zl3vni)
        struct ethaddr rmac;
        char buf[ETHER_ADDR_STRLEN];
 
-       client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+       client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
        /* BGP may not be running. */
        if (!client)
                return 0;
@@ -3779,7 +3767,7 @@ static int zl3vni_send_add_to_client(zebra_l3vni_t *zl3vni)
                        zebra_route_string(client->proto));
 
        client->l3vniadd_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /*
@@ -3790,7 +3778,7 @@ static int zl3vni_send_del_to_client(zebra_l3vni_t *zl3vni)
        struct stream *s = NULL;
        struct zserv *client = NULL;
 
-       client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+       client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
        /* BGP may not be running. */
        if (!client)
                return 0;
@@ -3809,7 +3797,7 @@ static int zl3vni_send_del_to_client(zebra_l3vni_t *zl3vni)
                           zebra_route_string(client->proto));
 
        client->l3vnidel_cnt++;
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 static void zebra_vxlan_process_l3vni_oper_up(zebra_l3vni_t *zl3vni)
@@ -3922,7 +3910,7 @@ static int ip_prefix_send_to_client(vrf_id_t vrf_id, struct prefix *p,
        struct stream *s = NULL;
        char buf[PREFIX_STRLEN];
 
-       client = zebra_find_client(ZEBRA_ROUTE_BGP, 0);
+       client = zserv_find_client(ZEBRA_ROUTE_BGP, 0);
        /* BGP may not be running. */
        if (!client)
                return 0;
@@ -3946,7 +3934,7 @@ static int ip_prefix_send_to_client(vrf_id_t vrf_id, struct prefix *p,
        else
                client->prefixdel_cnt++;
 
-       return zebra_server_send_message(client, s);
+       return zserv_send_message(client, s);
 }
 
 /* re-add remote rmac if needed */
index 7dcd654240d50eb5927101883a33365a846b1348..a099cfc057ddf941e9f694ee49d0aebd44b32deb 100644 (file)
@@ -52,6 +52,8 @@
 #include "lib/vty.h"              /* for vty_out, vty (ptr only) */
 #include "lib/zassert.h"          /* for assert */
 #include "lib/zclient.h"          /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */
+#include "lib/frr_pthread.h"      /* for frr_pthread_new, frr_pthread_stop... */
+#include "lib/frratomic.h"        /* for atomic_load_explicit, atomic_stor... */
 
 #include "zebra/debug.h"          /* for various debugging macros */
 #include "zebra/rib.h"            /* for rib_score_proto */
 #include "zebra/zserv.h"          /* for zserv */
 /* clang-format on */
 
-/* Event list of zebra. */
-enum event { ZEBRA_READ, ZEBRA_WRITE };
 /* privileges */
 extern struct zebra_privs_t zserv_privs;
-/* post event into client */
-static void zebra_event(struct zserv *client, enum event event);
-
-
-/* Public interface --------------------------------------------------------- */
-
-int zebra_server_send_message(struct zserv *client, struct stream *msg)
-{
-       stream_fifo_push(client->obuf_fifo, msg);
-       zebra_event(client, ZEBRA_WRITE);
-       return 0;
-}
-
-/* Lifecycle ---------------------------------------------------------------- */
-
-/* Hooks for client connect / disconnect */
-DEFINE_HOOK(zapi_client_connect, (struct zserv *client), (client));
-DEFINE_KOOH(zapi_client_close, (struct zserv *client), (client));
-
-/* free zebra client information. */
-static void zebra_client_free(struct zserv *client)
-{
-       hook_call(zapi_client_close, client);
-
-       /* Close file descriptor. */
-       if (client->sock) {
-               unsigned long nroutes;
-
-               close(client->sock);
-               nroutes = rib_score_proto(client->proto, client->instance);
-               zlog_notice(
-                       "client %d disconnected. %lu %s routes removed from the rib",
-                       client->sock, nroutes,
-                       zebra_route_string(client->proto));
-               client->sock = -1;
-       }
-
-       /* Free stream buffers. */
-       if (client->ibuf_work)
-               stream_free(client->ibuf_work);
-       if (client->obuf_work)
-               stream_free(client->obuf_work);
-       if (client->ibuf_fifo)
-               stream_fifo_free(client->ibuf_fifo);
-       if (client->obuf_fifo)
-               stream_fifo_free(client->obuf_fifo);
-       if (client->wb)
-               buffer_free(client->wb);
-
-       /* Release threads. */
-       if (client->t_read)
-               thread_cancel(client->t_read);
-       if (client->t_write)
-               thread_cancel(client->t_write);
-       if (client->t_suicide)
-               thread_cancel(client->t_suicide);
-
-       /* Free bitmaps. */
-       for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
-               for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
-                       vrf_bitmap_free(client->redist[afi][i]);
-
-       vrf_bitmap_free(client->redist_default);
-       vrf_bitmap_free(client->ifinfo);
-       vrf_bitmap_free(client->ridinfo);
-
-       XFREE(MTYPE_TMP, client);
-}
 
 /*
- * Called from client thread to terminate itself.
+ * Client thread events.
+ *
+ * These are used almost exclusively by client threads to drive their own event
+ * loops. The only exception is in zebra_client_create(), which pushes an
+ * initial ZSERV_CLIENT_READ event to start the API handler loop.
  */
-static void zebra_client_close(struct zserv *client)
-{
-       listnode_delete(zebrad.client_list, client);
-       zebra_client_free(client);
-}
-
-/* Make new client. */
-static void zebra_client_create(int sock)
-{
-       struct zserv *client;
-       int i;
-       afi_t afi;
-
-       client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));
-
-       /* Make client input/output buffer. */
-       client->sock = sock;
-       client->ibuf_fifo = stream_fifo_new();
-       client->obuf_fifo = stream_fifo_new();
-       client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
-       client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
-       client->wb = buffer_new(0);
-
-       /* Set table number. */
-       client->rtm_table = zebrad.rtm_table_default;
-
-       client->connect_time = monotime(NULL);
-       /* Initialize flags */
-       for (afi = AFI_IP; afi < AFI_MAX; afi++)
-               for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
-                       client->redist[afi][i] = vrf_bitmap_init();
-       client->redist_default = vrf_bitmap_init();
-       client->ifinfo = vrf_bitmap_init();
-       client->ridinfo = vrf_bitmap_init();
-
-       /* by default, it's not a synchronous client */
-       client->is_synchronous = 0;
+enum zserv_client_event {
+       /* Schedule a socket read */
+       ZSERV_CLIENT_READ,
+       /* Schedule a buffer write */
+       ZSERV_CLIENT_WRITE,
+};
 
-       /* Add this client to linked list. */
-       listnode_add(zebrad.client_list, client);
-
-       zebra_vrf_update_all(client);
+/*
+ * Main thread events.
+ *
+ * These are used by client threads to notify the main thread about various
+ * events and to make processing requests.
+ */
+enum zserv_event {
+       /* Schedule listen job on Zebra API socket */
+       ZSERV_ACCEPT,
+       /* The calling client has packets on its input buffer */
+       ZSERV_PROCESS_MESSAGES,
+       /* The calling client wishes to be killed */
+       ZSERV_HANDLE_CLOSE,
+};
 
-       hook_call(zapi_client_connect, client);
+/*
+ * Zebra server event driver for all client threads.
+ *
+ * This is essentially a wrapper around thread_add_event() that centralizes
+ * those scheduling calls into one place.
+ *
+ * All calls to this function schedule an event on the pthread running the
+ * provided client.
+ *
+ * client
+ *    the client in question, and thread target
+ *
+ * event
+ *    the event to notify them about
+ */
+static void zserv_client_event(struct zserv *client,
+                              enum zserv_client_event event);
 
-       /* start read loop */
-       zebra_event(client, ZEBRA_READ);
-}
+/*
+ * Zebra server event driver for the main thread.
+ *
+ * This is essentially a wrapper around thread_add_event() that centralizes
+ * those scheduling calls into one place.
+ *
+ * All calls to this function schedule an event on Zebra's main pthread.
+ *
+ * client
+ *    the client in question
+ *
+ * event
+ *    the event to notify the main thread about
+ */
+static void zserv_event(struct zserv *client, enum zserv_event event);
 
-static int zserv_delayed_close(struct thread *thread)
-{
-       struct zserv *client = THREAD_ARG(thread);
 
-       client->t_suicide = NULL;
-       zebra_client_close(client);
-       return 0;
-}
+/* Client thread lifecycle -------------------------------------------------- */
 
 /*
  * Log zapi message to zlog.
@@ -220,176 +157,157 @@ static void zserv_log_message(const char *errmsg, struct stream *msg,
        zlog_hexdump(msg->data, STREAM_READABLE(msg));
 }
 
-static int zserv_flush_data(struct thread *thread)
+/*
+ * Gracefully shut down a client connection.
+ *
+ * Cancel any pending tasks for the client's thread. Then schedule a task on the
+ * main thread to shut down the calling thread.
+ *
+ * Must be called from the client pthread, never the main thread.
+ */
+static void zserv_client_close(struct zserv *client)
 {
-       struct zserv *client = THREAD_ARG(thread);
-
-       client->t_write = NULL;
-       if (client->t_suicide) {
-               zebra_client_close(client);
-               return -1;
-       }
-       switch (buffer_flush_available(client->wb, client->sock)) {
-       case BUFFER_ERROR:
-               zlog_warn(
-                       "%s: buffer_flush_available failed on zserv client fd %d, closing",
-                       __func__, client->sock);
-               zebra_client_close(client);
-               client = NULL;
-               break;
-       case BUFFER_PENDING:
-               client->t_write = NULL;
-               thread_add_write(zebrad.master, zserv_flush_data, client,
-                                client->sock, &client->t_write);
-               break;
-       case BUFFER_EMPTY:
-               break;
-       }
-
-       if (client)
-               client->last_write_time = monotime(NULL);
-       return 0;
+       atomic_store_explicit(&client->pthread->running, false,
+                             memory_order_seq_cst);
+       THREAD_OFF(client->t_read);
+       THREAD_OFF(client->t_write);
+       zserv_event(client, ZSERV_HANDLE_CLOSE);
 }
 
 /*
- * Write a single packet.
+ * Write all pending messages to client socket.
+ *
+ * This function first attempts to flush any buffered data. If unsuccessful,
+ * the function reschedules itself and returns. If successful, it pops all
+ * available messages from the output queue and continues to write data
+ * directly to the socket until the socket would block. If the socket never
+ * blocks and all data is written, the function returns without rescheduling
+ * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
+ * buffered and the function reschedules itself.
+ *
+ * The utility of the buffer is that it allows us to vastly reduce lock
+ * contention by allowing us to pop *all* messages off the output queue at once
+ * instead of locking and unlocking each time we want to pop a single message
+ * off the queue. The same thing could arguably be accomplished faster by
+ * allowing the main thread to write directly into the buffer instead of
+ * enqueuing packets onto an intermediary queue, but the intermediary queue
+ * allows us to expose information about input and output queues to the user in
+ * terms of number of packets rather than size of data.
  */
 static int zserv_write(struct thread *thread)
 {
        struct zserv *client = THREAD_ARG(thread);
        struct stream *msg;
-       int writerv;
-
-       if (client->t_suicide)
-               return -1;
-
-       if (client->is_synchronous)
-               return 0;
-
-       msg = stream_fifo_pop(client->obuf_fifo);
-       stream_set_getp(msg, 0);
-       client->last_write_cmd = stream_getw_from(msg, 6);
+       uint32_t wcmd;
+       struct stream_fifo *cache;
 
-       writerv = buffer_write(client->wb, client->sock, STREAM_DATA(msg),
-                              stream_get_endp(msg));
-
-       stream_free(msg);
-
-       switch (writerv) {
+       /* If we have any data pending, try to flush it first */
+       switch (buffer_flush_all(client->wb, client->sock)) {
        case BUFFER_ERROR:
-               zlog_warn(
-                       "%s: buffer_write failed to zserv client fd %d, closing",
-                       __func__, client->sock);
-               /*
-                * Schedule a delayed close since many of the functions that
-                * call this one do not check the return code. They do not
-                * allow for the possibility that an I/O error may have caused
-                * the client to be deleted.
-                */
-               client->t_suicide = NULL;
-               thread_add_event(zebrad.master, zserv_delayed_close, client, 0,
-                                &client->t_suicide);
-               return -1;
-       case BUFFER_EMPTY:
-               THREAD_OFF(client->t_write);
-               break;
+               goto zwrite_fail;
        case BUFFER_PENDING:
-               thread_add_write(zebrad.master, zserv_flush_data, client,
-                                client->sock, &client->t_write);
+               atomic_store_explicit(&client->last_write_time,
+                                     (uint32_t)monotime(NULL),
+                                     memory_order_relaxed);
+               zserv_client_event(client, ZSERV_CLIENT_WRITE);
+               return 0;
+       case BUFFER_EMPTY:
                break;
        }
 
-       if (client->obuf_fifo->count)
-               zebra_event(client, ZEBRA_WRITE);
-
-       client->last_write_time = monotime(NULL);
-       return 0;
-}
-
-#if defined(HANDLE_ZAPI_FUZZING)
-static void zserv_write_incoming(struct stream *orig, uint16_t command)
-{
-       char fname[MAXPATHLEN];
-       struct stream *copy;
-       int fd = -1;
+       cache = stream_fifo_new();
 
-       copy = stream_dup(orig);
-       stream_set_getp(copy, 0);
+       pthread_mutex_lock(&client->obuf_mtx);
+       {
+               while (stream_fifo_head(client->obuf_fifo))
+                       stream_fifo_push(cache,
+                                        stream_fifo_pop(client->obuf_fifo));
+       }
+       pthread_mutex_unlock(&client->obuf_mtx);
 
-       zserv_privs.change(ZPRIVS_RAISE);
-       snprintf(fname, MAXPATHLEN, "%s/%u", DAEMON_VTY_DIR, command);
-       fd = open(fname, O_CREAT | O_WRONLY | O_EXCL, 0644);
-       stream_flush(copy, fd);
-       close(fd);
-       zserv_privs.change(ZPRIVS_LOWER);
-       stream_free(copy);
-}
-#endif
+       if (cache->tail) {
+               msg = cache->tail;
+               stream_set_getp(msg, 0);
+               wcmd = stream_getw_from(msg, 6);
+       }
 
-static int zserv_process_messages(struct thread *thread)
-{
-       struct zserv *client = THREAD_ARG(thread);
-       struct zebra_vrf *zvrf;
-       struct zmsghdr hdr;
-       struct stream *msg;
-       bool hdrvalid;
+       while (stream_fifo_head(cache)) {
+               msg = stream_fifo_pop(cache);
+               buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
+               stream_free(msg);
+       }
 
-       do {
-               msg = stream_fifo_pop(client->ibuf_fifo);
+       stream_fifo_free(cache);
 
-               /* break if out of messages */
-               if (!msg)
-                       continue;
+       /* If we have any data pending, try to flush it first */
+       switch (buffer_flush_all(client->wb, client->sock)) {
+       case BUFFER_ERROR:
+               goto zwrite_fail;
+       case BUFFER_PENDING:
+               atomic_store_explicit(&client->last_write_time,
+                                     (uint32_t)monotime(NULL),
+                                     memory_order_relaxed);
+               zserv_client_event(client, ZSERV_CLIENT_WRITE);
+               return 0;
+       case BUFFER_EMPTY:
+               break;
+       }
 
-               /* read & check header */
-               hdrvalid = zapi_parse_header(msg, &hdr);
-               if (!hdrvalid && IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV) {
-                       const char *emsg = "Message has corrupt header";
-                       zserv_log_message(emsg, msg, NULL);
-               }
-               if (!hdrvalid)
-                       continue;
-
-               hdr.length -= ZEBRA_HEADER_SIZE;
-               /* lookup vrf */
-               zvrf = zebra_vrf_lookup_by_id(hdr.vrf_id);
-               if (!zvrf && IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV) {
-                       const char *emsg = "Message specifies unknown VRF";
-                       zserv_log_message(emsg, msg, &hdr);
-               }
-               if (!zvrf)
-                       continue;
+       atomic_store_explicit(&client->last_write_cmd, wcmd,
+                             memory_order_relaxed);
 
-               /* process commands */
-               zserv_handle_commands(client, &hdr, msg, zvrf);
+       atomic_store_explicit(&client->last_write_time,
+                             (uint32_t)monotime(NULL), memory_order_relaxed);
 
-       } while (msg);
+       return 0;
 
+zwrite_fail:
+       zlog_warn("%s: could not write to %s [fd = %d], closing.", __func__,
+                 zebra_route_string(client->proto), client->sock);
+       zserv_client_close(client);
        return 0;
 }
 
-/* Handler of zebra service request. */
+/*
+ * Read and process data from a client socket.
+ *
+ * The responsibilities here are to read raw data from the client socket,
+ * validate the header, encapsulate it into a single stream object, push it
+ * onto the input queue and then notify the main thread that there is new data
+ * available.
+ *
+ * This function first looks for any data in the client structure's working
+ * input buffer. If data is present, it is assumed that reading stopped in a
+ * previous invocation of this task and needs to be resumed to finish a message.
+ * Otherwise, the socket data stream is assumed to be at the beginning of a new
+ * ZAPI message (specifically at the header). The header is read and validated.
+ * If the header passed validation then the length field found in the header is
+ * used to compute the total length of the message. That much data is read (but
+ * not inspected), appended to the header, placed into a stream and pushed onto
+ * the client's input queue. A task is then scheduled on the main thread to
+ * process the client's input queue. Finally, if all of this was successful,
+ * this task reschedules itself.
+ *
+ * Any failure in any of these actions is handled by terminating the client.
+ */
 static int zserv_read(struct thread *thread)
 {
+       struct zserv *client = THREAD_ARG(thread);
        int sock;
-       struct zserv *client;
        size_t already;
-#if defined(HANDLE_ZAPI_FUZZING)
-       int packets = 1;
-#else
-       int packets = zebrad.packets_to_process;
-#endif
-       /* Get thread data.  Reset reading thread because I'm running. */
-       sock = THREAD_FD(thread);
-       client = THREAD_ARG(thread);
+       struct stream_fifo *cache;
+       uint32_t p2p_orig;
 
-       if (client->t_suicide) {
-               zebra_client_close(client);
-               return -1;
-       }
+       uint32_t p2p;
+       struct zmsghdr hdr;
 
-       while (packets) {
-               struct zmsghdr hdr;
+       p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
+                                       memory_order_relaxed);
+       cache = stream_fifo_new();
+       p2p = p2p_orig;
+       sock = THREAD_FD(thread);
+
+       while (p2p) {
                ssize_t nb;
                bool hdrvalid;
                char errmsg[256];
@@ -449,6 +367,7 @@ static int zserv_read(struct thread *thread)
                                "Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu",
                                __func__, sock, hdr.length,
                                (unsigned long)STREAM_SIZE(client->ibuf_work));
+                       zserv_log_message(errmsg, client->ibuf_work, &hdr);
                        goto zread_fail;
                }
 
@@ -468,10 +387,6 @@ static int zserv_read(struct thread *thread)
                        }
                }
 
-#if defined(HANDLE_ZAPI_FUZZING)
-               zserv_write_incoming(client->ibuf_work, command);
-#endif
-
                /* Debug packet information. */
                if (IS_ZEBRA_DEBUG_EVENT)
                        zlog_debug("zebra message comes from socket [%d]",
@@ -480,55 +395,319 @@ static int zserv_read(struct thread *thread)
                if (IS_ZEBRA_DEBUG_PACKET && IS_ZEBRA_DEBUG_RECV)
                        zserv_log_message(NULL, client->ibuf_work, &hdr);
 
-               client->last_read_time = monotime(NULL);
-               client->last_read_cmd = hdr.command;
-
                stream_set_getp(client->ibuf_work, 0);
                struct stream *msg = stream_dup(client->ibuf_work);
 
-               stream_fifo_push(client->ibuf_fifo, msg);
+               stream_fifo_push(cache, msg);
+               stream_reset(client->ibuf_work);
+               p2p--;
+       }
 
-               if (client->t_suicide)
-                       goto zread_fail;
+       if (p2p < p2p_orig) {
+               /* update session statistics */
+               atomic_store_explicit(&client->last_read_time, monotime(NULL),
+                                     memory_order_relaxed);
+               atomic_store_explicit(&client->last_read_cmd, hdr.command,
+                                     memory_order_relaxed);
+
+               /* publish read packets on client's input queue */
+               pthread_mutex_lock(&client->ibuf_mtx);
+               {
+                       while (cache->head)
+                               stream_fifo_push(client->ibuf_fifo,
+                                                stream_fifo_pop(cache));
+               }
+               pthread_mutex_unlock(&client->ibuf_mtx);
+
+               /* Schedule job to process those packets */
+               zserv_event(client, ZSERV_PROCESS_MESSAGES);
 
-               --packets;
-               stream_reset(client->ibuf_work);
        }
 
        if (IS_ZEBRA_DEBUG_PACKET)
-               zlog_debug("Read %d packets",
-                          zebrad.packets_to_process - packets);
-
-       /* Schedule job to process those packets */
-       thread_add_event(zebrad.master, &zserv_process_messages, client, 0,
-                        NULL);
+               zlog_debug("Read %d packets", p2p_orig - p2p);
 
        /* Reschedule ourselves */
-       zebra_event(client, ZEBRA_READ);
+       zserv_client_event(client, ZSERV_CLIENT_READ);
+
+       stream_fifo_free(cache);
 
        return 0;
 
 zread_fail:
-       zebra_client_close(client);
+       stream_fifo_free(cache);
+       zserv_client_close(client);
        return -1;
 }
 
-static void zebra_event(struct zserv *client, enum event event)
+static void zserv_client_event(struct zserv *client,
+                              enum zserv_client_event event)
 {
        switch (event) {
-       case ZEBRA_READ:
-               thread_add_read(zebrad.master, zserv_read, client, client->sock,
-                               &client->t_read);
+       case ZSERV_CLIENT_READ:
+               thread_add_read(client->pthread->master, zserv_read, client,
+                               client->sock, &client->t_read);
                break;
-       case ZEBRA_WRITE:
-               thread_add_write(zebrad.master, zserv_write, client,
+       case ZSERV_CLIENT_WRITE:
+               thread_add_write(client->pthread->master, zserv_write, client,
                                 client->sock, &client->t_write);
                break;
        }
 }
 
-/* Accept code of zebra server socket. */
-static int zebra_accept(struct thread *thread)
+/* Main thread lifecycle ---------------------------------------------------- */
+
+/*
+ * Read and process messages from a client.
+ *
+ * This task runs on the main pthread. It is scheduled by client pthreads when
+ * they have new messages available on their input queues. The client is passed
+ * as the task argument.
+ *
+ * Each message is popped off the client's input queue and the action associated
+ * with the message is executed. This proceeds until there are no more messages,
+ * an error occurs, or the processing limit is reached.
+ *
+ * The client's I/O thread can push at most zebrad.packets_to_process messages
+ * onto the input buffer before notifying us there are packets to read. As long
+ * as we always process zebrad.packets_to_process messages here, we can
+ * rely on the read thread to handle queuing this task enough times to process
+ * everything on the input queue.
+ */
+static int zserv_process_messages(struct thread *thread)
+{
+       struct zserv *client = THREAD_ARG(thread);
+       struct stream *msg;
+       struct stream_fifo *cache = stream_fifo_new();
+
+       uint32_t p2p = zebrad.packets_to_process;
+
+       pthread_mutex_lock(&client->ibuf_mtx);
+       {
+               uint32_t i;
+               for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
+                    ++i) {
+                       msg = stream_fifo_pop(client->ibuf_fifo);
+                       stream_fifo_push(cache, msg);
+               }
+
+               msg = NULL;
+       }
+       pthread_mutex_unlock(&client->ibuf_mtx);
+
+       while (stream_fifo_head(cache)) {
+               msg = stream_fifo_pop(cache);
+               zserv_handle_commands(client, msg);
+               stream_free(msg);
+       }
+
+       stream_fifo_free(cache);
+
+       return 0;
+}
+
+int zserv_send_message(struct zserv *client, struct stream *msg)
+{
+       /*
+        * This is a somewhat poorly named variable added with Zebra's portion
+        * of the label manager. That component does not use the regular
+        * zserv/zapi_msg interface for handling its messages, as the client
+        * itself runs in-process. Instead it uses synchronous writes on the
+        * zserv client's socket directly in the zread* handlers for its
+        * message types. Furthermore, it cannot handle the usual messages
+        * Zebra sends (such as those for interface changes) and so has added
+        * this flag and check here as a hack to suppress all messages that it
+        * does not explicitly know about.
+        *
+        * In any case this needs to be cleaned up at some point.
+        *
+        * See also:
+        *    zread_label_manager_request
+        *    zsend_label_manager_connect_response
+        *    zsend_assign_label_chunk_response
+        *    ...
+        */
+       if (client->is_synchronous)
+               return 0;
+
+       pthread_mutex_lock(&client->obuf_mtx);
+       {
+               stream_fifo_push(client->obuf_fifo, msg);
+       }
+       pthread_mutex_unlock(&client->obuf_mtx);
+
+       zserv_client_event(client, ZSERV_CLIENT_WRITE);
+
+       return 0;
+}
+
+
+/* Hooks for client connect / disconnect */
+DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client));
+DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client));
+
+/*
+ * Deinitialize zebra client.
+ *
+ * - Deregister and deinitialize related internal resources
+ * - Gracefully close socket
+ * - Free associated resources
+ * - Free client structure
+ *
+ * This does *not* take any action on the struct thread * fields. These are
+ * managed by the owning pthread and any tasks associated with them must have
+ * been stopped prior to invoking this function.
+ */
+static void zserv_client_free(struct zserv *client)
+{
+       hook_call(zserv_client_close, client);
+
+       /* Close file descriptor. */
+       if (client->sock) {
+               unsigned long nroutes;
+
+               close(client->sock);
+               nroutes = rib_score_proto(client->proto, client->instance);
+               zlog_notice(
+                       "client %d disconnected. %lu %s routes removed from the rib",
+                       client->sock, nroutes,
+                       zebra_route_string(client->proto));
+               client->sock = -1;
+       }
+
+       /* Free stream buffers. */
+       if (client->ibuf_work)
+               stream_free(client->ibuf_work);
+       if (client->obuf_work)
+               stream_free(client->obuf_work);
+       if (client->ibuf_fifo)
+               stream_fifo_free(client->ibuf_fifo);
+       if (client->obuf_fifo)
+               stream_fifo_free(client->obuf_fifo);
+       if (client->wb)
+               buffer_free(client->wb);
+
+       /* Free buffer mutexes */
+       pthread_mutex_destroy(&client->obuf_mtx);
+       pthread_mutex_destroy(&client->ibuf_mtx);
+
+       /* Free bitmaps. */
+       for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++)
+               for (int i = 0; i < ZEBRA_ROUTE_MAX; i++)
+                       vrf_bitmap_free(client->redist[afi][i]);
+
+       vrf_bitmap_free(client->redist_default);
+       vrf_bitmap_free(client->ifinfo);
+       vrf_bitmap_free(client->ridinfo);
+
+       XFREE(MTYPE_TMP, client);
+}
+
+/*
+ * Finish closing a client.
+ *
+ * This task is scheduled by a ZAPI client pthread on the main pthread when it
+ * wants to stop itself. When this executes, the client connection should
+ * already have been closed. This task's responsibility is to gracefully
+ * terminate the client thread, update relevant internal datastructures and
+ * free any resources allocated by the main thread.
+ */
+static int zserv_handle_client_close(struct thread *thread)
+{
+       struct zserv *client = THREAD_ARG(thread);
+
+       /*
+        * Ensure these have been nulled. This does not equate to the
+        * associated task(s) being scheduled or unscheduled on the client
+        * pthread's threadmaster.
+        */
+       assert(!client->t_read);
+       assert(!client->t_write);
+
+       /* synchronously stop thread */
+       frr_pthread_stop(client->pthread, NULL);
+
+       /* destroy frr_pthread */
+       frr_pthread_destroy(client->pthread);
+       client->pthread = NULL;
+
+       listnode_delete(zebrad.client_list, client);
+       zserv_client_free(client);
+       return 0;
+}
+
+/*
+ * Create a new client.
+ *
+ * This is called when a new connection is accept()'d on the ZAPI socket. It
+ * initializes new client structure, notifies any subscribers of the connection
+ * event and spawns the client's thread.
+ *
+ * sock
+ *    client's socket file descriptor
+ */
+static void zserv_client_create(int sock)
+{
+       struct zserv *client;
+       int i;
+       afi_t afi;
+
+       client = XCALLOC(MTYPE_TMP, sizeof(struct zserv));
+
+       /* Make client input/output buffer. */
+       client->sock = sock;
+       client->ibuf_fifo = stream_fifo_new();
+       client->obuf_fifo = stream_fifo_new();
+       client->ibuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
+       client->obuf_work = stream_new(ZEBRA_MAX_PACKET_SIZ);
+       pthread_mutex_init(&client->ibuf_mtx, NULL);
+       pthread_mutex_init(&client->obuf_mtx, NULL);
+       client->wb = buffer_new(0);
+
+       /* Set table number. */
+       client->rtm_table = zebrad.rtm_table_default;
+
+       atomic_store_explicit(&client->connect_time, (uint32_t) monotime(NULL),
+                             memory_order_relaxed);
+
+       /* Initialize flags */
+       for (afi = AFI_IP; afi < AFI_MAX; afi++)
+               for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
+                       client->redist[afi][i] = vrf_bitmap_init();
+       client->redist_default = vrf_bitmap_init();
+       client->ifinfo = vrf_bitmap_init();
+       client->ridinfo = vrf_bitmap_init();
+
+       /* by default, it's not a synchronous client */
+       client->is_synchronous = 0;
+
+       /* Add this client to linked list. */
+       listnode_add(zebrad.client_list, client);
+
+       struct frr_pthread_attr zclient_pthr_attrs = {
+               .id = frr_pthread_get_id(),
+               .start = frr_pthread_attr_default.start,
+               .stop = frr_pthread_attr_default.stop
+       };
+       client->pthread =
+               frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread");
+
+       zebra_vrf_update_all(client);
+
+       /* start read loop */
+       zserv_client_event(client, ZSERV_CLIENT_READ);
+
+       /* call callbacks */
+       hook_call(zserv_client_connect, client);
+
+       /* start pthread */
+       frr_pthread_run(client->pthread, NULL);
+}
+
+/*
+ * Accept socket connection.
+ */
+static int zserv_accept(struct thread *thread)
 {
        int accept_sock;
        int client_sock;
@@ -538,7 +717,7 @@ static int zebra_accept(struct thread *thread)
        accept_sock = THREAD_FD(thread);
 
        /* Reregister myself. */
-       thread_add_read(zebrad.master, zebra_accept, NULL, accept_sock, NULL);
+       zserv_event(NULL, ZSERV_ACCEPT);
 
        len = sizeof(struct sockaddr_in);
        client_sock = accept(accept_sock, (struct sockaddr *)&client, &len);
@@ -553,16 +732,14 @@ static int zebra_accept(struct thread *thread)
        set_nonblocking(client_sock);
 
        /* Create new zebra client. */
-       zebra_client_create(client_sock);
+       zserv_client_create(client_sock);
 
        return 0;
 }
 
-/* Make zebra server socket, wiping any existing one (see bug #403). */
-void zebra_zserv_socket_init(char *path)
+void zserv_start(char *path)
 {
        int ret;
-       int sock;
        mode_t old_mask;
        struct sockaddr_storage sa;
        socklen_t sa_len;
@@ -575,8 +752,8 @@ void zebra_zserv_socket_init(char *path)
        old_mask = umask(0077);
 
        /* Make UNIX domain socket. */
-       sock = socket(sa.ss_family, SOCK_STREAM, 0);
-       if (sock < 0) {
+       zebrad.sock = socket(sa.ss_family, SOCK_STREAM, 0);
+       if (zebrad.sock < 0) {
                zlog_warn("Can't create zserv socket: %s",
                          safe_strerror(errno));
                zlog_warn(
@@ -585,8 +762,8 @@ void zebra_zserv_socket_init(char *path)
        }
 
        if (sa.ss_family != AF_UNIX) {
-               sockopt_reuseaddr(sock);
-               sockopt_reuseport(sock);
+               sockopt_reuseaddr(zebrad.sock);
+               sockopt_reuseport(zebrad.sock);
        } else {
                struct sockaddr_un *suna = (struct sockaddr_un *)&sa;
                if (suna->sun_path[0])
@@ -594,40 +771,62 @@ void zebra_zserv_socket_init(char *path)
        }
 
        zserv_privs.change(ZPRIVS_RAISE);
-       setsockopt_so_recvbuf(sock, 1048576);
-       setsockopt_so_sendbuf(sock, 1048576);
+       setsockopt_so_recvbuf(zebrad.sock, 1048576);
+       setsockopt_so_sendbuf(zebrad.sock, 1048576);
        zserv_privs.change(ZPRIVS_LOWER);
 
        if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_RAISE))
                zlog_err("Can't raise privileges");
 
-       ret = bind(sock, (struct sockaddr *)&sa, sa_len);
+       ret = bind(zebrad.sock, (struct sockaddr *)&sa, sa_len);
        if (ret < 0) {
                zlog_warn("Can't bind zserv socket on %s: %s", path,
                          safe_strerror(errno));
                zlog_warn(
                        "zebra can't provide full functionality due to above error");
-               close(sock);
+               close(zebrad.sock);
+               zebrad.sock = -1;
                return;
        }
        if (sa.ss_family != AF_UNIX && zserv_privs.change(ZPRIVS_LOWER))
                zlog_err("Can't lower privileges");
 
-       ret = listen(sock, 5);
+       ret = listen(zebrad.sock, 5);
        if (ret < 0) {
                zlog_warn("Can't listen to zserv socket %s: %s", path,
                          safe_strerror(errno));
                zlog_warn(
                        "zebra can't provide full functionality due to above error");
-               close(sock);
+               close(zebrad.sock);
+               zebrad.sock = -1;
                return;
        }
 
        umask(old_mask);
 
-       thread_add_read(zebrad.master, zebra_accept, NULL, sock, NULL);
+       zserv_event(NULL, ZSERV_ACCEPT);
+}
+
+void zserv_event(struct zserv *client, enum zserv_event event)
+{
+       switch (event) {
+       case ZSERV_ACCEPT:
+               thread_add_read(zebrad.master, zserv_accept, NULL, zebrad.sock,
+                               NULL);
+               break;
+       case ZSERV_PROCESS_MESSAGES:
+               thread_add_event(zebrad.master, zserv_process_messages, client,
+                                0, NULL);
+               break;
+       case ZSERV_HANDLE_CLOSE:
+               thread_add_event(zebrad.master, zserv_handle_client_close,
+                                client, 0, NULL);
+       }
 }
 
+
+/* General purpose ---------------------------------------------------------- */
+
 #define ZEBRA_TIME_BUF 32
 static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
 {
@@ -663,6 +862,8 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
 {
        char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
        char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
+       time_t connect_time, last_read_time, last_write_time;
+       uint16_t last_read_cmd, last_write_cmd;
 
        vty_out(vty, "Client: %s", zebra_route_string(client->proto));
        if (client->instance)
@@ -673,8 +874,11 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
        vty_out(vty, "FD: %d \n", client->sock);
        vty_out(vty, "Route Table ID: %d \n", client->rtm_table);
 
+       connect_time = (time_t) atomic_load_explicit(&client->connect_time,
+                                                    memory_order_relaxed);
+
        vty_out(vty, "Connect Time: %s \n",
-               zserv_time_buf(&client->connect_time, cbuf, ZEBRA_TIME_BUF));
+               zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
        if (client->nh_reg_time) {
                vty_out(vty, "Nexthop Registry Time: %s \n",
                        zserv_time_buf(&client->nh_reg_time, nhbuf,
@@ -688,16 +892,26 @@ static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
        } else
                vty_out(vty, "Not registered for Nexthop Updates\n");
 
+       last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
+                                                     memory_order_relaxed);
+       last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
+                                                      memory_order_relaxed);
+
+       last_read_cmd = atomic_load_explicit(&client->last_read_cmd,
+                                            memory_order_relaxed);
+       last_write_cmd = atomic_load_explicit(&client->last_write_cmd,
+                                             memory_order_relaxed);
+
        vty_out(vty, "Last Msg Rx Time: %s \n",
-               zserv_time_buf(&client->last_read_time, rbuf, ZEBRA_TIME_BUF));
+               zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
        vty_out(vty, "Last Msg Tx Time: %s \n",
-               zserv_time_buf(&client->last_write_time, wbuf, ZEBRA_TIME_BUF));
-       if (client->last_read_time)
+               zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
+       if (last_read_cmd)
                vty_out(vty, "Last Rcvd Cmd: %s \n",
-                       zserv_command_string(client->last_read_cmd));
-       if (client->last_write_time)
+                       zserv_command_string(last_read_cmd));
+       if (last_write_cmd)
                vty_out(vty, "Last Sent Cmd: %s \n",
-                       zserv_command_string(client->last_write_cmd));
+                       zserv_command_string(last_write_cmd));
        vty_out(vty, "\n");
 
        vty_out(vty, "Type        Add        Update     Del \n");
@@ -731,19 +945,27 @@ static void zebra_show_client_brief(struct vty *vty, struct zserv *client)
 {
        char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
        char wbuf[ZEBRA_TIME_BUF];
+       time_t connect_time, last_read_time, last_write_time;
+
+       connect_time = (time_t)atomic_load_explicit(&client->connect_time,
+                                                   memory_order_relaxed);
+       last_read_time = (time_t)atomic_load_explicit(&client->last_read_time,
+                                                     memory_order_relaxed);
+       last_write_time = (time_t)atomic_load_explicit(&client->last_write_time,
+                                                      memory_order_relaxed);
 
        vty_out(vty, "%-8s%12s %12s%12s%8d/%-8d%8d/%-8d\n",
                zebra_route_string(client->proto),
-               zserv_time_buf(&client->connect_time, cbuf, ZEBRA_TIME_BUF),
-               zserv_time_buf(&client->last_read_time, rbuf, ZEBRA_TIME_BUF),
-               zserv_time_buf(&client->last_write_time, wbuf, ZEBRA_TIME_BUF),
+               zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF),
+               zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF),
+               zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF),
                client->v4_route_add_cnt + client->v4_route_upd8_cnt,
                client->v4_route_del_cnt,
                client->v6_route_add_cnt + client->v6_route_upd8_cnt,
                client->v6_route_del_cnt);
 }
 
-struct zserv *zebra_find_client(uint8_t proto, unsigned short instance)
+struct zserv *zserv_find_client(uint8_t proto, unsigned short instance)
 {
        struct listnode *node, *nnode;
        struct zserv *client;
@@ -805,13 +1027,17 @@ void zserv_read_file(char *input)
        struct thread t;
 
        zebra_client_create(-1);
-       client = zebrad.client_list->head->data;
+
+       frr_pthread_stop(client->pthread, NULL);
+       frr_pthread_destroy(client->pthread);
+       client->pthread = NULL;
+
        t.arg = client;
 
        fd = open(input, O_RDONLY | O_NONBLOCK);
        t.u.fd = fd;
 
-       zebra_client_read(&t);
+       zserv_read(&t);
 
        close(fd);
 }
@@ -821,7 +1047,10 @@ void zserv_init(void)
 {
        /* Client list init. */
        zebrad.client_list = list_new();
-       zebrad.client_list->del = (void (*)(void *))zebra_client_free;
+       zebrad.client_list->del = (void (*)(void *)) zserv_client_free;
+
+       /* Misc init. */
+       zebrad.sock = -1;
 
        install_element(ENABLE_NODE, &show_zebra_client_cmd);
        install_element(ENABLE_NODE, &show_zebra_client_summary_cmd);
index a5b5acbb3346ea6473c6f6eb3e2f7aba9860153d..78cc200fa8c4576aff24a4669ebba1e1143e44ac 100644 (file)
 
 /* Client structure. */
 struct zserv {
+       /* Client pthread */
+       struct frr_pthread *pthread;
+
        /* Client file descriptor. */
        int sock;
 
        /* Input/output buffer to the client. */
+       pthread_mutex_t ibuf_mtx;
        struct stream_fifo *ibuf_fifo;
+       pthread_mutex_t obuf_mtx;
        struct stream_fifo *obuf_fifo;
 
        /* Private I/O buffers */
@@ -68,9 +73,6 @@ struct zserv {
        struct thread *t_read;
        struct thread *t_write;
 
-       /* Thread for delayed close. */
-       struct thread *t_suicide;
-
        /* default routing table this client munges */
        int rtm_table;
 
@@ -129,15 +131,28 @@ struct zserv {
        uint32_t prefixadd_cnt;
        uint32_t prefixdel_cnt;
 
-       time_t connect_time;
-       time_t last_read_time;
-       time_t last_write_time;
        time_t nh_reg_time;
        time_t nh_dereg_time;
        time_t nh_last_upd_time;
 
-       int last_read_cmd;
-       int last_write_cmd;
+       /*
+        * Session information.
+        *
+        * These are not synchronous with respect to each other. For instance,
+        * last_read_cmd may contain a value that has been read in the future
+        * relative to last_read_time.
+        */
+
+       /* monotime of client creation */
+       _Atomic uint32_t connect_time;
+       /* monotime of last message received */
+       _Atomic uint32_t last_read_time;
+       /* monotime of last message sent */
+       _Atomic uint32_t last_write_time;
+       /* command code of last message read */
+       _Atomic uint16_t last_read_cmd;
+       /* command code of last message written */
+       _Atomic uint16_t last_write_cmd;
 };
 
 #define ZAPI_HANDLER_ARGS                                                      \
@@ -145,8 +160,8 @@ struct zserv {
                struct zebra_vrf *zvrf
 
 /* Hooks for client connect / disconnect */
-DECLARE_HOOK(zapi_client_connect, (struct zserv *client), (client));
-DECLARE_KOOH(zapi_client_close, (struct zserv *client), (client));
+DECLARE_HOOK(zserv_client_connect, (struct zserv *client), (client));
+DECLARE_KOOH(zserv_client_close, (struct zserv *client), (client));
 
 /* Zebra instance */
 struct zebra_t {
@@ -154,6 +169,9 @@ struct zebra_t {
        struct thread_master *master;
        struct list *client_list;
 
+       /* Socket */
+       int sock;
+
        /* default table */
        uint32_t rtm_table_default;
 
@@ -165,18 +183,54 @@ struct zebra_t {
        /* LSP work queue */
        struct work_queue *lsp_process_q;
 
-#define ZEBRA_ZAPI_PACKETS_TO_PROCESS 10
-       uint32_t packets_to_process;
+#define ZEBRA_ZAPI_PACKETS_TO_PROCESS 1000
+       _Atomic uint32_t packets_to_process;
 };
 extern struct zebra_t zebrad;
 extern unsigned int multipath_num;
 
-/* Prototypes. */
+/*
+ * Initialize Zebra API server.
+ *
+ * Installs CLI commands and creates the client list.
+ */
 extern void zserv_init(void);
-extern void zebra_zserv_socket_init(char *path);
-extern int zebra_server_send_message(struct zserv *client, struct stream *msg);
 
-extern struct zserv *zebra_find_client(uint8_t proto, unsigned short instance);
+/*
+ * Start Zebra API server.
+ *
+ * Allocates resources, creates the server socket and begins listening on the
+ * socket.
+ *
+ * path
+ *    where to place the Unix domain socket
+ */
+extern void zserv_start(char *path);
+
+/*
+ * Send a message to a connected Zebra API client.
+ *
+ * client
+ *    the client to send to
+ *
+ * msg
+ *    the message to send
+ */
+extern int zserv_send_message(struct zserv *client, struct stream *msg);
+
+/*
+ * Retrieve a client by its protocol and instance number.
+ *
+ * proto
+ *    protocol number
+ *
+ * instance
+ *    instance number
+ *
+ * Returns:
+ *    The Zebra API client.
+ */
+extern struct zserv *zserv_find_client(uint8_t proto, unsigned short instance);
 
 #if defined(HANDLE_ZAPI_FUZZING)
 extern void zserv_read_file(char *input);