--- /dev/null
+name: Warn before merging if a "freeze" label exists
+
+on:
+ pull_request_target:
+ types: [synchronize, opened, reopened, labeled, unlabeled]
+
+jobs:
+ freeze_warning:
+ if: ${{ contains(github.event.*.labels.*.name, 'freeze') }}
+ name: Warn before merging if a "freeze" label exists
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check for "freeze" label
+ run: |
+ echo "Pull request is labeled as 'freeze'"
+ echo "This workflow fails so that the pull request cannot be merged."
+ exit 1
rc = setsockopt(protocol_socket, IPPROTO_IPV6, IPV6_JOIN_GROUP,
(char*)&mreq, sizeof(mreq));
- if(rc < 0) {
- flog_err_sys(EC_LIB_SOCKET,
- "setsockopt(IPV6_JOIN_GROUP) on interface '%s': %s",
- ifp->name, safe_strerror(errno));
- /* This is probably due to a missing link-local address,
- so down this interface, and wait until the main loop
- tries to up it again. */
- interface_reset(ifp);
- return -1;
+ if (rc < 0 && errno != EADDRINUSE) {
+ flog_err_sys(EC_LIB_SOCKET,
+ "setsockopt(IPV6_JOIN_GROUP) on interface '%s': %s",
+ ifp->name, safe_strerror(errno));
+ /* This is probably due to a missing link-local address,
+ so down this interface, and wait until the main loop
+ tries to up it again. */
+ interface_reset(ifp);
+ return -1;
}
set_timeout(&babel_ifp->hello_timeout, babel_ifp->hello_interval);
}
/* Threads. */
- thread_add_read(master, &babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
+ thread_add_read(master, babel_read_protocol, NULL, protocol_socket, &babel_routing_process->t_read);
/* wait a little: zebra will announce interfaces, addresses, routes... */
thread_add_timer_msec(master, babel_init_routing_process, NULL, 200L, &babel_routing_process->t_update);
}
}
+PRINTFRR(6, 7)
static void cp_debug(bool mhop, struct sockaddr_any *peer,
struct sockaddr_any *local, ifindex_t ifindex,
vrf_id_t vrfid, const char *fmt, ...)
/* Implement RFC 5880 6.8.6 */
if (mlen < BFD_PKT_LEN) {
cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
- "too small (%ld bytes)", mlen);
+ "too small (%zd bytes)", mlen);
return;
}
/*
* Functions
*/
+PRINTFRR(2, 3)
static void debug_printbpc(const struct bfd_peer_cfg *bpc, const char *fmt, ...)
{
char timers[3][128] = {};
}
/* Printing functions */
-/* Feed the AS_PATH to the vty; the suffix string follows it only in case
+/* Feed the AS_PATH to the vty; the space suffix follows it only in case
* AS_PATH wasn't empty.
*/
-void aspath_print_vty(struct vty *vty, const char *format, struct aspath *as,
- const char *suffix)
+void aspath_print_vty(struct vty *vty, struct aspath *as)
{
- assert(format);
- vty_out(vty, format, as->str);
- if (as->str_len && strlen(suffix))
- vty_out(vty, "%s", suffix);
+ vty_out(vty, "%s%s", as->str, as->str_len ? " " : "");
}
static void aspath_show_all_iterator(struct hash_bucket *bucket,
extern struct aspath *aspath_intern(struct aspath *aspath);
extern void aspath_unintern(struct aspath **aspath);
extern const char *aspath_print(struct aspath *aspath);
-extern void aspath_print_vty(struct vty *vty, const char *format,
- struct aspath *aspath, const char *suffix);
+extern void aspath_print_vty(struct vty *vty, struct aspath *aspath);
extern void aspath_print_all_vty(struct vty *vty);
extern unsigned int aspath_key_make(const void *p);
extern unsigned int aspath_get_first_as(struct aspath *aspath);
if (bgp_dump->filename[0] != DIRECTORY_SEP) {
snprintf(fullpath, sizeof(fullpath), "%s/%s", vty_get_cwd(),
bgp_dump->filename);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* user supplied date/time format string */
ret = strftime(realpath, MAXPATHLEN, fullpath, &tm);
} else
ret = strftime(realpath, MAXPATHLEN, bgp_dump->filename, &tm);
+#pragma GCC diagnostic pop
if (ret == 0) {
flog_warn(EC_BGP_DUMP, "%s: strftime error", __func__);
* Note: Route re-advertisement happens elsewhere after other processing
* other changes.
*/
-static int handle_tunnel_ip_change(struct bgp *bgp, struct bgpevpn *vpn,
- struct in_addr originator_ip)
+static void handle_tunnel_ip_change(struct bgp *bgp, struct bgpevpn *vpn,
+ struct in_addr originator_ip)
{
struct prefix_evpn p;
+ if (IPV4_ADDR_SAME(&vpn->originator_ip, &originator_ip))
+ return;
+
/* If VNI is not live, we only need to update the originator ip */
if (!is_vni_live(vpn)) {
vpn->originator_ip = originator_ip;
- return 0;
+ return;
}
/* Update the tunnel-ip hash */
bgp_tip_del(bgp, &vpn->originator_ip);
- bgp_tip_add(bgp, &originator_ip);
-
- /* filter routes as martian nexthop db has changed */
- bgp_filter_evpn_routes_upon_martian_nh_change(bgp);
+ if (bgp_tip_add(bgp, &originator_ip))
+ /* The originator_ip was not already present in the
+ * bgp martian next-hop table as a tunnel-ip, so we
+ * need to go back and filter routes matching the new
+ * martian next-hop.
+ */
+ bgp_filter_evpn_routes_upon_martian_nh_change(bgp);
/* Need to withdraw type-3 route as the originator IP is part
* of the key.
/* Update the tunnel IP and re-advertise all routes for this VNI. */
vpn->originator_ip = originator_ip;
- return 0;
+ return;
}
static struct bgp_path_info *
mpls_label_t label[BGP_MAX_LABELS] = {};
uint32_t num_labels = 0;
uint32_t eth_tag;
- int ret;
+ int ret = 0;
/* Type-2 route should be either 33, 37 or 49 bytes or an
* additional 3 bytes if there is a second label (VNI):
/* Process the route. */
if (attr)
- ret = bgp_update(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, &label[0], num_labels, 0, &evpn);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ &label[0], num_labels, 0, &evpn);
else
- ret = bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, &label[0], num_labels, &evpn);
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ &label[0], num_labels, &evpn);
goto done;
fail:
struct prefix_evpn p;
uint8_t ipaddr_len;
uint32_t eth_tag;
- int ret;
/* Type-3 route should be either 17 or 29 bytes: RD (8), Eth Tag (4),
* IP len (1) and IP (4 or 16).
/* Process the route. */
if (attr)
- ret = bgp_update(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, NULL, 0, 0, NULL);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL,
+ 0, 0, NULL);
else
- ret = bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, NULL, 0, NULL);
- return ret;
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ NULL, 0, NULL);
+ return 0;
}
/*
uint8_t ippfx_len;
uint32_t eth_tag;
mpls_label_t label; /* holds the VNI as in the packet */
- int ret;
bool is_valid_update = true;
/* Type-5 route should be 34 or 58 bytes:
/* Process the route. */
if (attr && is_valid_update)
- ret = bgp_update(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, &label, 1, 0, &evpn);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ &label, 1, 0, &evpn);
else {
if (!is_valid_update) {
char attr_str[BUFSIZ] = {0};
peer->hostname, peer->bgp->vrf_id, &p,
attr_str);
}
- ret = bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, &label, 1, &evpn);
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ &label, 1, &evpn);
}
- return ret;
+ return 0;
}
static void evpn_mpattr_encode_type5(struct stream *s, const struct prefix *p,
/* If tunnel endpoint IP has changed, update (and delete prior
* type-3 route, if needed.)
*/
- if (!IPV4_ADDR_SAME(&vpn->originator_ip, &originator_ip))
- handle_tunnel_ip_change(bgp, vpn, originator_ip);
+ handle_tunnel_ip_change(bgp, vpn, originator_ip);
/* Update all routes with new endpoint IP and/or export RT
* for VRFs
SET_FLAG(vpn->flags, VNI_FLAG_LIVE);
/* tunnel is now active, add tunnel-ip to db */
- bgp_tip_add(bgp, &originator_ip);
-
- /* filter routes as nexthop database has changed */
- bgp_filter_evpn_routes_upon_martian_nh_change(bgp);
+ if (bgp_tip_add(bgp, &originator_ip))
+ /* The originator_ip was not already present in the
+ * bgp martian next-hop table as a tunnel-ip, so we
+ * need to go back and filter routes matching the new
+ * martian next-hop.
+ */
+ bgp_filter_evpn_routes_upon_martian_nh_change(bgp);
/*
* Create EVPN type-3 route and schedule for processing.
struct attr *attr, uint8_t *pfx, int psize,
uint32_t addpath_id)
{
- int ret;
esi_t esi;
uint8_t ipaddr_len;
struct in_addr vtep_ip;
build_evpn_type4_prefix(&p, &esi, vtep_ip);
/* Process the route. */
if (attr) {
- ret = bgp_update(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, NULL, 0, 0, NULL);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL,
+ 0, 0, NULL);
} else {
- ret = bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, NULL, 0, NULL);
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ NULL, 0, NULL);
}
- return ret;
+ return 0;
}
/* Check if a prefix belongs to the local ES */
struct attr *attr, uint8_t *pfx, int psize,
uint32_t addpath_id)
{
- int ret;
struct prefix_rd prd;
esi_t esi;
uint32_t eth_tag;
build_evpn_type1_prefix(&p, eth_tag, &esi, vtep_ip);
/* Process the route. */
if (attr) {
- ret = bgp_update(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, NULL, 0, 0, NULL);
+ bgp_update(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd, NULL,
+ 0, 0, NULL);
} else {
- ret = bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr,
- afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- &prd, NULL, 0, NULL);
+ bgp_withdraw(peer, (struct prefix *)&p, addpath_id, attr, afi,
+ safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, &prd,
+ NULL, 0, NULL);
}
- return ret;
+ return 0;
}
void bgp_evpn_mh_config_ead_export_rt(struct bgp *bgp,
evpn_show_all_routes(vty, bgp, type, json, detail);
- if (uj)
- vty_json(vty, json);
+ if (uj) {
+ if (detail) {
+ vty_out(vty, "%s\n", json_object_to_json_string(json));
+ json_object_free(json);
+ } else {
+ vty_json(vty, json);
+ }
+ }
+
return CMD_SUCCESS;
}
safi_t safi;
int psize = 0;
struct prefix p;
- int ret;
void *temp;
/* Start processing the NLRI - there may be multiple in the MP_REACH */
}
/* Process the route. */
if (!withdraw)
- ret = bgp_update(peer, &p, 0, attr,
- afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- NULL, NULL, 0, 0, NULL);
+ bgp_update(peer, &p, 0, attr, afi, safi,
+ ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
+ NULL, 0, 0, NULL);
else
- ret = bgp_withdraw(peer, &p, 0, attr,
- afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- NULL, NULL, 0, NULL);
- if (ret) {
- flog_err(EC_BGP_FLOWSPEC_INSTALLATION,
- "Flowspec NLRI failed to be %s.",
- attr ? "added" : "withdrawn");
- return BGP_NLRI_PARSE_ERROR;
- }
+ bgp_withdraw(peer, &p, 0, attr, afi, safi,
+ ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
+ NULL, 0, NULL);
}
return BGP_NLRI_PARSE_OK;
}
}
}
+ if (peer->hostname) {
+ XFREE(MTYPE_BGP_PEER_HOST, peer->hostname);
+ peer->hostname = NULL;
+ }
if (from_peer->hostname != NULL) {
- if (peer->hostname) {
- XFREE(MTYPE_BGP_PEER_HOST, peer->hostname);
- peer->hostname = NULL;
- }
-
peer->hostname = from_peer->hostname;
from_peer->hostname = NULL;
}
+ if (peer->domainname) {
+ XFREE(MTYPE_BGP_PEER_HOST, peer->domainname);
+ peer->domainname = NULL;
+ }
if (from_peer->domainname != NULL) {
- if (peer->domainname) {
- XFREE(MTYPE_BGP_PEER_HOST, peer->domainname);
- peer->domainname = NULL;
- }
-
peer->domainname = from_peer->domainname;
from_peer->domainname = NULL;
}
memcpy(&evpn, bgp_attr_get_evpn_overlay(pi->attr),
sizeof(evpn));
- int32_t ret = bgp_update(peer, p,
- pi->addpath_rx_id,
- pi->attr, AFI_L2VPN, SAFI_EVPN,
- ZEBRA_ROUTE_BGP,
- BGP_ROUTE_NORMAL, &prd,
- label_pnt, num_labels,
- 1, evpn);
-
- if (ret < 0)
- bgp_dest_unlock_node(dest);
+ bgp_update(peer, p, pi->addpath_rx_id, pi->attr,
+ AFI_L2VPN, SAFI_EVPN, ZEBRA_ROUTE_BGP,
+ BGP_ROUTE_NORMAL, &prd, label_pnt,
+ num_labels, 1, evpn);
}
}
}
bgp->tip_hash = NULL;
}
-void bgp_tip_add(struct bgp *bgp, struct in_addr *tip)
+/* Add/Update a Tunnel-IP entry in the bgp martian next-hop table.
+ *
+ * Returns true only if we add a _new_ TIP, so the caller knows that an
+ * actionable change has occurred. If we find an existing TIP then we
+ * only need to update the refcnt, since the collection of known TIPs
+ * has not changed.
+ */
+bool bgp_tip_add(struct bgp *bgp, struct in_addr *tip)
{
struct tip_addr tmp;
struct tip_addr *addr;
+ bool tip_added = false;
tmp.addr = *tip;
- addr = hash_get(bgp->tip_hash, &tmp, bgp_tip_hash_alloc);
+ addr = hash_lookup(bgp->tip_hash, &tmp);
+ if (!addr) {
+ addr = hash_get(bgp->tip_hash, &tmp, bgp_tip_hash_alloc);
+ tip_added = true;
+ }
+
addr->refcnt++;
+
+ return tip_added;
}
void bgp_tip_del(struct bgp *bgp, struct in_addr *tip)
extern void bgp_scan_vty_init(void);
extern void bgp_address_init(struct bgp *bgp);
extern void bgp_address_destroy(struct bgp *bgp);
-extern void bgp_tip_add(struct bgp *bgp, struct in_addr *tip);
+extern bool bgp_tip_add(struct bgp *bgp, struct in_addr *tip);
extern void bgp_tip_del(struct bgp *bgp, struct in_addr *tip);
extern void bgp_tip_hash_init(struct bgp *bgp);
extern void bgp_tip_hash_destroy(struct bgp *bgp);
return false;
}
-int bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
- struct attr *attr, afi_t afi, safi_t safi, int type,
- int sub_type, struct prefix_rd *prd, mpls_label_t *label,
- uint32_t num_labels, int soft_reconfig,
- struct bgp_route_evpn *evpn)
+void bgp_update(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
+ struct attr *attr, afi_t afi, safi_t safi, int type,
+ int sub_type, struct prefix_rd *prd, mpls_label_t *label,
+ uint32_t num_labels, int soft_reconfig,
+ struct bgp_route_evpn *evpn)
{
int ret;
int aspath_loop_count = 0;
bgp_dest_unlock_node(dest);
bgp_attr_unintern(&attr_new);
- return 0;
+ return;
}
/* Withdraw/Announce before we fully processed the withdraw */
ret = bgp_damp_update(pi, dest, afi, safi);
if (ret == BGP_DAMP_SUPPRESSED) {
bgp_dest_unlock_node(dest);
- return 0;
+ return;
}
}
bgp_unlink_nexthop(pi);
bgp_path_info_delete(dest, pi);
}
- return 0;
+ return;
} // End of implicit withdraw
/* Received Logging. */
bgp_path_info_delete(dest, new);
}
- return 0;
+ return;
/* This BGP update is filtered. Log the reason then update BGP
entry. */
}
#endif
- return 0;
+ return;
}
-int bgp_withdraw(struct peer *peer, const struct prefix *p, uint32_t addpath_id,
- struct attr *attr, afi_t afi, safi_t safi, int type,
- int sub_type, struct prefix_rd *prd, mpls_label_t *label,
- uint32_t num_labels, struct bgp_route_evpn *evpn)
+void bgp_withdraw(struct peer *peer, const struct prefix *p,
+ uint32_t addpath_id, struct attr *attr, afi_t afi,
+ safi_t safi, int type, int sub_type, struct prefix_rd *prd,
+ mpls_label_t *label, uint32_t num_labels,
+ struct bgp_route_evpn *evpn)
{
struct bgp *bgp;
char pfx_buf[BGP_PRD_PATH_STRLEN];
peer->host, pfx_buf);
}
bgp_dest_unlock_node(dest);
- return 0;
+ return;
}
/* Lookup withdrawn route. */
/* Unlock bgp_node_get() lock. */
bgp_dest_unlock_node(dest);
- return 0;
+ return;
}
void bgp_default_originate(struct peer *peer, afi_t afi, safi_t safi,
}
}
-static int bgp_soft_reconfig_table_update(struct peer *peer,
- struct bgp_dest *dest,
- struct bgp_adj_in *ain, afi_t afi,
- safi_t safi, struct prefix_rd *prd)
+static void bgp_soft_reconfig_table_update(struct peer *peer,
+ struct bgp_dest *dest,
+ struct bgp_adj_in *ain, afi_t afi,
+ safi_t safi, struct prefix_rd *prd)
{
struct bgp_path_info *pi;
uint32_t num_labels = 0;
else
memset(&evpn, 0, sizeof(evpn));
- return bgp_update(peer, bgp_dest_get_prefix(dest), ain->addpath_rx_id,
- ain->attr, afi, safi, ZEBRA_ROUTE_BGP,
- BGP_ROUTE_NORMAL, prd, label_pnt, num_labels, 1,
- &evpn);
+ bgp_update(peer, bgp_dest_get_prefix(dest), ain->addpath_rx_id,
+ ain->attr, afi, safi, ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, prd,
+ label_pnt, num_labels, 1, &evpn);
}
static void bgp_soft_reconfig_table(struct peer *peer, afi_t afi, safi_t safi,
struct bgp_table *table,
struct prefix_rd *prd)
{
- int ret;
struct bgp_dest *dest;
struct bgp_adj_in *ain;
if (ain->peer != peer)
continue;
- ret = bgp_soft_reconfig_table_update(peer, dest, ain,
- afi, safi, prd);
-
- if (ret < 0) {
- bgp_dest_unlock_node(dest);
- return;
- }
+ bgp_soft_reconfig_table_update(peer, dest, ain, afi,
+ safi, prd);
}
}
static void bgp_soft_reconfig_table_task(struct thread *thread)
{
uint32_t iter, max_iter;
- int ret;
struct bgp_dest *dest;
struct bgp_adj_in *ain;
struct peer *peer;
if (ain->peer != peer)
continue;
- ret = bgp_soft_reconfig_table_update(
+ bgp_soft_reconfig_table_update(
peer, dest, ain, table->afi,
table->safi, prd);
iter++;
-
- if (ret < 0) {
- bgp_dest_unlock_node(dest);
- listnode_delete(
- table->soft_reconfig_peers,
- peer);
- bgp_announce_route(peer, table->afi,
- table->safi, false);
- if (list_isempty(
- table->soft_reconfig_peers)) {
- list_delete(
- &table->soft_reconfig_peers);
- bgp_soft_reconfig_table_flag(
- table, false);
- return;
- }
- }
}
}
}
uint8_t *lim;
struct prefix p;
int psize;
- int ret;
afi_t afi;
safi_t safi;
bool addpath_capable;
/* Normal process. */
if (attr)
- ret = bgp_update(peer, &p, addpath_id, attr, afi, safi,
- ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL,
- NULL, NULL, 0, 0, NULL);
+ bgp_update(peer, &p, addpath_id, attr, afi, safi,
+ ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
+ NULL, 0, 0, NULL);
else
- ret = bgp_withdraw(peer, &p, addpath_id, attr, afi,
- safi, ZEBRA_ROUTE_BGP,
- BGP_ROUTE_NORMAL, NULL, NULL, 0,
- NULL);
+ bgp_withdraw(peer, &p, addpath_id, attr, afi, safi,
+ ZEBRA_ROUTE_BGP, BGP_ROUTE_NORMAL, NULL,
+ NULL, 0, NULL);
/* Do not send BGP notification twice when maximum-prefix count
* overflow. */
if (CHECK_FLAG(peer->sflags, PEER_STATUS_PREFIX_OVERFLOW))
return BGP_NLRI_PARSE_ERROR_PREFIX_OVERFLOW;
-
- /* Address family configuration mismatch. */
- if (ret < 0)
- return BGP_NLRI_PARSE_ERROR_ADDRESS_FAMILY;
}
/* Packet length consistency check. */
json_object_string_add(json_path, "path",
attr->aspath->str);
else
- aspath_print_vty(vty, "%s", attr->aspath, " ");
+ aspath_print_vty(vty, attr->aspath);
}
/* Print origin */
/* Print aspath */
if (attr->aspath)
- aspath_print_vty(vty, "%s", attr->aspath, " ");
+ aspath_print_vty(vty, attr->aspath);
/* Print origin */
vty_out(vty, "%s", bgp_origin_str[attr->origin]);
use_json, NULL));
if (attr->aspath)
- aspath_print_vty(vty, "%s", attr->aspath, " ");
+ aspath_print_vty(vty, attr->aspath);
vty_out(vty, "%s", bgp_origin_str[attr->origin]);
vty_out(vty, "%*s ", 8, " ");
if (attr->aspath)
- aspath_print_vty(vty, "%s", attr->aspath, " ");
+ aspath_print_vty(vty, attr->aspath);
vty_out(vty, "%s", bgp_origin_str[attr->origin]);
attr->aspath->json);
} else {
if (attr->aspath->segments)
- aspath_print_vty(vty, " %s", attr->aspath, "");
+ vty_out(vty, " %s", attr->aspath->str);
else
vty_out(vty, " Local");
}
const char *, const char *, const char *);
/* this is primarily for MPLS-VPN */
-extern int bgp_update(struct peer *peer, const struct prefix *p,
- uint32_t addpath_id, struct attr *attr,
- afi_t afi, safi_t safi, int type, int sub_type,
- struct prefix_rd *prd, mpls_label_t *label,
- uint32_t num_labels, int soft_reconfig,
- struct bgp_route_evpn *evpn);
-extern int bgp_withdraw(struct peer *peer, const struct prefix *p,
- uint32_t addpath_id, struct attr *attr, afi_t afi,
- safi_t safi, int type, int sub_type,
- struct prefix_rd *prd, mpls_label_t *label,
- uint32_t num_labels, struct bgp_route_evpn *evpn);
+extern void bgp_update(struct peer *peer, const struct prefix *p,
+ uint32_t addpath_id, struct attr *attr, afi_t afi,
+ safi_t safi, int type, int sub_type,
+ struct prefix_rd *prd, mpls_label_t *label,
+ uint32_t num_labels, int soft_reconfig,
+ struct bgp_route_evpn *evpn);
+extern void bgp_withdraw(struct peer *peer, const struct prefix *p,
+ uint32_t addpath_id, struct attr *attr, afi_t afi,
+ safi_t safi, int type, int sub_type,
+ struct prefix_rd *prd, mpls_label_t *label,
+ uint32_t num_labels, struct bgp_route_evpn *evpn);
/* for bgp_nexthop and bgp_damp */
extern void bgp_process(struct bgp *, struct bgp_dest *, afi_t, safi_t);
if (json)
json_object_string_add(json, "remoteGrMode", mode);
else
- vty_out(vty, mode, "\n");
+ vty_out(vty, "%s\n", mode);
}
static void bgp_show_neighbor_graceful_restart_local_mode(struct vty *vty,
if (json)
json_object_string_add(json, "localGrMode", mode);
else
- vty_out(vty, mode, "\n");
+ vty_out(vty, "%s\n", mode);
}
static void bgp_show_neighbor_graceful_restart_capability_per_afi_safi(
XFREE(MTYPE_PEER_DESC, peer->desc);
XFREE(MTYPE_BGP_PEER_HOST, peer->host);
+ XFREE(MTYPE_BGP_PEER_HOST, peer->hostname);
XFREE(MTYPE_BGP_PEER_HOST, peer->domainname);
XFREE(MTYPE_BGP_PEER_IFNAME, peer->ifname);
return 0;
}
+PRINTFRR(2, 3)
static int rfapiStdioPrintf(void *stream, const char *format, ...)
{
FILE *file = NULL;
AC_C_FLAG([-funwind-tables])
AC_C_FLAG([-Wall])
AC_C_FLAG([-Wextra])
+AC_C_FLAG([-Wformat-nonliteral])
+AC_C_FLAG([-Wformat-security])
AC_C_FLAG([-Wstrict-prototypes])
AC_C_FLAG([-Wmissing-prototypes])
AC_C_FLAG([-Wmissing-declarations])
vrf_name);
nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL);
- ret = nb_cli_apply_changes(vty, base_xpath);
+ ret = nb_cli_apply_changes(vty, "%s", base_xpath);
if (ret == CMD_SUCCESS)
VTY_PUSH_XPATH(ISIS_NODE, base_xpath);
overload ? "true" : "false");
}
- return nb_cli_apply_changes(vty, base_xpath);
+ return nb_cli_apply_changes(vty, "%s", base_xpath);
}
void cli_show_isis_mt_ipv4_multicast(struct vty *vty,
vlog(LOG_ERR, emsg, ap);
logit(LOG_ERR, "%s", strerror(errno));
} else {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* format extended above */
vlog(LOG_ERR, nfmt, ap);
+#pragma GCC diagnostic pop
free(nfmt);
}
va_end(ap);
struct thread_master *tm;
/** Pointer to zebra client data structure. */
struct zclient *zc;
- /** Zebra next hop tracking (NHT) client. */
- struct zclient *nht_zclient;
/** Debugging state. */
bool debugging;
/*
* Prototypes
*/
-static void bfd_nht_zclient_connect(struct thread *thread);
-
-static void bfd_nht_zclient_connected(struct zclient *zclient);
-static int bfd_nht_update(ZAPI_CALLBACK_ARGS);
static void bfd_source_cache_get(struct bfd_session_params *session);
static void bfd_source_cache_put(struct bfd_session_params *session);
-static inline void
-bfd_source_cache_register(const struct bfd_source_cache *source)
-{
- zclient_send_rnh(bsglobal.nht_zclient, ZEBRA_NEXTHOP_REGISTER,
- &source->address, SAFI_UNICAST, false, false,
- source->vrf_id);
-}
-
-static inline void
-bfd_source_cache_unregister(const struct bfd_source_cache *source)
-{
- zclient_send_rnh(bsglobal.nht_zclient, ZEBRA_NEXTHOP_UNREGISTER,
- &source->address, SAFI_UNICAST, false, false,
- source->vrf_id);
-}
-
-
/*
* bfd_get_peer_info - Extract the Peer information for which the BFD session
* went down from the message sent from Zebra to clients.
if (!SLIST_EMPTY(&bsglobal.source_list))
zlog_warn("BFD integration source cache not empty");
- zclient_stop(bsglobal.nht_zclient);
- zclient_free(bsglobal.nht_zclient);
-
return 0;
}
-static zclient_handler *const bfd_nht_handlers[] = {
- [ZEBRA_NEXTHOP_UPDATE] = bfd_nht_update,
-};
-
void bfd_protocol_integration_init(struct zclient *zc, struct thread_master *tm)
{
- struct zclient_options bfd_nht_options = zclient_options_default;
-
/* Initialize data structure. */
TAILQ_INIT(&bsglobal.bsplist);
SLIST_INIT(&bsglobal.source_list);
/* Send the client registration */
bfd_client_sendmsg(zc, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
- /* Start NHT client (for automatic source decisions). */
- bsglobal.nht_zclient =
- zclient_new(tm, &bfd_nht_options, bfd_nht_handlers,
- array_size(bfd_nht_handlers));
- bsglobal.nht_zclient->sock = -1;
- bsglobal.nht_zclient->privs = zc->privs;
- bsglobal.nht_zclient->zebra_connected = bfd_nht_zclient_connected;
- thread_add_timer(tm, bfd_nht_zclient_connect, bsglobal.nht_zclient, 1,
- &bsglobal.nht_zclient->t_connect);
-
hook_register(frr_fini, bfd_protocol_integration_finish);
}
session->source_cache = source;
source->refcount = 1;
- bfd_source_cache_register(source);
-
return;
}
return;
}
- bfd_source_cache_unregister(session->source_cache);
SLIST_REMOVE(&bsglobal.source_list, session->source_cache,
bfd_source_cache, entry);
XFREE(MTYPE_BFD_SOURCE, session->source_cache);
return false;
}
-static void bfd_nht_zclient_connect(struct thread *thread)
-{
- struct zclient *zclient = THREAD_ARG(thread);
-
- if (bsglobal.debugging)
- zlog_debug("BFD NHT zclient connection attempt");
-
- if (zclient_start(zclient) == -1) {
- if (bsglobal.debugging)
- zlog_debug("BFD NHT zclient connection failed");
-
- thread_add_timer(bsglobal.tm, bfd_nht_zclient_connect, zclient,
- 3, &zclient->t_connect);
- return;
- }
-
- if (bsglobal.debugging)
- zlog_debug("BFD NHT zclient connection succeeded");
-}
-
-static void bfd_nht_zclient_connected(struct zclient *zclient)
-{
- struct bfd_source_cache *source;
-
- if (bsglobal.debugging)
- zlog_debug("BFD NHT zclient connected");
-
- SLIST_FOREACH (source, &bsglobal.source_list, entry)
- bfd_source_cache_register(source);
-}
-
-static int bfd_nht_update(ZAPI_CALLBACK_ARGS)
+int bfd_nht_update(const struct prefix *match, const struct zapi_route *route)
{
struct bfd_source_cache *source;
- struct zapi_route route;
- struct prefix match;
-
- if (!zapi_nexthop_update_decode(zclient->ibuf, &match, &route)) {
- zlog_warn("BFD NHT update decode failure");
- return 0;
- }
- if (cmd != ZEBRA_NEXTHOP_UPDATE)
- return 0;
if (bsglobal.debugging)
- zlog_debug("BFD NHT update for %pFX", &route.prefix);
+ zlog_debug("BFD NHT update for %pFX", &route->prefix);
SLIST_FOREACH (source, &bsglobal.source_list, entry) {
- if (source->vrf_id != route.vrf_id)
+ if (source->vrf_id != route->vrf_id)
continue;
- if (!prefix_same(&match, &source->address))
+ if (!prefix_same(match, &source->address))
continue;
- if (bfd_source_cache_update(source, &route))
+ if (bfd_source_cache_update(source, route))
bfd_source_cache_update_sessions(source);
}
*/
extern bool bfd_protocol_integration_shutting_down(void);
+/* Update nexthop-tracking (NHT) information for BFD automatic source
+ * selection. This function must be called from the daemon's callback
+ * that handles the ZEBRA_NEXTHOP_UPDATE zclient command.
+ */
+extern int bfd_nht_update(const struct prefix *match,
+ const struct zapi_route *route);
+
#ifdef __cplusplus
}
#endif
#include "log.h"
+PRINTFRR(3, 0)
void vzlogx(const struct xref_logmsg *xref, int prio,
const char *format, va_list args)
{
extern int cmd_banner_motd_file(const char *);
extern void cmd_banner_motd_line(const char *line);
-/* struct host global, ick */
-extern struct host host;
-
struct cmd_variable_handler {
const char *tokenname, *varname;
void (*completions)(vector out, struct cmd_token *token);
return ferr_ok();
}
+PRINTFRR(7, 0)
static ferr_r ferr_set_va(const char *file, int line, const char *func,
enum ferr_kind kind, const char *pathname,
int errno_val, const char *text, va_list va)
/* do NOT call these functions directly. only for macro use! */
ferr_r ferr_set_internal(const char *file, int line, const char *func,
- enum ferr_kind kind, const char *text, ...);
+ enum ferr_kind kind, const char *text, ...)
+ PRINTFRR(5, 6);
ferr_r ferr_set_internal_ext(const char *file, int line, const char *func,
enum ferr_kind kind, const char *pathname,
- int errno_val, const char *text, ...);
+ int errno_val, const char *text, ...)
+ PRINTFRR(7, 8);
#define ferr_ok() 0
#include "vty.h"
/* print error message to vty; $ERR is replaced by the error's message */
-void vty_print_error(struct vty *vty, ferr_r err, const char *msg, ...);
+void vty_print_error(struct vty *vty, ferr_r err, const char *msg, ...)
+ PRINTFRR(3, 4);
#define CMD_FERR_DO(func, action, ...) \
do { \
nb_cli_enqueue_change(vty, "./source-any", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
nb_cli_enqueue_change(vty, "./any", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
remark = argv_concat(argv, argc, 3);
nb_cli_enqueue_change(vty, "./remark", NB_OP_CREATE, remark);
- rv = nb_cli_apply_changes(vty, xpath);
+ rv = nb_cli_apply_changes(vty, "%s", xpath);
XFREE(MTYPE_TMP, remark);
return rv;
nb_cli_enqueue_change(vty, "./any", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
remark = argv_concat(argv, argc, 4);
nb_cli_enqueue_change(vty, "./remark", NB_OP_CREATE, remark);
- rv = nb_cli_apply_changes(vty, xpath);
+ rv = nb_cli_apply_changes(vty, "%s", xpath);
XFREE(MTYPE_TMP, remark);
return rv;
nb_cli_enqueue_change(vty, "./any", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
remark = argv_concat(argv, argc, 4);
nb_cli_enqueue_change(vty, "./remark", NB_OP_CREATE, remark);
- rv = nb_cli_apply_changes(vty, xpath);
+ rv = nb_cli_apply_changes(vty, "%s", xpath);
XFREE(MTYPE_TMP, remark);
return rv;
nb_cli_enqueue_change(vty, "./any", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
remark = argv_concat(argv, argc, 4);
nb_cli_enqueue_change(vty, "./remark", NB_OP_CREATE, remark);
- rv = nb_cli_apply_changes(vty, xpath);
+ rv = nb_cli_apply_changes(vty, "%s", xpath);
XFREE(MTYPE_TMP, remark);
return rv;
nb_cli_enqueue_change(vty, "./any", NB_OP_CREATE, NULL);
}
- return nb_cli_apply_changes(vty, xpath_entry);
+ return nb_cli_apply_changes(vty, "%s", xpath_entry);
}
DEFPY_YANG(
remark = argv_concat(argv, argc, 4);
nb_cli_enqueue_change(vty, "./remark", NB_OP_CREATE, remark);
- rv = nb_cli_apply_changes(vty, xpath);
+ rv = nb_cli_apply_changes(vty, "%s", xpath);
XFREE(MTYPE_TMP, remark);
return rv;
* prefix length <= le.
*/
if (yang_dnode_exists(args->dnode, xpath_le)) {
- le = yang_dnode_get_uint8(args->dnode, xpath_le);
+ le = yang_dnode_get_uint8(args->dnode, "%s", xpath_le);
if (p.prefixlen > le)
goto log_and_fail;
}
* prefix length <= ge.
*/
if (yang_dnode_exists(args->dnode, xpath_ge)) {
- ge = yang_dnode_get_uint8(args->dnode, xpath_ge);
+ ge = yang_dnode_get_uint8(args->dnode, "%s", xpath_ge);
if (p.prefixlen > ge)
goto log_and_fail;
}
*/
if (yang_dnode_exists(args->dnode, xpath_le)
&& yang_dnode_exists(args->dnode, xpath_ge)) {
- le = yang_dnode_get_uint8(args->dnode, xpath_le);
- ge = yang_dnode_get_uint8(args->dnode, xpath_ge);
+ le = yang_dnode_get_uint8(args->dnode, "%s", xpath_le);
+ ge = yang_dnode_get_uint8(args->dnode, "%s", xpath_ge);
if (ge > le)
goto log_and_fail;
}
return YANG_ITER_CONTINUE;
/* Check if different value. */
- if (strcmp(yang_dnode_get_string(dnode, ada->ada_xpath[idx]),
+ if (strcmp(yang_dnode_get_string(dnode, "%s",
+ ada->ada_xpath[idx]),
ada->ada_value[idx]))
return YANG_ITER_CONTINUE;
}
}
ada.ada_xpath[arg_idx] = cisco_entries[idx];
- ada.ada_value[arg_idx] =
- yang_dnode_get_string(entry_dnode, cisco_entries[idx]);
+ ada.ada_value[arg_idx] = yang_dnode_get_string(
+ entry_dnode, "%s", cisco_entries[idx]);
arg_idx++;
idx++;
}
}
ada.ada_xpath[arg_idx] = zebra_entries[idx];
- ada.ada_value[arg_idx] =
- yang_dnode_get_string(entry_dnode, zebra_entries[idx]);
+ ada.ada_value[arg_idx] = yang_dnode_get_string(
+ entry_dnode, "%s", zebra_entries[idx]);
arg_idx++;
idx++;
}
#endif
/* Forward declarations */
-extern struct zebra_dplane_ctx ctx;
+struct zebra_dplane_ctx;
extern void lua_pushzebra_dplane_ctx(lua_State *L,
const struct zebra_dplane_ctx *ctx);
extern void lua_decode_zebra_dplane_ctx(lua_State *L, int idx,
/* Library inits. */
cmd_init(1);
- host.name = strdup("test");
- host.domainname = strdup("testdomainname");
+ cmd_hostname_set("test");
+ cmd_domainname_set("testdomainname");
vty_init(master, true);
lib_cmd_init();
}
nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL);
- ret = nb_cli_apply_changes_clear_pending(vty, xpath_list);
+ ret = nb_cli_apply_changes_clear_pending(vty, "%s", xpath_list);
if (ret == CMD_SUCCESS) {
VTY_PUSH_XPATH(INTERFACE_NODE, xpath_list);
nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL);
- return nb_cli_apply_changes(vty, xpath_list);
+ return nb_cli_apply_changes(vty, "%s", xpath_list);
}
static void netns_ifname_split(const char *xpath, char *ifname, char *vrfname)
* CMD_SUCCESS on success, CMD_WARNING_CONFIG_FAILED otherwise.
*/
extern int nb_cli_apply_changes_clear_pending(struct vty *vty,
- const char *xpath_base_fmt, ...);
+ const char *xpath_base_fmt, ...)
+ PRINTFRR(2, 3);
/*
* Apply enqueued changes to the candidate configuration, this function
* CMD_SUCCESS on success, CMD_WARNING_CONFIG_FAILED otherwise.
*/
extern int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt,
- ...);
+ ...) PRINTFRR(2, 3);
/*
* Execute a YANG RPC or Action.
* when allocating a larger buffer in asnprintfrr()
*/
va_copy(ap, *vaf->va);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* can't format check this */
return vbprintfrr(buf, vaf->fmt, ap);
+#pragma GCC diagnostic pop
}
fmt[4] = ch;
fmt[5] = '\0';
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
snprintf(buf, sizeof(buf), fmt, prec, arg);
+#pragma GCC diagnostic pop
} else {
double arg = GETARG(double);
char fmt[5] = "%.*";
fmt[3] = ch;
fmt[4] = '\0';
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
snprintf(buf, sizeof(buf), fmt, prec, arg);
+#pragma GCC diagnostic pop
}
cp = buf;
/* for proper padding */
*
* @return pointer to the first cell of allocated row
*/
+PRINTFRR(3, 0)
static struct ttable_cell *ttable_insert_row_va(struct ttable *tt, int i,
const char *format, va_list ap)
{
abspad -= row[j].style.border.right_on ? 1 : 0;
/* print text */
- const char *fmt;
if (row[j].style.align == LEFT)
- fmt = "%-*s";
+ pos += sprintf(&buf[pos], "%-*s", abspad,
+ row[j].text);
else
- fmt = "%*s";
-
- pos += sprintf(&buf[pos], fmt, abspad, row[j].text);
+ pos += sprintf(&buf[pos], "%*s", abspad,
+ row[j].text);
/* print right padding */
for (int k = 0; k < row[j].style.rpad; k++)
snprintf(xpath_list, sizeof(xpath_list), FRR_VRF_KEY_XPATH, vrfname);
nb_cli_enqueue_change(vty, xpath_list, NB_OP_CREATE, NULL);
- ret = nb_cli_apply_changes_clear_pending(vty, xpath_list);
+ ret = nb_cli_apply_changes_clear_pending(vty, "%s", xpath_list);
if (ret == CMD_SUCCESS) {
VTY_PUSH_XPATH(VRF_NODE, xpath_list);
vrf = vrf_lookup_by_name(vrfname);
vty_out(vty, "%s", host.motd);
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+/* prompt formatting has a %s in the cmd_node prompt string.
+ *
+ * Also for some reason GCC emits the warning on the end of the function
+ * (optimization maybe?) rather than on the vty_out line, so this pragma
+ * wraps the entire function rather than just the vty_out line.
+ */
+
/* Put out prompt and wait input from user. */
static void vty_prompt(struct vty *vty)
{
vty_out(vty, cmd_prompt(vty->node), cmd_hostname_get());
}
}
+#pragma GCC diagnostic pop
/* Send WILL TELOPT_ECHO to remote server. */
static void vty_will_echo(struct vty *vty)
vty->address);
/* format the prompt */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* prompt formatting has a %s in the cmd_node prompt string */
snprintf(prompt_str, sizeof(prompt_str), cmd_prompt(vty->node),
vty_str);
+#pragma GCC diagnostic pop
/* now log the command */
zlog_notice("%s%s", prompt_str, buf);
XFREE(MTYPE_TIMER_WHEEL, wheel);
}
-int wheel_stop(struct timer_wheel *wheel)
-{
- THREAD_OFF(wheel->timer);
- return 0;
-}
-
-int wheel_start(struct timer_wheel *wheel)
-{
- if (!wheel->timer)
- thread_add_timer_msec(wheel->master, wheel_timer_thread, wheel,
- wheel->nexttime, &wheel->timer);
-
- return 0;
-}
-
int wheel_add_item(struct timer_wheel *wheel, void *item)
{
long long slot;
*/
void wheel_delete(struct timer_wheel *);
-/*
- * Pause the Wheel from running
- */
-int wheel_stop(struct timer_wheel *wheel);
-
-/*
- * Start the wheel running again
- */
-int wheel_start(struct timer_wheel *wheel);
-
/*
* wheel - The Timer wheel being modified
* item - The generic data structure that will be handed
* Schema name of the libyang data node.
*/
extern const char *yang_dnode_get_schema_name(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
/*
* Find a libyang data node by its YANG data path.
* The libyang data node if found, or NULL if not found.
*/
extern struct lyd_node *yang_dnode_getf(const struct lyd_node *dnode,
- const char *path_fmt, ...);
+ const char *path_fmt, ...)
+ PRINTFRR(2, 3);
/*
* Check if a libyang data node exists.
* true if a libyang data node was found, false otherwise.
*/
extern bool yang_dnode_existsf(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
/*
* Iterate over all libyang data nodes that satisfy an XPath query.
*/
void yang_dnode_iterate(yang_dnode_iter_cb cb, void *arg,
const struct lyd_node *dnode, const char *xpath_fmt,
- ...);
+ ...) PRINTFRR(4, 5);
/*
* Check if the libyang data node contains a default value. Non-presence
* true if the data node contains the default value, false otherwise.
*/
extern bool yang_dnode_is_defaultf(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
/*
* Check if the libyang data node and all of its children contain default
* Pointer to yang_data if found, NULL otherwise.
*/
extern struct yang_data *yang_data_list_find(const struct list *list,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
/*
* Create and set up a libyang context (for use by the translator)
if (!mapping)
return YANG_TRANSLATE_NOTFOUND;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* processing format strings from mapping node... */
n = sscanf(xpath, mapping->xpath_from_fmt, keys[0], keys[1], keys[2],
keys[3]);
+#pragma GCC diagnostic pop
if (n < 0) {
flog_warn(EC_LIB_YANG_TRANSLATION_ERROR,
"%s: sscanf() failed: %s", __func__,
return YANG_TRANSLATE_FAILURE;
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* processing format strings from mapping node... */
snprintf(xpath, xpath_len, mapping->xpath_to_fmt, keys[0], keys[1],
keys[2], keys[3]);
+#pragma GCC diagnostic pop
return YANG_TRANSLATE_SUCCESS;
}
} \
} while (0)
+PRINTFRR(2, 0)
static inline const char *
yang_dnode_xpath_get_canon(const struct lyd_node *dnode, const char *xpath_fmt,
va_list ap)
return lyd_get_value(&__dleaf->node);
}
+PRINTFRR(2, 0)
static inline const struct lyd_value *
yang_dnode_xpath_get_value(const struct lyd_node *dnode, const char *xpath_fmt,
va_list ap)
extern bool yang_str2bool(const char *value);
extern struct yang_data *yang_data_new_bool(const char *xpath, bool value);
extern bool yang_dnode_get_bool(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern bool yang_get_default_bool(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern bool yang_get_default_bool(const char *xpath_fmt, ...) PRINTFRR(1, 2);
/* dec64 */
extern double yang_str2dec64(const char *xpath, const char *value);
extern struct yang_data *yang_data_new_dec64(const char *xpath, double value);
extern double yang_dnode_get_dec64(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern double yang_get_default_dec64(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern double yang_get_default_dec64(const char *xpath_fmt, ...) PRINTFRR(1, 2);
/* enum */
extern int yang_str2enum(const char *xpath, const char *value);
extern struct yang_data *yang_data_new_enum(const char *xpath, int value);
extern int yang_dnode_get_enum(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern int yang_get_default_enum(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern int yang_get_default_enum(const char *xpath_fmt, ...) PRINTFRR(1, 2);
/* int8 */
extern int8_t yang_str2int8(const char *value);
extern struct yang_data *yang_data_new_int8(const char *xpath, int8_t value);
extern int8_t yang_dnode_get_int8(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern int8_t yang_get_default_int8(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern int8_t yang_get_default_int8(const char *xpath_fmt, ...) PRINTFRR(1, 2);
/* int16 */
extern int16_t yang_str2int16(const char *value);
extern struct yang_data *yang_data_new_int16(const char *xpath, int16_t value);
extern int16_t yang_dnode_get_int16(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern int16_t yang_get_default_int16(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern int16_t yang_get_default_int16(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* int32 */
extern int32_t yang_str2int32(const char *value);
extern struct yang_data *yang_data_new_int32(const char *xpath, int32_t value);
extern int32_t yang_dnode_get_int32(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern int32_t yang_get_default_int32(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern int32_t yang_get_default_int32(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* int64 */
extern int64_t yang_str2int64(const char *value);
extern struct yang_data *yang_data_new_int64(const char *xpath, int64_t value);
extern int64_t yang_dnode_get_int64(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern int64_t yang_get_default_int64(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern int64_t yang_get_default_int64(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* uint8 */
extern uint8_t yang_str2uint8(const char *value);
extern struct yang_data *yang_data_new_uint8(const char *xpath, uint8_t value);
extern uint8_t yang_dnode_get_uint8(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern uint8_t yang_get_default_uint8(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
+extern uint8_t yang_get_default_uint8(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* uint16 */
extern uint16_t yang_str2uint16(const char *value);
extern struct yang_data *yang_data_new_uint16(const char *xpath,
uint16_t value);
extern uint16_t yang_dnode_get_uint16(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern uint16_t yang_get_default_uint16(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
+extern uint16_t yang_get_default_uint16(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* uint32 */
extern uint32_t yang_str2uint32(const char *value);
extern struct yang_data *yang_data_new_uint32(const char *xpath,
uint32_t value);
extern uint32_t yang_dnode_get_uint32(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern uint32_t yang_get_default_uint32(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
+extern uint32_t yang_get_default_uint32(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* uint64 */
extern uint64_t yang_str2uint64(const char *value);
extern struct yang_data *yang_data_new_uint64(const char *xpath,
uint64_t value);
extern uint64_t yang_dnode_get_uint64(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern uint64_t yang_get_default_uint64(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
+extern uint64_t yang_get_default_uint64(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
/* string */
extern struct yang_data *yang_data_new_string(const char *xpath,
const char *value);
extern const char *yang_dnode_get_string(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
extern void yang_dnode_get_string_buf(char *buf, size_t size,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern const char *yang_get_default_string(const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(4, 5);
+extern const char *yang_get_default_string(const char *xpath_fmt, ...)
+ PRINTFRR(1, 2);
extern void yang_get_default_string_buf(char *buf, size_t size,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(3, 4);
/* binary */
extern struct yang_data *yang_data_new_binary(const char *xpath,
const char *value, size_t len);
extern size_t yang_dnode_get_binary_buf(char *buf, size_t size,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...)
+ PRINTFRR(4, 5);
/* empty */
extern struct yang_data *yang_data_new_empty(const char *xpath);
extern bool yang_dnode_get_empty(const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(2, 3);
/* ip prefix */
extern void yang_str2prefix(const char *value, union prefixptr prefix);
union prefixconstptr prefix);
extern void yang_dnode_get_prefix(struct prefix *prefix,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(3, 4);
extern void yang_get_default_prefix(union prefixptr var, const char *xpath_fmt,
- ...);
+ ...) PRINTFRR(2, 3);
/* ipv4 */
extern void yang_str2ipv4(const char *value, struct in_addr *addr);
const struct in_addr *addr);
extern void yang_dnode_get_ipv4(struct in_addr *addr,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(3, 4);
extern void yang_get_default_ipv4(struct in_addr *var, const char *xpath_fmt,
- ...);
+ ...) PRINTFRR(2, 3);
/* ipv4p */
extern void yang_str2ipv4p(const char *value, union prefixptr prefix);
union prefixconstptr prefix);
extern void yang_dnode_get_ipv4p(union prefixptr prefix,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(3, 4);
extern void yang_get_default_ipv4p(union prefixptr var, const char *xpath_fmt,
- ...);
+ ...) PRINTFRR(2, 3);
/* ipv6 */
extern void yang_str2ipv6(const char *value, struct in6_addr *addr);
const struct in6_addr *addr);
extern void yang_dnode_get_ipv6(struct in6_addr *addr,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(3, 4);
extern void yang_get_default_ipv6(struct in6_addr *var, const char *xpath_fmt,
- ...);
+ ...) PRINTFRR(2, 3);
/* ipv6p */
extern void yang_str2ipv6p(const char *value, union prefixptr prefix);
union prefixconstptr prefix);
extern void yang_dnode_get_ipv6p(union prefixptr prefix,
const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(3, 4);
extern void yang_get_default_ipv6p(union prefixptr var, const char *xpath_fmt,
- ...);
+ ...) PRINTFRR(2, 3);
/* ip */
extern void yang_str2ip(const char *value, struct ipaddr *addr);
extern struct yang_data *yang_data_new_ip(const char *xpath,
const struct ipaddr *addr);
extern void yang_dnode_get_ip(struct ipaddr *addr, const struct lyd_node *dnode,
- const char *xpath_fmt, ...);
-extern void yang_get_default_ip(struct ipaddr *var, const char *xpath_fmt, ...);
+ const char *xpath_fmt, ...) PRINTFRR(3, 4);
+extern void yang_get_default_ip(struct ipaddr *var, const char *xpath_fmt, ...)
+ PRINTFRR(2, 3);
/* mac */
extern struct yang_data *yang_data_new_mac(const char *xpath,
fb.outpos_i = 0;
va_copy(args, msg->args);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* format-string checking is done further up the chain */
need += vbprintfrr(&fb, msg->fmt, args);
+#pragma GCC diagnostic pop
va_end(args);
msg->textlen = need;
fb.outpos_i = 0;
va_copy(args, msg->args);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* same as above */
vbprintfrr(&fb, msg->fmt, args);
+#pragma GCC diagnostic pop
va_end(args);
bputch(&fb, '\n');
struct of_cb_args args = {0};
struct of_cb_pref *p;
- yang_dnode_iterate(iter_objfun_cb, &args, dnode, path);
+ yang_dnode_iterate(iter_objfun_cb, &args, dnode, "%s", path);
for (p = args.first; p != NULL; p = p->next)
fun(p->type, arg);
}
#define MAX_PATH_NAME_SIZE 255
/* pceplib logging callback */
-static int pceplib_logging_cb(int level, const char *fmt, va_list args);
+static int pceplib_logging_cb(int level, const char *fmt, va_list args)
+ PRINTFRR(2, 0);
/* Socket callbacks */
static int pcep_lib_pceplib_socket_read_cb(void *fpt, void **thread, int fd,
#include <stdarg.h>
#include <stdio.h>
+#include "compiler.h"
#include "pcep_utils_logging.h"
/* Forward declaration */
-int pcep_stdout_logger(int priority, const char *format, va_list args);
+int pcep_stdout_logger(int priority, const char *format, va_list args)
+ PRINTFRR(2, 0);
static pcep_logger_func logger_func = pcep_stdout_logger;
static int logging_level_ = LOG_INFO;
"Connection retry period (in seconds)\n")
{
const char *vrfname;
- char xpath[XPATH_MAXLEN];
vrfname = pim_cli_get_vrf_name(vty);
if (vrfname == NULL)
return CMD_WARNING_CONFIG_FAILED;
- snprintf(xpath, sizeof(xpath), FRR_PIM_MSDP_XPATH, "frr-pim:pimd",
- "pim", vrfname, "frr-routing:ipv4");
nb_cli_enqueue_change(vty, "./hold-time", NB_OP_MODIFY, holdtime_str);
nb_cli_enqueue_change(vty, "./keep-alive", NB_OP_MODIFY, keepalive_str);
if (connretry_str)
nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_DESTROY,
NULL);
- nb_cli_apply_changes(vty, xpath);
-
+ nb_cli_apply_changes(vty, FRR_PIM_MSDP_XPATH, "frr-pim:pimd", "pim",
+ vrfname, "frr-routing:ipv4");
return CMD_SUCCESS;
}
IGNORED_IN_NO_STR)
{
const char *vrfname;
- char xpath[XPATH_MAXLEN];
vrfname = pim_cli_get_vrf_name(vty);
if (vrfname == NULL)
return CMD_WARNING_CONFIG_FAILED;
- snprintf(xpath, sizeof(xpath), FRR_PIM_MSDP_XPATH, "frr-pim:pimd",
- "pim", vrfname, "frr-routing:ipv4");
-
nb_cli_enqueue_change(vty, "./hold-time", NB_OP_DESTROY, NULL);
nb_cli_enqueue_change(vty, "./keep-alive", NB_OP_DESTROY, NULL);
nb_cli_enqueue_change(vty, "./connection-retry", NB_OP_DESTROY, NULL);
- nb_cli_apply_changes(vty, xpath);
+ nb_cli_apply_changes(vty, FRR_PIM_MSDP_XPATH, "frr-pim:pimd", "pim",
+ vrfname, "frr-routing:ipv4");
return CMD_SUCCESS;
}
sizeof(rs_timer_xpath));
/* RFC4601 */
- v = yang_dnode_get_uint16(vty->candidate_config->dnode,
+ v = yang_dnode_get_uint16(vty->candidate_config->dnode, "%s",
rs_timer_xpath);
v = 3 * v + PIM_REGISTER_PROBE_TIME_DEFAULT;
if (v > UINT16_MAX)
return NB_OK;
}
- plist = yang_dnode_get_string(plist_dnode, plist_xpath);
+ plist = yang_dnode_get_string(plist_dnode, "%s", plist_xpath);
if (strcmp(prefix_list, plist)) {
vty_out(vty, "%% Unable to find specified RP\n");
return NB_OK;
}
}
- ret = nb_cli_apply_changes(vty, xpath_prefix);
+ ret = nb_cli_apply_changes(vty, "%s", xpath_prefix);
} else {
if (args->source)
snprintf(ab_xpath, sizeof(ab_xpath),
yang_dnode_get_path(dnode, ab_xpath, XPATH_MAXLEN);
nb_cli_enqueue_change(vty, ab_xpath, NB_OP_DESTROY, NULL);
- ret = nb_cli_apply_changes(vty, ab_xpath);
+ ret = nb_cli_apply_changes(vty, "%s", ab_xpath);
}
return ret;
return 1;
}
+ if (zclient->bfd_integration)
+ bfd_nht_update(&matched, &nhr);
+
if (matched.family == AF_INET6)
afi = AFI_IP6;
1.1.1.1
resolved via static
- is directly connected, r1-eth1
+ is directly connected, r1-eth1 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.2
resolved via static
- is directly connected, r1-eth2
+ is directly connected, r1-eth2 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.3
resolved via static
- is directly connected, r1-eth3
+ is directly connected, r1-eth3 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.4
resolved via static
- is directly connected, r1-eth4
+ is directly connected, r1-eth4 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.5
resolved via static
- is directly connected, r1-eth5
+ is directly connected, r1-eth5 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.6
resolved via static
- is directly connected, r1-eth6
+ is directly connected, r1-eth6 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.7
resolved via static
- is directly connected, r1-eth7
+ is directly connected, r1-eth7 (vrf default), weight 1
Client list: pbr(fd XX)
1.1.1.8
resolved via static
- is directly connected, r1-eth8
+ is directly connected, r1-eth8 (vrf default), weight 1
Client list: pbr(fd XX)
2.2.2.1
unresolved
Client list: pbr(fd XX)
192.168.0.2
resolved via connected
- is directly connected, r1-eth0
+ is directly connected, r1-eth0 (vrf default)
Client list: static(fd XX)
192.168.0.4
resolved via connected
- is directly connected, r1-eth0
+ is directly connected, r1-eth0 (vrf default)
Client list: static(fd XX)
192.168.7.10
resolved via connected
- is directly connected, r1-eth7
+ is directly connected, r1-eth7 (vrf default)
Client list: bgp(fd XX)
192.168.7.20(Connected)
resolved via connected
- is directly connected, r1-eth7
+ is directly connected, r1-eth7 (vrf default)
Client list: bgp(fd XX)
192.168.161.4
unresolved
fc00::2
resolved via connected
- is directly connected, r1-eth0
+ is directly connected, r1-eth0 (vrf default)
Client list: static(fd XX)
fc00:0:0:8::1000
resolved via connected
- is directly connected, r1-eth8
+ is directly connected, r1-eth8 (vrf default)
Client list: bgp(fd XX)
fc00:0:0:8::2000(Connected)
resolved via connected
- is directly connected, r1-eth8
+ is directly connected, r1-eth8 (vrf default)
Client list: bgp(fd XX)
-
\ No newline at end of file
+
--- /dev/null
+log file babeld.log
+
+interface r1-eth0
+ babel hello-interval 1000
+ babel wired
+ babel update-interval 50
+!
+interface r1-eth1
+ babel hello-interval 1000
+ babel wired
+ babel update-interval 50
+!
+router babel
+ network r1-eth0
+ network r1-eth1
+ redistribute ipv4 connected
+ redistribute ipv6 connected
+!
+line vty
+!
+
--- /dev/null
+{
+ "192.168.1.0/24":[
+ {
+ "prefix":"192.168.1.0/24",
+ "protocol":"connected",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceName":"r1-eth0",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "192.168.2.0/24":[
+ {
+ "prefix":"192.168.2.0/24",
+ "prefixLen":24,
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.1.2",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth1"
+ }
+ ]
+ }
+ ],
+ "192.168.3.0/24":[
+ {
+ "prefix":"192.168.3.0/24",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.1.2",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "193.1.1.0/26":[
+ {
+ "prefix":"193.1.1.0/26",
+ "protocol":"connected",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceName":"r1-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "193.1.2.0/24":[
+ {
+ "prefix":"193.1.2.0/24",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.1.2",
+ "afi":"ipv4",
+ "interfaceName":"r1-eth1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+! debug zebra rib detail
+!
+hostname r1
+!
+interface r1-eth0
+ ip address 192.168.1.1/24
+ ipv6 address 192:168:1::1/64
+!
+interface r1-eth1
+ description to sw2 - babel interface
+ ip address 193.1.1.1/26
+ ipv6 address 193:1:1::1/64
+ no link-detect
+!
+ip forwarding
+ipv6 forwarding
+!
+!
+line vty
+!
+
--- /dev/null
+log file babeld.log
+!
+interface r2-eth0
+ babel hello-interval 1000
+ babel wired
+ babel update-interval 50
+!
+interface r2-eth1
+ babel hello-interval 1000
+ babel wired
+ babel update-interval 50
+!
+router babel
+ network r2-eth0
+ network r2-eth1
+ redistribute ipv4 connected
+ redistribute ipv6 connected
+!
--- /dev/null
+{
+ "192.168.1.0/24":[
+ {
+ "prefix":"192.168.1.0/24",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.1.1",
+ "afi":"ipv4",
+ "interfaceName":"r2-eth0",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "192.168.2.0/24":[
+ {
+ "prefix":"192.168.2.0/24",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.2.2",
+ "afi":"ipv4",
+ "interfaceName":"r2-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "192.168.3.0/24":[
+ {
+ "prefix":"192.168.3.0/24",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.2.2",
+ "afi":"ipv4",
+ "interfaceName":"r2-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "193.1.1.0/26":[
+ {
+ "prefix":"193.1.1.0/26",
+ "protocol":"connected",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceName":"r2-eth0",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "193.1.2.0/24":[
+ {
+ "prefix":"193.1.2.0/24",
+ "protocol":"connected",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceName":"r2-eth1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname r2
+!
+interface r2-eth0
+ description to sw2 - babel interface
+ ip address 193.1.1.2/26
+ ipv6 address 193:1:1::2/64
+ no link-detect
+!
+interface r2-eth1
+ description to sw3 - babel interface
+ ip address 193.1.2.1/24
+ ipv6 address 193:1:2::1/64
+ no link-detect
+!
+ip forwarding
+ipv6 forwarding
+!
+!
+line vty
+!
+
--- /dev/null
+log file babeld.log
+!
+interface r3-eth0
+ babel hello-interval 1000
+ babel wired
+ babel update-interval 50
+!
+interface r3-eth1
+ babel hello-interval 1000
+ babel wired
+ babel update-interval 50
+!
+router babel
+ network r3-eth0
+ network r3-eth1
+ redistribute ipv4 connected
+ redistribute ipv4 static
+ redistribute ipv6 connected
--- /dev/null
+{
+ "192.168.1.0/24":[
+ {
+ "prefix":"192.168.1.0/24",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.2.1",
+ "afi":"ipv4",
+ "interfaceName":"r3-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "192.168.2.0/24":[
+ {
+ "prefix":"192.168.2.0/24",
+ "protocol":"static",
+ "selected":true,
+ "distance":1,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"192.168.3.10",
+ "afi":"ipv4",
+ "interfaceName":"r3-eth0",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "192.168.3.0/24":[
+ {
+ "prefix":"192.168.3.0/24",
+ "protocol":"connected",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceName":"r3-eth0",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "193.1.1.0/26":[
+ {
+ "prefix":"193.1.1.0/26",
+ "protocol":"babel",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"193.1.2.1",
+ "afi":"ipv4",
+ "interfaceName":"r3-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "193.1.2.0/24":[
+ {
+ "prefix":"193.1.2.0/24",
+ "protocol":"connected",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceName":"r3-eth1",
+ "active":true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+log file zebra.log
+!
+hostname r3
+!
+interface r3-eth0
+ description to sw4 - Stub interface
+ ip address 192.168.3.1/24
+ ipv6 address 192:168:3::1/64
+ no link-detect
+!
+interface r3-eth1
+ description to sw3 - babel interface
+ ip address 193.1.2.2/24
+ ipv6 address 193:1:2::2/64
+ no link-detect
+!
+ip route 192.168.2.0/24 192.168.3.10
+ipv6 route 192:168:2::0/64 192:168:3::10
+!
+ip forwarding
+ipv6 forwarding
+!
+!
+line vty
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_babel_topo1.py
+#
+# Copyright (c) 2017 by
+# Cumulus Networks, Inc.
+# Donald Sharp
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_babel_topo1.py: Testing BABEL
+
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+
+pytestmark = [pytest.mark.babeld]
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+
+#####################################################
+##
+## Network Topology Definition
+##
+#####################################################
+
+
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ # On main router
+ # First switch is for a dummy interface (for local network)
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r1"])
+
+ # Switches for BABEL
+ # switch 2 switch is for connection to BABEL router
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # switch 4 is stub on remote BABEL router
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["r3"])
+
+ # switch 3 is between BABEL routers
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+
+#####################################################
+##
+## Tests starting
+##
+#####################################################
+
+
+def setup_module(module):
+ "Setup topology"
+ tgen = Topogen(build_topo, module.__name__)
+ tgen.start_topology()
+
+ # This is a sample of configuration loading.
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BABEL, os.path.join(CWD, "{}/babeld.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def test_converge_protocols():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ topotest.sleep(10, "Waiting for BABEL convergence")
+
+
+def test_zebra_ipv4_routingTable():
+ "Test 'show ip route'"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ failures = 0
+ router_list = tgen.routers().values()
+ for router in router_list:
+ output = router.vtysh_cmd("show ip route json", isjson=True)
+ refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name)
+ expected = json.loads(open(refTableFile).read())
+
+ assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format(
+ router.name
+ )
+ assert topotest.json_cmp(output, expected) is None, assertmsg
+
+def test_shutdown_check_stderr():
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ pytest.skip("Skipping test for Stderr output and memory leaks")
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Verifying unexpected STDERR output from daemons")
+
+ router_list = tgen.routers().values()
+ for router in router_list:
+ router.stop()
+
+ log = tgen.net[router.name].getStdErr("babeld")
+ if log:
+ logger.error("BABELd StdErr Log:" + log)
+ log = tgen.net[router.name].getStdErr("zebra")
+ if log:
+ logger.error("Zebra StdErr Log:" + log)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
!
+debug bgp neighbor
+!
router bgp 65001
no bgp ebgp-requires-policy
+ bgp default show-hostname
+ bgp default show-nexthop-hostname
neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers 1 3
+ neighbor 192.168.1.2 timers connect 1
neighbor 192.168.1.2 dont-capability-negotiate
!
router bgp 65002
no bgp ebgp-requires-policy
neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers 1 3
+ neighbor 192.168.1.1 timers connect 1
address-family ipv4 unicast
redistribute connected
exit-address-family
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
pytestmark = [pytest.mark.bgpd]
tgen.stop_topology()
+def bgp_converge(router):
+ output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast summary json"))
+ expected = {
+ "peers": {
+ "192.168.1.2": {
+ "pfxRcd": 2,
+ "pfxSnt": 2,
+ "state": "Established",
+ "peerState": "OK",
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+
def test_bgp_dont_capability_negotiate():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
+
+ test_func = functools.partial(bgp_converge, r1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Can't converge with dont-capability-negotiate"
+
+
+def test_bgp_check_fqdn():
+ tgen = get_topogen()
- def _bgp_converge(router):
- output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast summary json"))
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ def _bgp_check_fqdn(fqdn=None):
+ output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast 172.16.16.1/32 json"))
expected = {
- "peers": {
- "192.168.1.2": {
- "pfxRcd": 2,
- "pfxSnt": 2,
- "state": "Established",
- "peerState": "OK",
+ "paths": [
+ {
+ "nexthops": [
+ {
+ "hostname": fqdn,
+ }
+ ],
+ "peer": {
+ "hostname": fqdn,
+ },
}
- }
+ ]
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_bgp_converge, router)
- success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ step("Enable all capabilities")
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router bgp
+ address-family ipv4 unicast
+ no neighbor 192.168.1.2 dont-capability-negotiate
+ end
+ clear bgp 192.168.1.2
+ """
+ )
+
+ step("Wait to converge")
+ test_func = functools.partial(bgp_converge, r1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Can't converge with dont-capability-negotiate"
+
+ test_func = functools.partial(_bgp_check_fqdn, "r2")
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "FQDN capability enabled, but r1 can't see it"
+
+ step("Disable sending any capabilities from r2")
+ r2.vtysh_cmd(
+ """
+ configure terminal
+ router bgp
+ address-family ipv4 unicast
+ neighbor 192.168.1.1 dont-capability-negotiate
+ end
+ clear bgp 192.168.1.1
+ """
+ )
+
+ step("Wait to converge")
+ test_func = functools.partial(bgp_converge, r1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Can't converge with dont-capability-negotiate"
+ step("Make sure FQDN capability is reset")
+ test_func = functools.partial(_bgp_check_fqdn)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "FQDN capability disabled, but we still have a hostname"
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
{
"vrfId":0,
"vrfName":"default",
- "tableVersion":1,
"routerId":"1.1.1.1",
"defaultLocPrf":100,
"localAS":65500,
"prefix":"10.201.0.0",
"prefixLen":24,
"network":"10.201.0.0\/24",
- "version":1,
"metric":0,
"weight":32768,
"peerId":"(unspec)",
"nexthops":[
{
"ip":"0.0.0.0",
+ "hostname":"r1",
"afi":"ipv4",
"used":true
}
"prefix":"10.200.0.0",
"prefixLen":24,
"network":"10.200.0.0\/24",
- "version":1,
"metric":0,
"locPrf":100,
"weight":0,
"nexthops":[
{
"ip":"10.125.0.2",
+ "hostname":"r2",
"afi":"ipv4",
"used":true
}
{
"vrfId":0,
"vrfName":"default",
- "tableVersion":1,
"routerId":"1.1.1.1",
"defaultLocPrf":100,
"localAS":65500,
"prefix":"10.201.0.0",
"prefixLen":24,
"network":"10.201.0.0\/24",
- "version":1,
"metric":0,
"weight":32768,
"peerId":"(unspec)",
"nexthops":[
{
"ip":"0.0.0.0",
+ "hostname":"r1",
"afi":"ipv4",
"used":true
}
"prefix":"10.200.0.0",
"prefixLen":24,
"network":"10.200.0.0\/24",
- "version":1,
"metric":0,
"locPrf":100,
"weight":0,
"nexthops":[
{
"ip":"10.125.0.2",
+ "hostname":"r2",
"afi":"ipv4",
"used":true
}
"prefix":"10.210.0.0",
"prefixLen":24,
"network":"10.210.0.0\/24",
- "version":1,
"metric":0,
"locPrf":100,
"weight":0,
"nexthops":[
{
"ip":"10.125.0.2",
+ "hostname":"r2",
"afi":"ipv4",
"used":true
}
tgen.stop_topology()
-def test_protocols_convergence():
+def router_json_cmp_exact_filter(router, cmd, expected):
+ # filter out tableVersion, version and nhVrfId
+ output = router.cmd('vtysh -c "{}" | grep -v ersion | grep -v nhVrfId'.format(cmd))
+ logger.info("{}: {}\n{}".format(router.name, cmd, output))
+
+ json_output = json.loads(output)
+
+ return topotest.json_cmp(json_output, expected, exact=True)
+
+
+def test_bgp_no_retain():
"""
- Assert that all protocols have converged
- statuses as they depend on it.
+ Check bgp no retain route-target all on r1
"""
+
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
# Check IPv4 VPN routing tables on r1
- logger.info("Checking IPv4 routes for convergence on r1")
- router = tgen.gears['r1']
+ logger.info("Checking VPNv4 routes for convergence on r1")
+ router = tgen.gears["r1"]
json_file = "{}/{}/ipv4_vpn_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
logger.info("skipping file {}".format(json_file))
- assert 0, 'ipv4_vpn_routes.json file not found'
+ assert 0, "{} file not found".format(json_file)
return
expected = json.loads(open(json_file).read())
test_func = partial(
- topotest.router_json_cmp,
+ router_json_cmp_exact_filter,
router,
"show bgp ipv4 vpn json",
expected,
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
- # Check BGP IPv4 routing tables after unsetting no retain flag
- logger.info("Checking BGP IPv4 routes for convergence on r2")
- router = tgen.gears['r1']
- router.vtysh_cmd("configure\nrouter bgp 65500\naddress-family ipv4 vpn\nbgp retain route-target all\n")
+
+def test_bgp_retain():
+ """
+ Apply and check bgp retain route-target all on r1
+ """
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
# Check IPv4 VPN routing tables on r1
- logger.info("Checking IPv4 routes for convergence on r1")
- router = tgen.gears['r1']
+ logger.info("Checking VPNv4 routes on r1 after bgp no retain")
+ router = tgen.gears["r1"]
+ router.vtysh_cmd(
+ "configure\nrouter bgp 65500\naddress-family ipv4 vpn\nbgp retain route-target all\n"
+ )
json_file = "{}/{}/ipv4_vpn_routes_unfiltered.json".format(CWD, router.name)
if not os.path.isfile(json_file):
logger.info("skipping file {}".format(json_file))
- assert 0, 'ipv4_vpn_routes_unfiltered.json file not found'
+ assert 0, "{} file not found".format(json_file)
return
expected = json.loads(open(json_file).read())
test_func = partial(
- topotest.router_json_cmp,
+ router_json_cmp_exact_filter,
router,
"show bgp ipv4 vpn json",
expected,
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
# Exclude empty string at end of list
for d in dmns[:-1]:
if re.search(r"%s" % daemon, d):
- daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
+ daemonpidfile = d.rstrip()
+ daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
if daemonpid.isdigit() and pid_exists(int(daemonpid)):
logger.info(
"{}: killing {}".format(
self.name,
- os.path.basename(d.rstrip().rsplit(".", 1)[0]),
+ os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
)
)
- self.cmd("kill -9 %s" % daemonpid)
+ os.kill(int(daemonpid), signal.SIGKILL)
if pid_exists(int(daemonpid)):
numRunning += 1
while wait and numRunning > 0:
),
)
)
- self.cmd("kill -9 %s" % daemonpid)
+ os.kill(int(daemonpid), signal.SIGKILL)
if daemonpid.isdigit() and not pid_exists(
int(daemonpid)
):
numRunning -= 1
- self.cmd("rm -- {}".format(d.rstrip()))
+ self.cmd("rm -- {}".format(daemonpidfile))
if wait:
errors = self.checkRouterCores(reportOnce=True)
if self.checkRouterVersion("<", minErrorVersion):
return frrstr_join(argstr, argc + off, " ");
}
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+/* 'headline' is a format string with a %s for the daemon name
+ *
+ * Also for some reason GCC emits the warning on the end of the function
+ * (optimization maybe?) rather than on the vty_out line, so this pragma
+ * wraps the entire function rather than just the vty_out line.
+ */
+
static int show_per_daemon(struct vty *vty, struct cmd_token **argv, int argc,
const char *headline)
{
return ret;
}
+#pragma GCC diagnostic pop
static int show_one_daemon(struct vty *vty, struct cmd_token **argv, int argc,
const char *name)
{
static char buf[512];
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* prompt formatting has a %s in the cmd_node prompt string. */
snprintf(buf, sizeof(buf), cmd_prompt(vty->node), cmd_hostname_get());
+#pragma GCC diagnostic pop
return buf;
}
restart->kills = 0;
{
char cmd[strlen(command) + strlen(restart->name) + 1];
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ /* user supplied command string has a %s for the daemon name */
snprintf(cmd, sizeof(cmd), command, restart->name);
+#pragma GCC diagnostic pop
if ((restart->pid = run_background(cmd)) > 0) {
thread_add_timer(master, restart_kill, restart,
gs.restart_timeout, &restart->t_kill);
if (!IS_UP(gs.special))
break;
zlog_info("Phased restart: %s is now up.", gs.special->name);
- {
- struct daemon *dmn;
- for (dmn = gs.daemons; dmn; dmn = dmn->next) {
- if (dmn != gs.special)
- run_job(&dmn->restart, "start",
- gs.start_command, 1, 0);
- }
+ for (dmn = gs.daemons; dmn; dmn = dmn->next) {
+ if (dmn != gs.special)
+ run_job(&dmn->restart, "start",
+ gs.start_command, 1, 0);
}
gs.phase = PHASE_NONE;
THREAD_OFF(gs.t_phase_hanging);
* When a FPM server connection becomes a bottleneck, we must keep the
* data plane contexts until we get a chance to process them.
*/
- struct dplane_ctx_q ctxqueue;
+ struct dplane_ctx_list_head ctxqueue;
pthread_mutex_t ctxqueue_mutex;
/* data plane events. */
fnc->socket = -1;
fnc->disabled = true;
fnc->prov = prov;
- TAILQ_INIT(&fnc->ctxqueue);
+ dplane_ctx_q_init(&fnc->ctxqueue);
pthread_mutex_init(&fnc->ctxqueue_mutex, NULL);
/* Set default values. */
const struct zebra_dplane_info *zns;
- struct dplane_ctx_q ctx_list;
+ struct dplane_ctx_list_head ctx_list;
/*
* Pointer to the queue of completed contexts outbound back
* towards the dataplane module.
*/
- struct dplane_ctx_q *ctx_out_q;
+ struct dplane_ctx_list_head *ctx_out_q;
};
int netlink_config_write_helper(struct vty *vty)
bth->msgcnt = 0;
bth->zns = NULL;
- TAILQ_INIT(&(bth->ctx_list));
+ dplane_ctx_q_init(&(bth->ctx_list));
}
-static void nl_batch_init(struct nl_batch *bth, struct dplane_ctx_q *ctx_out_q)
+static void nl_batch_init(struct nl_batch *bth,
+ struct dplane_ctx_list_head *ctx_out_q)
{
/*
* If the size of the buffer has changed, free and then allocate a new
return FRR_NETLINK_ERROR;
}
-void kernel_update_multi(struct dplane_ctx_q *ctx_list)
+void kernel_update_multi(struct dplane_ctx_list_head *ctx_list)
{
struct nl_batch batch;
struct zebra_dplane_ctx *ctx;
- struct dplane_ctx_q handled_list;
+ struct dplane_ctx_list_head handled_list;
enum netlink_msg_status res;
- TAILQ_INIT(&handled_list);
+ dplane_ctx_q_init(&handled_list);
nl_batch_init(&batch, &handled_list);
while (true) {
nl_batch_send(&batch);
- TAILQ_INIT(ctx_list);
+ dplane_ctx_q_init(ctx_list);
dplane_ctx_list_append(ctx_list, &handled_list);
}
return 0;
}
-void kernel_update_multi(struct dplane_ctx_q *ctx_list)
+void kernel_update_multi(struct dplane_ctx_list_head *ctx_list)
{
struct zebra_dplane_ctx *ctx;
- struct dplane_ctx_q handled_list;
+ struct dplane_ctx_list_head handled_list;
enum zebra_dplane_result res = ZEBRA_DPLANE_REQUEST_SUCCESS;
- TAILQ_INIT(&handled_list);
+ dplane_ctx_q_init(&handled_list);
while (true) {
ctx = dplane_ctx_dequeue(ctx_list);
dplane_ctx_enqueue_tail(&handled_list, ctx);
}
- TAILQ_INIT(ctx_list);
+ dplane_ctx_q_init(ctx_list);
dplane_ctx_list_append(ctx_list, &handled_list);
}
/*
* Message batching interface.
*/
-extern void kernel_update_multi(struct dplane_ctx_q *ctx_list);
+extern void kernel_update_multi(struct dplane_ctx_list_head *ctx_list);
/*
* Called by the dplane pthread to read incoming OS messages and dispatch them.
#include "lib/frratomic.h"
#include "lib/frr_pthread.h"
#include "lib/memory.h"
-#include "lib/queue.h"
#include "lib/zebra.h"
#include "zebra/netconf_netlink.h"
#include "zebra/zebra_router.h"
uint32_t flags;
uint32_t status;
- TAILQ_ENTRY(dplane_intf_extra) link;
+ struct dplane_intf_extra_list_item dlink;
};
/*
struct nexthop_group old_backup_ng;
/* Optional list of extra interface info */
- TAILQ_HEAD(dp_intf_extra_q, dplane_intf_extra) intf_extra_q;
+ struct dplane_intf_extra_list_head intf_extra_list;
};
/*
struct zebra_dplane_info zd_ns_info;
/* Embedded list linkage */
- TAILQ_ENTRY(zebra_dplane_ctx) zd_q_entries;
+ struct dplane_ctx_list_item zd_entries;
};
/* Flag that can be set by a pre-kernel provider as a signal that an update
*/
#define DPLANE_CTX_FLAG_NO_KERNEL 0x01
+/* List types declared now that the structs involved are defined. */
+DECLARE_DLIST(dplane_ctx_list, struct zebra_dplane_ctx, zd_entries);
+DECLARE_DLIST(dplane_intf_extra_list, struct dplane_intf_extra, dlink);
+
+/* List for dplane plugins/providers */
+PREDECL_DLIST(dplane_prov_list);
/*
* Registration block for one dataplane provider.
_Atomic uint32_t dp_error_counter;
/* Queue of contexts inbound to the provider */
- struct dplane_ctx_q dp_ctx_in_q;
+ struct dplane_ctx_list_head dp_ctx_in_list;
/* Queue of completed contexts outbound from the provider back
* towards the dataplane module.
*/
- struct dplane_ctx_q dp_ctx_out_q;
+ struct dplane_ctx_list_head dp_ctx_out_list;
/* Embedded list linkage for provider objects */
- TAILQ_ENTRY(zebra_dplane_provider) dp_prov_link;
+ struct dplane_prov_list_item dp_link;
};
+/* Declare list of providers/plugins */
+DECLARE_DLIST(dplane_prov_list, struct zebra_dplane_provider, dp_link);
+
/* Declare types for list of zns info objects */
PREDECL_DLIST(zns_info_list);
pthread_mutex_t dg_mutex;
/* Results callback registered by zebra 'core' */
- int (*dg_results_cb)(struct dplane_ctx_q *ctxlist);
+ int (*dg_results_cb)(struct dplane_ctx_list_head *ctxlist);
/* Sentinel for beginning of shutdown */
volatile bool dg_is_shutdown;
volatile bool dg_run;
/* Update context queue inbound to the dataplane */
- TAILQ_HEAD(zdg_ctx_q, zebra_dplane_ctx) dg_update_ctx_q;
+ struct dplane_ctx_list_head dg_update_list;
/* Ordered list of providers */
- TAILQ_HEAD(zdg_prov_q, zebra_dplane_provider) dg_providers_q;
+ struct dplane_prov_list_head dg_providers;
/* List of info about each zns */
struct zns_info_list_head dg_zns_list;
*/
static void dplane_ctx_free_internal(struct zebra_dplane_ctx *ctx)
{
- struct dplane_intf_extra *if_extra, *if_tmp;
+ struct dplane_intf_extra *if_extra;
/*
* Some internal allocations may need to be freed, depending on
}
/* Optional extra interface info */
- TAILQ_FOREACH_SAFE(if_extra, &ctx->u.rinfo.intf_extra_q,
- link, if_tmp) {
- TAILQ_REMOVE(&ctx->u.rinfo.intf_extra_q, if_extra,
- link);
+ while ((if_extra = dplane_intf_extra_list_pop(
+ &ctx->u.rinfo.intf_extra_list)))
XFREE(MTYPE_DP_INTF, if_extra);
- }
break;
dplane_ctx_free(pctx);
}
+/* Init a list of contexts */
+void dplane_ctx_q_init(struct dplane_ctx_list_head *q)
+{
+ dplane_ctx_list_init(q);
+}
+
/* Enqueue a context block */
-void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
+void dplane_ctx_enqueue_tail(struct dplane_ctx_list_head *list,
const struct zebra_dplane_ctx *ctx)
{
- TAILQ_INSERT_TAIL(q, (struct zebra_dplane_ctx *)ctx, zd_q_entries);
+ dplane_ctx_list_add_tail(list, (struct zebra_dplane_ctx *)ctx);
}
/* Append a list of context blocks to another list */
-void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
- struct dplane_ctx_q *from_list)
+void dplane_ctx_list_append(struct dplane_ctx_list_head *to_list,
+ struct dplane_ctx_list_head *from_list)
{
- if (TAILQ_FIRST(from_list)) {
- TAILQ_CONCAT(to_list, from_list, zd_q_entries);
+ struct zebra_dplane_ctx *ctx;
- /* And clear 'from' list */
- TAILQ_INIT(from_list);
- }
+ while ((ctx = dplane_ctx_list_pop(from_list)) != NULL)
+ dplane_ctx_list_add_tail(to_list, ctx);
}
-struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q)
+struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_list_head *q)
{
- struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
+ struct zebra_dplane_ctx *ctx = dplane_ctx_list_first(q);
return ctx;
}
/* Dequeue a context block from the head of a list */
-struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q)
+struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_list_head *q)
{
- struct zebra_dplane_ctx *ctx = TAILQ_FIRST(q);
-
- if (ctx)
- TAILQ_REMOVE(q, ctx, zd_q_entries);
+ struct zebra_dplane_ctx *ctx = dplane_ctx_list_pop(q);
return ctx;
}
const struct dplane_intf_extra *
dplane_ctx_get_intf_extra(const struct zebra_dplane_ctx *ctx)
{
- return TAILQ_FIRST(&ctx->u.rinfo.intf_extra_q);
+ return dplane_intf_extra_list_const_first(
+ &ctx->u.rinfo.intf_extra_list);
}
const struct dplane_intf_extra *
dplane_ctx_intf_extra_next(const struct zebra_dplane_ctx *ctx,
const struct dplane_intf_extra *ptr)
{
- return TAILQ_NEXT(ptr, link);
+ return dplane_intf_extra_list_const_next(&ctx->u.rinfo.intf_extra_list,
+ ptr);
}
vrf_id_t dplane_intf_extra_get_vrfid(const struct dplane_intf_extra *ptr)
if (!ctx || !re)
return ret;
- TAILQ_INIT(&ctx->u.rinfo.intf_extra_q);
+ dplane_intf_extra_list_init(&ctx->u.rinfo.intf_extra_list);
ctx->zd_op = op;
ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
if_extra->flags = ifp->flags;
if_extra->status = ifp->status;
- TAILQ_INSERT_TAIL(&ctx->u.rinfo.intf_extra_q,
- if_extra, link);
+ dplane_intf_extra_list_add_tail(
+ &ctx->u.rinfo.intf_extra_list,
+ if_extra);
}
}
/* Enqueue for processing by the dataplane pthread */
DPLANE_LOCK();
{
- TAILQ_INSERT_TAIL(&zdplane_info.dg_update_ctx_q, ctx,
- zd_q_entries);
+ dplane_ctx_list_add_tail(&zdplane_info.dg_update_list, ctx);
}
DPLANE_UNLOCK();
vty_out(vty, "Zebra dataplane providers:\n");
DPLANE_LOCK();
- prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+ prov = dplane_prov_list_first(&zdplane_info.dg_providers);
DPLANE_UNLOCK();
/* Show counters, useful info from each registered provider */
prov->dp_name, prov->dp_id, in, in_q, in_max,
out, out_q, out_max);
- DPLANE_LOCK();
- prov = TAILQ_NEXT(prov, dp_prov_link);
- DPLANE_UNLOCK();
+ prov = dplane_prov_list_next(&zdplane_info.dg_providers, prov);
}
return CMD_SUCCESS;
p = XCALLOC(MTYPE_DP_PROV, sizeof(struct zebra_dplane_provider));
pthread_mutex_init(&(p->dp_mutex), NULL);
- TAILQ_INIT(&(p->dp_ctx_in_q));
- TAILQ_INIT(&(p->dp_ctx_out_q));
+ dplane_ctx_list_init(&p->dp_ctx_in_list);
+ dplane_ctx_list_init(&p->dp_ctx_out_list);
p->dp_flags = flags;
p->dp_priority = prio;
"provider-%u", p->dp_id);
/* Insert into list ordered by priority */
- TAILQ_FOREACH(last, &zdplane_info.dg_providers_q, dp_prov_link) {
+ frr_each (dplane_prov_list, &zdplane_info.dg_providers, last) {
if (last->dp_priority > p->dp_priority)
break;
}
if (last)
- TAILQ_INSERT_BEFORE(last, p, dp_prov_link);
+ dplane_prov_list_add_after(&zdplane_info.dg_providers, last, p);
else
- TAILQ_INSERT_TAIL(&zdplane_info.dg_providers_q, p,
- dp_prov_link);
+ dplane_prov_list_add_tail(&zdplane_info.dg_providers, p);
/* And unlock */
DPLANE_UNLOCK();
dplane_provider_lock(prov);
- ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
+ ctx = dplane_ctx_list_pop(&(prov->dp_ctx_in_list));
if (ctx) {
- TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
-
atomic_fetch_sub_explicit(&prov->dp_in_queued, 1,
memory_order_relaxed);
}
* Dequeue work to a list, return count
*/
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
- struct dplane_ctx_q *listp)
+ struct dplane_ctx_list_head *listp)
{
int limit, ret;
struct zebra_dplane_ctx *ctx;
dplane_provider_lock(prov);
for (ret = 0; ret < limit; ret++) {
- ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
- if (ctx) {
- TAILQ_REMOVE(&(prov->dp_ctx_in_q), ctx, zd_q_entries);
-
- TAILQ_INSERT_TAIL(listp, ctx, zd_q_entries);
- } else {
+ ctx = dplane_ctx_list_pop(&(prov->dp_ctx_in_list));
+ if (ctx)
+ dplane_ctx_list_add_tail(listp, ctx);
+ else
break;
- }
}
if (ret > 0)
dplane_provider_lock(prov);
- TAILQ_INSERT_TAIL(&(prov->dp_ctx_out_q), ctx,
- zd_q_entries);
+ dplane_ctx_list_add_tail(&(prov->dp_ctx_out_list), ctx);
/* Maintain out-queue counters */
atomic_fetch_add_explicit(&(prov->dp_out_queued), 1,
*/
void dplane_provider_enqueue_to_zebra(struct zebra_dplane_ctx *ctx)
{
- struct dplane_ctx_q temp_list;
+ struct dplane_ctx_list_head temp_list;
/* Zebra's api takes a list, so we need to use a temporary list */
- TAILQ_INIT(&temp_list);
+ dplane_ctx_list_init(&temp_list);
- TAILQ_INSERT_TAIL(&temp_list, ctx, zd_q_entries);
+ dplane_ctx_list_add_tail(&temp_list, ctx);
(zdplane_info.dg_results_cb)(&temp_list);
}
*/
static int kernel_dplane_process_func(struct zebra_dplane_provider *prov)
{
- struct zebra_dplane_ctx *ctx, *tctx;
- struct dplane_ctx_q work_list;
+ struct zebra_dplane_ctx *ctx;
+ struct dplane_ctx_list_head work_list;
int counter, limit;
- TAILQ_INIT(&work_list);
+ dplane_ctx_list_init(&work_list);
limit = dplane_provider_get_work_limit(prov);
== DPLANE_OP_IPSET_ENTRY_DELETE))
kernel_dplane_process_ipset_entry(prov, ctx);
else
- TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
+ dplane_ctx_list_add_tail(&work_list, ctx);
}
kernel_update_multi(&work_list);
- TAILQ_FOREACH_SAFE (ctx, &work_list, zd_q_entries, tctx) {
+ while ((ctx = dplane_ctx_list_pop(&work_list)) != NULL) {
kernel_dplane_handle_result(ctx);
- TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
dplane_provider_enqueue_out_ctx(prov, ctx);
}
int dplane_clean_ctx_queue(bool (*context_cb)(struct zebra_dplane_ctx *ctx,
void *arg), void *val)
{
- struct zebra_dplane_ctx *ctx, *temp;
- struct dplane_ctx_q work_list;
+ struct zebra_dplane_ctx *ctx;
+ struct dplane_ctx_list_head work_list;
- TAILQ_INIT(&work_list);
+ dplane_ctx_list_init(&work_list);
if (context_cb == NULL)
return AOK;
/* Walk the pending context queue under the dplane lock. */
DPLANE_LOCK();
- TAILQ_FOREACH_SAFE(ctx, &zdplane_info.dg_update_ctx_q, zd_q_entries,
- temp) {
+ frr_each_safe (dplane_ctx_list, &zdplane_info.dg_update_list, ctx) {
if (context_cb(ctx, val)) {
- TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
- zd_q_entries);
- TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
+ dplane_ctx_list_del(&zdplane_info.dg_update_list, ctx);
+ dplane_ctx_list_add_tail(&work_list, ctx);
}
}
/* Now free any contexts selected by the caller, without holding
* the lock.
*/
- TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, temp) {
- TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
+ while ((ctx = dplane_ctx_list_pop(&work_list)) != NULL)
dplane_ctx_fini(&ctx);
- }
return AOK;
}
zdplane_info.dg_is_shutdown = true;
/* Notify provider(s) of pending shutdown. */
- TAILQ_FOREACH(prov, &zdplane_info.dg_providers_q, dp_prov_link) {
+ frr_each (dplane_prov_list, &zdplane_info.dg_providers, prov) {
if (prov->dp_fini == NULL)
continue;
*/
DPLANE_LOCK();
{
- ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
- prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+ ctx = dplane_ctx_list_first(&zdplane_info.dg_update_list);
+ prov = dplane_prov_list_first(&zdplane_info.dg_providers);
}
DPLANE_UNLOCK();
dplane_provider_lock(prov);
- ctx = TAILQ_FIRST(&(prov->dp_ctx_in_q));
+ ctx = dplane_ctx_list_first(&(prov->dp_ctx_in_list));
if (ctx == NULL)
- ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
+ ctx = dplane_ctx_list_first(&(prov->dp_ctx_out_list));
dplane_provider_unlock(prov);
if (ctx != NULL)
break;
- DPLANE_LOCK();
- prov = TAILQ_NEXT(prov, dp_prov_link);
- DPLANE_UNLOCK();
+ prov = dplane_prov_list_next(&zdplane_info.dg_providers, prov);
}
if (ctx != NULL)
*/
static void dplane_thread_loop(struct thread *event)
{
- struct dplane_ctx_q work_list;
- struct dplane_ctx_q error_list;
+ struct dplane_ctx_list_head work_list;
+ struct dplane_ctx_list_head error_list;
struct zebra_dplane_provider *prov;
- struct zebra_dplane_ctx *ctx, *tctx;
+ struct zebra_dplane_ctx *ctx;
int limit, counter, error_counter;
uint64_t curr, high;
bool reschedule = false;
limit = zdplane_info.dg_updates_per_cycle;
/* Init temporary lists used to move contexts among providers */
- TAILQ_INIT(&work_list);
- TAILQ_INIT(&error_list);
+ dplane_ctx_list_init(&work_list);
+ dplane_ctx_list_init(&error_list);
+
error_counter = 0;
/* Check for zebra shutdown */
DPLANE_LOCK();
/* Locate initial registered provider */
- prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+ prov = dplane_prov_list_first(&zdplane_info.dg_providers);
/* Move new work from incoming list to temp list */
for (counter = 0; counter < limit; counter++) {
- ctx = TAILQ_FIRST(&zdplane_info.dg_update_ctx_q);
+ ctx = dplane_ctx_list_pop(&zdplane_info.dg_update_list);
if (ctx) {
- TAILQ_REMOVE(&zdplane_info.dg_update_ctx_q, ctx,
- zd_q_entries);
-
ctx->zd_provider = prov->dp_id;
- TAILQ_INSERT_TAIL(&work_list, ctx, zd_q_entries);
+ dplane_ctx_list_add_tail(&work_list, ctx);
} else {
break;
}
/* Capture current provider id in each context; check for
* error status.
*/
- TAILQ_FOREACH_SAFE(ctx, &work_list, zd_q_entries, tctx) {
+ frr_each_safe (dplane_ctx_list, &work_list, ctx) {
if (dplane_ctx_get_status(ctx) ==
ZEBRA_DPLANE_REQUEST_SUCCESS) {
ctx->zd_provider = prov->dp_id;
/* Move to error list; will be returned
* zebra main.
*/
- TAILQ_REMOVE(&work_list, ctx, zd_q_entries);
- TAILQ_INSERT_TAIL(&error_list,
- ctx, zd_q_entries);
+ dplane_ctx_list_del(&work_list, ctx);
+ dplane_ctx_list_add_tail(&error_list, ctx);
error_counter++;
}
}
/* Enqueue new work to the provider */
dplane_provider_lock(prov);
- if (TAILQ_FIRST(&work_list))
- TAILQ_CONCAT(&(prov->dp_ctx_in_q), &work_list,
- zd_q_entries);
+ while ((ctx = dplane_ctx_list_pop(&work_list)) != NULL)
+ dplane_ctx_list_add_tail(&(prov->dp_ctx_in_list), ctx);
atomic_fetch_add_explicit(&prov->dp_in_counter, counter,
memory_order_relaxed);
/* Reset the temp list (though the 'concat' may have done this
* already), and the counter
*/
- TAILQ_INIT(&work_list);
+ dplane_ctx_list_init(&work_list);
counter = 0;
/* Call into the provider code. Note that this is
dplane_provider_lock(prov);
while (counter < limit) {
- ctx = TAILQ_FIRST(&(prov->dp_ctx_out_q));
+ ctx = dplane_ctx_list_pop(&(prov->dp_ctx_out_list));
if (ctx) {
- TAILQ_REMOVE(&(prov->dp_ctx_out_q), ctx,
- zd_q_entries);
-
- TAILQ_INSERT_TAIL(&work_list,
- ctx, zd_q_entries);
+ dplane_ctx_list_add_tail(&work_list, ctx);
counter++;
} else
break;
counter, dplane_provider_get_name(prov));
/* Locate next provider */
- DPLANE_LOCK();
- prov = TAILQ_NEXT(prov, dp_prov_link);
- DPLANE_UNLOCK();
+ prov = dplane_prov_list_next(&zdplane_info.dg_providers, prov);
}
/*
/* Call through to zebra main */
(zdplane_info.dg_results_cb)(&error_list);
- TAILQ_INIT(&error_list);
+ dplane_ctx_list_init(&error_list);
/* Call through to zebra main */
(zdplane_info.dg_results_cb)(&work_list);
- TAILQ_INIT(&work_list);
+ dplane_ctx_list_init(&work_list);
}
/*
* Note that this call is in the main pthread, so providers must
* be prepared for that.
*/
- TAILQ_FOREACH(dp, &zdplane_info.dg_providers_q, dp_prov_link) {
+ frr_each (dplane_prov_list, &zdplane_info.dg_providers, dp) {
if (dp->dp_fini == NULL)
continue;
pthread_mutex_init(&zdplane_info.dg_mutex, NULL);
- TAILQ_INIT(&zdplane_info.dg_update_ctx_q);
- TAILQ_INIT(&zdplane_info.dg_providers_q);
+ dplane_prov_list_init(&zdplane_info.dg_providers);
+
+ dplane_ctx_list_init(&zdplane_info.dg_update_list);
zns_info_list_init(&zdplane_info.dg_zns_list);
zdplane_info.dg_updates_per_cycle = DPLANE_DEFAULT_NEW_WORK;
/* Call start callbacks for registered providers */
DPLANE_LOCK();
- prov = TAILQ_FIRST(&zdplane_info.dg_providers_q);
+ prov = dplane_prov_list_first(&zdplane_info.dg_providers);
DPLANE_UNLOCK();
while (prov) {
(prov->dp_start)(prov);
/* Locate next provider */
- DPLANE_LOCK();
- prov = TAILQ_NEXT(prov, dp_prov_link);
- DPLANE_UNLOCK();
+ prov = dplane_prov_list_next(&zdplane_info.dg_providers, prov);
}
frr_pthread_run(zdplane_info.dg_pthread, NULL);
/*
* Initialize the dataplane module at startup; called by zebra rib_init()
*/
-void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_q *))
+void zebra_dplane_init(int (*results_fp)(struct dplane_ctx_list_head *))
{
zebra_dplane_init_internal();
zdplane_info.dg_results_cb = results_fp;
#include "lib/prefix.h"
#include "lib/nexthop.h"
#include "lib/nexthop_group.h"
-#include "lib/queue.h"
#include "lib/vlan.h"
#include "zebra/zebra_ns.h"
#include "zebra/rib.h"
* they cannot share existing global data structures safely.
*/
-/* Define a tailq list type for context blocks. The list is exposed/public,
+/* Define a list type for context blocks. The list is exposed/public,
* but the internal linkage in the context struct is private, so there
* are accessor apis that support enqueue and dequeue.
*/
-TAILQ_HEAD(dplane_ctx_q, zebra_dplane_ctx);
+
+PREDECL_DLIST(dplane_ctx_list);
/* Declare a type for (optional) extended interface info objects. */
-TAILQ_HEAD(dplane_intf_extra_q, dplane_intf_extra);
+PREDECL_DLIST(dplane_intf_extra_list);
/* Allocate a context object */
struct zebra_dplane_ctx *dplane_ctx_alloc(void);
/* Enqueue a context block to caller's tailq. This exists so that the
* context struct can remain opaque.
*/
-void dplane_ctx_enqueue_tail(struct dplane_ctx_q *q,
+void dplane_ctx_enqueue_tail(struct dplane_ctx_list_head *q,
const struct zebra_dplane_ctx *ctx);
/* Append a list of context blocks to another list - again, just keeping
* the context struct opaque.
*/
-void dplane_ctx_list_append(struct dplane_ctx_q *to_list,
- struct dplane_ctx_q *from_list);
+void dplane_ctx_list_append(struct dplane_ctx_list_head *to_list,
+ struct dplane_ctx_list_head *from_list);
/* Dequeue a context block from the head of caller's tailq */
-struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_q *q);
-struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_q *q);
+struct zebra_dplane_ctx *dplane_ctx_dequeue(struct dplane_ctx_list_head *q);
+struct zebra_dplane_ctx *dplane_ctx_get_head(struct dplane_ctx_list_head *q);
+
+/* Init a list of contexts */
+void dplane_ctx_q_init(struct dplane_ctx_list_head *q);
/*
* Accessors for information from the context object
/* Dequeue work to a list, maintain counter and locking, return count */
int dplane_provider_dequeue_in_list(struct zebra_dplane_provider *prov,
- struct dplane_ctx_q *listp);
+ struct dplane_ctx_list_head *listp);
/* Current completed work queue length */
uint32_t dplane_provider_out_ctx_queue_len(struct zebra_dplane_provider *prov);
* so the expectation is that the contexts are queued for the zebra
* main pthread.
*/
-void zebra_dplane_init(int (*) (struct dplane_ctx_q *));
+void zebra_dplane_init(int (*)(struct dplane_ctx_list_head *));
/*
* Start the dataplane pthread. This step needs to be run later than the
/* Copy labels of the resolved route and the parent resolving to it */
if (policy) {
- int i = 0;
+ int label_num = 0;
/*
* Don't push the first SID if the corresponding action in the
*/
if (!newhop->nh_label || !newhop->nh_label->num_labels
|| newhop->nh_label->label[0] == MPLS_LABEL_IMPLICIT_NULL)
- i = 1;
+ label_num = 1;
- for (; i < policy->segment_list.label_num; i++)
- labels[num_labels++] = policy->segment_list.labels[i];
+ for (; label_num < policy->segment_list.label_num; label_num++)
+ labels[num_labels++] =
+ policy->segment_list.labels[label_num];
label_type = policy->segment_list.type;
} else if (newhop->nh_label) {
for (i = 0; i < newhop->nh_label->num_labels; i++) {
*/
static pthread_mutex_t dplane_mutex;
static struct thread *t_dplane;
-static struct dplane_ctx_q rib_dplane_q;
+static struct dplane_ctx_list_head rib_dplane_q;
DEFINE_HOOK(rib_update, (struct route_node * rn, const char *reason),
(rn, reason));
ctxnhg->nexthop != NULL ? "" : " (empty)");
/* Set the flag about the dedicated fib list */
- SET_FLAG(re->status, ROUTE_ENTRY_USE_FIB_NHG);
- if (ctxnhg->nexthop)
- copy_nexthops(&(re->fib_ng.nexthop), ctxnhg->nexthop, NULL);
+ if (zrouter.asic_notification_nexthop_control) {
+ SET_FLAG(re->status, ROUTE_ENTRY_USE_FIB_NHG);
+ if (ctxnhg->nexthop)
+ copy_nexthops(&(re->fib_ng.nexthop), ctxnhg->nexthop,
+ NULL);
+ }
check_backups:
}
}
-static void rib_update_handle_vrf(vrf_id_t vrf_id, enum rib_update_event event,
- int rtype)
-{
- struct route_table *table;
-
- if (IS_ZEBRA_DEBUG_EVENT)
- zlog_debug("%s: Handling VRF %s event %s", __func__,
- vrf_id_to_name(vrf_id), rib_update_event2str(event));
-
- /* Process routes of interested address-families. */
- table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id);
- if (table)
- rib_update_table(table, event, rtype);
-
- table = zebra_vrf_table(AFI_IP6, SAFI_UNICAST, vrf_id);
- if (table)
- rib_update_table(table, event, rtype);
-}
-
static void rib_update_handle_vrf_all(enum rib_update_event event, int rtype)
{
struct zebra_router_table *zrt;
struct rib_update_ctx {
enum rib_update_event event;
- bool vrf_all;
vrf_id_t vrf_id;
};
ctx = THREAD_ARG(thread);
- if (ctx->vrf_all)
- rib_update_handle_vrf_all(ctx->event, ZEBRA_ROUTE_ALL);
- else
- rib_update_handle_vrf(ctx->vrf_id, ctx->event, ZEBRA_ROUTE_ALL);
+ rib_update_handle_vrf_all(ctx->event, ZEBRA_ROUTE_ALL);
rib_update_ctx_fini(&ctx);
}
return;
ctx = rib_update_ctx_init(0, event);
- ctx->vrf_all = true;
thread_add_event(zrouter.master, rib_update_handler, ctx, 0,
&t_rib_update_threads[event]);
static void rib_process_dplane_results(struct thread *thread)
{
struct zebra_dplane_ctx *ctx;
- struct dplane_ctx_q ctxlist;
+ struct dplane_ctx_list_head ctxlist;
bool shut_p = false;
/* Dequeue a list of completed updates with one lock/unlock cycle */
do {
- TAILQ_INIT(&ctxlist);
+ dplane_ctx_q_init(&ctxlist);
/* Take lock controlling queue of results */
frr_with_mutex (&dplane_mutex) {
* the dataplane pthread. We enqueue the results here for processing by
* the main thread later.
*/
-static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
+static int rib_dplane_results(struct dplane_ctx_list_head *ctxlist)
{
/* Take lock controlling queue of results */
frr_with_mutex (&dplane_mutex) {
/* Init dataplane, and register for results */
pthread_mutex_init(&dplane_mutex, NULL);
- TAILQ_INIT(&rib_dplane_q);
+ dplane_ctx_q_init(&rib_dplane_q);
zebra_dplane_init(rib_dplane_results);
}
return -1;
}
-static void print_nh(struct nexthop *nexthop, struct vty *vty,
- json_object *json)
+
+/*
+ * Render a nexthop into a json object; the caller allocates and owns
+ * the json object memory.
+ */
+void show_nexthop_json_helper(json_object *json_nexthop,
+ const struct nexthop *nexthop,
+ const struct route_entry *re)
{
- struct zebra_ns *zns = zebra_ns_lookup(nexthop->vrf_id);
+ json_object *json_labels = NULL;
+ json_object *json_backups = NULL;
+ json_object *json_seg6local = NULL;
+ json_object *json_seg6 = NULL;
+ int i;
+
+ json_object_int_add(json_nexthop, "flags", nexthop->flags);
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
+ json_object_boolean_true_add(json_nexthop, "duplicate");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
+ json_object_boolean_true_add(json_nexthop, "fib");
switch (nexthop->type) {
case NEXTHOP_TYPE_IPV4:
case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (json) {
- json_object_string_addf(json, "ip", "%pI4",
- &nexthop->gate.ipv4);
- if (nexthop->ifindex)
- json_object_string_add(
- json, "interface",
- ifindex2ifname_per_ns(
- zns, nexthop->ifindex));
- } else {
- vty_out(vty, " via %pI4", &nexthop->gate.ipv4);
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname_per_ns(
- zns, nexthop->ifindex));
+ json_object_string_addf(json_nexthop, "ip", "%pI4",
+ &nexthop->gate.ipv4);
+ json_object_string_add(json_nexthop, "afi", "ipv4");
+
+ if (nexthop->ifindex) {
+ json_object_int_add(json_nexthop, "interfaceIndex",
+ nexthop->ifindex);
+ json_object_string_add(json_nexthop, "interfaceName",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
}
break;
case NEXTHOP_TYPE_IPV6:
case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (json) {
- json_object_string_addf(json, "ip", "%pI6",
- &nexthop->gate.ipv6);
- if (nexthop->ifindex)
- json_object_string_add(
- json, "interface",
- ifindex2ifname_per_ns(
- zns, nexthop->ifindex));
- } else {
- vty_out(vty, " %pI6", &nexthop->gate.ipv6);
- if (nexthop->ifindex)
- vty_out(vty, ", via %s",
- ifindex2ifname_per_ns(
- zns, nexthop->ifindex));
+ json_object_string_addf(json_nexthop, "ip", "%pI6",
+ &nexthop->gate.ipv6);
+ json_object_string_add(json_nexthop, "afi", "ipv6");
+
+ if (nexthop->ifindex) {
+ json_object_int_add(json_nexthop, "interfaceIndex",
+ nexthop->ifindex);
+ json_object_string_add(json_nexthop, "interfaceName",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
}
break;
+
case NEXTHOP_TYPE_IFINDEX:
- if (json) {
- json_object_string_add(
- json, "interface",
- ifindex2ifname_per_ns(zns, nexthop->ifindex));
- json_object_boolean_true_add(json, "directlyConnected");
- } else {
- vty_out(vty, " is directly connected, %s",
- ifindex2ifname_per_ns(zns, nexthop->ifindex));
+ json_object_boolean_true_add(json_nexthop, "directlyConnected");
+ json_object_int_add(json_nexthop, "interfaceIndex",
+ nexthop->ifindex);
+ json_object_string_add(
+ json_nexthop, "interfaceName",
+ ifindex2ifname(nexthop->ifindex, nexthop->vrf_id));
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ json_object_boolean_true_add(json_nexthop, "unreachable");
+ switch (nexthop->bh_type) {
+ case BLACKHOLE_REJECT:
+ json_object_boolean_true_add(json_nexthop, "reject");
+ break;
+ case BLACKHOLE_ADMINPROHIB:
+ json_object_boolean_true_add(json_nexthop,
+ "adminProhibited");
+ break;
+ case BLACKHOLE_NULL:
+ json_object_boolean_true_add(json_nexthop, "blackhole");
+ break;
+ case BLACKHOLE_UNSPEC:
+ break;
+ }
+ break;
+ }
+
+ /* This nexthop resolves its parent nexthop.
+ * Set the "resolver" flag for clarity and to act as a
+ * delimiter in the flat JSON list of nexthops.
+ */
+ if (nexthop->rparent)
+ json_object_boolean_true_add(json_nexthop, "resolver");
+
+ if ((re == NULL || (nexthop->vrf_id != re->vrf_id)))
+ json_object_string_add(json_nexthop, "vrf",
+ vrf_id_to_name(nexthop->vrf_id));
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
+ json_object_boolean_true_add(json_nexthop, "duplicate");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ json_object_boolean_true_add(json_nexthop, "active");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
+ json_object_boolean_true_add(json_nexthop, "onLink");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
+ json_object_boolean_true_add(json_nexthop, "linkDown");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ json_object_boolean_true_add(json_nexthop, "recursive");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ json_backups = json_object_new_array();
+ for (i = 0; i < nexthop->backup_num; i++) {
+ json_object_array_add(
+ json_backups,
+ json_object_new_int(nexthop->backup_idx[i]));
}
+
+ json_object_object_add(json_nexthop, "backupIndex",
+ json_backups);
+ }
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ if (nexthop->src.ipv4.s_addr)
+ json_object_string_addf(json_nexthop, "source", "%pI4",
+ &nexthop->src.ipv4);
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any))
+ json_object_string_addf(json_nexthop, "source", "%pI6",
+ &nexthop->src.ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (nexthop->nh_label && nexthop->nh_label->num_labels) {
+ json_labels = json_object_new_array();
+
+ for (int label_index = 0;
+ label_index < nexthop->nh_label->num_labels; label_index++)
+ json_object_array_add(
+ json_labels,
+ json_object_new_int(
+ nexthop->nh_label->label[label_index]));
+
+ json_object_object_add(json_nexthop, "labels", json_labels);
+ }
+
+ if (nexthop->weight)
+ json_object_int_add(json_nexthop, "weight", nexthop->weight);
+
+ if (nexthop->srte_color)
+ json_object_int_add(json_nexthop, "srteColor",
+ nexthop->srte_color);
+
+ if (nexthop->nh_srv6) {
+ json_seg6local = json_object_new_object();
+ json_object_string_add(
+ json_seg6local, "action",
+ seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action));
+ json_object_object_add(json_nexthop, "seg6local",
+ json_seg6local);
+
+ json_seg6 = json_object_new_object();
+ json_object_string_addf(json_seg6, "segs", "%pI6",
+ &nexthop->nh_srv6->seg6_segs);
+ json_object_object_add(json_nexthop, "seg6", json_seg6);
+ }
+}
+
+/*
+ * Helper for nexthop output, used in the 'show ip route' path
+ */
+void show_route_nexthop_helper(struct vty *vty, const struct route_entry *re,
+ const struct nexthop *nexthop)
+{
+ char buf[MPLS_LABEL_STRLEN];
+ int i;
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ vty_out(vty, " via %pI4", &nexthop->gate.ipv4);
+ if (nexthop->ifindex)
+ vty_out(vty, ", %s",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ vty_out(vty, " via %s",
+ inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf,
+ sizeof(buf)));
+ if (nexthop->ifindex)
+ vty_out(vty, ", %s",
+ ifindex2ifname(nexthop->ifindex,
+ nexthop->vrf_id));
+ break;
+
+ case NEXTHOP_TYPE_IFINDEX:
+ vty_out(vty, " is directly connected, %s",
+ ifindex2ifname(nexthop->ifindex, nexthop->vrf_id));
break;
case NEXTHOP_TYPE_BLACKHOLE:
- if (json) {
- json_object_string_add(json, "interface", "Null0");
- json_object_boolean_true_add(json, "directlyConnected");
- } else {
- vty_out(vty, " is directly connected, Null0");
+ vty_out(vty, " unreachable");
+ switch (nexthop->bh_type) {
+ case BLACKHOLE_REJECT:
+ vty_out(vty, " (ICMP unreachable)");
+ break;
+ case BLACKHOLE_ADMINPROHIB:
+ vty_out(vty, " (ICMP admin-prohibited)");
+ break;
+ case BLACKHOLE_NULL:
+ vty_out(vty, " (blackhole)");
+ break;
+ case BLACKHOLE_UNSPEC:
+ break;
}
break;
+ }
+
+ if ((re == NULL || (nexthop->vrf_id != re->vrf_id)))
+ vty_out(vty, " (vrf %s)", vrf_id_to_name(nexthop->vrf_id));
+
+ if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
+ vty_out(vty, " inactive");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
+ vty_out(vty, " onlink");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
+ vty_out(vty, " linkdown");
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
+ vty_out(vty, " (recursive)");
+
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ if (nexthop->src.ipv4.s_addr) {
+ vty_out(vty, ", src %pI4", &nexthop->src.ipv4);
+ /* SR-TE information */
+ if (nexthop->srte_color)
+ vty_out(vty, ", SR-TE color %u",
+ nexthop->srte_color);
+ }
+ break;
+ case NEXTHOP_TYPE_IPV6:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any))
+ vty_out(vty, ", src %pI6", &nexthop->src.ipv6);
+ break;
default:
break;
}
- if (!json)
- vty_out(vty, "\n");
+ /* Label information */
+ if (nexthop->nh_label && nexthop->nh_label->num_labels) {
+ vty_out(vty, ", label %s",
+ mpls_label2str(nexthop->nh_label->num_labels,
+ nexthop->nh_label->label, buf,
+ sizeof(buf), 1));
+ }
+
+ if (nexthop->nh_srv6) {
+ seg6local_context2str(buf, sizeof(buf),
+ &nexthop->nh_srv6->seg6local_ctx,
+ nexthop->nh_srv6->seg6local_action);
+ vty_out(vty, ", seg6local %s %s",
+ seg6local_action2str(
+ nexthop->nh_srv6->seg6local_action),
+ buf);
+ vty_out(vty, ", seg6 %pI6", &nexthop->nh_srv6->seg6_segs);
+ }
+
+ if (nexthop->weight)
+ vty_out(vty, ", weight %u", nexthop->weight);
+
+ if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
+ vty_out(vty, ", backup %d", nexthop->backup_idx[0]);
+
+ for (i = 1; i < nexthop->backup_num; i++)
+ vty_out(vty, ",%d", nexthop->backup_idx[i]);
+ }
}
static void print_rnh(struct route_node *rn, struct vty *vty, json_object *json)
CHECK_FLAG(rnh->flags, ZEBRA_NHT_CONNECTED));
json_object_object_add(json_nht, "clientList",
json_client_array);
- json_object_object_add(json_nht, "gates", json_nexthop_array);
+ json_object_object_add(json_nht, "nexthops",
+ json_nexthop_array);
} else {
vty_out(vty, "%s%s\n",
inet_ntop(rn->p.family, &rn->p.u.prefix, buf, BUFSIZ),
json_nexthop = json_object_new_object();
json_object_array_add(json_nexthop_array,
json_nexthop);
+ show_nexthop_json_helper(json_nexthop, nexthop,
+ NULL);
+ } else {
+ show_route_nexthop_helper(vty, NULL, nexthop);
+ vty_out(vty, "\n");
}
- print_nh(nexthop, vty, json_nexthop);
}
} else {
if (json)
void rnh_set_hide_backups(bool hide_p);
bool rnh_get_hide_backups(void);
+void show_nexthop_json_helper(json_object *json_nexthop,
+ const struct nexthop *nexthop,
+ const struct route_entry *re);
+void show_route_nexthop_helper(struct vty *vty, const struct route_entry *re,
+ const struct nexthop *nexthop);
+
#ifdef __cplusplus
}
#endif
}
}
-/*
- * Helper for nexthop output, used in the 'show ip route' path
- */
-static void show_route_nexthop_helper(struct vty *vty,
- const struct route_entry *re,
- const struct nexthop *nexthop)
-{
- char buf[MPLS_LABEL_STRLEN];
- int i;
-
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- vty_out(vty, " via %pI4", &nexthop->gate.ipv4);
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- vty_out(vty, " via %s",
- inet_ntop(AF_INET6, &nexthop->gate.ipv6, buf,
- sizeof(buf)));
- if (nexthop->ifindex)
- vty_out(vty, ", %s",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
-
- case NEXTHOP_TYPE_IFINDEX:
- vty_out(vty, " is directly connected, %s",
- ifindex2ifname(nexthop->ifindex, nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- vty_out(vty, " unreachable");
- switch (nexthop->bh_type) {
- case BLACKHOLE_REJECT:
- vty_out(vty, " (ICMP unreachable)");
- break;
- case BLACKHOLE_ADMINPROHIB:
- vty_out(vty, " (ICMP admin-prohibited)");
- break;
- case BLACKHOLE_NULL:
- vty_out(vty, " (blackhole)");
- break;
- case BLACKHOLE_UNSPEC:
- break;
- }
- break;
- }
-
- if ((re == NULL || (nexthop->vrf_id != re->vrf_id)))
- vty_out(vty, " (vrf %s)", vrf_id_to_name(nexthop->vrf_id));
-
- if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- vty_out(vty, " inactive");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
- vty_out(vty, " onlink");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
- vty_out(vty, " linkdown");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- vty_out(vty, " (recursive)");
-
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nexthop->src.ipv4.s_addr) {
- if (inet_ntop(AF_INET, &nexthop->src.ipv4, buf,
- sizeof(buf)))
- vty_out(vty, ", src %s", buf);
- /* SR-TE information */
- if (nexthop->srte_color)
- vty_out(vty, ", SR-TE color %u",
- nexthop->srte_color);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&nexthop->src.ipv6, &in6addr_any)) {
- if (inet_ntop(AF_INET6, &nexthop->src.ipv6, buf,
- sizeof(buf)))
- vty_out(vty, ", src %s", buf);
- }
- break;
- default:
- break;
- }
-
- /* Label information */
- if (nexthop->nh_label && nexthop->nh_label->num_labels) {
- vty_out(vty, ", label %s",
- mpls_label2str(nexthop->nh_label->num_labels,
- nexthop->nh_label->label, buf,
- sizeof(buf), 1));
- }
-
- if (nexthop->nh_srv6) {
- seg6local_context2str(buf, sizeof(buf),
- &nexthop->nh_srv6->seg6local_ctx,
- nexthop->nh_srv6->seg6local_action);
- vty_out(vty, ", seg6local %s %s",
- seg6local_action2str(
- nexthop->nh_srv6->seg6local_action),
- buf);
-
- inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
- sizeof(buf));
- vty_out(vty, ", seg6 %s", buf);
- }
-
- if (nexthop->weight)
- vty_out(vty, ", weight %u", nexthop->weight);
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
- vty_out(vty, ", backup %d", nexthop->backup_idx[0]);
-
- for (i = 1; i < nexthop->backup_num; i++)
- vty_out(vty, ",%d", nexthop->backup_idx[i]);
- }
-}
-
-
-/*
- * Render a nexthop into a json object; the caller allocates and owns
- * the json object memory.
- */
-static void show_nexthop_json_helper(json_object *json_nexthop,
- const struct nexthop *nexthop,
- const struct route_entry *re)
-{
- char buf[SRCDEST2STR_BUFFER];
- json_object *json_labels = NULL;
- json_object *json_backups = NULL;
- json_object *json_seg6local = NULL;
- json_object *json_seg6 = NULL;
- int i;
-
- json_object_int_add(json_nexthop, "flags",
- nexthop->flags);
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
- json_object_boolean_true_add(json_nexthop,
- "duplicate");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_FIB))
- json_object_boolean_true_add(json_nexthop,
- "fib");
-
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- json_object_string_addf(json_nexthop, "ip", "%pI4",
- &nexthop->gate.ipv4);
- json_object_string_add(json_nexthop, "afi",
- "ipv4");
-
- if (nexthop->ifindex) {
- json_object_int_add(json_nexthop,
- "interfaceIndex",
- nexthop->ifindex);
- json_object_string_add(
- json_nexthop, "interfaceName",
- ifindex2ifname(
- nexthop->ifindex,
- nexthop->vrf_id));
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- json_object_string_addf(json_nexthop, "ip", "%pI6",
- &nexthop->gate.ipv6);
- json_object_string_add(json_nexthop, "afi",
- "ipv6");
-
- if (nexthop->ifindex) {
- json_object_int_add(json_nexthop,
- "interfaceIndex",
- nexthop->ifindex);
- json_object_string_add(
- json_nexthop, "interfaceName",
- ifindex2ifname(
- nexthop->ifindex,
- nexthop->vrf_id));
- }
- break;
-
- case NEXTHOP_TYPE_IFINDEX:
- json_object_boolean_true_add(
- json_nexthop, "directlyConnected");
- json_object_int_add(json_nexthop,
- "interfaceIndex",
- nexthop->ifindex);
- json_object_string_add(
- json_nexthop, "interfaceName",
- ifindex2ifname(nexthop->ifindex,
- nexthop->vrf_id));
- break;
- case NEXTHOP_TYPE_BLACKHOLE:
- json_object_boolean_true_add(json_nexthop,
- "unreachable");
- switch (nexthop->bh_type) {
- case BLACKHOLE_REJECT:
- json_object_boolean_true_add(
- json_nexthop, "reject");
- break;
- case BLACKHOLE_ADMINPROHIB:
- json_object_boolean_true_add(json_nexthop,
- "adminProhibited");
- break;
- case BLACKHOLE_NULL:
- json_object_boolean_true_add(
- json_nexthop, "blackhole");
- break;
- case BLACKHOLE_UNSPEC:
- break;
- }
- break;
- }
-
- /* This nexthop is a resolver for the parent nexthop.
- * Set resolver flag for better clarity and delimiter
- * in flat list of nexthops in json.
- */
- if (nexthop->rparent)
- json_object_boolean_true_add(json_nexthop, "resolver");
-
- if ((re == NULL || (nexthop->vrf_id != re->vrf_id)))
- json_object_string_add(json_nexthop, "vrf",
- vrf_id_to_name(nexthop->vrf_id));
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
- json_object_boolean_true_add(json_nexthop,
- "duplicate");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
- json_object_boolean_true_add(json_nexthop,
- "active");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ONLINK))
- json_object_boolean_true_add(json_nexthop, "onLink");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_LINKDOWN))
- json_object_boolean_true_add(json_nexthop, "linkDown");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE))
- json_object_boolean_true_add(json_nexthop,
- "recursive");
-
- if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_HAS_BACKUP)) {
- json_backups = json_object_new_array();
- for (i = 0; i < nexthop->backup_num; i++) {
- json_object_array_add(
- json_backups,
- json_object_new_int(nexthop->backup_idx[i]));
- }
-
- json_object_object_add(json_nexthop, "backupIndex",
- json_backups);
- }
-
- switch (nexthop->type) {
- case NEXTHOP_TYPE_IPV4:
- case NEXTHOP_TYPE_IPV4_IFINDEX:
- if (nexthop->src.ipv4.s_addr) {
- if (inet_ntop(AF_INET,
- &nexthop->src.ipv4, buf,
- sizeof(buf)))
- json_object_string_add(
- json_nexthop, "source",
- buf);
- }
- break;
- case NEXTHOP_TYPE_IPV6:
- case NEXTHOP_TYPE_IPV6_IFINDEX:
- if (!IPV6_ADDR_SAME(&nexthop->src.ipv6,
- &in6addr_any)) {
- if (inet_ntop(AF_INET6,
- &nexthop->src.ipv6, buf,
- sizeof(buf)))
- json_object_string_add(
- json_nexthop, "source",
- buf);
- }
- break;
- default:
- break;
- }
-
- if (nexthop->nh_label
- && nexthop->nh_label->num_labels) {
- json_labels = json_object_new_array();
-
- for (int label_index = 0;
- label_index
- < nexthop->nh_label->num_labels;
- label_index++)
- json_object_array_add(
- json_labels,
- json_object_new_int(
- nexthop->nh_label->label
- [label_index]));
-
- json_object_object_add(json_nexthop, "labels",
- json_labels);
- }
-
- if (nexthop->weight)
- json_object_int_add(json_nexthop, "weight",
- nexthop->weight);
-
- if (nexthop->srte_color)
- json_object_int_add(json_nexthop, "srteColor",
- nexthop->srte_color);
-
- if (nexthop->nh_srv6) {
- json_seg6local = json_object_new_object();
- json_object_string_add(
- json_seg6local, "action", seg6local_action2str(
- nexthop->nh_srv6->seg6local_action));
- json_object_object_add(json_nexthop, "seg6local",
- json_seg6local);
-
- json_seg6 = json_object_new_object();
- inet_ntop(AF_INET6, &nexthop->nh_srv6->seg6_segs, buf,
- sizeof(buf));
- json_object_string_add(json_seg6, "segs", buf);
- json_object_object_add(json_nexthop, "seg6", json_seg6);
- }
-}
-
static void vty_show_ip_route(struct vty *vty, struct route_node *rn,
struct route_entry *re, json_object *json,
bool is_fib, bool show_ng)
zvrf_name(zvrf),
json_vrf);
json_object_object_add(json_vrf,
- "nexthops",
+ (afi == AFI_IP)
+ ? "ipv4"
+ : "ipv6",
json_nexthop);
} else {
vty_out(vty, "\nVRF %s:\n",
else
json_object_object_add(json, "default", json_vrf);
- json_object_object_add(json_vrf, "nexthops", json_nexthop);
+ json_object_object_add(json_vrf,
+ (afi == AFI_IP) ? "ipv4" : "ipv6",
+ json_nexthop);
}
zebra_print_rnh_table(vrf_id, afi, safi, vty, p, json_nexthop);