if (o1->subgroup > o2->subgroup)
return 1;
+ if (o1->addpath_tx_id < o2->addpath_tx_id)
+ return -1;
+
+ if (o1->addpath_tx_id > o2->addpath_tx_id)
+ return 1;
+
return 0;
}
/* Instantiate the red-black tree operations for the per-node adj-out tree
 * (entry field `adj_entry`), ordered by bgp_adj_out_compare() — which, per
 * the hunk above, keys on subgroup and then addpath_tx_id. */
RB_GENERATE(bgp_adj_out_rb, bgp_adj_out, adj_entry, bgp_adj_out_compare);
struct update_subgroup *subgrp,
uint32_t addpath_tx_id)
{
- struct bgp_adj_out *adj, lookup;
- struct peer *peer;
- afi_t afi;
- safi_t safi;
- int addpath_capable;
+ struct bgp_adj_out lookup;
if (!rn || !subgrp)
return NULL;
- peer = SUBGRP_PEER(subgrp);
- afi = SUBGRP_AFI(subgrp);
- safi = SUBGRP_SAFI(subgrp);
- addpath_capable = bgp_addpath_encode_tx(peer, afi, safi);
-
/* update-groups that do not support addpath will pass 0 for
- * addpath_tx_id so do not both matching against it */
+ * addpath_tx_id. */
lookup.subgroup = subgrp;
- adj = RB_FIND(bgp_adj_out_rb, &rn->adj_out, &lookup);
- if (adj) {
- if (addpath_capable) {
- if (adj->addpath_tx_id == addpath_tx_id)
- return adj;
- } else
- return adj;
- }
- return NULL;
+ lookup.addpath_tx_id = addpath_tx_id;
+
+ return RB_FIND(bgp_adj_out_rb, &rn->adj_out, &lookup);
}
static void adj_free(struct bgp_adj_out *adj)
adj = XCALLOC(MTYPE_BGP_ADJ_OUT, sizeof(struct bgp_adj_out));
adj->subgroup = subgrp;
+ adj->addpath_tx_id = addpath_tx_id;
+
if (rn) {
RB_INSERT(bgp_adj_out_rb, &rn->adj_out, adj);
bgp_lock_node(rn);
adj->rn = rn;
}
- adj->addpath_tx_id = addpath_tx_id;
TAILQ_INSERT_TAIL(&(subgrp->adjq), adj, subgrp_adj_train);
SUBGRP_INCR_STAT(subgrp, adj_count);
return adj;