XFREE(MTYPE_AS_SEG_DATA, asdata);
}
/* Printable names for AS-path segment types; indexed by the AS_*
 * segment-type constants (slot 0 is the invalid/unknown type).
 */
const char *const aspath_segment_type_str[] = {
	"as-invalid",
	"as-set",
	"as-sequence",
	"as-confed-sequence",
	"as-confed-set",
};
/* Get a new segment. Note that 0 is an allowed length,
* and will result in a segment with no allocated data segment.
return count;
}
+/* Check if aspath has AS_SET or AS_CONFED_SET */
+bool aspath_check_as_sets(struct aspath *aspath)
+{
+ struct assegment *seg = aspath->segments;
+
+ while (seg) {
+ if (seg->type == AS_SET || seg->type == AS_CONFED_SET)
+ return true;
+ seg = seg->next;
+ }
+ return false;
+}
+
/* Estimate size aspath /might/ take if encoded into an
* ASPATH attribute.
*
if (make_json)
json_object_array_add(
jseg_list,
- json_object_new_int(seg->as[i]));
+ json_object_new_int64(seg->as[i]));
len += snprintf(str_buf + len, str_size - len, "%u",
seg->as[i]);
/* Not reached */
}
-/* Iterate over AS_PATH segments and wipe all occurences of the
+/* Iterate over AS_PATH segments and wipe all occurrences of the
* listed AS numbers. Hence some segments may lose some or even
* all data on the way, the operation is implemented as a smarter
* version of aspath_dup(), which allocates memory to hold the new
static void bgp_aggr_aspath_prepare(struct hash_backet *hb, void *arg)
{
- struct aspath *asmerge = NULL;
struct aspath *hb_aspath = hb->data;
struct aspath **aggr_aspath = arg;
- if (*aggr_aspath) {
- asmerge = aspath_aggregate(*aggr_aspath, hb_aspath);
- aspath_free(*aggr_aspath);
- *aggr_aspath = asmerge;
- } else
+ if (*aggr_aspath)
+ *aggr_aspath = aspath_aggregate(*aggr_aspath, hb_aspath);
+ else
*aggr_aspath = aspath_dup(hb_aspath);
}
/* Register 'aspath' as a contributor to the aggregate (bumping its
 * reference count in the aggregate's hash) and then recompute the
 * aggregate's combined as-path from all tracked contributors.
 */
void bgp_compute_aggregate_aspath(struct bgp_aggregate *aggregate,
				  struct aspath *aspath)
{
	/* Track the contributing path. */
	bgp_compute_aggregate_aspath_hash(aggregate, aspath);

	/* Refresh aggregate->aspath from the hash contents. */
	bgp_compute_aggregate_aspath_val(aggregate);
}
+
+void bgp_compute_aggregate_aspath_hash(struct bgp_aggregate *aggregate,
+ struct aspath *aspath)
{
struct aspath *aggr_aspath = NULL;
*/
aggr_aspath = hash_get(aggregate->aspath_hash, aspath,
bgp_aggr_aspath_hash_alloc);
+ }
- /* Compute aggregate's as-path.
- */
+ /* Increment reference counter.
+ */
+ aggr_aspath->refcnt++;
+}
+
+void bgp_compute_aggregate_aspath_val(struct bgp_aggregate *aggregate)
+{
+ if (aggregate == NULL)
+ return;
+ /* Re-compute aggregate's as-path.
+ */
+ if (aggregate->aspath) {
+ aspath_free(aggregate->aspath);
+ aggregate->aspath = NULL;
+ }
+ if (aggregate->aspath_hash
+ && aggregate->aspath_hash->count) {
hash_iterate(aggregate->aspath_hash,
bgp_aggr_aspath_prepare,
&aggregate->aspath);
}
-
- /* Increment refernce counter.
- */
- aggr_aspath->refcnt++;
}
void bgp_remove_aspath_from_aggregate(struct bgp_aggregate *aggregate,
struct aspath *aggr_aspath = NULL;
struct aspath *ret_aspath = NULL;
- if ((aggregate == NULL) || (aspath == NULL))
- return;
-
- if (aggregate->aspath_hash == NULL)
+ if ((!aggregate)
+ || (!aggregate->aspath_hash)
+ || (!aspath))
return;
/* Look-up the aspath in the hash.
ret_aspath = hash_release(aggregate->aspath_hash,
aggr_aspath);
aspath_free(ret_aspath);
+ ret_aspath = NULL;
/* Remove aggregate's old as-path.
*/
aspath_free(aggregate->aspath);
aggregate->aspath = NULL;
- /* Compute aggregate's as-path.
- */
- hash_iterate(aggregate->aspath_hash,
- bgp_aggr_aspath_prepare,
- &aggregate->aspath);
+ bgp_compute_aggregate_aspath_val(aggregate);
+ }
+ }
+}
+
+void bgp_remove_aspath_from_aggregate_hash(struct bgp_aggregate *aggregate,
+ struct aspath *aspath)
+{
+ struct aspath *aggr_aspath = NULL;
+ struct aspath *ret_aspath = NULL;
+
+ if ((!aggregate)
+ || (!aggregate->aspath_hash)
+ || (!aspath))
+ return;
+
+ /* Look-up the aspath in the hash.
+ */
+ aggr_aspath = bgp_aggr_aspath_lookup(aggregate, aspath);
+ if (aggr_aspath) {
+ aggr_aspath->refcnt--;
+
+ if (aggr_aspath->refcnt == 0) {
+ ret_aspath = hash_release(aggregate->aspath_hash,
+ aggr_aspath);
+ aspath_free(ret_aspath);
+ ret_aspath = NULL;
}
}
}
+