/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
30 #include "workqueue.h"
34 #include "bgpd/bgpd.h"
35 #include "bgpd/bgp_labelpool.h"
36 #include "bgpd/bgp_debug.h"
37 #include "bgpd/bgp_errors.h"
38 #include "bgpd/bgp_route.h"
40 #define BGP_LABELPOOL_ENABLE_TESTS 0
42 #ifndef VTYSH_EXTRACT_PL
43 #include "bgpd/bgp_labelpool_clippy.c"
48 * Definitions and external declarations.
50 extern struct zclient
*zclient
;
52 #if BGP_LABELPOOL_ENABLE_TESTS
53 static void lptest_init(void);
54 static void lptest_finish(void);
58 * Remember where pool data are kept
60 static struct labelpool
*lp
;
63 * Number of labels requested at a time from the zebra label manager.
64 * We start small but double the request size each time up to a
67 * The label space is 20 bits which is shared with other FRR processes
68 * on this host, so to avoid greedily requesting a mostly wasted chunk,
69 * we limit the chunk size to 1/16 of the label space (that's the -4 bits
70 * in the definition below). This limit slightly increases our cost of
71 * finding free labels in our allocated chunks.
73 #define LP_CHUNK_SIZE_MIN 128
74 #define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
76 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_CHUNK
, "BGP Label Chunk");
77 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_FIFO
, "BGP Label FIFO item");
78 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_CB
, "BGP Dynamic Label Assignment");
79 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_CBQ
, "BGP Dynamic Label Callback");
84 uint32_t nfree
; /* un-allocated count */
85 uint32_t idx_last_allocated
; /* start looking here */
86 bitfield_t allocated_map
;
93 mpls_label_t label
; /* MPLS_LABEL_NONE = not allocated */
95 void *labelid
; /* unique ID */
97 * callback for label allocation and loss
99 * allocated: false = lost
101 int (*cbfunc
)(mpls_label_t label
, void *lblid
, bool alloc
);
105 struct lp_fifo_item fifo
;
109 DECLARE_LIST(lp_fifo
, struct lp_fifo
, fifo
);
112 int (*cbfunc
)(mpls_label_t label
, void *lblid
, bool alloc
);
116 bool allocated
; /* false = lost */
119 static wq_item_status
lp_cbq_docallback(struct work_queue
*wq
, void *data
)
121 struct lp_cbq_item
*lcbq
= data
;
123 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
126 zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
127 __func__
, lcbq
->labelid
, lcbq
->label
, lcbq
->allocated
);
129 if (lcbq
->label
== MPLS_LABEL_NONE
) {
130 /* shouldn't happen */
131 flog_err(EC_BGP_LABEL
, "%s: error: label==MPLS_LABEL_NONE",
136 rc
= (*(lcbq
->cbfunc
))(lcbq
->label
, lcbq
->labelid
, lcbq
->allocated
);
138 if (lcbq
->allocated
&& rc
) {
140 * Callback rejected allocation. This situation could arise
141 * if there was a label request followed by the requestor
142 * deciding it didn't need the assignment (e.g., config
143 * change) while the reply to the original request (with
144 * label) was in the work queue.
147 zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
148 __func__
, lcbq
->labelid
, lcbq
->label
);
150 uintptr_t lbl
= lcbq
->label
;
155 * If the rejected label was marked inuse by this labelid,
156 * release the label back to the pool.
158 * Further, if the rejected label was still assigned to
159 * this labelid in the LCB, delete the LCB.
161 if (!skiplist_search(lp
->inuse
, (void *)lbl
, &labelid
)) {
162 if (labelid
== lcbq
->labelid
) {
163 if (!skiplist_search(lp
->ledger
, labelid
,
165 if (lcbq
->label
== lcb
->label
)
166 skiplist_delete(lp
->ledger
,
169 skiplist_delete(lp
->inuse
, (void *)lbl
, NULL
);
177 static void lp_cbq_item_free(struct work_queue
*wq
, void *data
)
179 XFREE(MTYPE_BGP_LABEL_CBQ
, data
);
182 static void lp_lcb_free(void *goner
)
184 XFREE(MTYPE_BGP_LABEL_CB
, goner
);
187 static void lp_chunk_free(void *goner
)
189 struct lp_chunk
*chunk
= (struct lp_chunk
*)goner
;
191 bf_free(chunk
->allocated_map
);
192 XFREE(MTYPE_BGP_LABEL_CHUNK
, goner
);
195 void bgp_lp_init(struct thread_master
*master
, struct labelpool
*pool
)
197 if (BGP_DEBUG(labelpool
, LABELPOOL
))
198 zlog_debug("%s: entry", __func__
);
200 lp
= pool
; /* Set module pointer to pool data */
202 lp
->ledger
= skiplist_new(0, NULL
, lp_lcb_free
);
203 lp
->inuse
= skiplist_new(0, NULL
, NULL
);
204 lp
->chunks
= list_new();
205 lp
->chunks
->del
= lp_chunk_free
;
206 lp_fifo_init(&lp
->requests
);
207 lp
->callback_q
= work_queue_new(master
, "label callbacks");
209 lp
->callback_q
->spec
.workfunc
= lp_cbq_docallback
;
210 lp
->callback_q
->spec
.del_item_data
= lp_cbq_item_free
;
211 lp
->callback_q
->spec
.max_retries
= 0;
213 lp
->next_chunksize
= LP_CHUNK_SIZE_MIN
;
215 #if BGP_LABELPOOL_ENABLE_TESTS
220 /* check if a label callback was for a BGP LU node, and if so, unlock it */
221 static void check_bgp_lu_cb_unlock(struct lp_lcb
*lcb
)
223 if (lcb
->type
== LP_TYPE_BGP_LU
)
224 bgp_dest_unlock_node(lcb
->labelid
);
227 /* check if a label callback was for a BGP LU node, and if so, lock it */
228 static void check_bgp_lu_cb_lock(struct lp_lcb
*lcb
)
230 if (lcb
->type
== LP_TYPE_BGP_LU
)
231 bgp_dest_lock_node(lcb
->labelid
);
234 void bgp_lp_finish(void)
237 struct work_queue_item
*item
, *titem
;
239 #if BGP_LABELPOOL_ENABLE_TESTS
245 skiplist_free(lp
->ledger
);
248 skiplist_free(lp
->inuse
);
251 list_delete(&lp
->chunks
);
253 while ((lf
= lp_fifo_pop(&lp
->requests
))) {
254 check_bgp_lu_cb_unlock(&lf
->lcb
);
255 XFREE(MTYPE_BGP_LABEL_FIFO
, lf
);
257 lp_fifo_fini(&lp
->requests
);
259 /* we must unlock path infos for LU callbacks; but we cannot do that
260 * in the deletion callback of the workqueue, as that is also called
261 * to remove an element from the queue after it has been run, resulting
262 * in a double unlock. Hence we need to iterate over our queues and
263 * lists and manually perform the unlocking (ugh)
265 STAILQ_FOREACH_SAFE (item
, &lp
->callback_q
->items
, wq
, titem
)
266 check_bgp_lu_cb_unlock(item
->data
);
268 work_queue_free_and_null(&lp
->callback_q
);
273 static mpls_label_t
get_label_from_pool(void *labelid
)
275 struct listnode
*node
;
276 struct lp_chunk
*chunk
;
277 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
282 for (ALL_LIST_ELEMENTS_RO(lp
->chunks
, node
, chunk
)) {
287 zlog_debug("%s: chunk first=%u last=%u",
288 __func__
, chunk
->first
, chunk
->last
);
291 * don't look in chunks with no available labels
297 * roll through bitfield starting where we stopped
300 index
= bf_find_next_clear_bit_wrap(
301 &chunk
->allocated_map
, chunk
->idx_last_allocated
+ 1,
305 * since chunk->nfree is non-zero, we should always get
308 assert(index
!= WORD_MAX
);
310 lbl
= chunk
->first
+ index
;
311 if (skiplist_insert(lp
->inuse
, (void *)lbl
, labelid
)) {
312 /* something is very wrong */
313 zlog_err("%s: unable to insert inuse label %u (id %p)",
314 __func__
, (uint32_t)lbl
, labelid
);
315 return MPLS_LABEL_NONE
;
321 bf_set_bit(chunk
->allocated_map
, index
);
322 chunk
->idx_last_allocated
= index
;
328 return MPLS_LABEL_NONE
;
332 * Success indicated by value of "label" field in returned LCB
334 static struct lp_lcb
*lcb_alloc(
337 int (*cbfunc
)(mpls_label_t label
, void *labelid
, bool allocated
))
340 * Set up label control block
342 struct lp_lcb
*new = XCALLOC(MTYPE_BGP_LABEL_CB
,
343 sizeof(struct lp_lcb
));
345 new->label
= get_label_from_pool(labelid
);
347 new->labelid
= labelid
;
348 new->cbfunc
= cbfunc
;
354 * Callers who need labels must supply a type, labelid, and callback.
355 * The type is a value defined in bgp_labelpool.h (add types as needed).
356 * The callback is for asynchronous notification of label allocation.
357 * The labelid is passed as an argument to the callback. It should be unique
358 * to the requested label instance.
360 * If zebra is not connected, callbacks with labels will be delayed
361 * until connection is established. If zebra connection is lost after
362 * labels have been assigned, existing assignments via this labelpool
363 * module will continue until reconnection.
365 * When connection to zebra is reestablished, previous label assignments
366 * will be invalidated (via callbacks having the "allocated" parameter unset)
367 * and new labels will be automatically reassigned by this labelpool module
368 * (that is, a requestor does not need to call bgp_lp_get() again if it is
369 * notified via callback that its label has been lost: it will eventually
370 * get another callback with a new label assignment).
372 * The callback function should return 0 to accept the allocation
373 * and non-zero to refuse it. The callback function return value is
374 * ignored for invalidations (i.e., when the "allocated" parameter is false)
376 * Prior requests for a given labelid are detected so that requests and
377 * assignments are not duplicated.
382 int (*cbfunc
)(mpls_label_t label
, void *labelid
, bool allocated
))
386 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
389 zlog_debug("%s: labelid=%p", __func__
, labelid
);
392 * Have we seen this request before?
394 if (!skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
397 lcb
= lcb_alloc(type
, labelid
, cbfunc
);
399 zlog_debug("%s: inserting lcb=%p label=%u",
400 __func__
, lcb
, lcb
->label
);
401 int rc
= skiplist_insert(lp
->ledger
, labelid
, lcb
);
404 /* shouldn't happen */
405 flog_err(EC_BGP_LABEL
,
406 "%s: can't insert new LCB into ledger list",
408 XFREE(MTYPE_BGP_LABEL_CB
, lcb
);
413 if (lcb
->label
!= MPLS_LABEL_NONE
) {
415 * Fast path: we filled the request from local pool (or
416 * this is a duplicate request that we filled already).
417 * Enqueue response work item with new label.
419 struct lp_cbq_item
*q
;
421 q
= XCALLOC(MTYPE_BGP_LABEL_CBQ
, sizeof(struct lp_cbq_item
));
423 q
->cbfunc
= lcb
->cbfunc
;
425 q
->label
= lcb
->label
;
426 q
->labelid
= lcb
->labelid
;
429 /* if this is a LU request, lock node before queueing */
430 check_bgp_lu_cb_lock(lcb
);
432 work_queue_add(lp
->callback_q
, q
);
441 zlog_debug("%s: slow path. lcb=%p label=%u",
442 __func__
, lcb
, lcb
->label
);
445 * Slow path: we are out of labels in the local pool,
446 * so remember the request and also get another chunk from
449 * We track number of outstanding label requests: don't
450 * need to get a chunk for each one.
453 struct lp_fifo
*lf
= XCALLOC(MTYPE_BGP_LABEL_FIFO
,
454 sizeof(struct lp_fifo
));
457 /* if this is a LU request, lock node before queueing */
458 check_bgp_lu_cb_lock(lcb
);
460 lp_fifo_add_tail(&lp
->requests
, lf
);
462 if (lp_fifo_count(&lp
->requests
) > lp
->pending_count
) {
463 if (!zclient
|| zclient
->sock
< 0)
465 if (zclient_send_get_label_chunk(zclient
, 0, lp
->next_chunksize
,
466 MPLS_LABEL_BASE_ANY
) !=
467 ZCLIENT_SEND_FAILURE
) {
468 lp
->pending_count
+= lp
->next_chunksize
;
469 if ((lp
->next_chunksize
<< 1) <= LP_CHUNK_SIZE_MAX
)
470 lp
->next_chunksize
<<= 1;
482 if (!skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
483 if (label
== lcb
->label
&& type
== lcb
->type
) {
484 struct listnode
*node
;
485 struct lp_chunk
*chunk
;
486 uintptr_t lbl
= label
;
487 bool deallocated
= false;
489 /* no longer in use */
490 skiplist_delete(lp
->inuse
, (void *)lbl
, NULL
);
492 /* no longer requested */
493 skiplist_delete(lp
->ledger
, labelid
, NULL
);
496 * Find the chunk this label belongs to and
497 * deallocate the label
499 for (ALL_LIST_ELEMENTS_RO(lp
->chunks
, node
, chunk
)) {
502 if ((label
< chunk
->first
) ||
503 (label
> chunk
->last
))
506 index
= label
- chunk
->first
;
507 assert(bf_test_index(chunk
->allocated_map
,
509 bf_release_index(chunk
->allocated_map
, index
);
519 * zebra response giving us a chunk of labels
521 void bgp_lp_event_chunk(uint8_t keep
, uint32_t first
, uint32_t last
)
523 struct lp_chunk
*chunk
;
524 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
529 flog_err(EC_BGP_LABEL
,
530 "%s: zebra label chunk invalid: first=%u, last=%u",
531 __func__
, first
, last
);
535 chunk
= XCALLOC(MTYPE_BGP_LABEL_CHUNK
, sizeof(struct lp_chunk
));
537 labelcount
= last
- first
+ 1;
539 chunk
->first
= first
;
541 chunk
->nfree
= labelcount
;
542 bf_init(chunk
->allocated_map
, labelcount
);
545 * Optimize for allocation by adding the new (presumably larger)
546 * chunk at the head of the list so it is examined first.
548 listnode_add_head(lp
->chunks
, chunk
);
550 lp
->pending_count
-= labelcount
;
553 zlog_debug("%s: %zu pending requests", __func__
,
554 lp_fifo_count(&lp
->requests
));
557 while (labelcount
&& (lf
= lp_fifo_first(&lp
->requests
))) {
560 void *labelid
= lf
->lcb
.labelid
;
562 if (skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
563 /* request no longer in effect */
566 zlog_debug("%s: labelid %p: request no longer in effect",
569 /* if this was a BGP_LU request, unlock node
571 check_bgp_lu_cb_unlock(lcb
);
572 goto finishedrequest
;
576 if (lcb
->label
!= MPLS_LABEL_NONE
) {
577 /* request already has a label */
579 zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
581 lcb
->label
, lcb
->label
, lcb
);
583 /* if this was a BGP_LU request, unlock node
585 check_bgp_lu_cb_unlock(lcb
);
587 goto finishedrequest
;
590 lcb
->label
= get_label_from_pool(lcb
->labelid
);
592 if (lcb
->label
== MPLS_LABEL_NONE
) {
594 * Out of labels in local pool, await next chunk
597 zlog_debug("%s: out of labels, await more",
606 * we filled the request from local pool.
607 * Enqueue response work item with new label.
609 struct lp_cbq_item
*q
= XCALLOC(MTYPE_BGP_LABEL_CBQ
,
610 sizeof(struct lp_cbq_item
));
612 q
->cbfunc
= lcb
->cbfunc
;
614 q
->label
= lcb
->label
;
615 q
->labelid
= lcb
->labelid
;
619 zlog_debug("%s: assigning label %u to labelid %p",
620 __func__
, q
->label
, q
->labelid
);
622 work_queue_add(lp
->callback_q
, q
);
625 lp_fifo_del(&lp
->requests
, lf
);
626 XFREE(MTYPE_BGP_LABEL_FIFO
, lf
);
631 * continue using allocated labels until zebra returns
633 void bgp_lp_event_zebra_down(void)
639 * Inform owners of previously-allocated labels that their labels
640 * are not valid. Request chunk from zebra large enough to satisfy
641 * previously-allocated labels plus any outstanding requests.
643 void bgp_lp_event_zebra_up(void)
645 unsigned int labels_needed
;
646 unsigned int chunks_needed
;
651 lp
->reconnect_count
++;
653 * Get label chunk allocation request dispatched to zebra
655 labels_needed
= lp_fifo_count(&lp
->requests
) +
656 skiplist_count(lp
->inuse
);
658 if (labels_needed
> lp
->next_chunksize
) {
659 while ((lp
->next_chunksize
< labels_needed
) &&
660 (lp
->next_chunksize
<< 1 <= LP_CHUNK_SIZE_MAX
))
662 lp
->next_chunksize
<<= 1;
666 chunks_needed
= (labels_needed
/ lp
->next_chunksize
) + 1;
667 labels_needed
= chunks_needed
* lp
->next_chunksize
;
669 lm_init_ok
= lm_label_manager_connect(zclient
, 1) == 0;
672 zlog_err("%s: label manager connection error", __func__
);
676 zclient_send_get_label_chunk(zclient
, 0, labels_needed
,
677 MPLS_LABEL_BASE_ANY
);
678 lp
->pending_count
= labels_needed
;
681 * Invalidate current list of chunks
683 list_delete_all_node(lp
->chunks
);
686 * Invalidate any existing labels and requeue them as requests
688 while (!skiplist_first(lp
->inuse
, NULL
, &labelid
)) {
693 if (!skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
695 if (lcb
->label
!= MPLS_LABEL_NONE
) {
699 struct lp_cbq_item
*q
;
701 q
= XCALLOC(MTYPE_BGP_LABEL_CBQ
,
702 sizeof(struct lp_cbq_item
));
703 q
->cbfunc
= lcb
->cbfunc
;
705 q
->label
= lcb
->label
;
706 q
->labelid
= lcb
->labelid
;
707 q
->allocated
= false;
708 check_bgp_lu_cb_lock(lcb
);
709 work_queue_add(lp
->callback_q
, q
);
711 lcb
->label
= MPLS_LABEL_NONE
;
717 struct lp_fifo
*lf
= XCALLOC(MTYPE_BGP_LABEL_FIFO
,
718 sizeof(struct lp_fifo
));
721 check_bgp_lu_cb_lock(lcb
);
722 lp_fifo_add_tail(&lp
->requests
, lf
);
725 skiplist_delete_first(lp
->inuse
);
729 DEFUN(show_bgp_labelpool_summary
, show_bgp_labelpool_summary_cmd
,
730 "show bgp labelpool summary [json]",
732 "BGP Labelpool information\n"
733 "BGP Labelpool summary\n" JSON_STR
)
735 bool uj
= use_json(argc
, argv
);
736 json_object
*json
= NULL
;
740 vty_out(vty
, "{}\n");
742 vty_out(vty
, "No existing BGP labelpool\n");
743 return (CMD_WARNING
);
747 json
= json_object_new_object();
748 #if CONFDATE > 20230131
749 CPP_NOTICE("Remove JSON object commands with keys starting with capital")
751 json_object_int_add(json
, "Ledger", skiplist_count(lp
->ledger
));
752 json_object_int_add(json
, "ledger", skiplist_count(lp
->ledger
));
753 json_object_int_add(json
, "InUse", skiplist_count(lp
->inuse
));
754 json_object_int_add(json
, "inUse", skiplist_count(lp
->inuse
));
755 json_object_int_add(json
, "Requests",
756 lp_fifo_count(&lp
->requests
));
757 json_object_int_add(json
, "requests",
758 lp_fifo_count(&lp
->requests
));
759 json_object_int_add(json
, "LabelChunks", listcount(lp
->chunks
));
760 json_object_int_add(json
, "labelChunks", listcount(lp
->chunks
));
761 json_object_int_add(json
, "Pending", lp
->pending_count
);
762 json_object_int_add(json
, "pending", lp
->pending_count
);
763 json_object_int_add(json
, "Reconnects", lp
->reconnect_count
);
764 json_object_int_add(json
, "reconnects", lp
->reconnect_count
);
767 vty_out(vty
, "Labelpool Summary\n");
768 vty_out(vty
, "-----------------\n");
769 vty_out(vty
, "%-13s %d\n",
770 "Ledger:", skiplist_count(lp
->ledger
));
771 vty_out(vty
, "%-13s %d\n", "InUse:", skiplist_count(lp
->inuse
));
772 vty_out(vty
, "%-13s %zu\n",
773 "Requests:", lp_fifo_count(&lp
->requests
));
774 vty_out(vty
, "%-13s %d\n",
775 "LabelChunks:", listcount(lp
->chunks
));
776 vty_out(vty
, "%-13s %d\n", "Pending:", lp
->pending_count
);
777 vty_out(vty
, "%-13s %d\n", "Reconnects:", lp
->reconnect_count
);
782 DEFUN(show_bgp_labelpool_ledger
, show_bgp_labelpool_ledger_cmd
,
783 "show bgp labelpool ledger [json]",
785 "BGP Labelpool information\n"
786 "BGP Labelpool ledger\n" JSON_STR
)
788 bool uj
= use_json(argc
, argv
);
789 json_object
*json
= NULL
, *json_elem
= NULL
;
790 struct lp_lcb
*lcb
= NULL
;
791 struct bgp_dest
*dest
;
793 const struct prefix
*p
;
798 vty_out(vty
, "{}\n");
800 vty_out(vty
, "No existing BGP labelpool\n");
801 return (CMD_WARNING
);
805 count
= skiplist_count(lp
->ledger
);
807 vty_out(vty
, "{}\n");
810 json
= json_object_new_array();
812 vty_out(vty
, "Prefix Label\n");
813 vty_out(vty
, "---------------------------\n");
816 for (rc
= skiplist_next(lp
->ledger
, (void **)&dest
, (void **)&lcb
,
818 !rc
; rc
= skiplist_next(lp
->ledger
, (void **)&dest
, (void **)&lcb
,
821 json_elem
= json_object_new_object();
822 json_object_array_add(json
, json_elem
);
826 if (!CHECK_FLAG(dest
->flags
, BGP_NODE_LABEL_REQUESTED
))
828 json_object_string_add(
829 json_elem
, "prefix", "INVALID");
830 json_object_int_add(json_elem
, "label",
833 vty_out(vty
, "%-18s %u\n",
834 "INVALID", lcb
->label
);
836 p
= bgp_dest_get_prefix(dest
);
838 json_object_string_addf(
839 json_elem
, "prefix", "%pFX", p
);
840 json_object_int_add(json_elem
, "label",
843 vty_out(vty
, "%-18pFX %u\n", p
,
849 json_object_string_add(json_elem
, "prefix",
851 json_object_int_add(json_elem
, "label",
854 vty_out(vty
, "%-18s %u\n", "VRF",
865 DEFUN(show_bgp_labelpool_inuse
, show_bgp_labelpool_inuse_cmd
,
866 "show bgp labelpool inuse [json]",
868 "BGP Labelpool information\n"
869 "BGP Labelpool inuse\n" JSON_STR
)
871 bool uj
= use_json(argc
, argv
);
872 json_object
*json
= NULL
, *json_elem
= NULL
;
873 struct bgp_dest
*dest
;
877 const struct prefix
*p
;
881 vty_out(vty
, "No existing BGP labelpool\n");
882 return (CMD_WARNING
);
886 vty_out(vty
, "{}\n");
888 vty_out(vty
, "No existing BGP labelpool\n");
889 return (CMD_WARNING
);
893 count
= skiplist_count(lp
->inuse
);
895 vty_out(vty
, "{}\n");
898 json
= json_object_new_array();
900 vty_out(vty
, "Prefix Label\n");
901 vty_out(vty
, "---------------------------\n");
903 for (rc
= skiplist_next(lp
->inuse
, (void **)&label
, (void **)&dest
,
905 !rc
; rc
= skiplist_next(lp
->ledger
, (void **)&label
,
906 (void **)&dest
, &cursor
)) {
907 if (skiplist_search(lp
->ledger
, dest
, (void **)&lcb
))
911 json_elem
= json_object_new_object();
912 json_object_array_add(json
, json_elem
);
917 if (!CHECK_FLAG(dest
->flags
, BGP_NODE_LABEL_REQUESTED
))
919 json_object_string_add(
920 json_elem
, "prefix", "INVALID");
921 json_object_int_add(json_elem
, "label",
924 vty_out(vty
, "INVALID %u\n",
927 p
= bgp_dest_get_prefix(dest
);
929 json_object_string_addf(
930 json_elem
, "prefix", "%pFX", p
);
931 json_object_int_add(json_elem
, "label",
934 vty_out(vty
, "%-18pFX %u\n", p
,
940 json_object_string_add(json_elem
, "prefix",
942 json_object_int_add(json_elem
, "label", label
);
944 vty_out(vty
, "%-18s %u\n", "VRF",
954 DEFUN(show_bgp_labelpool_requests
, show_bgp_labelpool_requests_cmd
,
955 "show bgp labelpool requests [json]",
957 "BGP Labelpool information\n"
958 "BGP Labelpool requests\n" JSON_STR
)
960 bool uj
= use_json(argc
, argv
);
961 json_object
*json
= NULL
, *json_elem
= NULL
;
962 struct bgp_dest
*dest
;
963 const struct prefix
*p
;
964 struct lp_fifo
*item
, *next
;
969 vty_out(vty
, "{}\n");
971 vty_out(vty
, "No existing BGP labelpool\n");
972 return (CMD_WARNING
);
976 count
= lp_fifo_count(&lp
->requests
);
978 vty_out(vty
, "{}\n");
981 json
= json_object_new_array();
983 vty_out(vty
, "Prefix \n");
984 vty_out(vty
, "----------------\n");
987 for (item
= lp_fifo_first(&lp
->requests
); item
; item
= next
) {
988 next
= lp_fifo_next_safe(&lp
->requests
, item
);
989 dest
= item
->lcb
.labelid
;
991 json_elem
= json_object_new_object();
992 json_object_array_add(json
, json_elem
);
994 switch (item
->lcb
.type
) {
996 if (!CHECK_FLAG(dest
->flags
,
997 BGP_NODE_LABEL_REQUESTED
)) {
999 json_object_string_add(
1000 json_elem
, "prefix", "INVALID");
1002 vty_out(vty
, "INVALID\n");
1004 p
= bgp_dest_get_prefix(dest
);
1006 json_object_string_addf(
1007 json_elem
, "prefix", "%pFX", p
);
1009 vty_out(vty
, "%-18pFX\n", p
);
1014 json_object_string_add(json_elem
, "prefix",
1017 vty_out(vty
, "VRF\n");
1022 vty_json(vty
, json
);
1026 DEFUN(show_bgp_labelpool_chunks
, show_bgp_labelpool_chunks_cmd
,
1027 "show bgp labelpool chunks [json]",
1029 "BGP Labelpool information\n"
1030 "BGP Labelpool chunks\n" JSON_STR
)
1032 bool uj
= use_json(argc
, argv
);
1033 json_object
*json
= NULL
, *json_elem
;
1034 struct listnode
*node
;
1035 struct lp_chunk
*chunk
;
1040 vty_out(vty
, "{}\n");
1042 vty_out(vty
, "No existing BGP labelpool\n");
1043 return (CMD_WARNING
);
1047 count
= listcount(lp
->chunks
);
1049 vty_out(vty
, "{}\n");
1052 json
= json_object_new_array();
1054 vty_out(vty
, "%10s %10s %10s %10s\n", "First", "Last", "Size",
1056 vty_out(vty
, "-------------------------------------------\n");
1059 for (ALL_LIST_ELEMENTS_RO(lp
->chunks
, node
, chunk
)) {
1062 size
= chunk
->last
- chunk
->first
+ 1;
1065 json_elem
= json_object_new_object();
1066 json_object_array_add(json
, json_elem
);
1067 json_object_int_add(json_elem
, "first", chunk
->first
);
1068 json_object_int_add(json_elem
, "last", chunk
->last
);
1069 json_object_int_add(json_elem
, "size", size
);
1070 json_object_int_add(json_elem
, "numberFree",
1073 vty_out(vty
, "%10u %10u %10u %10u\n", chunk
->first
,
1074 chunk
->last
, size
, chunk
->nfree
);
1077 vty_json(vty
, json
);
1081 #if BGP_LABELPOOL_ENABLE_TESTS
1082 /*------------------------------------------------------------------------
1083 * Testing code start
1084 *------------------------------------------------------------------------*/
1086 DEFINE_MTYPE_STATIC(BGPD
, LABELPOOL_TEST
, "Label pool test");
1088 #define LPT_STAT_INSERT_FAIL 0
1089 #define LPT_STAT_DELETE_FAIL 1
1090 #define LPT_STAT_ALLOCATED 2
1091 #define LPT_STAT_DEALLOCATED 3
1092 #define LPT_STAT_MAX 4
1094 const char *lpt_counter_names
[] = {
1095 "sl insert failures",
1096 "sl delete failures",
1098 "labels deallocated",
1101 static uint8_t lpt_generation
;
1102 static bool lpt_inprogress
;
1103 static struct skiplist
*lp_tests
;
1104 static unsigned int lpt_test_cb_tcb_lookup_fails
;
1105 static unsigned int lpt_release_tcb_lookup_fails
;
1106 static unsigned int lpt_test_event_tcb_lookup_fails
;
1107 static unsigned int lpt_stop_tcb_lookup_fails
;
1111 unsigned int request_maximum
;
1112 unsigned int request_blocksize
;
1113 uintptr_t request_count
; /* match type of labelid */
1115 struct skiplist
*labels
;
1116 struct timeval starttime
;
1117 struct skiplist
*timestamps_alloc
;
1118 struct skiplist
*timestamps_dealloc
;
1119 struct thread
*event_thread
;
1120 unsigned int counter
[LPT_STAT_MAX
];
1123 /* test parameters */
1124 #define LPT_MAX_COUNT 500000 /* get this many labels in all */
1125 #define LPT_BLKSIZE 10000 /* this many at a time, then yield */
1126 #define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
1129 static int test_cb(mpls_label_t label
, void *labelid
, bool allocated
)
1131 uintptr_t generation
;
1132 struct lp_test
*tcb
;
1134 generation
= ((uintptr_t)labelid
>> 24) & 0xff;
1136 if (skiplist_search(lp_tests
, (void *)generation
, (void **)&tcb
)) {
1138 /* couldn't find current test in progress */
1139 ++lpt_test_cb_tcb_lookup_fails
;
1140 return -1; /* reject allocation */
1144 ++tcb
->counter
[LPT_STAT_ALLOCATED
];
1145 if (!(tcb
->counter
[LPT_STAT_ALLOCATED
] % LPT_TS_INTERVAL
)) {
1148 time_ms
= monotime_since(&tcb
->starttime
, NULL
) / 1000;
1149 skiplist_insert(tcb
->timestamps_alloc
,
1150 (void *)(uintptr_t)tcb
1151 ->counter
[LPT_STAT_ALLOCATED
],
1154 if (skiplist_insert(tcb
->labels
, labelid
,
1155 (void *)(uintptr_t)label
)) {
1156 ++tcb
->counter
[LPT_STAT_INSERT_FAIL
];
1160 ++tcb
->counter
[LPT_STAT_DEALLOCATED
];
1161 if (!(tcb
->counter
[LPT_STAT_DEALLOCATED
] % LPT_TS_INTERVAL
)) {
1164 time_ms
= monotime_since(&tcb
->starttime
, NULL
) / 1000;
1165 skiplist_insert(tcb
->timestamps_dealloc
,
1166 (void *)(uintptr_t)tcb
1167 ->counter
[LPT_STAT_ALLOCATED
],
1170 if (skiplist_delete(tcb
->labels
, labelid
, 0)) {
1171 ++tcb
->counter
[LPT_STAT_DELETE_FAIL
];
1178 static void labelpool_test_event_handler(struct thread
*thread
)
1180 struct lp_test
*tcb
;
1182 if (skiplist_search(lp_tests
, (void *)(uintptr_t)(lpt_generation
),
1185 /* couldn't find current test in progress */
1186 ++lpt_test_event_tcb_lookup_fails
;
1191 * request a bunch of labels
1193 for (unsigned int i
= 0; (i
< tcb
->request_blocksize
) &&
1194 (tcb
->request_count
< tcb
->request_maximum
);
1199 ++tcb
->request_count
;
1202 * construct 32-bit id from request_count and generation
1204 id
= ((uintptr_t)tcb
->generation
<< 24) |
1205 (tcb
->request_count
& 0x00ffffff);
1206 bgp_lp_get(LP_TYPE_VRF
, (void *)id
, test_cb
);
1209 if (tcb
->request_count
< tcb
->request_maximum
)
1210 thread_add_event(bm
->master
, labelpool_test_event_handler
, NULL
,
1211 0, &tcb
->event_thread
);
1214 static void lptest_stop(void)
1216 struct lp_test
*tcb
;
1218 if (!lpt_inprogress
)
1221 if (skiplist_search(lp_tests
, (void *)(uintptr_t)(lpt_generation
),
1224 /* couldn't find current test in progress */
1225 ++lpt_stop_tcb_lookup_fails
;
1229 if (tcb
->event_thread
)
1230 thread_cancel(&tcb
->event_thread
);
1232 lpt_inprogress
= false;
1235 static int lptest_start(struct vty
*vty
)
1237 struct lp_test
*tcb
;
1239 if (lpt_inprogress
) {
1240 vty_out(vty
, "test already in progress\n");
1244 if (skiplist_count(lp_tests
) >=
1245 (1 << (8 * sizeof(lpt_generation
))) - 1) {
1247 * Too many test runs
1249 vty_out(vty
, "too many tests: clear first\n");
1254 * We pack the generation and request number into the labelid;
1255 * make sure they fit.
1257 unsigned int n1
= LPT_MAX_COUNT
;
1258 unsigned int sh
= 0;
1259 unsigned int label_bits
;
1261 label_bits
= 8 * (sizeof(tcb
->request_count
) - sizeof(lpt_generation
));
1263 /* n1 should be same type as tcb->request_maximum */
1264 assert(sizeof(n1
) == sizeof(tcb
->request_maximum
));
1268 sh
+= 1; /* number of bits needed to hold LPT_MAX_COUNT */
1270 if (sh
> label_bits
) {
1272 "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
1273 LPT_MAX_COUNT
, sh
, label_bits
);
1277 lpt_inprogress
= true;
1280 tcb
= XCALLOC(MTYPE_LABELPOOL_TEST
, sizeof(*tcb
));
1282 tcb
->generation
= lpt_generation
;
1283 tcb
->label_type
= LP_TYPE_VRF
;
1284 tcb
->request_maximum
= LPT_MAX_COUNT
;
1285 tcb
->request_blocksize
= LPT_BLKSIZE
;
1286 tcb
->labels
= skiplist_new(0, NULL
, NULL
);
1287 tcb
->timestamps_alloc
= skiplist_new(0, NULL
, NULL
);
1288 tcb
->timestamps_dealloc
= skiplist_new(0, NULL
, NULL
);
1289 thread_add_event(bm
->master
, labelpool_test_event_handler
, NULL
, 0,
1290 &tcb
->event_thread
);
1291 monotime(&tcb
->starttime
);
1293 skiplist_insert(lp_tests
, (void *)(uintptr_t)tcb
->generation
, tcb
);
1297 DEFPY(start_labelpool_perf_test
, start_labelpool_perf_test_cmd
,
1298 "debug bgp lptest start",
1307 static void lptest_print_stats(struct vty
*vty
, struct lp_test
*tcb
)
1311 vty_out(vty
, "Global Lookup Failures in test_cb: %5u\n",
1312 lpt_test_cb_tcb_lookup_fails
);
1313 vty_out(vty
, "Global Lookup Failures in release: %5u\n",
1314 lpt_release_tcb_lookup_fails
);
1315 vty_out(vty
, "Global Lookup Failures in event: %5u\n",
1316 lpt_test_event_tcb_lookup_fails
);
1317 vty_out(vty
, "Global Lookup Failures in stop: %5u\n",
1318 lpt_stop_tcb_lookup_fails
);
1322 if (skiplist_search(lp_tests
, (void *)(uintptr_t)lpt_generation
,
1324 vty_out(vty
, "Error: can't find test %u\n",
1330 vty_out(vty
, "Test Generation %u:\n", tcb
->generation
);
1332 vty_out(vty
, "Counter Value\n");
1333 for (i
= 0; i
< LPT_STAT_MAX
; ++i
) {
1334 vty_out(vty
, "%20s: %10u\n", lpt_counter_names
[i
],
1339 if (tcb
->timestamps_alloc
) {
1346 vty_out(vty
, "%10s %10s\n", "Count", "Seconds");
1349 while (!skiplist_next(tcb
->timestamps_alloc
, &Key
, &Value
,
1352 elapsed
= ((float)(uintptr_t)Value
) / 1000;
1354 vty_out(vty
, "%10llu %10.3f\n",
1355 (unsigned long long)(uintptr_t)Key
, elapsed
);
1361 DEFPY(show_labelpool_perf_test
, show_labelpool_perf_test_cmd
,
1362 "debug bgp lptest show",
1374 while (!skiplist_next(lp_tests
, &Key
, &Value
, &cursor
)) {
1375 lptest_print_stats(vty
, (struct lp_test
*)Value
);
1378 vty_out(vty
, "no test results\n");
1383 DEFPY(stop_labelpool_perf_test
, stop_labelpool_perf_test_cmd
,
1384 "debug bgp lptest stop",
1390 if (lpt_inprogress
) {
1392 lptest_print_stats(vty
, NULL
);
1394 vty_out(vty
, "no test in progress\n");
1399 DEFPY(clear_labelpool_perf_test
, clear_labelpool_perf_test_cmd
,
1400 "debug bgp lptest clear",
1406 if (lpt_inprogress
) {
1410 while (!skiplist_first(lp_tests
, NULL
, NULL
))
1411 /* del function of skiplist cleans up tcbs */
1412 skiplist_delete_first(lp_tests
);
1418 * With the "release" command, we can release labels at intervals through
1419 * the ID space. Thus we can to exercise the bitfield-wrapping behavior
1420 * of the allocator in a subsequent test.
1422 /* clang-format off */
1423 DEFPY(release_labelpool_perf_test
, release_labelpool_perf_test_cmd
,
1424 "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
1432 "label fraction denominator\n")
1434 /* clang-format on */
1436 unsigned long testnum
;
1438 struct lp_test
*tcb
;
1440 testnum
= strtoul(generation
, &end
, 0);
1442 vty_out(vty
, "Invalid test number: \"%s\"\n", generation
);
1445 if (lpt_inprogress
&& (testnum
== lpt_generation
)) {
1447 "Error: Test %lu is still in progress (stop first)\n",
1452 if (skiplist_search(lp_tests
, (void *)(uintptr_t)testnum
,
1455 /* couldn't find current test in progress */
1456 vty_out(vty
, "Error: Can't look up test number: \"%lu\"\n",
1458 ++lpt_release_tcb_lookup_fails
;
1463 void *Value
, *cValue
;
1465 unsigned int iteration
;
1470 rc
= skiplist_next(tcb
->labels
, &Key
, &Value
, &cursor
);
1476 /* find next item before we delete this one */
1477 rc
= skiplist_next(tcb
->labels
, &Key
, &Value
, &cursor
);
1479 if (!(iteration
% every_nth
)) {
1480 bgp_lp_release(tcb
->label_type
, cKey
,
1481 (mpls_label_t
)(uintptr_t)cValue
);
1482 skiplist_delete(tcb
->labels
, cKey
, NULL
);
1483 ++tcb
->counter
[LPT_STAT_DEALLOCATED
];
1491 static void lptest_delete(void *val
)
1493 struct lp_test
*tcb
= (struct lp_test
*)val
;
1500 while (!skiplist_next(tcb
->labels
, &Key
, &Value
, &cursor
))
1501 bgp_lp_release(tcb
->label_type
, Key
,
1502 (mpls_label_t
)(uintptr_t)Value
);
1503 skiplist_free(tcb
->labels
);
1506 if (tcb
->timestamps_alloc
) {
1508 skiplist_free(tcb
->timestamps_alloc
);
1509 tcb
->timestamps_alloc
= NULL
;
1512 if (tcb
->timestamps_dealloc
) {
1514 skiplist_free(tcb
->timestamps_dealloc
);
1515 tcb
->timestamps_dealloc
= NULL
;
1518 if (tcb
->event_thread
)
1519 thread_cancel(&tcb
->event_thread
);
1521 memset(tcb
, 0, sizeof(*tcb
));
1523 XFREE(MTYPE_LABELPOOL_TEST
, tcb
);
1526 static void lptest_init(void)
1528 lp_tests
= skiplist_new(0, NULL
, lptest_delete
);
1531 static void lptest_finish(void)
1534 skiplist_free(lp_tests
);
1539 /*------------------------------------------------------------------------
1541 *------------------------------------------------------------------------*/
1542 #endif /* BGP_LABELPOOL_ENABLE_TESTS */
1544 void bgp_lp_vty_init(void)
1546 install_element(VIEW_NODE
, &show_bgp_labelpool_summary_cmd
);
1547 install_element(VIEW_NODE
, &show_bgp_labelpool_ledger_cmd
);
1548 install_element(VIEW_NODE
, &show_bgp_labelpool_inuse_cmd
);
1549 install_element(VIEW_NODE
, &show_bgp_labelpool_requests_cmd
);
1550 install_element(VIEW_NODE
, &show_bgp_labelpool_chunks_cmd
);
1552 #if BGP_LABELPOOL_ENABLE_TESTS
1553 install_element(ENABLE_NODE
, &start_labelpool_perf_test_cmd
);
1554 install_element(ENABLE_NODE
, &show_labelpool_perf_test_cmd
);
1555 install_element(ENABLE_NODE
, &stop_labelpool_perf_test_cmd
);
1556 install_element(ENABLE_NODE
, &release_labelpool_perf_test_cmd
);
1557 install_element(ENABLE_NODE
, &clear_labelpool_perf_test_cmd
);
1558 #endif /* BGP_LABELPOOL_ENABLE_TESTS */