/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
30 #include "workqueue.h"
34 #include "bgpd/bgpd.h"
35 #include "bgpd/bgp_labelpool.h"
36 #include "bgpd/bgp_debug.h"
37 #include "bgpd/bgp_errors.h"
38 #include "bgpd/bgp_route.h"
40 #define BGP_LABELPOOL_ENABLE_TESTS 0
42 #include "bgpd/bgp_labelpool_clippy.c"
46 * Definitions and external declarations.
48 extern struct zclient
*zclient
;
50 #if BGP_LABELPOOL_ENABLE_TESTS
51 static void lptest_init(void);
52 static void lptest_finish(void);
56 * Remember where pool data are kept
58 static struct labelpool
*lp
;
61 * Number of labels requested at a time from the zebra label manager.
62 * We start small but double the request size each time up to a
65 * The label space is 20 bits which is shared with other FRR processes
66 * on this host, so to avoid greedily requesting a mostly wasted chunk,
67 * we limit the chunk size to 1/16 of the label space (that's the -4 bits
68 * in the definition below). This limit slightly increases our cost of
69 * finding free labels in our allocated chunks.
71 #define LP_CHUNK_SIZE_MIN 128
72 #define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
74 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_CHUNK
, "BGP Label Chunk");
75 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_FIFO
, "BGP Label FIFO item");
76 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_CB
, "BGP Dynamic Label Assignment");
77 DEFINE_MTYPE_STATIC(BGPD
, BGP_LABEL_CBQ
, "BGP Dynamic Label Callback");
82 uint32_t nfree
; /* un-allocated count */
83 uint32_t idx_last_allocated
; /* start looking here */
84 bitfield_t allocated_map
;
91 mpls_label_t label
; /* MPLS_LABEL_NONE = not allocated */
93 void *labelid
; /* unique ID */
95 * callback for label allocation and loss
97 * allocated: false = lost
99 int (*cbfunc
)(mpls_label_t label
, void *lblid
, bool alloc
);
103 struct lp_fifo_item fifo
;
107 DECLARE_LIST(lp_fifo
, struct lp_fifo
, fifo
);
110 int (*cbfunc
)(mpls_label_t label
, void *lblid
, bool alloc
);
114 bool allocated
; /* false = lost */
117 static wq_item_status
lp_cbq_docallback(struct work_queue
*wq
, void *data
)
119 struct lp_cbq_item
*lcbq
= data
;
121 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
124 zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
125 __func__
, lcbq
->labelid
, lcbq
->label
, lcbq
->allocated
);
127 if (lcbq
->label
== MPLS_LABEL_NONE
) {
128 /* shouldn't happen */
129 flog_err(EC_BGP_LABEL
, "%s: error: label==MPLS_LABEL_NONE",
134 rc
= (*(lcbq
->cbfunc
))(lcbq
->label
, lcbq
->labelid
, lcbq
->allocated
);
136 if (lcbq
->allocated
&& rc
) {
138 * Callback rejected allocation. This situation could arise
139 * if there was a label request followed by the requestor
140 * deciding it didn't need the assignment (e.g., config
141 * change) while the reply to the original request (with
142 * label) was in the work queue.
145 zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
146 __func__
, lcbq
->labelid
, lcbq
->label
);
148 uintptr_t lbl
= lcbq
->label
;
153 * If the rejected label was marked inuse by this labelid,
154 * release the label back to the pool.
156 * Further, if the rejected label was still assigned to
157 * this labelid in the LCB, delete the LCB.
159 if (!skiplist_search(lp
->inuse
, (void *)lbl
, &labelid
)) {
160 if (labelid
== lcbq
->labelid
) {
161 if (!skiplist_search(lp
->ledger
, labelid
,
163 if (lcbq
->label
== lcb
->label
)
164 skiplist_delete(lp
->ledger
,
167 skiplist_delete(lp
->inuse
, (void *)lbl
, NULL
);
175 static void lp_cbq_item_free(struct work_queue
*wq
, void *data
)
177 XFREE(MTYPE_BGP_LABEL_CBQ
, data
);
180 static void lp_lcb_free(void *goner
)
182 XFREE(MTYPE_BGP_LABEL_CB
, goner
);
185 static void lp_chunk_free(void *goner
)
187 struct lp_chunk
*chunk
= (struct lp_chunk
*)goner
;
189 bf_free(chunk
->allocated_map
);
190 XFREE(MTYPE_BGP_LABEL_CHUNK
, goner
);
193 void bgp_lp_init(struct thread_master
*master
, struct labelpool
*pool
)
195 if (BGP_DEBUG(labelpool
, LABELPOOL
))
196 zlog_debug("%s: entry", __func__
);
198 lp
= pool
; /* Set module pointer to pool data */
200 lp
->ledger
= skiplist_new(0, NULL
, lp_lcb_free
);
201 lp
->inuse
= skiplist_new(0, NULL
, NULL
);
202 lp
->chunks
= list_new();
203 lp
->chunks
->del
= lp_chunk_free
;
204 lp_fifo_init(&lp
->requests
);
205 lp
->callback_q
= work_queue_new(master
, "label callbacks");
207 lp
->callback_q
->spec
.workfunc
= lp_cbq_docallback
;
208 lp
->callback_q
->spec
.del_item_data
= lp_cbq_item_free
;
209 lp
->callback_q
->spec
.max_retries
= 0;
211 lp
->next_chunksize
= LP_CHUNK_SIZE_MIN
;
213 #if BGP_LABELPOOL_ENABLE_TESTS
218 /* check if a label callback was for a BGP LU node, and if so, unlock it */
219 static void check_bgp_lu_cb_unlock(struct lp_lcb
*lcb
)
221 if (lcb
->type
== LP_TYPE_BGP_LU
)
222 bgp_dest_unlock_node(lcb
->labelid
);
225 /* check if a label callback was for a BGP LU node, and if so, lock it */
226 static void check_bgp_lu_cb_lock(struct lp_lcb
*lcb
)
228 if (lcb
->type
== LP_TYPE_BGP_LU
)
229 bgp_dest_lock_node(lcb
->labelid
);
232 void bgp_lp_finish(void)
235 struct work_queue_item
*item
, *titem
;
237 #if BGP_LABELPOOL_ENABLE_TESTS
243 skiplist_free(lp
->ledger
);
246 skiplist_free(lp
->inuse
);
249 list_delete(&lp
->chunks
);
251 while ((lf
= lp_fifo_pop(&lp
->requests
))) {
252 check_bgp_lu_cb_unlock(&lf
->lcb
);
253 XFREE(MTYPE_BGP_LABEL_FIFO
, lf
);
255 lp_fifo_fini(&lp
->requests
);
257 /* we must unlock path infos for LU callbacks; but we cannot do that
258 * in the deletion callback of the workqueue, as that is also called
259 * to remove an element from the queue after it has been run, resulting
260 * in a double unlock. Hence we need to iterate over our queues and
261 * lists and manually perform the unlocking (ugh)
263 STAILQ_FOREACH_SAFE (item
, &lp
->callback_q
->items
, wq
, titem
)
264 check_bgp_lu_cb_unlock(item
->data
);
266 work_queue_free_and_null(&lp
->callback_q
);
271 static mpls_label_t
get_label_from_pool(void *labelid
)
273 struct listnode
*node
;
274 struct lp_chunk
*chunk
;
275 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
280 for (ALL_LIST_ELEMENTS_RO(lp
->chunks
, node
, chunk
)) {
285 zlog_debug("%s: chunk first=%u last=%u",
286 __func__
, chunk
->first
, chunk
->last
);
289 * don't look in chunks with no available labels
295 * roll through bitfield starting where we stopped
298 index
= bf_find_next_clear_bit_wrap(
299 &chunk
->allocated_map
, chunk
->idx_last_allocated
+ 1,
303 * since chunk->nfree is non-zero, we should always get
306 assert(index
!= WORD_MAX
);
308 lbl
= chunk
->first
+ index
;
309 if (skiplist_insert(lp
->inuse
, (void *)lbl
, labelid
)) {
310 /* something is very wrong */
311 zlog_err("%s: unable to insert inuse label %u (id %p)",
312 __func__
, (uint32_t)lbl
, labelid
);
313 return MPLS_LABEL_NONE
;
319 bf_set_bit(chunk
->allocated_map
, index
);
320 chunk
->idx_last_allocated
= index
;
326 return MPLS_LABEL_NONE
;
330 * Success indicated by value of "label" field in returned LCB
332 static struct lp_lcb
*lcb_alloc(
335 int (*cbfunc
)(mpls_label_t label
, void *labelid
, bool allocated
))
338 * Set up label control block
340 struct lp_lcb
*new = XCALLOC(MTYPE_BGP_LABEL_CB
,
341 sizeof(struct lp_lcb
));
343 new->label
= get_label_from_pool(labelid
);
345 new->labelid
= labelid
;
346 new->cbfunc
= cbfunc
;
352 * Callers who need labels must supply a type, labelid, and callback.
353 * The type is a value defined in bgp_labelpool.h (add types as needed).
354 * The callback is for asynchronous notification of label allocation.
355 * The labelid is passed as an argument to the callback. It should be unique
356 * to the requested label instance.
358 * If zebra is not connected, callbacks with labels will be delayed
359 * until connection is established. If zebra connection is lost after
360 * labels have been assigned, existing assignments via this labelpool
361 * module will continue until reconnection.
363 * When connection to zebra is reestablished, previous label assignments
364 * will be invalidated (via callbacks having the "allocated" parameter unset)
365 * and new labels will be automatically reassigned by this labelpool module
366 * (that is, a requestor does not need to call bgp_lp_get() again if it is
367 * notified via callback that its label has been lost: it will eventually
368 * get another callback with a new label assignment).
370 * The callback function should return 0 to accept the allocation
371 * and non-zero to refuse it. The callback function return value is
372 * ignored for invalidations (i.e., when the "allocated" parameter is false)
374 * Prior requests for a given labelid are detected so that requests and
375 * assignments are not duplicated.
380 int (*cbfunc
)(mpls_label_t label
, void *labelid
, bool allocated
))
384 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
387 zlog_debug("%s: labelid=%p", __func__
, labelid
);
390 * Have we seen this request before?
392 if (!skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
395 lcb
= lcb_alloc(type
, labelid
, cbfunc
);
397 zlog_debug("%s: inserting lcb=%p label=%u",
398 __func__
, lcb
, lcb
->label
);
399 int rc
= skiplist_insert(lp
->ledger
, labelid
, lcb
);
402 /* shouldn't happen */
403 flog_err(EC_BGP_LABEL
,
404 "%s: can't insert new LCB into ledger list",
406 XFREE(MTYPE_BGP_LABEL_CB
, lcb
);
411 if (lcb
->label
!= MPLS_LABEL_NONE
) {
413 * Fast path: we filled the request from local pool (or
414 * this is a duplicate request that we filled already).
415 * Enqueue response work item with new label.
417 struct lp_cbq_item
*q
;
419 q
= XCALLOC(MTYPE_BGP_LABEL_CBQ
, sizeof(struct lp_cbq_item
));
421 q
->cbfunc
= lcb
->cbfunc
;
423 q
->label
= lcb
->label
;
424 q
->labelid
= lcb
->labelid
;
427 /* if this is a LU request, lock node before queueing */
428 check_bgp_lu_cb_lock(lcb
);
430 work_queue_add(lp
->callback_q
, q
);
439 zlog_debug("%s: slow path. lcb=%p label=%u",
440 __func__
, lcb
, lcb
->label
);
443 * Slow path: we are out of labels in the local pool,
444 * so remember the request and also get another chunk from
447 * We track number of outstanding label requests: don't
448 * need to get a chunk for each one.
451 struct lp_fifo
*lf
= XCALLOC(MTYPE_BGP_LABEL_FIFO
,
452 sizeof(struct lp_fifo
));
455 /* if this is a LU request, lock node before queueing */
456 check_bgp_lu_cb_lock(lcb
);
458 lp_fifo_add_tail(&lp
->requests
, lf
);
460 if (lp_fifo_count(&lp
->requests
) > lp
->pending_count
) {
461 if (!zclient
|| zclient
->sock
< 0)
463 if (zclient_send_get_label_chunk(zclient
, 0, lp
->next_chunksize
,
464 MPLS_LABEL_BASE_ANY
) !=
465 ZCLIENT_SEND_FAILURE
) {
466 lp
->pending_count
+= lp
->next_chunksize
;
467 if ((lp
->next_chunksize
<< 1) <= LP_CHUNK_SIZE_MAX
)
468 lp
->next_chunksize
<<= 1;
480 if (!skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
481 if (label
== lcb
->label
&& type
== lcb
->type
) {
482 struct listnode
*node
;
483 struct lp_chunk
*chunk
;
484 uintptr_t lbl
= label
;
485 bool deallocated
= false;
487 /* no longer in use */
488 skiplist_delete(lp
->inuse
, (void *)lbl
, NULL
);
490 /* no longer requested */
491 skiplist_delete(lp
->ledger
, labelid
, NULL
);
494 * Find the chunk this label belongs to and
495 * deallocate the label
497 for (ALL_LIST_ELEMENTS_RO(lp
->chunks
, node
, chunk
)) {
500 if ((label
< chunk
->first
) ||
501 (label
> chunk
->last
))
504 index
= label
- chunk
->first
;
505 assert(bf_test_index(chunk
->allocated_map
,
507 bf_release_index(chunk
->allocated_map
, index
);
517 * zebra response giving us a chunk of labels
519 void bgp_lp_event_chunk(uint8_t keep
, uint32_t first
, uint32_t last
)
521 struct lp_chunk
*chunk
;
522 int debug
= BGP_DEBUG(labelpool
, LABELPOOL
);
527 flog_err(EC_BGP_LABEL
,
528 "%s: zebra label chunk invalid: first=%u, last=%u",
529 __func__
, first
, last
);
533 chunk
= XCALLOC(MTYPE_BGP_LABEL_CHUNK
, sizeof(struct lp_chunk
));
535 labelcount
= last
- first
+ 1;
537 chunk
->first
= first
;
539 chunk
->nfree
= labelcount
;
540 bf_init(chunk
->allocated_map
, labelcount
);
543 * Optimize for allocation by adding the new (presumably larger)
544 * chunk at the head of the list so it is examined first.
546 listnode_add_head(lp
->chunks
, chunk
);
548 lp
->pending_count
-= labelcount
;
551 zlog_debug("%s: %zu pending requests", __func__
,
552 lp_fifo_count(&lp
->requests
));
555 while (labelcount
&& (lf
= lp_fifo_first(&lp
->requests
))) {
558 void *labelid
= lf
->lcb
.labelid
;
560 if (skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
561 /* request no longer in effect */
564 zlog_debug("%s: labelid %p: request no longer in effect",
567 /* if this was a BGP_LU request, unlock node
569 check_bgp_lu_cb_unlock(lcb
);
570 goto finishedrequest
;
574 if (lcb
->label
!= MPLS_LABEL_NONE
) {
575 /* request already has a label */
577 zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
579 lcb
->label
, lcb
->label
, lcb
);
581 /* if this was a BGP_LU request, unlock node
583 check_bgp_lu_cb_unlock(lcb
);
585 goto finishedrequest
;
588 lcb
->label
= get_label_from_pool(lcb
->labelid
);
590 if (lcb
->label
== MPLS_LABEL_NONE
) {
592 * Out of labels in local pool, await next chunk
595 zlog_debug("%s: out of labels, await more",
604 * we filled the request from local pool.
605 * Enqueue response work item with new label.
607 struct lp_cbq_item
*q
= XCALLOC(MTYPE_BGP_LABEL_CBQ
,
608 sizeof(struct lp_cbq_item
));
610 q
->cbfunc
= lcb
->cbfunc
;
612 q
->label
= lcb
->label
;
613 q
->labelid
= lcb
->labelid
;
617 zlog_debug("%s: assigning label %u to labelid %p",
618 __func__
, q
->label
, q
->labelid
);
620 work_queue_add(lp
->callback_q
, q
);
623 lp_fifo_del(&lp
->requests
, lf
);
624 XFREE(MTYPE_BGP_LABEL_FIFO
, lf
);
629 * continue using allocated labels until zebra returns
631 void bgp_lp_event_zebra_down(void)
637 * Inform owners of previously-allocated labels that their labels
638 * are not valid. Request chunk from zebra large enough to satisfy
639 * previously-allocated labels plus any outstanding requests.
641 void bgp_lp_event_zebra_up(void)
643 unsigned int labels_needed
;
644 unsigned int chunks_needed
;
649 lp
->reconnect_count
++;
651 * Get label chunk allocation request dispatched to zebra
653 labels_needed
= lp_fifo_count(&lp
->requests
) +
654 skiplist_count(lp
->inuse
);
656 if (labels_needed
> lp
->next_chunksize
) {
657 while ((lp
->next_chunksize
< labels_needed
) &&
658 (lp
->next_chunksize
<< 1 <= LP_CHUNK_SIZE_MAX
))
660 lp
->next_chunksize
<<= 1;
664 chunks_needed
= (labels_needed
/ lp
->next_chunksize
) + 1;
665 labels_needed
= chunks_needed
* lp
->next_chunksize
;
667 lm_init_ok
= lm_label_manager_connect(zclient
, 1) == 0;
670 zlog_err("%s: label manager connection error", __func__
);
674 zclient_send_get_label_chunk(zclient
, 0, labels_needed
,
675 MPLS_LABEL_BASE_ANY
);
676 lp
->pending_count
= labels_needed
;
679 * Invalidate current list of chunks
681 list_delete_all_node(lp
->chunks
);
684 * Invalidate any existing labels and requeue them as requests
686 while (!skiplist_first(lp
->inuse
, NULL
, &labelid
)) {
691 if (!skiplist_search(lp
->ledger
, labelid
, (void **)&lcb
)) {
693 if (lcb
->label
!= MPLS_LABEL_NONE
) {
697 struct lp_cbq_item
*q
;
699 q
= XCALLOC(MTYPE_BGP_LABEL_CBQ
,
700 sizeof(struct lp_cbq_item
));
701 q
->cbfunc
= lcb
->cbfunc
;
703 q
->label
= lcb
->label
;
704 q
->labelid
= lcb
->labelid
;
705 q
->allocated
= false;
706 check_bgp_lu_cb_lock(lcb
);
707 work_queue_add(lp
->callback_q
, q
);
709 lcb
->label
= MPLS_LABEL_NONE
;
715 struct lp_fifo
*lf
= XCALLOC(MTYPE_BGP_LABEL_FIFO
,
716 sizeof(struct lp_fifo
));
719 check_bgp_lu_cb_lock(lcb
);
720 lp_fifo_add_tail(&lp
->requests
, lf
);
723 skiplist_delete_first(lp
->inuse
);
727 DEFUN(show_bgp_labelpool_summary
, show_bgp_labelpool_summary_cmd
,
728 "show bgp labelpool summary [json]",
730 "BGP Labelpool information\n"
731 "BGP Labelpool summary\n" JSON_STR
)
733 bool uj
= use_json(argc
, argv
);
734 json_object
*json
= NULL
;
738 vty_out(vty
, "{}\n");
740 vty_out(vty
, "No existing BGP labelpool\n");
741 return (CMD_WARNING
);
745 json
= json_object_new_object();
746 json_object_int_add(json
, "ledger", skiplist_count(lp
->ledger
));
747 json_object_int_add(json
, "inUse", skiplist_count(lp
->inuse
));
748 json_object_int_add(json
, "requests",
749 lp_fifo_count(&lp
->requests
));
750 json_object_int_add(json
, "labelChunks", listcount(lp
->chunks
));
751 json_object_int_add(json
, "pending", lp
->pending_count
);
752 json_object_int_add(json
, "reconnects", lp
->reconnect_count
);
755 vty_out(vty
, "Labelpool Summary\n");
756 vty_out(vty
, "-----------------\n");
757 vty_out(vty
, "%-13s %d\n",
758 "Ledger:", skiplist_count(lp
->ledger
));
759 vty_out(vty
, "%-13s %d\n", "InUse:", skiplist_count(lp
->inuse
));
760 vty_out(vty
, "%-13s %zu\n",
761 "Requests:", lp_fifo_count(&lp
->requests
));
762 vty_out(vty
, "%-13s %d\n",
763 "LabelChunks:", listcount(lp
->chunks
));
764 vty_out(vty
, "%-13s %d\n", "Pending:", lp
->pending_count
);
765 vty_out(vty
, "%-13s %d\n", "Reconnects:", lp
->reconnect_count
);
770 DEFUN(show_bgp_labelpool_ledger
, show_bgp_labelpool_ledger_cmd
,
771 "show bgp labelpool ledger [json]",
773 "BGP Labelpool information\n"
774 "BGP Labelpool ledger\n" JSON_STR
)
776 bool uj
= use_json(argc
, argv
);
777 json_object
*json
= NULL
, *json_elem
= NULL
;
778 struct lp_lcb
*lcb
= NULL
;
779 struct bgp_dest
*dest
;
781 const struct prefix
*p
;
786 vty_out(vty
, "{}\n");
788 vty_out(vty
, "No existing BGP labelpool\n");
789 return (CMD_WARNING
);
793 count
= skiplist_count(lp
->ledger
);
795 vty_out(vty
, "{}\n");
798 json
= json_object_new_array();
800 vty_out(vty
, "Prefix Label\n");
801 vty_out(vty
, "---------------------------\n");
804 for (rc
= skiplist_next(lp
->ledger
, (void **)&dest
, (void **)&lcb
,
806 !rc
; rc
= skiplist_next(lp
->ledger
, (void **)&dest
, (void **)&lcb
,
809 json_elem
= json_object_new_object();
810 json_object_array_add(json
, json_elem
);
814 if (!CHECK_FLAG(dest
->flags
, BGP_NODE_LABEL_REQUESTED
))
816 json_object_string_add(
817 json_elem
, "prefix", "INVALID");
818 json_object_int_add(json_elem
, "label",
821 vty_out(vty
, "%-18s %u\n",
822 "INVALID", lcb
->label
);
824 p
= bgp_dest_get_prefix(dest
);
826 json_object_string_addf(
827 json_elem
, "prefix", "%pFX", p
);
828 json_object_int_add(json_elem
, "label",
831 vty_out(vty
, "%-18pFX %u\n", p
,
837 json_object_string_add(json_elem
, "prefix",
839 json_object_int_add(json_elem
, "label",
842 vty_out(vty
, "%-18s %u\n", "VRF",
853 DEFUN(show_bgp_labelpool_inuse
, show_bgp_labelpool_inuse_cmd
,
854 "show bgp labelpool inuse [json]",
856 "BGP Labelpool information\n"
857 "BGP Labelpool inuse\n" JSON_STR
)
859 bool uj
= use_json(argc
, argv
);
860 json_object
*json
= NULL
, *json_elem
= NULL
;
861 struct bgp_dest
*dest
;
865 const struct prefix
*p
;
869 vty_out(vty
, "No existing BGP labelpool\n");
870 return (CMD_WARNING
);
874 vty_out(vty
, "{}\n");
876 vty_out(vty
, "No existing BGP labelpool\n");
877 return (CMD_WARNING
);
881 count
= skiplist_count(lp
->inuse
);
883 vty_out(vty
, "{}\n");
886 json
= json_object_new_array();
888 vty_out(vty
, "Prefix Label\n");
889 vty_out(vty
, "---------------------------\n");
891 for (rc
= skiplist_next(lp
->inuse
, (void **)&label
, (void **)&dest
,
893 !rc
; rc
= skiplist_next(lp
->ledger
, (void **)&label
,
894 (void **)&dest
, &cursor
)) {
895 if (skiplist_search(lp
->ledger
, dest
, (void **)&lcb
))
899 json_elem
= json_object_new_object();
900 json_object_array_add(json
, json_elem
);
905 if (!CHECK_FLAG(dest
->flags
, BGP_NODE_LABEL_REQUESTED
))
907 json_object_string_add(
908 json_elem
, "prefix", "INVALID");
909 json_object_int_add(json_elem
, "label",
912 vty_out(vty
, "INVALID %u\n",
915 p
= bgp_dest_get_prefix(dest
);
917 json_object_string_addf(
918 json_elem
, "prefix", "%pFX", p
);
919 json_object_int_add(json_elem
, "label",
922 vty_out(vty
, "%-18pFX %u\n", p
,
928 json_object_string_add(json_elem
, "prefix",
930 json_object_int_add(json_elem
, "label", label
);
932 vty_out(vty
, "%-18s %u\n", "VRF",
942 DEFUN(show_bgp_labelpool_requests
, show_bgp_labelpool_requests_cmd
,
943 "show bgp labelpool requests [json]",
945 "BGP Labelpool information\n"
946 "BGP Labelpool requests\n" JSON_STR
)
948 bool uj
= use_json(argc
, argv
);
949 json_object
*json
= NULL
, *json_elem
= NULL
;
950 struct bgp_dest
*dest
;
951 const struct prefix
*p
;
952 struct lp_fifo
*item
, *next
;
957 vty_out(vty
, "{}\n");
959 vty_out(vty
, "No existing BGP labelpool\n");
960 return (CMD_WARNING
);
964 count
= lp_fifo_count(&lp
->requests
);
966 vty_out(vty
, "{}\n");
969 json
= json_object_new_array();
971 vty_out(vty
, "Prefix \n");
972 vty_out(vty
, "----------------\n");
975 for (item
= lp_fifo_first(&lp
->requests
); item
; item
= next
) {
976 next
= lp_fifo_next_safe(&lp
->requests
, item
);
977 dest
= item
->lcb
.labelid
;
979 json_elem
= json_object_new_object();
980 json_object_array_add(json
, json_elem
);
982 switch (item
->lcb
.type
) {
984 if (!CHECK_FLAG(dest
->flags
,
985 BGP_NODE_LABEL_REQUESTED
)) {
987 json_object_string_add(
988 json_elem
, "prefix", "INVALID");
990 vty_out(vty
, "INVALID\n");
992 p
= bgp_dest_get_prefix(dest
);
994 json_object_string_addf(
995 json_elem
, "prefix", "%pFX", p
);
997 vty_out(vty
, "%-18pFX\n", p
);
1002 json_object_string_add(json_elem
, "prefix",
1005 vty_out(vty
, "VRF\n");
1010 vty_json(vty
, json
);
1014 DEFUN(show_bgp_labelpool_chunks
, show_bgp_labelpool_chunks_cmd
,
1015 "show bgp labelpool chunks [json]",
1017 "BGP Labelpool information\n"
1018 "BGP Labelpool chunks\n" JSON_STR
)
1020 bool uj
= use_json(argc
, argv
);
1021 json_object
*json
= NULL
, *json_elem
;
1022 struct listnode
*node
;
1023 struct lp_chunk
*chunk
;
1028 vty_out(vty
, "{}\n");
1030 vty_out(vty
, "No existing BGP labelpool\n");
1031 return (CMD_WARNING
);
1035 count
= listcount(lp
->chunks
);
1037 vty_out(vty
, "{}\n");
1040 json
= json_object_new_array();
1042 vty_out(vty
, "%10s %10s %10s %10s\n", "First", "Last", "Size",
1044 vty_out(vty
, "-------------------------------------------\n");
1047 for (ALL_LIST_ELEMENTS_RO(lp
->chunks
, node
, chunk
)) {
1050 size
= chunk
->last
- chunk
->first
+ 1;
1053 json_elem
= json_object_new_object();
1054 json_object_array_add(json
, json_elem
);
1055 json_object_int_add(json_elem
, "first", chunk
->first
);
1056 json_object_int_add(json_elem
, "last", chunk
->last
);
1057 json_object_int_add(json_elem
, "size", size
);
1058 json_object_int_add(json_elem
, "numberFree",
1061 vty_out(vty
, "%10u %10u %10u %10u\n", chunk
->first
,
1062 chunk
->last
, size
, chunk
->nfree
);
1065 vty_json(vty
, json
);
1069 #if BGP_LABELPOOL_ENABLE_TESTS
1070 /*------------------------------------------------------------------------
1071 * Testing code start
1072 *------------------------------------------------------------------------*/
1074 DEFINE_MTYPE_STATIC(BGPD
, LABELPOOL_TEST
, "Label pool test");
1076 #define LPT_STAT_INSERT_FAIL 0
1077 #define LPT_STAT_DELETE_FAIL 1
1078 #define LPT_STAT_ALLOCATED 2
1079 #define LPT_STAT_DEALLOCATED 3
1080 #define LPT_STAT_MAX 4
1082 const char *lpt_counter_names
[] = {
1083 "sl insert failures",
1084 "sl delete failures",
1086 "labels deallocated",
1089 static uint8_t lpt_generation
;
1090 static bool lpt_inprogress
;
1091 static struct skiplist
*lp_tests
;
1092 static unsigned int lpt_test_cb_tcb_lookup_fails
;
1093 static unsigned int lpt_release_tcb_lookup_fails
;
1094 static unsigned int lpt_test_event_tcb_lookup_fails
;
1095 static unsigned int lpt_stop_tcb_lookup_fails
;
1099 unsigned int request_maximum
;
1100 unsigned int request_blocksize
;
1101 uintptr_t request_count
; /* match type of labelid */
1103 struct skiplist
*labels
;
1104 struct timeval starttime
;
1105 struct skiplist
*timestamps_alloc
;
1106 struct skiplist
*timestamps_dealloc
;
1107 struct thread
*event_thread
;
1108 unsigned int counter
[LPT_STAT_MAX
];
1111 /* test parameters */
1112 #define LPT_MAX_COUNT 500000 /* get this many labels in all */
1113 #define LPT_BLKSIZE 10000 /* this many at a time, then yield */
1114 #define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
1117 static int test_cb(mpls_label_t label
, void *labelid
, bool allocated
)
1119 uintptr_t generation
;
1120 struct lp_test
*tcb
;
1122 generation
= ((uintptr_t)labelid
>> 24) & 0xff;
1124 if (skiplist_search(lp_tests
, (void *)generation
, (void **)&tcb
)) {
1126 /* couldn't find current test in progress */
1127 ++lpt_test_cb_tcb_lookup_fails
;
1128 return -1; /* reject allocation */
1132 ++tcb
->counter
[LPT_STAT_ALLOCATED
];
1133 if (!(tcb
->counter
[LPT_STAT_ALLOCATED
] % LPT_TS_INTERVAL
)) {
1136 time_ms
= monotime_since(&tcb
->starttime
, NULL
) / 1000;
1137 skiplist_insert(tcb
->timestamps_alloc
,
1138 (void *)(uintptr_t)tcb
1139 ->counter
[LPT_STAT_ALLOCATED
],
1142 if (skiplist_insert(tcb
->labels
, labelid
,
1143 (void *)(uintptr_t)label
)) {
1144 ++tcb
->counter
[LPT_STAT_INSERT_FAIL
];
1148 ++tcb
->counter
[LPT_STAT_DEALLOCATED
];
1149 if (!(tcb
->counter
[LPT_STAT_DEALLOCATED
] % LPT_TS_INTERVAL
)) {
1152 time_ms
= monotime_since(&tcb
->starttime
, NULL
) / 1000;
1153 skiplist_insert(tcb
->timestamps_dealloc
,
1154 (void *)(uintptr_t)tcb
1155 ->counter
[LPT_STAT_ALLOCATED
],
1158 if (skiplist_delete(tcb
->labels
, labelid
, 0)) {
1159 ++tcb
->counter
[LPT_STAT_DELETE_FAIL
];
1166 static void labelpool_test_event_handler(struct thread
*thread
)
1168 struct lp_test
*tcb
;
1170 if (skiplist_search(lp_tests
, (void *)(uintptr_t)(lpt_generation
),
1173 /* couldn't find current test in progress */
1174 ++lpt_test_event_tcb_lookup_fails
;
1179 * request a bunch of labels
1181 for (unsigned int i
= 0; (i
< tcb
->request_blocksize
) &&
1182 (tcb
->request_count
< tcb
->request_maximum
);
1187 ++tcb
->request_count
;
1190 * construct 32-bit id from request_count and generation
1192 id
= ((uintptr_t)tcb
->generation
<< 24) |
1193 (tcb
->request_count
& 0x00ffffff);
1194 bgp_lp_get(LP_TYPE_VRF
, (void *)id
, test_cb
);
1197 if (tcb
->request_count
< tcb
->request_maximum
)
1198 thread_add_event(bm
->master
, labelpool_test_event_handler
, NULL
,
1199 0, &tcb
->event_thread
);
1202 static void lptest_stop(void)
1204 struct lp_test
*tcb
;
1206 if (!lpt_inprogress
)
1209 if (skiplist_search(lp_tests
, (void *)(uintptr_t)(lpt_generation
),
1212 /* couldn't find current test in progress */
1213 ++lpt_stop_tcb_lookup_fails
;
1217 if (tcb
->event_thread
)
1218 thread_cancel(&tcb
->event_thread
);
1220 lpt_inprogress
= false;
1223 static int lptest_start(struct vty
*vty
)
1225 struct lp_test
*tcb
;
1227 if (lpt_inprogress
) {
1228 vty_out(vty
, "test already in progress\n");
1232 if (skiplist_count(lp_tests
) >=
1233 (1 << (8 * sizeof(lpt_generation
))) - 1) {
1235 * Too many test runs
1237 vty_out(vty
, "too many tests: clear first\n");
1242 * We pack the generation and request number into the labelid;
1243 * make sure they fit.
1245 unsigned int n1
= LPT_MAX_COUNT
;
1246 unsigned int sh
= 0;
1247 unsigned int label_bits
;
1249 label_bits
= 8 * (sizeof(tcb
->request_count
) - sizeof(lpt_generation
));
1251 /* n1 should be same type as tcb->request_maximum */
1252 assert(sizeof(n1
) == sizeof(tcb
->request_maximum
));
1256 sh
+= 1; /* number of bits needed to hold LPT_MAX_COUNT */
1258 if (sh
> label_bits
) {
1260 "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
1261 LPT_MAX_COUNT
, sh
, label_bits
);
1265 lpt_inprogress
= true;
1268 tcb
= XCALLOC(MTYPE_LABELPOOL_TEST
, sizeof(*tcb
));
1270 tcb
->generation
= lpt_generation
;
1271 tcb
->label_type
= LP_TYPE_VRF
;
1272 tcb
->request_maximum
= LPT_MAX_COUNT
;
1273 tcb
->request_blocksize
= LPT_BLKSIZE
;
1274 tcb
->labels
= skiplist_new(0, NULL
, NULL
);
1275 tcb
->timestamps_alloc
= skiplist_new(0, NULL
, NULL
);
1276 tcb
->timestamps_dealloc
= skiplist_new(0, NULL
, NULL
);
1277 thread_add_event(bm
->master
, labelpool_test_event_handler
, NULL
, 0,
1278 &tcb
->event_thread
);
1279 monotime(&tcb
->starttime
);
1281 skiplist_insert(lp_tests
, (void *)(uintptr_t)tcb
->generation
, tcb
);
1285 DEFPY(start_labelpool_perf_test
, start_labelpool_perf_test_cmd
,
1286 "debug bgp lptest start",
1295 static void lptest_print_stats(struct vty
*vty
, struct lp_test
*tcb
)
1299 vty_out(vty
, "Global Lookup Failures in test_cb: %5u\n",
1300 lpt_test_cb_tcb_lookup_fails
);
1301 vty_out(vty
, "Global Lookup Failures in release: %5u\n",
1302 lpt_release_tcb_lookup_fails
);
1303 vty_out(vty
, "Global Lookup Failures in event: %5u\n",
1304 lpt_test_event_tcb_lookup_fails
);
1305 vty_out(vty
, "Global Lookup Failures in stop: %5u\n",
1306 lpt_stop_tcb_lookup_fails
);
1310 if (skiplist_search(lp_tests
, (void *)(uintptr_t)lpt_generation
,
1312 vty_out(vty
, "Error: can't find test %u\n",
1318 vty_out(vty
, "Test Generation %u:\n", tcb
->generation
);
1320 vty_out(vty
, "Counter Value\n");
1321 for (i
= 0; i
< LPT_STAT_MAX
; ++i
) {
1322 vty_out(vty
, "%20s: %10u\n", lpt_counter_names
[i
],
1327 if (tcb
->timestamps_alloc
) {
1334 vty_out(vty
, "%10s %10s\n", "Count", "Seconds");
1337 while (!skiplist_next(tcb
->timestamps_alloc
, &Key
, &Value
,
1340 elapsed
= ((float)(uintptr_t)Value
) / 1000;
1342 vty_out(vty
, "%10llu %10.3f\n",
1343 (unsigned long long)(uintptr_t)Key
, elapsed
);
1349 DEFPY(show_labelpool_perf_test
, show_labelpool_perf_test_cmd
,
1350 "debug bgp lptest show",
1362 while (!skiplist_next(lp_tests
, &Key
, &Value
, &cursor
)) {
1363 lptest_print_stats(vty
, (struct lp_test
*)Value
);
1366 vty_out(vty
, "no test results\n");
1371 DEFPY(stop_labelpool_perf_test
, stop_labelpool_perf_test_cmd
,
1372 "debug bgp lptest stop",
1378 if (lpt_inprogress
) {
1380 lptest_print_stats(vty
, NULL
);
1382 vty_out(vty
, "no test in progress\n");
1387 DEFPY(clear_labelpool_perf_test
, clear_labelpool_perf_test_cmd
,
1388 "debug bgp lptest clear",
1394 if (lpt_inprogress
) {
1398 while (!skiplist_first(lp_tests
, NULL
, NULL
))
1399 /* del function of skiplist cleans up tcbs */
1400 skiplist_delete_first(lp_tests
);
1406 * With the "release" command, we can release labels at intervals through
1407 * the ID space. Thus we can to exercise the bitfield-wrapping behavior
1408 * of the allocator in a subsequent test.
1410 /* clang-format off */
1411 DEFPY(release_labelpool_perf_test
, release_labelpool_perf_test_cmd
,
1412 "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
1420 "label fraction denominator\n")
1422 /* clang-format on */
1424 unsigned long testnum
;
1426 struct lp_test
*tcb
;
1428 testnum
= strtoul(generation
, &end
, 0);
1430 vty_out(vty
, "Invalid test number: \"%s\"\n", generation
);
1433 if (lpt_inprogress
&& (testnum
== lpt_generation
)) {
1435 "Error: Test %lu is still in progress (stop first)\n",
1440 if (skiplist_search(lp_tests
, (void *)(uintptr_t)testnum
,
1443 /* couldn't find current test in progress */
1444 vty_out(vty
, "Error: Can't look up test number: \"%lu\"\n",
1446 ++lpt_release_tcb_lookup_fails
;
1451 void *Value
, *cValue
;
1453 unsigned int iteration
;
1458 rc
= skiplist_next(tcb
->labels
, &Key
, &Value
, &cursor
);
1464 /* find next item before we delete this one */
1465 rc
= skiplist_next(tcb
->labels
, &Key
, &Value
, &cursor
);
1467 if (!(iteration
% every_nth
)) {
1468 bgp_lp_release(tcb
->label_type
, cKey
,
1469 (mpls_label_t
)(uintptr_t)cValue
);
1470 skiplist_delete(tcb
->labels
, cKey
, NULL
);
1471 ++tcb
->counter
[LPT_STAT_DEALLOCATED
];
1479 static void lptest_delete(void *val
)
1481 struct lp_test
*tcb
= (struct lp_test
*)val
;
1488 while (!skiplist_next(tcb
->labels
, &Key
, &Value
, &cursor
))
1489 bgp_lp_release(tcb
->label_type
, Key
,
1490 (mpls_label_t
)(uintptr_t)Value
);
1491 skiplist_free(tcb
->labels
);
1494 if (tcb
->timestamps_alloc
) {
1496 skiplist_free(tcb
->timestamps_alloc
);
1497 tcb
->timestamps_alloc
= NULL
;
1500 if (tcb
->timestamps_dealloc
) {
1502 skiplist_free(tcb
->timestamps_dealloc
);
1503 tcb
->timestamps_dealloc
= NULL
;
1506 if (tcb
->event_thread
)
1507 thread_cancel(&tcb
->event_thread
);
1509 memset(tcb
, 0, sizeof(*tcb
));
1511 XFREE(MTYPE_LABELPOOL_TEST
, tcb
);
1514 static void lptest_init(void)
1516 lp_tests
= skiplist_new(0, NULL
, lptest_delete
);
1519 static void lptest_finish(void)
1522 skiplist_free(lp_tests
);
1527 /*------------------------------------------------------------------------
1529 *------------------------------------------------------------------------*/
1530 #endif /* BGP_LABELPOOL_ENABLE_TESTS */
1532 void bgp_lp_vty_init(void)
1534 install_element(VIEW_NODE
, &show_bgp_labelpool_summary_cmd
);
1535 install_element(VIEW_NODE
, &show_bgp_labelpool_ledger_cmd
);
1536 install_element(VIEW_NODE
, &show_bgp_labelpool_inuse_cmd
);
1537 install_element(VIEW_NODE
, &show_bgp_labelpool_requests_cmd
);
1538 install_element(VIEW_NODE
, &show_bgp_labelpool_chunks_cmd
);
1540 #if BGP_LABELPOOL_ENABLE_TESTS
1541 install_element(ENABLE_NODE
, &start_labelpool_perf_test_cmd
);
1542 install_element(ENABLE_NODE
, &show_labelpool_perf_test_cmd
);
1543 install_element(ENABLE_NODE
, &stop_labelpool_perf_test_cmd
);
1544 install_element(ENABLE_NODE
, &release_labelpool_perf_test_cmd
);
1545 install_element(ENABLE_NODE
, &clear_labelpool_perf_test_cmd
);
1546 #endif /* BGP_LABELPOOL_ENABLE_TESTS */