]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_labelpool.c
bgpd: improve labelpool performance at scale
[mirror_frr.git] / bgpd / bgp_labelpool.c
1 /*
2 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
3 *
4 * Copyright (C) 2018 LabN Consulting, L.L.C.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <zebra.h>
22
23 #include "log.h"
24 #include "memory.h"
25 #include "stream.h"
26 #include "mpls.h"
27 #include "vty.h"
28 #include "linklist.h"
29 #include "skiplist.h"
30 #include "workqueue.h"
31 #include "zclient.h"
32 #include "mpls.h"
33
34 #include "bgpd/bgpd.h"
35 #include "bgpd/bgp_labelpool.h"
36 #include "bgpd/bgp_debug.h"
37 #include "bgpd/bgp_errors.h"
38 #include "bgpd/bgp_route.h"
39
40 #define BGP_LABELPOOL_ENABLE_TESTS 0
41
42 #ifndef VTYSH_EXTRACT_PL
43 #include "bgpd/bgp_labelpool_clippy.c"
44 #endif
45
46
47 /*
48 * Definitions and external declarations.
49 */
50 extern struct zclient *zclient;
51
52 #if BGP_LABELPOOL_ENABLE_TESTS
53 static void lptest_init(void);
54 static void lptest_finish(void);
55 #endif
56
57 /*
58 * Remember where pool data are kept
59 */
60 static struct labelpool *lp;
61
62 /*
63 * Number of labels requested at a time from the zebra label manager.
64 * We start small but double the request size each time up to a
65 * maximum size.
66 *
67 * The label space is 20 bits which is shared with other FRR processes
68 * on this host, so to avoid greedily requesting a mostly wasted chunk,
69 * we limit the chunk size to 1/16 of the label space (that's the -4 bits
70 * in the definition below). This limit slightly increases our cost of
71 * finding free labels in our allocated chunks.
72 */
73 #define LP_CHUNK_SIZE_MIN 128
74 #define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
75
76 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
77 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
78 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment");
79 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback");
80
/*
 * One contiguous block of labels obtained from the zebra label manager.
 * Per-label allocation state is tracked in allocated_map (bit set = label
 * handed out); nfree caches the number of clear bits.
 */
struct lp_chunk {
	uint32_t first;
	uint32_t last;
	uint32_t nfree;		     /* un-allocated count */
	uint32_t idx_last_allocated; /* start looking here */
	bitfield_t allocated_map;
};
88
/*
 * label control block
 *
 * One LCB exists per requested labelid; it lives in lp->ledger and
 * records the owner's callback plus the currently assigned label
 * (MPLS_LABEL_NONE while the request is still pending).
 */
struct lp_lcb {
	mpls_label_t label;	/* MPLS_LABEL_NONE = not allocated */
	int type;
	void *labelid;		/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
103
/* FIFO entry: a label request queued while awaiting a chunk from zebra */
struct lp_fifo {
	struct lp_fifo_item fifo; /* typesafe-list linkage */
	struct lp_lcb lcb;	  /* snapshot copy of the request's LCB */
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo);
110
/* Work-queue item: one pending notification to a label's owner */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated;		/* false = lost */
};
118
/*
 * Work queue handler: deliver one queued label (de)allocation
 * notification to the owner's callback.
 *
 * Always returns WQ_SUCCESS so the queue keeps draining; problems are
 * logged rather than retried.
 */
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			__func__);
		return WQ_SUCCESS;
	}

	/* non-zero rc means the owner refused the allocation */
	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
					(void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
							labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
176
/* Work-queue deletion hook: free a queued notification item */
static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}
181
/* Skiplist value-free hook for lp->ledger: free an LCB */
static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}
186
/* List deletion hook for lp->chunks: free a chunk and its bitmap */
static void lp_chunk_free(void *goner)
{
	struct lp_chunk *chunk = (struct lp_chunk *)goner;

	bf_free(chunk->allocated_map);
	XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
194
/*
 * Initialize the label pool module.
 *
 * master: thread master used to run the callback work queue
 * pool:   caller-owned storage for pool state; remembered in the
 *         module-static "lp" pointer used by all other entry points
 */
void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;

	/* chunk request size starts small and doubles on demand */
	lp->next_chunksize = LP_CHUNK_SIZE_MIN;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_init();
#endif
}
219
/* check if a label callback was for a BGP LU node, and if so, unlock it */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
	/* for LP_TYPE_BGP_LU the labelid is a locked struct bgp_dest * */
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_dest_unlock_node(lcb->labelid);
}
226
/* check if a label callback was for a BGP LU node, and if so, lock it */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
	/* hold the dest while its LCB sits in a queue */
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_dest_lock_node(lcb->labelid);
}
233
/*
 * Tear down the label pool module: free ledgers, chunks, queued
 * requests and the callback work queue. Safe to call when the pool
 * was never initialized (lp == NULL).
 */
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_finish();
#endif
	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	/* drain pending requests, dropping any LU node locks they hold */
	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	/*
	 * NOTE(review): item->data here is a struct lp_cbq_item *, while
	 * check_bgp_lu_cb_unlock() takes a struct lp_lcb * — the two
	 * structs place "type" and "labelid" at different offsets.
	 * Verify this is intentional/safe.
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}
272
/*
 * Allocate one label from the first chunk with a free slot and record
 * labelid as its owner in lp->inuse.
 *
 * Returns the label, or MPLS_LABEL_NONE if every chunk is exhausted
 * (or on an internal inconsistency inserting into lp->inuse).
 */
static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;
		unsigned int index;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		/*
		 * don't look in chunks with no available labels
		 */
		if (!chunk->nfree)
			continue;

		/*
		 * roll through bitfield starting where we stopped
		 * last time
		 */
		index = bf_find_next_clear_bit_wrap(
			&chunk->allocated_map, chunk->idx_last_allocated + 1,
			0);

		/*
		 * since chunk->nfree is non-zero, we should always get
		 * a valid index
		 */
		assert(index != WORD_MAX);

		lbl = chunk->first + index;
		if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
			/* something is very wrong */
			zlog_err("%s: unable to insert inuse label %u (id %p)",
				__func__, (uint32_t)lbl, labelid);
			return MPLS_LABEL_NONE;
		}

		/*
		 * Success
		 */
		bf_set_bit(chunk->allocated_map, index);
		chunk->idx_last_allocated = index;
		chunk->nfree -= 1;

		return lbl;
	}

	return MPLS_LABEL_NONE;
}
330
331 /*
332 * Success indicated by value of "label" field in returned LCB
333 */
334 static struct lp_lcb *lcb_alloc(
335 int type,
336 void *labelid,
337 int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
338 {
339 /*
340 * Set up label control block
341 */
342 struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
343 sizeof(struct lp_lcb));
344
345 new->label = get_label_from_pool(labelid);
346 new->type = type;
347 new->labelid = labelid;
348 new->cbfunc = cbfunc;
349
350 return new;
351 }
352
/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call bgp_lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * The callback function should return 0 to accept the allocation
 * and non-zero to refuse it. The callback function return value is
 * ignored for invalidations (i.e., when the "allocated" parameter is false)
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		/* new request: allocate LCB (tries the local pool) */
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock node before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	/* repeat request with no label yet: already queued, nothing to do */
	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock node before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	/* only ask zebra for more when outstanding requests exceed
	 * labels already on order; chunk size doubles up to the max */
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (zclient_send_get_label_chunk(zclient, 0, lp->next_chunksize,
						 MPLS_LABEL_BASE_ANY) !=
		    ZCLIENT_SEND_FAILURE) {
			lp->pending_count += lp->next_chunksize;
			if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
				lp->next_chunksize <<= 1;
		}
	}
}
474
475 void bgp_lp_release(
476 int type,
477 void *labelid,
478 mpls_label_t label)
479 {
480 struct lp_lcb *lcb;
481
482 if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
483 if (label == lcb->label && type == lcb->type) {
484 struct listnode *node;
485 struct lp_chunk *chunk;
486 uintptr_t lbl = label;
487 bool deallocated = false;
488
489 /* no longer in use */
490 skiplist_delete(lp->inuse, (void *)lbl, NULL);
491
492 /* no longer requested */
493 skiplist_delete(lp->ledger, labelid, NULL);
494
495 /*
496 * Find the chunk this label belongs to and
497 * deallocate the label
498 */
499 for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
500 uint32_t index;
501
502 if ((label < chunk->first) ||
503 (label > chunk->last))
504 continue;
505
506 index = label - chunk->first;
507 assert(bf_test_index(chunk->allocated_map,
508 index));
509 bf_release_index(chunk->allocated_map, index);
510 chunk->nfree += 1;
511 deallocated = true;
512 }
513 assert(deallocated);
514 }
515 }
516 }
517
518 /*
519 * zebra response giving us a chunk of labels
520 */
521 void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
522 {
523 struct lp_chunk *chunk;
524 int debug = BGP_DEBUG(labelpool, LABELPOOL);
525 struct lp_fifo *lf;
526 uint32_t labelcount;
527
528 if (last < first) {
529 flog_err(EC_BGP_LABEL,
530 "%s: zebra label chunk invalid: first=%u, last=%u",
531 __func__, first, last);
532 return;
533 }
534
535 chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));
536
537 labelcount = last - first + 1;
538
539 chunk->first = first;
540 chunk->last = last;
541 chunk->nfree = labelcount;
542 bf_init(chunk->allocated_map, labelcount);
543
544 /*
545 * Optimize for allocation by adding the new (presumably larger)
546 * chunk at the head of the list so it is examined first.
547 */
548 listnode_add_head(lp->chunks, chunk);
549
550 lp->pending_count -= labelcount;
551
552 if (debug) {
553 zlog_debug("%s: %zu pending requests", __func__,
554 lp_fifo_count(&lp->requests));
555 }
556
557 while (labelcount && (lf = lp_fifo_first(&lp->requests))) {
558
559 struct lp_lcb *lcb;
560 void *labelid = lf->lcb.labelid;
561
562 if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
563 /* request no longer in effect */
564
565 if (debug) {
566 zlog_debug("%s: labelid %p: request no longer in effect",
567 __func__, labelid);
568 }
569 /* if this was a BGP_LU request, unlock node
570 */
571 check_bgp_lu_cb_unlock(lcb);
572 goto finishedrequest;
573 }
574
575 /* have LCB */
576 if (lcb->label != MPLS_LABEL_NONE) {
577 /* request already has a label */
578 if (debug) {
579 zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
580 __func__, labelid,
581 lcb->label, lcb->label, lcb);
582 }
583 /* if this was a BGP_LU request, unlock node
584 */
585 check_bgp_lu_cb_unlock(lcb);
586
587 goto finishedrequest;
588 }
589
590 lcb->label = get_label_from_pool(lcb->labelid);
591
592 if (lcb->label == MPLS_LABEL_NONE) {
593 /*
594 * Out of labels in local pool, await next chunk
595 */
596 if (debug) {
597 zlog_debug("%s: out of labels, await more",
598 __func__);
599 }
600 break;
601 }
602
603 labelcount -= 1;
604
605 /*
606 * we filled the request from local pool.
607 * Enqueue response work item with new label.
608 */
609 struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
610 sizeof(struct lp_cbq_item));
611
612 q->cbfunc = lcb->cbfunc;
613 q->type = lcb->type;
614 q->label = lcb->label;
615 q->labelid = lcb->labelid;
616 q->allocated = true;
617
618 if (debug)
619 zlog_debug("%s: assigning label %u to labelid %p",
620 __func__, q->label, q->labelid);
621
622 work_queue_add(lp->callback_q, q);
623
624 finishedrequest:
625 lp_fifo_del(&lp->requests, lf);
626 XFREE(MTYPE_BGP_LABEL_FIFO, lf);
627 }
628 }
629
/*
 * continue using allocated labels until zebra returns
 *
 * Intentionally a no-op: existing assignments remain valid locally;
 * bgp_lp_event_zebra_up() handles revalidation on reconnect.
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}
637
/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	unsigned int labels_needed;
	unsigned int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	lp->reconnect_count++;
	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* grow the chunk size toward the total need, capped at the max */
	if (labels_needed > lp->next_chunksize) {
		while ((lp->next_chunksize < labels_needed) &&
		       (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))

			lp->next_chunksize <<= 1;
	}

	/* round up */
	chunks_needed = (labels_needed / lp->next_chunksize) + 1;
	labels_needed = chunks_needed * lp->next_chunksize;

	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed,
				     MPLS_LABEL_BASE_ANY);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate: notify owner the label is lost
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					    sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue: owner will get a fresh label later
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}
728
/* vty: "show bgp labelpool summary [json]" — pool-wide counters */
DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
      "show bgp labelpool summary [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool summary\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		json = json_object_new_object();
#if CONFDATE > 20230131
		CPP_NOTICE("Remove JSON object commands with keys starting with capital")
#endif
		/* capitalized keys are deprecated duplicates (see notice) */
		json_object_int_add(json, "Ledger", skiplist_count(lp->ledger));
		json_object_int_add(json, "ledger", skiplist_count(lp->ledger));
		json_object_int_add(json, "InUse", skiplist_count(lp->inuse));
		json_object_int_add(json, "inUse", skiplist_count(lp->inuse));
		json_object_int_add(json, "Requests",
				    lp_fifo_count(&lp->requests));
		json_object_int_add(json, "requests",
				    lp_fifo_count(&lp->requests));
		json_object_int_add(json, "LabelChunks", listcount(lp->chunks));
		json_object_int_add(json, "labelChunks", listcount(lp->chunks));
		json_object_int_add(json, "Pending", lp->pending_count);
		json_object_int_add(json, "pending", lp->pending_count);
		json_object_int_add(json, "Reconnects", lp->reconnect_count);
		json_object_int_add(json, "reconnects", lp->reconnect_count);
		vty_json(vty, json);
	} else {
		vty_out(vty, "Labelpool Summary\n");
		vty_out(vty, "-----------------\n");
		vty_out(vty, "%-13s %d\n",
			"Ledger:", skiplist_count(lp->ledger));
		vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse));
		vty_out(vty, "%-13s %zu\n",
			"Requests:", lp_fifo_count(&lp->requests));
		vty_out(vty, "%-13s %d\n",
			"LabelChunks:", listcount(lp->chunks));
		vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count);
		vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count);
	}
	return CMD_SUCCESS;
}
781
782 DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
783 "show bgp labelpool ledger [json]",
784 SHOW_STR BGP_STR
785 "BGP Labelpool information\n"
786 "BGP Labelpool ledger\n" JSON_STR)
787 {
788 bool uj = use_json(argc, argv);
789 json_object *json = NULL, *json_elem = NULL;
790 struct lp_lcb *lcb = NULL;
791 struct bgp_dest *dest;
792 void *cursor = NULL;
793 const struct prefix *p;
794 int rc, count;
795
796 if (!lp) {
797 if (uj)
798 vty_out(vty, "{}\n");
799 else
800 vty_out(vty, "No existing BGP labelpool\n");
801 return (CMD_WARNING);
802 }
803
804 if (uj) {
805 count = skiplist_count(lp->ledger);
806 if (!count) {
807 vty_out(vty, "{}\n");
808 return CMD_SUCCESS;
809 }
810 json = json_object_new_array();
811 } else {
812 vty_out(vty, "Prefix Label\n");
813 vty_out(vty, "---------------------------\n");
814 }
815
816 for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
817 &cursor);
818 !rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
819 &cursor)) {
820 if (uj) {
821 json_elem = json_object_new_object();
822 json_object_array_add(json, json_elem);
823 }
824 switch (lcb->type) {
825 case LP_TYPE_BGP_LU:
826 if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
827 if (uj) {
828 json_object_string_add(
829 json_elem, "prefix", "INVALID");
830 json_object_int_add(json_elem, "label",
831 lcb->label);
832 } else
833 vty_out(vty, "%-18s %u\n",
834 "INVALID", lcb->label);
835 else {
836 p = bgp_dest_get_prefix(dest);
837 if (uj) {
838 json_object_string_addf(
839 json_elem, "prefix", "%pFX", p);
840 json_object_int_add(json_elem, "label",
841 lcb->label);
842 } else
843 vty_out(vty, "%-18pFX %u\n", p,
844 lcb->label);
845 }
846 break;
847 case LP_TYPE_VRF:
848 if (uj) {
849 json_object_string_add(json_elem, "prefix",
850 "VRF");
851 json_object_int_add(json_elem, "label",
852 lcb->label);
853 } else
854 vty_out(vty, "%-18s %u\n", "VRF",
855 lcb->label);
856
857 break;
858 }
859 }
860 if (uj)
861 vty_json(vty, json);
862 return CMD_SUCCESS;
863 }
864
865 DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
866 "show bgp labelpool inuse [json]",
867 SHOW_STR BGP_STR
868 "BGP Labelpool information\n"
869 "BGP Labelpool inuse\n" JSON_STR)
870 {
871 bool uj = use_json(argc, argv);
872 json_object *json = NULL, *json_elem = NULL;
873 struct bgp_dest *dest;
874 mpls_label_t label;
875 struct lp_lcb *lcb;
876 void *cursor = NULL;
877 const struct prefix *p;
878 int rc, count;
879
880 if (!lp) {
881 vty_out(vty, "No existing BGP labelpool\n");
882 return (CMD_WARNING);
883 }
884 if (!lp) {
885 if (uj)
886 vty_out(vty, "{}\n");
887 else
888 vty_out(vty, "No existing BGP labelpool\n");
889 return (CMD_WARNING);
890 }
891
892 if (uj) {
893 count = skiplist_count(lp->inuse);
894 if (!count) {
895 vty_out(vty, "{}\n");
896 return CMD_SUCCESS;
897 }
898 json = json_object_new_array();
899 } else {
900 vty_out(vty, "Prefix Label\n");
901 vty_out(vty, "---------------------------\n");
902 }
903 for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest,
904 &cursor);
905 !rc; rc = skiplist_next(lp->ledger, (void **)&label,
906 (void **)&dest, &cursor)) {
907 if (skiplist_search(lp->ledger, dest, (void **)&lcb))
908 continue;
909
910 if (uj) {
911 json_elem = json_object_new_object();
912 json_object_array_add(json, json_elem);
913 }
914
915 switch (lcb->type) {
916 case LP_TYPE_BGP_LU:
917 if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
918 if (uj) {
919 json_object_string_add(
920 json_elem, "prefix", "INVALID");
921 json_object_int_add(json_elem, "label",
922 label);
923 } else
924 vty_out(vty, "INVALID %u\n",
925 label);
926 else {
927 p = bgp_dest_get_prefix(dest);
928 if (uj) {
929 json_object_string_addf(
930 json_elem, "prefix", "%pFX", p);
931 json_object_int_add(json_elem, "label",
932 label);
933 } else
934 vty_out(vty, "%-18pFX %u\n", p,
935 label);
936 }
937 break;
938 case LP_TYPE_VRF:
939 if (uj) {
940 json_object_string_add(json_elem, "prefix",
941 "VRF");
942 json_object_int_add(json_elem, "label", label);
943 } else
944 vty_out(vty, "%-18s %u\n", "VRF",
945 label);
946 break;
947 }
948 }
949 if (uj)
950 vty_json(vty, json);
951 return CMD_SUCCESS;
952 }
953
/* vty: "show bgp labelpool requests [json]" — dump the pending FIFO */
DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
      "show bgp labelpool requests [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool requests\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct bgp_dest *dest;
	const struct prefix *p;
	struct lp_fifo *item, *next;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = lp_fifo_count(&lp->requests);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix \n");
		vty_out(vty, "----------------\n");
	}

	for (item = lp_fifo_first(&lp->requests); item; item = next) {
		next = lp_fifo_next_safe(&lp->requests, item);
		/* for LU requests labelid is the struct bgp_dest * */
		dest = item->lcb.labelid;
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (item->lcb.type) {
		case LP_TYPE_BGP_LU:
			if (!CHECK_FLAG(dest->flags,
					BGP_NODE_LABEL_REQUESTED)) {
				if (uj)
					json_object_string_add(
						json_elem, "prefix", "INVALID");
				else
					vty_out(vty, "INVALID\n");
			} else {
				p = bgp_dest_get_prefix(dest);
				if (uj)
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
				else
					vty_out(vty, "%-18pFX\n", p);
			}
			break;
		case LP_TYPE_VRF:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "VRF");
			else
				vty_out(vty, "VRF\n");
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1025
/* vty: "show bgp labelpool chunks [json]" — dump per-chunk ranges/free */
DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
      "show bgp labelpool chunks [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool chunks\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem;
	struct listnode *node;
	struct lp_chunk *chunk;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = listcount(lp->chunks);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
			"nfree");
		vty_out(vty, "-------------------------------------------\n");
	}

	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uint32_t size;

		size = chunk->last - chunk->first + 1;

		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
			json_object_int_add(json_elem, "first", chunk->first);
			json_object_int_add(json_elem, "last", chunk->last);
			json_object_int_add(json_elem, "size", size);
			json_object_int_add(json_elem, "numberFree",
					    chunk->nfree);
		} else
			vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
				chunk->last, size, chunk->nfree);
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1080
1081 #if BGP_LABELPOOL_ENABLE_TESTS
1082 /*------------------------------------------------------------------------
1083 * Testing code start
1084 *------------------------------------------------------------------------*/
1085
DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");

/* indices into struct lp_test's counter[] */
#define LPT_STAT_INSERT_FAIL 0
#define LPT_STAT_DELETE_FAIL 1
#define LPT_STAT_ALLOCATED 2
#define LPT_STAT_DEALLOCATED 3
#define LPT_STAT_MAX 4

/* printable counter names, indexed by LPT_STAT_* */
const char *lpt_counter_names[] = {
	"sl insert failures",
	"sl delete failures",
	"labels allocated",
	"labels deallocated",
};

static uint8_t lpt_generation;		/* id of current test run */
static bool lpt_inprogress;
static struct skiplist *lp_tests;	/* generation -> struct lp_test * */

/* diagnostic counters: failed TCB lookups, by call site */
static unsigned int lpt_test_cb_tcb_lookup_fails;
static unsigned int lpt_release_tcb_lookup_fails;
static unsigned int lpt_test_event_tcb_lookup_fails;
static unsigned int lpt_stop_tcb_lookup_fails;
1108
/* control block for one label pool stress-test run */
struct lp_test {
	uint8_t generation;		/* encoded into the labelids */
	unsigned int request_maximum;	/* total labels to request */
	unsigned int request_blocksize; /* requests per event cycle */
	uintptr_t request_count; /* match type of labelid */
	int label_type;
	struct skiplist *labels;	/* labelid -> assigned label */
	struct timeval starttime;
	struct skiplist *timestamps_alloc;   /* alloc count -> elapsed ms */
	struct skiplist *timestamps_dealloc; /* dealloc count -> elapsed ms */
	struct thread *event_thread;
	unsigned int counter[LPT_STAT_MAX];
};

/* test parameters */
#define LPT_MAX_COUNT 500000	/* get this many labels in all */
#define LPT_BLKSIZE 10000	/* this many at a time, then yield */
#define LPT_TS_INTERVAL 10000	/* timestamp every this many labels */
1127
1128
1129 static int test_cb(mpls_label_t label, void *labelid, bool allocated)
1130 {
1131 uintptr_t generation;
1132 struct lp_test *tcb;
1133
1134 generation = ((uintptr_t)labelid >> 24) & 0xff;
1135
1136 if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {
1137
1138 /* couldn't find current test in progress */
1139 ++lpt_test_cb_tcb_lookup_fails;
1140 return -1; /* reject allocation */
1141 }
1142
1143 if (allocated) {
1144 ++tcb->counter[LPT_STAT_ALLOCATED];
1145 if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
1146 uintptr_t time_ms;
1147
1148 time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
1149 skiplist_insert(tcb->timestamps_alloc,
1150 (void *)(uintptr_t)tcb
1151 ->counter[LPT_STAT_ALLOCATED],
1152 (void *)time_ms);
1153 }
1154 if (skiplist_insert(tcb->labels, labelid,
1155 (void *)(uintptr_t)label)) {
1156 ++tcb->counter[LPT_STAT_INSERT_FAIL];
1157 return -1;
1158 }
1159 } else {
1160 ++tcb->counter[LPT_STAT_DEALLOCATED];
1161 if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
1162 uintptr_t time_ms;
1163
1164 time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
1165 skiplist_insert(tcb->timestamps_dealloc,
1166 (void *)(uintptr_t)tcb
1167 ->counter[LPT_STAT_ALLOCATED],
1168 (void *)time_ms);
1169 }
1170 if (skiplist_delete(tcb->labels, labelid, 0)) {
1171 ++tcb->counter[LPT_STAT_DELETE_FAIL];
1172 return -1;
1173 }
1174 }
1175 return 0;
1176 }
1177
1178 static void labelpool_test_event_handler(struct thread *thread)
1179 {
1180 struct lp_test *tcb;
1181
1182 if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1183 (void **)&tcb)) {
1184
1185 /* couldn't find current test in progress */
1186 ++lpt_test_event_tcb_lookup_fails;
1187 return;
1188 }
1189
1190 /*
1191 * request a bunch of labels
1192 */
1193 for (unsigned int i = 0; (i < tcb->request_blocksize) &&
1194 (tcb->request_count < tcb->request_maximum);
1195 ++i) {
1196
1197 uintptr_t id;
1198
1199 ++tcb->request_count;
1200
1201 /*
1202 * construct 32-bit id from request_count and generation
1203 */
1204 id = ((uintptr_t)tcb->generation << 24) |
1205 (tcb->request_count & 0x00ffffff);
1206 bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
1207 }
1208
1209 if (tcb->request_count < tcb->request_maximum)
1210 thread_add_event(bm->master, labelpool_test_event_handler, NULL,
1211 0, &tcb->event_thread);
1212 }
1213
1214 static void lptest_stop(void)
1215 {
1216 struct lp_test *tcb;
1217
1218 if (!lpt_inprogress)
1219 return;
1220
1221 if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1222 (void **)&tcb)) {
1223
1224 /* couldn't find current test in progress */
1225 ++lpt_stop_tcb_lookup_fails;
1226 return;
1227 }
1228
1229 if (tcb->event_thread)
1230 thread_cancel(&tcb->event_thread);
1231
1232 lpt_inprogress = false;
1233 }
1234
1235 static int lptest_start(struct vty *vty)
1236 {
1237 struct lp_test *tcb;
1238
1239 if (lpt_inprogress) {
1240 vty_out(vty, "test already in progress\n");
1241 return -1;
1242 }
1243
1244 if (skiplist_count(lp_tests) >=
1245 (1 << (8 * sizeof(lpt_generation))) - 1) {
1246 /*
1247 * Too many test runs
1248 */
1249 vty_out(vty, "too many tests: clear first\n");
1250 return -1;
1251 }
1252
1253 /*
1254 * We pack the generation and request number into the labelid;
1255 * make sure they fit.
1256 */
1257 unsigned int n1 = LPT_MAX_COUNT;
1258 unsigned int sh = 0;
1259 unsigned int label_bits;
1260
1261 label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));
1262
1263 /* n1 should be same type as tcb->request_maximum */
1264 assert(sizeof(n1) == sizeof(tcb->request_maximum));
1265
1266 while (n1 >>= 1)
1267 ++sh;
1268 sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */
1269
1270 if (sh > label_bits) {
1271 vty_out(vty,
1272 "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
1273 LPT_MAX_COUNT, sh, label_bits);
1274 return -1;
1275 }
1276
1277 lpt_inprogress = true;
1278 ++lpt_generation;
1279
1280 tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));
1281
1282 tcb->generation = lpt_generation;
1283 tcb->label_type = LP_TYPE_VRF;
1284 tcb->request_maximum = LPT_MAX_COUNT;
1285 tcb->request_blocksize = LPT_BLKSIZE;
1286 tcb->labels = skiplist_new(0, NULL, NULL);
1287 tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
1288 tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
1289 thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
1290 &tcb->event_thread);
1291 monotime(&tcb->starttime);
1292
1293 skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
1294 return 0;
1295 }
1296
/* "debug bgp lptest start": begin a label pool performance test run */
DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
      "debug bgp lptest start",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "start\n")
{
	/* lptest_start() reports any error directly on the vty */
	lptest_start(vty);
	return CMD_SUCCESS;
}
1306
1307 static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
1308 {
1309 unsigned int i;
1310
1311 vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
1312 lpt_test_cb_tcb_lookup_fails);
1313 vty_out(vty, "Global Lookup Failures in release: %5u\n",
1314 lpt_release_tcb_lookup_fails);
1315 vty_out(vty, "Global Lookup Failures in event: %5u\n",
1316 lpt_test_event_tcb_lookup_fails);
1317 vty_out(vty, "Global Lookup Failures in stop: %5u\n",
1318 lpt_stop_tcb_lookup_fails);
1319 vty_out(vty, "\n");
1320
1321 if (!tcb) {
1322 if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
1323 (void **)&tcb)) {
1324 vty_out(vty, "Error: can't find test %u\n",
1325 lpt_generation);
1326 return;
1327 }
1328 }
1329
1330 vty_out(vty, "Test Generation %u:\n", tcb->generation);
1331
1332 vty_out(vty, "Counter Value\n");
1333 for (i = 0; i < LPT_STAT_MAX; ++i) {
1334 vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
1335 tcb->counter[i]);
1336 }
1337 vty_out(vty, "\n");
1338
1339 if (tcb->timestamps_alloc) {
1340 void *Key;
1341 void *Value;
1342 void *cursor;
1343
1344 float elapsed;
1345
1346 vty_out(vty, "%10s %10s\n", "Count", "Seconds");
1347
1348 cursor = NULL;
1349 while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
1350 &cursor)) {
1351
1352 elapsed = ((float)(uintptr_t)Value) / 1000;
1353
1354 vty_out(vty, "%10llu %10.3f\n",
1355 (unsigned long long)(uintptr_t)Key, elapsed);
1356 }
1357 vty_out(vty, "\n");
1358 }
1359 }
1360
/* "debug bgp lptest show": dump statistics for every recorded test run */
DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
      "debug bgp lptest show",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "show\n")
{

	if (lp_tests) {
		void *Key;
		void *Value;
		void *cursor;

		/* walk all generations; Value is a struct lp_test * */
		cursor = NULL;
		while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
			lptest_print_stats(vty, (struct lp_test *)Value);
		}
	} else {
		vty_out(vty, "no test results\n");
	}
	return CMD_SUCCESS;
}
1382
/* "debug bgp lptest stop": halt the running test and print its stats */
DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
      "debug bgp lptest stop",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "stop\n")
{

	if (lpt_inprogress) {
		lptest_stop();
		/* NULL tcb: print stats of the current generation */
		lptest_print_stats(vty, NULL);
	} else {
		vty_out(vty, "no test in progress\n");
	}
	return CMD_SUCCESS;
}
1398
/* "debug bgp lptest clear": stop any running test and discard all results */
DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
      "debug bgp lptest clear",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "clear\n")
{

	if (lpt_inprogress) {
		lptest_stop();
	}
	if (lp_tests) {
		while (!skiplist_first(lp_tests, NULL, NULL))
			/* del function of skiplist cleans up tcbs */
			skiplist_delete_first(lp_tests);
	}
	return CMD_SUCCESS;
}
1416
/*
 * With the "release" command, we can release labels at intervals through
 * the ID space. Thus we can to exercise the bitfield-wrapping behavior
 * of the allocator in a subsequent test.
 */
/* clang-format off */
DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
      "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
      DEBUG_STR
      BGP_STR
      "label pool test\n"
      "release labels\n"
      "\"test\"\n"
      "test number\n"
      "\"every\"\n"
      "label fraction denominator\n")
{
	/* clang-format on */

	unsigned long testnum;
	char *end;
	struct lp_test *tcb;

	/* parse the generation argument; reject trailing garbage */
	testnum = strtoul(generation, &end, 0);
	if (*end) {
		vty_out(vty, "Invalid test number: \"%s\"\n", generation);
		return CMD_SUCCESS;
	}
	/* can't release labels out from under a test still requesting them */
	if (lpt_inprogress && (testnum == lpt_generation)) {
		vty_out(vty,
			"Error: Test %lu is still in progress (stop first)\n",
			testnum);
		return CMD_SUCCESS;
	}

	if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
			    (void **)&tcb)) {

		/* couldn't find current test in progress */
		vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
			testnum);
		++lpt_release_tcb_lookup_fails;
		return CMD_SUCCESS;
	}

	void *Key, *cKey;
	void *Value, *cValue;
	void *cursor;
	unsigned int iteration;
	int rc;

	/*
	 * Walk the test's labels, releasing every every_nth-th one.
	 * The cursor is advanced past each entry BEFORE that entry is
	 * possibly deleted, so deletion never invalidates the cursor.
	 */
	cursor = NULL;
	iteration = 0;
	rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

	while (!rc) {
		cKey = Key;
		cValue = Value;

		/* find next item before we delete this one */
		rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

		if (!(iteration % every_nth)) {
			bgp_lp_release(tcb->label_type, cKey,
				       (mpls_label_t)(uintptr_t)cValue);
			skiplist_delete(tcb->labels, cKey, NULL);
			++tcb->counter[LPT_STAT_DEALLOCATED];
		}
		++iteration;
	}

	return CMD_SUCCESS;
}
1490
1491 static void lptest_delete(void *val)
1492 {
1493 struct lp_test *tcb = (struct lp_test *)val;
1494 void *Key;
1495 void *Value;
1496 void *cursor;
1497
1498 if (tcb->labels) {
1499 cursor = NULL;
1500 while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
1501 bgp_lp_release(tcb->label_type, Key,
1502 (mpls_label_t)(uintptr_t)Value);
1503 skiplist_free(tcb->labels);
1504 tcb->labels = NULL;
1505 }
1506 if (tcb->timestamps_alloc) {
1507 cursor = NULL;
1508 skiplist_free(tcb->timestamps_alloc);
1509 tcb->timestamps_alloc = NULL;
1510 }
1511
1512 if (tcb->timestamps_dealloc) {
1513 cursor = NULL;
1514 skiplist_free(tcb->timestamps_dealloc);
1515 tcb->timestamps_dealloc = NULL;
1516 }
1517
1518 if (tcb->event_thread)
1519 thread_cancel(&tcb->event_thread);
1520
1521 memset(tcb, 0, sizeof(*tcb));
1522
1523 XFREE(MTYPE_LABELPOOL_TEST, tcb);
1524 }
1525
/*
 * Create the global test table; lptest_delete() is installed as the
 * per-entry cleanup function so deleting an entry frees its tcb.
 */
static void lptest_init(void)
{
	lp_tests = skiplist_new(0, NULL, lptest_delete);
}
1530
1531 static void lptest_finish(void)
1532 {
1533 if (lp_tests) {
1534 skiplist_free(lp_tests);
1535 lp_tests = NULL;
1536 }
1537 }
1538
1539 /*------------------------------------------------------------------------
1540 * Testing code end
1541 *------------------------------------------------------------------------*/
1542 #endif /* BGP_LABELPOOL_ENABLE_TESTS */
1543
/*
 * Register the label pool "show" CLI commands and, when test support
 * is compiled in (BGP_LABELPOOL_ENABLE_TESTS), the performance-test
 * commands as well.
 */
void bgp_lp_vty_init(void)
{
	install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);

#if BGP_LABELPOOL_ENABLE_TESTS
	install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}