]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_labelpool.c
doc: Add `show ipv6 rpf X:X::X:X` command to docs
[mirror_frr.git] / bgpd / bgp_labelpool.c
1 /*
2 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
3 *
4 * Copyright (C) 2018 LabN Consulting, L.L.C.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <zebra.h>
22
23 #include "log.h"
24 #include "memory.h"
25 #include "stream.h"
26 #include "mpls.h"
27 #include "vty.h"
28 #include "linklist.h"
29 #include "skiplist.h"
30 #include "workqueue.h"
31 #include "zclient.h"
32 #include "mpls.h"
33
34 #include "bgpd/bgpd.h"
35 #include "bgpd/bgp_labelpool.h"
36 #include "bgpd/bgp_debug.h"
37 #include "bgpd/bgp_errors.h"
38 #include "bgpd/bgp_route.h"
39
40 #define BGP_LABELPOOL_ENABLE_TESTS 0
41
42 #include "bgpd/bgp_labelpool_clippy.c"
43
44
45 /*
46 * Definitions and external declarations.
47 */
48 extern struct zclient *zclient;
49
50 #if BGP_LABELPOOL_ENABLE_TESTS
51 static void lptest_init(void);
52 static void lptest_finish(void);
53 #endif
54
55 /*
56 * Remember where pool data are kept
57 */
58 static struct labelpool *lp;
59
60 /*
61 * Number of labels requested at a time from the zebra label manager.
62 * We start small but double the request size each time up to a
63 * maximum size.
64 *
65 * The label space is 20 bits which is shared with other FRR processes
66 * on this host, so to avoid greedily requesting a mostly wasted chunk,
67 * we limit the chunk size to 1/16 of the label space (that's the -4 bits
68 * in the definition below). This limit slightly increases our cost of
69 * finding free labels in our allocated chunks.
70 */
71 #define LP_CHUNK_SIZE_MIN 128
72 #define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
73
74 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
75 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
76 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment");
77 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback");
78
/*
 * One contiguous range of labels [first..last] obtained from the zebra
 * label manager, with a bitmap tracking which labels are handed out.
 */
struct lp_chunk {
	uint32_t first;			/* first label in chunk */
	uint32_t last;			/* last label in chunk (inclusive) */
	uint32_t nfree;			/* un-allocated count */
	uint32_t idx_last_allocated;	/* start looking here */
	bitfield_t allocated_map;	/* one bit per label; set = in use */
};
86
/*
 * label control block: one per labelid known to the pool (the ledger
 * maps labelid -> lp_lcb).
 */
struct lp_lcb {
	mpls_label_t label;		/* MPLS_LABEL_NONE = not allocated */
	int type;			/* LP_TYPE_* (see bgp_labelpool.h) */
	void *labelid;			/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
101
/*
 * FIFO entry for a label request that could not be satisfied from the
 * local chunks and is awaiting a fresh chunk from zebra. Holds a copy
 * of the LCB as it looked at request time.
 */
struct lp_fifo {
	struct lp_fifo_item fifo;	/* embedded list linkage */
	struct lp_lcb lcb;		/* snapshot of the request's LCB */
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo);
108
/*
 * Work-queue item: a deferred notification to a requestor that a label
 * was assigned to (allocated=true) or revoked from (allocated=false)
 * its labelid.
 */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated;	/* false = lost */
};
116
/*
 * Work-queue handler: deliver one allocation/loss notification to the
 * requestor via the callback recorded at request time. If the callback
 * rejects a new allocation (nonzero return), the bookkeeping entries
 * for the label are removed here.
 */
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			__func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 *
		 * NOTE(review): only the skiplist entries are removed here;
		 * the chunk's allocated_map bit and nfree count are not
		 * touched (cf. bgp_lp_release) — confirm whether rejected
		 * labels should also be returned via bf_release_index.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
						     (void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
								labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
174
175 static void lp_cbq_item_free(struct work_queue *wq, void *data)
176 {
177 XFREE(MTYPE_BGP_LABEL_CBQ, data);
178 }
179
180 static void lp_lcb_free(void *goner)
181 {
182 XFREE(MTYPE_BGP_LABEL_CB, goner);
183 }
184
185 static void lp_chunk_free(void *goner)
186 {
187 struct lp_chunk *chunk = (struct lp_chunk *)goner;
188
189 bf_free(chunk->allocated_map);
190 XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
191 }
192
/*
 * Initialize the label pool module.
 *
 * master: thread master used to service the callback work queue
 * pool:   caller-owned storage for pool state; remembered in the
 *         module-static 'lp' pointer
 */
void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	/* ledger: labelid -> LCB, for every current request/assignment */
	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	/* inuse: label -> labelid, for labels currently handed out */
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;

	/* chunk requests start small and double up to LP_CHUNK_SIZE_MAX */
	lp->next_chunksize = LP_CHUNK_SIZE_MIN;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_init();
#endif
}
217
218 /* check if a label callback was for a BGP LU node, and if so, unlock it */
219 static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
220 {
221 if (lcb->type == LP_TYPE_BGP_LU)
222 bgp_dest_unlock_node(lcb->labelid);
223 }
224
225 /* check if a label callback was for a BGP LU node, and if so, lock it */
226 static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
227 {
228 if (lcb->type == LP_TYPE_BGP_LU)
229 bgp_dest_lock_node(lcb->labelid);
230 }
231
/*
 * Tear down the label pool module: free all bookkeeping structures,
 * drain queued requests and pending callbacks (dropping the BGP LU
 * node locks they hold), and clear the module pointer.
 */
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_finish();
#endif
	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	/* drain not-yet-satisfied requests, releasing any LU node locks */
	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}
270
/*
 * Pull the next free label out of our local chunks and record it as
 * in use by labelid (lp->inuse). Returns MPLS_LABEL_NONE if no chunk
 * has a free label; the caller must then queue the request and ask
 * zebra for another chunk.
 */
static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;
		unsigned int index;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		/*
		 * don't look in chunks with no available labels
		 */
		if (!chunk->nfree)
			continue;

		/*
		 * roll through bitfield starting where we stopped
		 * last time
		 */
		index = bf_find_next_clear_bit_wrap(
			&chunk->allocated_map, chunk->idx_last_allocated + 1,
			0);

		/*
		 * since chunk->nfree is non-zero, we should always get
		 * a valid index
		 */
		assert(index != WORD_MAX);

		lbl = chunk->first + index;
		if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
			/* something is very wrong */
			zlog_err("%s: unable to insert inuse label %u (id %p)",
				 __func__, (uint32_t)lbl, labelid);
			return MPLS_LABEL_NONE;
		}

		/*
		 * Success
		 */
		bf_set_bit(chunk->allocated_map, index);
		chunk->idx_last_allocated = index;
		chunk->nfree -= 1;

		return lbl;
	}

	return MPLS_LABEL_NONE;
}
328
329 /*
330 * Success indicated by value of "label" field in returned LCB
331 */
332 static struct lp_lcb *lcb_alloc(
333 int type,
334 void *labelid,
335 int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
336 {
337 /*
338 * Set up label control block
339 */
340 struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
341 sizeof(struct lp_lcb));
342
343 new->label = get_label_from_pool(labelid);
344 new->type = type;
345 new->labelid = labelid;
346 new->cbfunc = cbfunc;
347
348 return new;
349 }
350
/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call bgp_lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * The callback function should return 0 to accept the allocation
 * and non-zero to refuse it. The callback function return value is
 * ignored for invalidations (i.e., when the "allocated" parameter is false)
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		/* new request: record it in the ledger */
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock node before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	/* no label yet; if this labelid was already queued, don't requeue */
	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock node before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	/* ask zebra for more labels only if outstanding requests exceed
	 * the labels already on order (pending_count) */
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (zclient_send_get_label_chunk(zclient, 0, lp->next_chunksize,
						 MPLS_LABEL_BASE_ANY) !=
		    ZCLIENT_SEND_FAILURE) {
			lp->pending_count += lp->next_chunksize;
			/* double the next request size, up to the cap */
			if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
				lp->next_chunksize <<= 1;
		}
	}
}
472
/*
 * Return a label to the pool when the requestor no longer needs it:
 * removes the inuse and ledger entries and clears the label's bit in
 * its owning chunk. No-op unless (labelid, label, type) matches the
 * LCB recorded in the ledger.
 */
void bgp_lp_release(
	int type,
	void *labelid,
	mpls_label_t label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			struct listnode *node;
			struct lp_chunk *chunk;
			uintptr_t lbl = label;
			bool deallocated = false;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);

			/*
			 * Find the chunk this label belongs to and
			 * deallocate the label
			 */
			for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
				uint32_t index;

				if ((label < chunk->first) ||
				    (label > chunk->last))
					continue;

				index = label - chunk->first;
				assert(bf_test_index(chunk->allocated_map,
						     index));
				bf_release_index(chunk->allocated_map, index);
				chunk->nfree += 1;
				deallocated = true;
			}
			/* the label must have come from one of our chunks */
			assert(deallocated);
		}
	}
}
515
516 /*
517 * zebra response giving us a chunk of labels
518 */
519 void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
520 {
521 struct lp_chunk *chunk;
522 int debug = BGP_DEBUG(labelpool, LABELPOOL);
523 struct lp_fifo *lf;
524 uint32_t labelcount;
525
526 if (last < first) {
527 flog_err(EC_BGP_LABEL,
528 "%s: zebra label chunk invalid: first=%u, last=%u",
529 __func__, first, last);
530 return;
531 }
532
533 chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));
534
535 labelcount = last - first + 1;
536
537 chunk->first = first;
538 chunk->last = last;
539 chunk->nfree = labelcount;
540 bf_init(chunk->allocated_map, labelcount);
541
542 /*
543 * Optimize for allocation by adding the new (presumably larger)
544 * chunk at the head of the list so it is examined first.
545 */
546 listnode_add_head(lp->chunks, chunk);
547
548 lp->pending_count -= labelcount;
549
550 if (debug) {
551 zlog_debug("%s: %zu pending requests", __func__,
552 lp_fifo_count(&lp->requests));
553 }
554
555 while (labelcount && (lf = lp_fifo_first(&lp->requests))) {
556
557 struct lp_lcb *lcb;
558 void *labelid = lf->lcb.labelid;
559
560 if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
561 /* request no longer in effect */
562
563 if (debug) {
564 zlog_debug("%s: labelid %p: request no longer in effect",
565 __func__, labelid);
566 }
567 /* if this was a BGP_LU request, unlock node
568 */
569 check_bgp_lu_cb_unlock(lcb);
570 goto finishedrequest;
571 }
572
573 /* have LCB */
574 if (lcb->label != MPLS_LABEL_NONE) {
575 /* request already has a label */
576 if (debug) {
577 zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
578 __func__, labelid,
579 lcb->label, lcb->label, lcb);
580 }
581 /* if this was a BGP_LU request, unlock node
582 */
583 check_bgp_lu_cb_unlock(lcb);
584
585 goto finishedrequest;
586 }
587
588 lcb->label = get_label_from_pool(lcb->labelid);
589
590 if (lcb->label == MPLS_LABEL_NONE) {
591 /*
592 * Out of labels in local pool, await next chunk
593 */
594 if (debug) {
595 zlog_debug("%s: out of labels, await more",
596 __func__);
597 }
598 break;
599 }
600
601 labelcount -= 1;
602
603 /*
604 * we filled the request from local pool.
605 * Enqueue response work item with new label.
606 */
607 struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
608 sizeof(struct lp_cbq_item));
609
610 q->cbfunc = lcb->cbfunc;
611 q->type = lcb->type;
612 q->label = lcb->label;
613 q->labelid = lcb->labelid;
614 q->allocated = true;
615
616 if (debug)
617 zlog_debug("%s: assigning label %u to labelid %p",
618 __func__, q->label, q->labelid);
619
620 work_queue_add(lp->callback_q, q);
621
622 finishedrequest:
623 lp_fifo_del(&lp->requests, lf);
624 XFREE(MTYPE_BGP_LABEL_FIFO, lf);
625 }
626 }
627
/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
	/* deliberately empty: existing assignments stay valid; all
	 * re-sync work happens in bgp_lp_event_zebra_up() */
}
635
/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	unsigned int labels_needed;
	unsigned int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	lp->reconnect_count++;
	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* scale up the chunk size to cover everything we need, capped */
	if (labels_needed > lp->next_chunksize) {
		while ((lp->next_chunksize < labels_needed) &&
		       (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))

			lp->next_chunksize <<= 1;
	}

	/* round up */
	chunks_needed = (labels_needed / lp->next_chunksize) + 1;
	labels_needed = chunks_needed * lp->next_chunksize;

	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed,
				     MPLS_LABEL_BASE_ANY);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate: tell the owner its label
				 * is gone (allocated=false)
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					    sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue: the owner will get a fresh label
			 * automatically when the new chunk arrives
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}
726
/* CLI: summary counters for the label pool (plain or JSON). */
DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
      "show bgp labelpool summary [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool summary\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		json = json_object_new_object();
		json_object_int_add(json, "ledger", skiplist_count(lp->ledger));
		json_object_int_add(json, "inUse", skiplist_count(lp->inuse));
		json_object_int_add(json, "requests",
				    lp_fifo_count(&lp->requests));
		json_object_int_add(json, "labelChunks", listcount(lp->chunks));
		json_object_int_add(json, "pending", lp->pending_count);
		json_object_int_add(json, "reconnects", lp->reconnect_count);
		vty_json(vty, json);
	} else {
		vty_out(vty, "Labelpool Summary\n");
		vty_out(vty, "-----------------\n");
		vty_out(vty, "%-13s %d\n",
			"Ledger:", skiplist_count(lp->ledger));
		vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse));
		vty_out(vty, "%-13s %zu\n",
			"Requests:", lp_fifo_count(&lp->requests));
		vty_out(vty, "%-13s %d\n",
			"LabelChunks:", listcount(lp->chunks));
		vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count);
		vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count);
	}
	return CMD_SUCCESS;
}
769
/*
 * CLI: dump the ledger (every labelid with its assigned label).
 * Keys are shown as prefixes for LP_TYPE_BGP_LU entries; other types
 * are summarized by type name.
 */
DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
      "show bgp labelpool ledger [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool ledger\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct lp_lcb *lcb = NULL;
	struct bgp_dest *dest;
	void *cursor = NULL;
	const struct prefix *p;
	int rc, count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = skiplist_count(lp->ledger);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix Label\n");
		vty_out(vty, "---------------------------\n");
	}

	/* cursor-based walk over ledger entries (labelid -> LCB) */
	for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
				&cursor);
	     !rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
				     &cursor)) {
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (lcb->type) {
		case LP_TYPE_BGP_LU:
			/* dest may be stale if the request was withdrawn */
			if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
				if (uj) {
					json_object_string_add(
						json_elem, "prefix", "INVALID");
					json_object_int_add(json_elem, "label",
							    lcb->label);
				} else
					vty_out(vty, "%-18s %u\n",
						"INVALID", lcb->label);
			else {
				p = bgp_dest_get_prefix(dest);
				if (uj) {
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
					json_object_int_add(json_elem, "label",
							    lcb->label);
				} else
					vty_out(vty, "%-18pFX %u\n", p,
						lcb->label);
			}
			break;
		case LP_TYPE_VRF:
			if (uj) {
				json_object_string_add(json_elem, "prefix",
						       "VRF");
				json_object_int_add(json_elem, "label",
						    lcb->label);
			} else
				vty_out(vty, "%-18s %u\n", "VRF",
					lcb->label);

			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
852
853 DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
854 "show bgp labelpool inuse [json]",
855 SHOW_STR BGP_STR
856 "BGP Labelpool information\n"
857 "BGP Labelpool inuse\n" JSON_STR)
858 {
859 bool uj = use_json(argc, argv);
860 json_object *json = NULL, *json_elem = NULL;
861 struct bgp_dest *dest;
862 mpls_label_t label;
863 struct lp_lcb *lcb;
864 void *cursor = NULL;
865 const struct prefix *p;
866 int rc, count;
867
868 if (!lp) {
869 vty_out(vty, "No existing BGP labelpool\n");
870 return (CMD_WARNING);
871 }
872 if (!lp) {
873 if (uj)
874 vty_out(vty, "{}\n");
875 else
876 vty_out(vty, "No existing BGP labelpool\n");
877 return (CMD_WARNING);
878 }
879
880 if (uj) {
881 count = skiplist_count(lp->inuse);
882 if (!count) {
883 vty_out(vty, "{}\n");
884 return CMD_SUCCESS;
885 }
886 json = json_object_new_array();
887 } else {
888 vty_out(vty, "Prefix Label\n");
889 vty_out(vty, "---------------------------\n");
890 }
891 for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest,
892 &cursor);
893 !rc; rc = skiplist_next(lp->ledger, (void **)&label,
894 (void **)&dest, &cursor)) {
895 if (skiplist_search(lp->ledger, dest, (void **)&lcb))
896 continue;
897
898 if (uj) {
899 json_elem = json_object_new_object();
900 json_object_array_add(json, json_elem);
901 }
902
903 switch (lcb->type) {
904 case LP_TYPE_BGP_LU:
905 if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
906 if (uj) {
907 json_object_string_add(
908 json_elem, "prefix", "INVALID");
909 json_object_int_add(json_elem, "label",
910 label);
911 } else
912 vty_out(vty, "INVALID %u\n",
913 label);
914 else {
915 p = bgp_dest_get_prefix(dest);
916 if (uj) {
917 json_object_string_addf(
918 json_elem, "prefix", "%pFX", p);
919 json_object_int_add(json_elem, "label",
920 label);
921 } else
922 vty_out(vty, "%-18pFX %u\n", p,
923 label);
924 }
925 break;
926 case LP_TYPE_VRF:
927 if (uj) {
928 json_object_string_add(json_elem, "prefix",
929 "VRF");
930 json_object_int_add(json_elem, "label", label);
931 } else
932 vty_out(vty, "%-18s %u\n", "VRF",
933 label);
934 break;
935 }
936 }
937 if (uj)
938 vty_json(vty, json);
939 return CMD_SUCCESS;
940 }
941
/* CLI: dump requests still queued awaiting a label chunk from zebra. */
DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
      "show bgp labelpool requests [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool requests\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct bgp_dest *dest;
	const struct prefix *p;
	struct lp_fifo *item, *next;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = lp_fifo_count(&lp->requests);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix \n");
		vty_out(vty, "----------------\n");
	}

	/* safe iteration: fetch next before examining item */
	for (item = lp_fifo_first(&lp->requests); item; item = next) {
		next = lp_fifo_next_safe(&lp->requests, item);
		dest = item->lcb.labelid;
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (item->lcb.type) {
		case LP_TYPE_BGP_LU:
			/* dest may be stale if the request was withdrawn */
			if (!CHECK_FLAG(dest->flags,
					BGP_NODE_LABEL_REQUESTED)) {
				if (uj)
					json_object_string_add(
						json_elem, "prefix", "INVALID");
				else
					vty_out(vty, "INVALID\n");
			} else {
				p = bgp_dest_get_prefix(dest);
				if (uj)
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
				else
					vty_out(vty, "%-18pFX\n", p);
			}
			break;
		case LP_TYPE_VRF:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "VRF");
			else
				vty_out(vty, "VRF\n");
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1013
/* CLI: dump every chunk obtained from zebra with its free count. */
DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
      "show bgp labelpool chunks [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool chunks\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem;
	struct listnode *node;
	struct lp_chunk *chunk;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = listcount(lp->chunks);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
			"nfree");
		vty_out(vty, "-------------------------------------------\n");
	}

	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uint32_t size;

		size = chunk->last - chunk->first + 1;

		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
			json_object_int_add(json_elem, "first", chunk->first);
			json_object_int_add(json_elem, "last", chunk->last);
			json_object_int_add(json_elem, "size", size);
			json_object_int_add(json_elem, "numberFree",
					    chunk->nfree);
		} else
			vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
				chunk->last, size, chunk->nfree);
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1068
1069 #if BGP_LABELPOOL_ENABLE_TESTS
1070 /*------------------------------------------------------------------------
1071 * Testing code start
1072 *------------------------------------------------------------------------*/
1073
DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");

/* indices into struct lp_test counter[] */
#define LPT_STAT_INSERT_FAIL 0
#define LPT_STAT_DELETE_FAIL 1
#define LPT_STAT_ALLOCATED 2
#define LPT_STAT_DEALLOCATED 3
#define LPT_STAT_MAX 4

/* display names for the counters above, in the same order */
const char *lpt_counter_names[] = {
	"sl insert failures",
	"sl delete failures",
	"labels allocated",
	"labels deallocated",
};

/* each test run gets a new generation number, packed into labelids */
static uint8_t lpt_generation;
static bool lpt_inprogress;
static struct skiplist *lp_tests;	/* generation -> struct lp_test */
/* diagnostics: TCB lookup failures, one counter per lookup site */
static unsigned int lpt_test_cb_tcb_lookup_fails;
static unsigned int lpt_release_tcb_lookup_fails;
static unsigned int lpt_test_event_tcb_lookup_fails;
static unsigned int lpt_stop_tcb_lookup_fails;

/* per-test-run control block */
struct lp_test {
	uint8_t generation;
	unsigned int request_maximum;	/* total labels to request */
	unsigned int request_blocksize;	/* labels per event pass */
	uintptr_t request_count; /* match type of labelid */
	int label_type;
	struct skiplist *labels;	/* labelid -> assigned label */
	struct timeval starttime;	/* for elapsed-time timestamps */
	struct skiplist *timestamps_alloc;	/* alloc count -> ms */
	struct skiplist *timestamps_dealloc;	/* dealloc count -> ms */
	struct thread *event_thread;
	unsigned int counter[LPT_STAT_MAX];
};

/* test parameters */
#define LPT_MAX_COUNT 500000 /* get this many labels in all */
#define LPT_BLKSIZE 10000 /* this many at a time, then yield */
#define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
1116
1117 static int test_cb(mpls_label_t label, void *labelid, bool allocated)
1118 {
1119 uintptr_t generation;
1120 struct lp_test *tcb;
1121
1122 generation = ((uintptr_t)labelid >> 24) & 0xff;
1123
1124 if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {
1125
1126 /* couldn't find current test in progress */
1127 ++lpt_test_cb_tcb_lookup_fails;
1128 return -1; /* reject allocation */
1129 }
1130
1131 if (allocated) {
1132 ++tcb->counter[LPT_STAT_ALLOCATED];
1133 if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
1134 uintptr_t time_ms;
1135
1136 time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
1137 skiplist_insert(tcb->timestamps_alloc,
1138 (void *)(uintptr_t)tcb
1139 ->counter[LPT_STAT_ALLOCATED],
1140 (void *)time_ms);
1141 }
1142 if (skiplist_insert(tcb->labels, labelid,
1143 (void *)(uintptr_t)label)) {
1144 ++tcb->counter[LPT_STAT_INSERT_FAIL];
1145 return -1;
1146 }
1147 } else {
1148 ++tcb->counter[LPT_STAT_DEALLOCATED];
1149 if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
1150 uintptr_t time_ms;
1151
1152 time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
1153 skiplist_insert(tcb->timestamps_dealloc,
1154 (void *)(uintptr_t)tcb
1155 ->counter[LPT_STAT_ALLOCATED],
1156 (void *)time_ms);
1157 }
1158 if (skiplist_delete(tcb->labels, labelid, 0)) {
1159 ++tcb->counter[LPT_STAT_DELETE_FAIL];
1160 return -1;
1161 }
1162 }
1163 return 0;
1164 }
1165
1166 static void labelpool_test_event_handler(struct thread *thread)
1167 {
1168 struct lp_test *tcb;
1169
1170 if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1171 (void **)&tcb)) {
1172
1173 /* couldn't find current test in progress */
1174 ++lpt_test_event_tcb_lookup_fails;
1175 return;
1176 }
1177
1178 /*
1179 * request a bunch of labels
1180 */
1181 for (unsigned int i = 0; (i < tcb->request_blocksize) &&
1182 (tcb->request_count < tcb->request_maximum);
1183 ++i) {
1184
1185 uintptr_t id;
1186
1187 ++tcb->request_count;
1188
1189 /*
1190 * construct 32-bit id from request_count and generation
1191 */
1192 id = ((uintptr_t)tcb->generation << 24) |
1193 (tcb->request_count & 0x00ffffff);
1194 bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
1195 }
1196
1197 if (tcb->request_count < tcb->request_maximum)
1198 thread_add_event(bm->master, labelpool_test_event_handler, NULL,
1199 0, &tcb->event_thread);
1200 }
1201
1202 static void lptest_stop(void)
1203 {
1204 struct lp_test *tcb;
1205
1206 if (!lpt_inprogress)
1207 return;
1208
1209 if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1210 (void **)&tcb)) {
1211
1212 /* couldn't find current test in progress */
1213 ++lpt_stop_tcb_lookup_fails;
1214 return;
1215 }
1216
1217 if (tcb->event_thread)
1218 thread_cancel(&tcb->event_thread);
1219
1220 lpt_inprogress = false;
1221 }
1222
1223 static int lptest_start(struct vty *vty)
1224 {
1225 struct lp_test *tcb;
1226
1227 if (lpt_inprogress) {
1228 vty_out(vty, "test already in progress\n");
1229 return -1;
1230 }
1231
1232 if (skiplist_count(lp_tests) >=
1233 (1 << (8 * sizeof(lpt_generation))) - 1) {
1234 /*
1235 * Too many test runs
1236 */
1237 vty_out(vty, "too many tests: clear first\n");
1238 return -1;
1239 }
1240
1241 /*
1242 * We pack the generation and request number into the labelid;
1243 * make sure they fit.
1244 */
1245 unsigned int n1 = LPT_MAX_COUNT;
1246 unsigned int sh = 0;
1247 unsigned int label_bits;
1248
1249 label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));
1250
1251 /* n1 should be same type as tcb->request_maximum */
1252 assert(sizeof(n1) == sizeof(tcb->request_maximum));
1253
1254 while (n1 >>= 1)
1255 ++sh;
1256 sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */
1257
1258 if (sh > label_bits) {
1259 vty_out(vty,
1260 "Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
1261 LPT_MAX_COUNT, sh, label_bits);
1262 return -1;
1263 }
1264
1265 lpt_inprogress = true;
1266 ++lpt_generation;
1267
1268 tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));
1269
1270 tcb->generation = lpt_generation;
1271 tcb->label_type = LP_TYPE_VRF;
1272 tcb->request_maximum = LPT_MAX_COUNT;
1273 tcb->request_blocksize = LPT_BLKSIZE;
1274 tcb->labels = skiplist_new(0, NULL, NULL);
1275 tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
1276 tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
1277 thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
1278 &tcb->event_thread);
1279 monotime(&tcb->starttime);
1280
1281 skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
1282 return 0;
1283 }
1284
/* CLI: "debug bgp lptest start" -- kick off a label pool perf test run */
DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
      "debug bgp lptest start",
      DEBUG_STR BGP_STR
      "label pool test\n"
      "start\n")
{
	/* errors are reported to the vty by lptest_start itself */
	lptest_start(vty);
	return CMD_SUCCESS;
}
1294
1295 static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
1296 {
1297 unsigned int i;
1298
1299 vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
1300 lpt_test_cb_tcb_lookup_fails);
1301 vty_out(vty, "Global Lookup Failures in release: %5u\n",
1302 lpt_release_tcb_lookup_fails);
1303 vty_out(vty, "Global Lookup Failures in event: %5u\n",
1304 lpt_test_event_tcb_lookup_fails);
1305 vty_out(vty, "Global Lookup Failures in stop: %5u\n",
1306 lpt_stop_tcb_lookup_fails);
1307 vty_out(vty, "\n");
1308
1309 if (!tcb) {
1310 if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
1311 (void **)&tcb)) {
1312 vty_out(vty, "Error: can't find test %u\n",
1313 lpt_generation);
1314 return;
1315 }
1316 }
1317
1318 vty_out(vty, "Test Generation %u:\n", tcb->generation);
1319
1320 vty_out(vty, "Counter Value\n");
1321 for (i = 0; i < LPT_STAT_MAX; ++i) {
1322 vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
1323 tcb->counter[i]);
1324 }
1325 vty_out(vty, "\n");
1326
1327 if (tcb->timestamps_alloc) {
1328 void *Key;
1329 void *Value;
1330 void *cursor;
1331
1332 float elapsed;
1333
1334 vty_out(vty, "%10s %10s\n", "Count", "Seconds");
1335
1336 cursor = NULL;
1337 while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
1338 &cursor)) {
1339
1340 elapsed = ((float)(uintptr_t)Value) / 1000;
1341
1342 vty_out(vty, "%10llu %10.3f\n",
1343 (unsigned long long)(uintptr_t)Key, elapsed);
1344 }
1345 vty_out(vty, "\n");
1346 }
1347 }
1348
1349 DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
1350 "debug bgp lptest show",
1351 DEBUG_STR BGP_STR
1352 "label pool test\n"
1353 "show\n")
1354 {
1355
1356 if (lp_tests) {
1357 void *Key;
1358 void *Value;
1359 void *cursor;
1360
1361 cursor = NULL;
1362 while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
1363 lptest_print_stats(vty, (struct lp_test *)Value);
1364 }
1365 } else {
1366 vty_out(vty, "no test results\n");
1367 }
1368 return CMD_SUCCESS;
1369 }
1370
1371 DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
1372 "debug bgp lptest stop",
1373 DEBUG_STR BGP_STR
1374 "label pool test\n"
1375 "stop\n")
1376 {
1377
1378 if (lpt_inprogress) {
1379 lptest_stop();
1380 lptest_print_stats(vty, NULL);
1381 } else {
1382 vty_out(vty, "no test in progress\n");
1383 }
1384 return CMD_SUCCESS;
1385 }
1386
1387 DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
1388 "debug bgp lptest clear",
1389 DEBUG_STR BGP_STR
1390 "label pool test\n"
1391 "clear\n")
1392 {
1393
1394 if (lpt_inprogress) {
1395 lptest_stop();
1396 }
1397 if (lp_tests) {
1398 while (!skiplist_first(lp_tests, NULL, NULL))
1399 /* del function of skiplist cleans up tcbs */
1400 skiplist_delete_first(lp_tests);
1401 }
1402 return CMD_SUCCESS;
1403 }
1404
/*
 * With the "release" command, we can release labels at intervals through
 * the ID space. Thus we can exercise the bitfield-wrapping behavior
 * of the allocator in a subsequent test.
 */
/* clang-format off */
DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
      "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
      DEBUG_STR
      BGP_STR
      "label pool test\n"
      "release labels\n"
      "\"test\"\n"
      "test number\n"
      "\"every\"\n"
      "label fraction denominator\n")
{
	/* clang-format on */

	unsigned long testnum;
	char *end;
	struct lp_test *tcb;

	/* parse the generation number given on the command line */
	testnum = strtoul(generation, &end, 0);
	if (*end) {
		vty_out(vty, "Invalid test number: \"%s\"\n", generation);
		return CMD_SUCCESS;
	}
	/* a running test still owns its labels; it must be stopped first */
	if (lpt_inprogress && (testnum == lpt_generation)) {
		vty_out(vty,
			"Error: Test %lu is still in progress (stop first)\n",
			testnum);
		return CMD_SUCCESS;
	}

	if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
			    (void **)&tcb)) {

		/* couldn't find current test in progress */
		vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
			testnum);
		++lpt_release_tcb_lookup_fails;
		return CMD_SUCCESS;
	}

	void *Key, *cKey;
	void *Value, *cValue;
	void *cursor;
	unsigned int iteration;
	int rc;

	cursor = NULL;
	iteration = 0;
	rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

	/*
	 * Walk the test's allocated labels, releasing every Nth one
	 * (every_nth comes from the command line, range 1-5). The cursor
	 * is advanced BEFORE the current item may be deleted, so the
	 * iteration stays valid across skiplist_delete().
	 */
	while (!rc) {
		cKey = Key;
		cValue = Value;

		/* find next item before we delete this one */
		rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

		if (!(iteration % every_nth)) {
			/* hand the label back to the pool and forget it */
			bgp_lp_release(tcb->label_type, cKey,
				       (mpls_label_t)(uintptr_t)cValue);
			skiplist_delete(tcb->labels, cKey, NULL);
			++tcb->counter[LPT_STAT_DEALLOCATED];
		}
		++iteration;
	}

	return CMD_SUCCESS;
}
1478
1479 static void lptest_delete(void *val)
1480 {
1481 struct lp_test *tcb = (struct lp_test *)val;
1482 void *Key;
1483 void *Value;
1484 void *cursor;
1485
1486 if (tcb->labels) {
1487 cursor = NULL;
1488 while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
1489 bgp_lp_release(tcb->label_type, Key,
1490 (mpls_label_t)(uintptr_t)Value);
1491 skiplist_free(tcb->labels);
1492 tcb->labels = NULL;
1493 }
1494 if (tcb->timestamps_alloc) {
1495 cursor = NULL;
1496 skiplist_free(tcb->timestamps_alloc);
1497 tcb->timestamps_alloc = NULL;
1498 }
1499
1500 if (tcb->timestamps_dealloc) {
1501 cursor = NULL;
1502 skiplist_free(tcb->timestamps_dealloc);
1503 tcb->timestamps_dealloc = NULL;
1504 }
1505
1506 if (tcb->event_thread)
1507 thread_cancel(&tcb->event_thread);
1508
1509 memset(tcb, 0, sizeof(*tcb));
1510
1511 XFREE(MTYPE_LABELPOOL_TEST, tcb);
1512 }
1513
/*
 * Create the registry of test runs; lptest_delete cleans up each
 * entry (including releasing its labels) when it is removed.
 */
static void lptest_init(void)
{
	lp_tests = skiplist_new(0, NULL, lptest_delete);
}
1518
1519 static void lptest_finish(void)
1520 {
1521 if (lp_tests) {
1522 skiplist_free(lp_tests);
1523 lp_tests = NULL;
1524 }
1525 }
1526
1527 /*------------------------------------------------------------------------
1528 * Testing code end
1529 *------------------------------------------------------------------------*/
1530 #endif /* BGP_LABELPOOL_ENABLE_TESTS */
1531
/*
 * Register the label pool "show" commands (and, when compiled with
 * BGP_LABELPOOL_ENABLE_TESTS, the perf-test commands) with the CLI.
 */
void bgp_lp_vty_init(void)
{
	install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);

#if BGP_LABELPOOL_ENABLE_TESTS
	/* test commands require ENABLE_NODE: they mutate pool state */
	install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}