/*
 * Imported from the mirror_frr.git repository (git.proxmox.com),
 * file bgpd/bgp_labelpool.c (merge #13649, unlock_the_node_or_else).
 * Gitweb navigation header removed.
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
4 *
5 * Copyright (C) 2018 LabN Consulting, L.L.C.
6 */
7
8 #include <zebra.h>
9
10 #include "log.h"
11 #include "memory.h"
12 #include "stream.h"
13 #include "mpls.h"
14 #include "vty.h"
15 #include "linklist.h"
16 #include "skiplist.h"
17 #include "workqueue.h"
18 #include "zclient.h"
19 #include "mpls.h"
20
21 #include "bgpd/bgpd.h"
22 #include "bgpd/bgp_labelpool.h"
23 #include "bgpd/bgp_debug.h"
24 #include "bgpd/bgp_errors.h"
25 #include "bgpd/bgp_route.h"
26 #include "bgpd/bgp_zebra.h"
27 #include "bgpd/bgp_vty.h"
28 #include "bgpd/bgp_rd.h"
29
30 #define BGP_LABELPOOL_ENABLE_TESTS 0
31
32 #include "bgpd/bgp_labelpool_clippy.c"
33
34
35 /*
36 * Definitions and external declarations.
37 */
38 extern struct zclient *zclient;
39
40 #if BGP_LABELPOOL_ENABLE_TESTS
41 static void lptest_init(void);
42 static void lptest_finish(void);
43 #endif
44
45 /*
46 * Remember where pool data are kept
47 */
48 static struct labelpool *lp;
49
50 /*
51 * Number of labels requested at a time from the zebra label manager.
52 * We start small but double the request size each time up to a
53 * maximum size.
54 *
55 * The label space is 20 bits which is shared with other FRR processes
56 * on this host, so to avoid greedily requesting a mostly wasted chunk,
57 * we limit the chunk size to 1/16 of the label space (that's the -4 bits
58 * in the definition below). This limit slightly increases our cost of
59 * finding free labels in our allocated chunks.
60 */
61 #define LP_CHUNK_SIZE_MIN 128
62 #define LP_CHUNK_SIZE_MAX (1 << (20 - 4))
63
64 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk");
65 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item");
66 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment");
67 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback");
68
/*
 * One contiguous range of labels obtained from the zebra label manager,
 * plus a bitmap tracking which labels in the range are handed out.
 */
struct lp_chunk {
	uint32_t first;		/* lowest label in this chunk */
	uint32_t last;		/* highest label in this chunk */
	uint32_t nfree; /* un-allocated count */
	uint32_t idx_last_allocated; /* start looking here */
	bitfield_t allocated_map;	/* one bit per label; set = in use */
};
76
77 /*
78 * label control block
79 */
struct lp_lcb {
	mpls_label_t label; /* MPLS_LABEL_NONE = not allocated */
	int type;	    /* LP_TYPE_*; BGP_LU entries get node lock/unlock */
	void *labelid; /* unique ID; for LP_TYPE_BGP_LU this is a bgp_dest */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
91
/*
 * Queued label request awaiting a chunk from zebra: FIFO linkage plus
 * a by-value copy of the request's label control block.
 */
struct lp_fifo {
	struct lp_fifo_item fifo;
	struct lp_lcb lcb;
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo);
98
/* Work-queue element carrying one allocation/loss notification */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated; /* false = lost */
};
106
/*
 * Work-queue handler: deliver one label allocation/loss notification to
 * the requestor's callback. If the requestor refuses an allocation,
 * roll back the pool's bookkeeping for that label.
 */
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	/* non-zero return from the callback means it refused the label */
	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				/* ledger entry removed first; its free
				 * callback (lp_lcb_free) frees the LCB */
				if (!skiplist_search(lp->ledger, labelid,
						     (void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
								labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
164
165 static void lp_cbq_item_free(struct work_queue *wq, void *data)
166 {
167 XFREE(MTYPE_BGP_LABEL_CBQ, data);
168 }
169
170 static void lp_lcb_free(void *goner)
171 {
172 XFREE(MTYPE_BGP_LABEL_CB, goner);
173 }
174
175 static void lp_chunk_free(void *goner)
176 {
177 struct lp_chunk *chunk = (struct lp_chunk *)goner;
178
179 bf_free(chunk->allocated_map);
180 XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
181 }
182
183 void bgp_lp_init(struct event_loop *master, struct labelpool *pool)
184 {
185 if (BGP_DEBUG(labelpool, LABELPOOL))
186 zlog_debug("%s: entry", __func__);
187
188 lp = pool; /* Set module pointer to pool data */
189
190 lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
191 lp->inuse = skiplist_new(0, NULL, NULL);
192 lp->chunks = list_new();
193 lp->chunks->del = lp_chunk_free;
194 lp_fifo_init(&lp->requests);
195 lp->callback_q = work_queue_new(master, "label callbacks");
196
197 lp->callback_q->spec.workfunc = lp_cbq_docallback;
198 lp->callback_q->spec.del_item_data = lp_cbq_item_free;
199 lp->callback_q->spec.max_retries = 0;
200
201 lp->next_chunksize = LP_CHUNK_SIZE_MIN;
202
203 #if BGP_LABELPOOL_ENABLE_TESTS
204 lptest_init();
205 #endif
206 }
207
208 /* check if a label callback was for a BGP LU node, and if so, unlock it */
209 static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
210 {
211 if (lcb->type == LP_TYPE_BGP_LU)
212 bgp_dest_unlock_node(lcb->labelid);
213 }
214
215 /* check if a label callback was for a BGP LU node, and if so, lock it */
216 static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
217 {
218 if (lcb->type == LP_TYPE_BGP_LU)
219 bgp_dest_lock_node(lcb->labelid);
220 }
221
/*
 * Module teardown: free all pool state. Safe to call when the pool was
 * never initialized. Pending BGP-LU requests and queued-but-unrun
 * callbacks must have their node locks dropped manually (see below).
 */
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

#if BGP_LABELPOOL_ENABLE_TESTS
	lptest_finish();
#endif
	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	/* lp_chunk_free (chunks->del) releases each chunk's bitmap */
	list_delete(&lp->chunks);

	/* drain queued requests, dropping BGP-LU node locks as we go */
	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}
260
261 static mpls_label_t get_label_from_pool(void *labelid)
262 {
263 struct listnode *node;
264 struct lp_chunk *chunk;
265 int debug = BGP_DEBUG(labelpool, LABELPOOL);
266
267 /*
268 * Find a free label
269 */
270 for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
271 uintptr_t lbl;
272 unsigned int index;
273
274 if (debug)
275 zlog_debug("%s: chunk first=%u last=%u",
276 __func__, chunk->first, chunk->last);
277
278 /*
279 * don't look in chunks with no available labels
280 */
281 if (!chunk->nfree)
282 continue;
283
284 /*
285 * roll through bitfield starting where we stopped
286 * last time
287 */
288 index = bf_find_next_clear_bit_wrap(
289 &chunk->allocated_map, chunk->idx_last_allocated + 1,
290 0);
291
292 /*
293 * since chunk->nfree is non-zero, we should always get
294 * a valid index
295 */
296 assert(index != WORD_MAX);
297
298 lbl = chunk->first + index;
299 if (skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
300 /* something is very wrong */
301 zlog_err("%s: unable to insert inuse label %u (id %p)",
302 __func__, (uint32_t)lbl, labelid);
303 return MPLS_LABEL_NONE;
304 }
305
306 /*
307 * Success
308 */
309 bf_set_bit(chunk->allocated_map, index);
310 chunk->idx_last_allocated = index;
311 chunk->nfree -= 1;
312
313 return lbl;
314 }
315
316 return MPLS_LABEL_NONE;
317 }
318
319 /*
320 * Success indicated by value of "label" field in returned LCB
321 */
322 static struct lp_lcb *lcb_alloc(
323 int type,
324 void *labelid,
325 int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
326 {
327 /*
328 * Set up label control block
329 */
330 struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
331 sizeof(struct lp_lcb));
332
333 new->label = get_label_from_pool(labelid);
334 new->type = type;
335 new->labelid = labelid;
336 new->cbfunc = cbfunc;
337
338 return new;
339 }
340
341 /*
342 * Callers who need labels must supply a type, labelid, and callback.
343 * The type is a value defined in bgp_labelpool.h (add types as needed).
344 * The callback is for asynchronous notification of label allocation.
345 * The labelid is passed as an argument to the callback. It should be unique
346 * to the requested label instance.
347 *
348 * If zebra is not connected, callbacks with labels will be delayed
349 * until connection is established. If zebra connection is lost after
350 * labels have been assigned, existing assignments via this labelpool
351 * module will continue until reconnection.
352 *
353 * When connection to zebra is reestablished, previous label assignments
354 * will be invalidated (via callbacks having the "allocated" parameter unset)
355 * and new labels will be automatically reassigned by this labelpool module
356 * (that is, a requestor does not need to call bgp_lp_get() again if it is
357 * notified via callback that its label has been lost: it will eventually
358 * get another callback with a new label assignment).
359 *
360 * The callback function should return 0 to accept the allocation
361 * and non-zero to refuse it. The callback function return value is
362 * ignored for invalidations (i.e., when the "allocated" parameter is false)
363 *
364 * Prior requests for a given labelid are detected so that requests and
365 * assignments are not duplicated.
366 */
/*
 * Request a label for "labelid" (see block comment above for the full
 * contract). Fast path: fill from a local chunk and queue the callback.
 * Slow path: queue the request and, if needed, ask zebra for a chunk.
 */
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		/* first request: lcb_alloc tries the local pool at once */
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock node before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	/* repeat request with no label yet: already queued, nothing to do */
	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	/* FIFO entry carries a by-value copy of the LCB */
	lf->lcb = *lcb;
	/* if this is a LU request, lock node before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	/* only ask zebra when queued requests outnumber labels en route */
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (zclient_send_get_label_chunk(zclient, 0, lp->next_chunksize,
						 MPLS_LABEL_BASE_ANY) !=
		    ZCLIENT_SEND_FAILURE) {
			lp->pending_count += lp->next_chunksize;
			/* exponential ramp, capped at LP_CHUNK_SIZE_MAX */
			if ((lp->next_chunksize << 1) <= LP_CHUNK_SIZE_MAX)
				lp->next_chunksize <<= 1;
		}
	}
}
462
463 void bgp_lp_release(
464 int type,
465 void *labelid,
466 mpls_label_t label)
467 {
468 struct lp_lcb *lcb;
469
470 if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
471 if (label == lcb->label && type == lcb->type) {
472 struct listnode *node;
473 struct lp_chunk *chunk;
474 uintptr_t lbl = label;
475 bool deallocated = false;
476
477 /* no longer in use */
478 skiplist_delete(lp->inuse, (void *)lbl, NULL);
479
480 /* no longer requested */
481 skiplist_delete(lp->ledger, labelid, NULL);
482
483 /*
484 * Find the chunk this label belongs to and
485 * deallocate the label
486 */
487 for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
488 uint32_t index;
489
490 if ((label < chunk->first) ||
491 (label > chunk->last))
492 continue;
493
494 index = label - chunk->first;
495 assert(bf_test_index(chunk->allocated_map,
496 index));
497 bf_release_index(chunk->allocated_map, index);
498 chunk->nfree += 1;
499 deallocated = true;
500 }
501 assert(deallocated);
502 }
503 }
504 }
505
506 /*
507 * zebra response giving us a chunk of labels
508 */
509 void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
510 {
511 struct lp_chunk *chunk;
512 int debug = BGP_DEBUG(labelpool, LABELPOOL);
513 struct lp_fifo *lf;
514 uint32_t labelcount;
515
516 if (last < first) {
517 flog_err(EC_BGP_LABEL,
518 "%s: zebra label chunk invalid: first=%u, last=%u",
519 __func__, first, last);
520 return;
521 }
522
523 chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));
524
525 labelcount = last - first + 1;
526
527 chunk->first = first;
528 chunk->last = last;
529 chunk->nfree = labelcount;
530 bf_init(chunk->allocated_map, labelcount);
531
532 /*
533 * Optimize for allocation by adding the new (presumably larger)
534 * chunk at the head of the list so it is examined first.
535 */
536 listnode_add_head(lp->chunks, chunk);
537
538 lp->pending_count -= labelcount;
539
540 if (debug) {
541 zlog_debug("%s: %zu pending requests", __func__,
542 lp_fifo_count(&lp->requests));
543 }
544
545 while (labelcount && (lf = lp_fifo_first(&lp->requests))) {
546
547 struct lp_lcb *lcb;
548 void *labelid = lf->lcb.labelid;
549
550 if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
551 /* request no longer in effect */
552
553 if (debug) {
554 zlog_debug("%s: labelid %p: request no longer in effect",
555 __func__, labelid);
556 }
557 /* if this was a BGP_LU request, unlock node
558 */
559 check_bgp_lu_cb_unlock(lcb);
560 goto finishedrequest;
561 }
562
563 /* have LCB */
564 if (lcb->label != MPLS_LABEL_NONE) {
565 /* request already has a label */
566 if (debug) {
567 zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
568 __func__, labelid,
569 lcb->label, lcb->label, lcb);
570 }
571 /* if this was a BGP_LU request, unlock node
572 */
573 check_bgp_lu_cb_unlock(lcb);
574
575 goto finishedrequest;
576 }
577
578 lcb->label = get_label_from_pool(lcb->labelid);
579
580 if (lcb->label == MPLS_LABEL_NONE) {
581 /*
582 * Out of labels in local pool, await next chunk
583 */
584 if (debug) {
585 zlog_debug("%s: out of labels, await more",
586 __func__);
587 }
588 break;
589 }
590
591 labelcount -= 1;
592
593 /*
594 * we filled the request from local pool.
595 * Enqueue response work item with new label.
596 */
597 struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
598 sizeof(struct lp_cbq_item));
599
600 q->cbfunc = lcb->cbfunc;
601 q->type = lcb->type;
602 q->label = lcb->label;
603 q->labelid = lcb->labelid;
604 q->allocated = true;
605
606 if (debug)
607 zlog_debug("%s: assigning label %u to labelid %p",
608 __func__, q->label, q->labelid);
609
610 work_queue_add(lp->callback_q, q);
611
612 finishedrequest:
613 lp_fifo_del(&lp->requests, lf);
614 XFREE(MTYPE_BGP_LABEL_FIFO, lf);
615 }
616 }
617
618 /*
619 * continue using allocated labels until zebra returns
620 */
621 void bgp_lp_event_zebra_down(void)
622 {
623 /* rats. */
624 }
625
626 /*
627 * Inform owners of previously-allocated labels that their labels
628 * are not valid. Request chunk from zebra large enough to satisfy
629 * previously-allocated labels plus any outstanding requests.
630 */
631 void bgp_lp_event_zebra_up(void)
632 {
633 unsigned int labels_needed;
634 unsigned int chunks_needed;
635 void *labelid;
636 struct lp_lcb *lcb;
637 int lm_init_ok;
638
639 lp->reconnect_count++;
640 /*
641 * Get label chunk allocation request dispatched to zebra
642 */
643 labels_needed = lp_fifo_count(&lp->requests) +
644 skiplist_count(lp->inuse);
645
646 if (labels_needed > lp->next_chunksize) {
647 while ((lp->next_chunksize < labels_needed) &&
648 (lp->next_chunksize << 1 <= LP_CHUNK_SIZE_MAX))
649
650 lp->next_chunksize <<= 1;
651 }
652
653 /* round up */
654 chunks_needed = (labels_needed / lp->next_chunksize) + 1;
655 labels_needed = chunks_needed * lp->next_chunksize;
656
657 lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;
658
659 if (!lm_init_ok) {
660 zlog_err("%s: label manager connection error", __func__);
661 return;
662 }
663
664 zclient_send_get_label_chunk(zclient, 0, labels_needed,
665 MPLS_LABEL_BASE_ANY);
666 lp->pending_count = labels_needed;
667
668 /*
669 * Invalidate current list of chunks
670 */
671 list_delete_all_node(lp->chunks);
672
673 /*
674 * Invalidate any existing labels and requeue them as requests
675 */
676 while (!skiplist_first(lp->inuse, NULL, &labelid)) {
677
678 /*
679 * Get LCB
680 */
681 if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
682
683 if (lcb->label != MPLS_LABEL_NONE) {
684 /*
685 * invalidate
686 */
687 struct lp_cbq_item *q;
688
689 q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
690 sizeof(struct lp_cbq_item));
691 q->cbfunc = lcb->cbfunc;
692 q->type = lcb->type;
693 q->label = lcb->label;
694 q->labelid = lcb->labelid;
695 q->allocated = false;
696 check_bgp_lu_cb_lock(lcb);
697 work_queue_add(lp->callback_q, q);
698
699 lcb->label = MPLS_LABEL_NONE;
700 }
701
702 /*
703 * request queue
704 */
705 struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
706 sizeof(struct lp_fifo));
707
708 lf->lcb = *lcb;
709 check_bgp_lu_cb_lock(lcb);
710 lp_fifo_add_tail(&lp->requests, lf);
711 }
712
713 skiplist_delete_first(lp->inuse);
714 }
715 }
716
717 DEFUN(show_bgp_labelpool_summary, show_bgp_labelpool_summary_cmd,
718 "show bgp labelpool summary [json]",
719 SHOW_STR BGP_STR
720 "BGP Labelpool information\n"
721 "BGP Labelpool summary\n" JSON_STR)
722 {
723 bool uj = use_json(argc, argv);
724 json_object *json = NULL;
725
726 if (!lp) {
727 if (uj)
728 vty_out(vty, "{}\n");
729 else
730 vty_out(vty, "No existing BGP labelpool\n");
731 return (CMD_WARNING);
732 }
733
734 if (uj) {
735 json = json_object_new_object();
736 json_object_int_add(json, "ledger", skiplist_count(lp->ledger));
737 json_object_int_add(json, "inUse", skiplist_count(lp->inuse));
738 json_object_int_add(json, "requests",
739 lp_fifo_count(&lp->requests));
740 json_object_int_add(json, "labelChunks", listcount(lp->chunks));
741 json_object_int_add(json, "pending", lp->pending_count);
742 json_object_int_add(json, "reconnects", lp->reconnect_count);
743 vty_json(vty, json);
744 } else {
745 vty_out(vty, "Labelpool Summary\n");
746 vty_out(vty, "-----------------\n");
747 vty_out(vty, "%-13s %d\n",
748 "Ledger:", skiplist_count(lp->ledger));
749 vty_out(vty, "%-13s %d\n", "InUse:", skiplist_count(lp->inuse));
750 vty_out(vty, "%-13s %zu\n",
751 "Requests:", lp_fifo_count(&lp->requests));
752 vty_out(vty, "%-13s %d\n",
753 "LabelChunks:", listcount(lp->chunks));
754 vty_out(vty, "%-13s %d\n", "Pending:", lp->pending_count);
755 vty_out(vty, "%-13s %d\n", "Reconnects:", lp->reconnect_count);
756 }
757 return CMD_SUCCESS;
758 }
759
/* "show bgp labelpool ledger": every requested label, by labelid */
DEFUN(show_bgp_labelpool_ledger, show_bgp_labelpool_ledger_cmd,
      "show bgp labelpool ledger [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool ledger\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct lp_lcb *lcb = NULL;
	struct bgp_dest *dest;
	void *cursor = NULL;
	const struct prefix *p;
	int rc, count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = skiplist_count(lp->ledger);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix                Label\n");
		vty_out(vty, "---------------------------\n");
	}

	/* ledger keys are labelids; for BGP-LU those are bgp_dest pointers */
	for (rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
				&cursor);
	     !rc; rc = skiplist_next(lp->ledger, (void **)&dest, (void **)&lcb,
				     &cursor)) {
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (lcb->type) {
		case LP_TYPE_BGP_LU:
			/* dest is only a valid bgp_dest when the node still
			 * carries the label-requested flag */
			if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
				if (uj) {
					json_object_string_add(
						json_elem, "prefix", "INVALID");
					json_object_int_add(json_elem, "label",
							    lcb->label);
				} else
					vty_out(vty, "%-18s         %u\n",
						"INVALID", lcb->label);
			else {
				p = bgp_dest_get_prefix(dest);
				if (uj) {
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
					json_object_int_add(json_elem, "label",
							    lcb->label);
				} else
					vty_out(vty, "%-18pFX    %u\n", p,
						lcb->label);
			}
			break;
		case LP_TYPE_VRF:
			if (uj) {
				json_object_string_add(json_elem, "prefix",
						       "VRF");
				json_object_int_add(json_elem, "label",
						    lcb->label);
			} else
				vty_out(vty, "%-18s         %u\n", "VRF",
					lcb->label);

			break;
		case LP_TYPE_NEXTHOP:
			if (uj) {
				json_object_string_add(json_elem, "prefix",
						       "nexthop");
				json_object_int_add(json_elem, "label",
						    lcb->label);
			} else
				vty_out(vty, "%-18s         %u\n", "nexthop",
					lcb->label);
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
852
853 DEFUN(show_bgp_labelpool_inuse, show_bgp_labelpool_inuse_cmd,
854 "show bgp labelpool inuse [json]",
855 SHOW_STR BGP_STR
856 "BGP Labelpool information\n"
857 "BGP Labelpool inuse\n" JSON_STR)
858 {
859 bool uj = use_json(argc, argv);
860 json_object *json = NULL, *json_elem = NULL;
861 struct bgp_dest *dest;
862 mpls_label_t label;
863 struct lp_lcb *lcb;
864 void *cursor = NULL;
865 const struct prefix *p;
866 int rc, count;
867
868 if (!lp) {
869 vty_out(vty, "No existing BGP labelpool\n");
870 return (CMD_WARNING);
871 }
872 if (!lp) {
873 if (uj)
874 vty_out(vty, "{}\n");
875 else
876 vty_out(vty, "No existing BGP labelpool\n");
877 return (CMD_WARNING);
878 }
879
880 if (uj) {
881 count = skiplist_count(lp->inuse);
882 if (!count) {
883 vty_out(vty, "{}\n");
884 return CMD_SUCCESS;
885 }
886 json = json_object_new_array();
887 } else {
888 vty_out(vty, "Prefix Label\n");
889 vty_out(vty, "---------------------------\n");
890 }
891 for (rc = skiplist_next(lp->inuse, (void **)&label, (void **)&dest,
892 &cursor);
893 !rc; rc = skiplist_next(lp->ledger, (void **)&label,
894 (void **)&dest, &cursor)) {
895 if (skiplist_search(lp->ledger, dest, (void **)&lcb))
896 continue;
897
898 if (uj) {
899 json_elem = json_object_new_object();
900 json_object_array_add(json, json_elem);
901 }
902
903 switch (lcb->type) {
904 case LP_TYPE_BGP_LU:
905 if (!CHECK_FLAG(dest->flags, BGP_NODE_LABEL_REQUESTED))
906 if (uj) {
907 json_object_string_add(
908 json_elem, "prefix", "INVALID");
909 json_object_int_add(json_elem, "label",
910 label);
911 } else
912 vty_out(vty, "INVALID %u\n",
913 label);
914 else {
915 p = bgp_dest_get_prefix(dest);
916 if (uj) {
917 json_object_string_addf(
918 json_elem, "prefix", "%pFX", p);
919 json_object_int_add(json_elem, "label",
920 label);
921 } else
922 vty_out(vty, "%-18pFX %u\n", p,
923 label);
924 }
925 break;
926 case LP_TYPE_VRF:
927 if (uj) {
928 json_object_string_add(json_elem, "prefix",
929 "VRF");
930 json_object_int_add(json_elem, "label", label);
931 } else
932 vty_out(vty, "%-18s %u\n", "VRF",
933 label);
934 break;
935 case LP_TYPE_NEXTHOP:
936 if (uj) {
937 json_object_string_add(json_elem, "prefix",
938 "nexthop");
939 json_object_int_add(json_elem, "label", label);
940 } else
941 vty_out(vty, "%-18s %u\n", "nexthop",
942 label);
943 break;
944 }
945 }
946 if (uj)
947 vty_json(vty, json);
948 return CMD_SUCCESS;
949 }
950
/* "show bgp labelpool requests": queued requests awaiting a chunk */
DEFUN(show_bgp_labelpool_requests, show_bgp_labelpool_requests_cmd,
      "show bgp labelpool requests [json]",
      SHOW_STR BGP_STR
      "BGP Labelpool information\n"
      "BGP Labelpool requests\n" JSON_STR)
{
	bool uj = use_json(argc, argv);
	json_object *json = NULL, *json_elem = NULL;
	struct bgp_dest *dest;
	const struct prefix *p;
	struct lp_fifo *item, *next;
	int count;

	if (!lp) {
		if (uj)
			vty_out(vty, "{}\n");
		else
			vty_out(vty, "No existing BGP labelpool\n");
		return (CMD_WARNING);
	}

	if (uj) {
		count = lp_fifo_count(&lp->requests);
		if (!count) {
			vty_out(vty, "{}\n");
			return CMD_SUCCESS;
		}
		json = json_object_new_array();
	} else {
		vty_out(vty, "Prefix \n");
		vty_out(vty, "----------------\n");
	}

	/* safe iteration: next is captured before any element is visited */
	for (item = lp_fifo_first(&lp->requests); item; item = next) {
		next = lp_fifo_next_safe(&lp->requests, item);
		/* labelid is a bgp_dest only for LP_TYPE_BGP_LU entries */
		dest = item->lcb.labelid;
		if (uj) {
			json_elem = json_object_new_object();
			json_object_array_add(json, json_elem);
		}
		switch (item->lcb.type) {
		case LP_TYPE_BGP_LU:
			if (!CHECK_FLAG(dest->flags,
					BGP_NODE_LABEL_REQUESTED)) {
				if (uj)
					json_object_string_add(
						json_elem, "prefix", "INVALID");
				else
					vty_out(vty, "INVALID\n");
			} else {
				p = bgp_dest_get_prefix(dest);
				if (uj)
					json_object_string_addf(
						json_elem, "prefix", "%pFX", p);
				else
					vty_out(vty, "%-18pFX\n", p);
			}
			break;
		case LP_TYPE_VRF:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "VRF");
			else
				vty_out(vty, "VRF\n");
			break;
		case LP_TYPE_NEXTHOP:
			if (uj)
				json_object_string_add(json_elem, "prefix",
						       "nexthop");
			else
				vty_out(vty, "Nexthop\n");
			break;
		}
	}
	if (uj)
		vty_json(vty, json);
	return CMD_SUCCESS;
}
1029
1030 DEFUN(show_bgp_labelpool_chunks, show_bgp_labelpool_chunks_cmd,
1031 "show bgp labelpool chunks [json]",
1032 SHOW_STR BGP_STR
1033 "BGP Labelpool information\n"
1034 "BGP Labelpool chunks\n" JSON_STR)
1035 {
1036 bool uj = use_json(argc, argv);
1037 json_object *json = NULL, *json_elem;
1038 struct listnode *node;
1039 struct lp_chunk *chunk;
1040 int count;
1041
1042 if (!lp) {
1043 if (uj)
1044 vty_out(vty, "{}\n");
1045 else
1046 vty_out(vty, "No existing BGP labelpool\n");
1047 return (CMD_WARNING);
1048 }
1049
1050 if (uj) {
1051 count = listcount(lp->chunks);
1052 if (!count) {
1053 vty_out(vty, "{}\n");
1054 return CMD_SUCCESS;
1055 }
1056 json = json_object_new_array();
1057 } else {
1058 vty_out(vty, "%10s %10s %10s %10s\n", "First", "Last", "Size",
1059 "nfree");
1060 vty_out(vty, "-------------------------------------------\n");
1061 }
1062
1063 for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
1064 uint32_t size;
1065
1066 size = chunk->last - chunk->first + 1;
1067
1068 if (uj) {
1069 json_elem = json_object_new_object();
1070 json_object_array_add(json, json_elem);
1071 json_object_int_add(json_elem, "first", chunk->first);
1072 json_object_int_add(json_elem, "last", chunk->last);
1073 json_object_int_add(json_elem, "size", size);
1074 json_object_int_add(json_elem, "numberFree",
1075 chunk->nfree);
1076 } else
1077 vty_out(vty, "%10u %10u %10u %10u\n", chunk->first,
1078 chunk->last, size, chunk->nfree);
1079 }
1080 if (uj)
1081 vty_json(vty, json);
1082 return CMD_SUCCESS;
1083 }
1084
/*
 * Dump the per-nexthop label cache for one AFI of one bgp instance;
 * with detail, also list each path using the cached nexthop.
 */
static void show_bgp_nexthop_label_afi(struct vty *vty, afi_t afi,
				       struct bgp *bgp, bool detail)
{
	struct bgp_label_per_nexthop_cache_head *tree;
	struct bgp_label_per_nexthop_cache *iter;
	safi_t safi;
	void *src;
	char buf[PREFIX2STR_BUFFER];
	char labelstr[MPLS_LABEL_STRLEN];
	struct bgp_dest *dest;
	struct bgp_path_info *path;
	struct bgp *bgp_path;
	struct bgp_table *table;
	time_t tbuf;

	vty_out(vty, "Current BGP label nexthop cache for %s, VRF %s\n",
		afi2str(afi), bgp->name_pretty);

	tree = &bgp->mpls_labels_per_nexthop[afi];
	frr_each (bgp_label_per_nexthop_cache, tree, iter) {
		if (afi2family(afi) == AF_INET)
			src = (void *)&iter->nexthop.u.prefix4;
		else
			src = (void *)&iter->nexthop.u.prefix6;

		vty_out(vty, " %s, label %s #paths %u\n",
			inet_ntop(afi2family(afi), src, buf, sizeof(buf)),
			mpls_label2str(1, &iter->label, labelstr,
				       sizeof(labelstr), 0, true),
			iter->path_count);
		if (iter->nh)
			vty_out(vty, "  if %s\n",
				ifindex2ifname(iter->nh->ifindex,
					       iter->nh->vrf_id));
		/* convert monotonic timestamp to wall-clock for display;
		 * NOTE(review): ctime() is not reentrant */
		tbuf = time(NULL) - (monotime(NULL) - iter->last_update);
		vty_out(vty, "  Last update: %s", ctime(&tbuf));
		if (!detail)
			continue;
		vty_out(vty, "  Paths:\n");
		LIST_FOREACH (path, &(iter->paths), label_nh_thread) {
			dest = path->net;
			table = bgp_dest_table(dest);
			/* NOTE(review): assert fires only after dest was
			 * already dereferenced by bgp_dest_table() above */
			assert(dest && table);
			/* NB: reuses the afi parameter for the path's AFI */
			afi = family2afi(bgp_dest_get_prefix(dest)->family);
			safi = table->safi;
			bgp_path = table->bgp;

			if (dest->pdest) {
				vty_out(vty, "    %d/%d %pBD RD ", afi, safi,
					dest);

				vty_out(vty, BGP_RD_AS_FORMAT(bgp->asnotation),
					(struct prefix_rd *)bgp_dest_get_prefix(
						dest->pdest));
				vty_out(vty, " %s flags 0x%x\n",
					bgp_path->name_pretty, path->flags);
			} else
				vty_out(vty, "    %d/%d %pBD %s flags 0x%x\n",
					afi, safi, dest, bgp_path->name_pretty,
					path->flags);
		}
	}
}
1148
1149 DEFPY(show_bgp_nexthop_label, show_bgp_nexthop_label_cmd,
1150 "show bgp [<view|vrf> VIEWVRFNAME] label-nexthop [detail]",
1151 SHOW_STR BGP_STR BGP_INSTANCE_HELP_STR
1152 "BGP label per-nexthop table\n"
1153 "Show detailed information\n")
1154 {
1155 int idx = 0;
1156 char *vrf = NULL;
1157 struct bgp *bgp;
1158 bool detail = false;
1159 int afi;
1160
1161 if (argv_find(argv, argc, "vrf", &idx)) {
1162 vrf = argv[++idx]->arg;
1163 bgp = bgp_lookup_by_name(vrf);
1164 } else
1165 bgp = bgp_get_default();
1166
1167 if (!bgp)
1168 return CMD_SUCCESS;
1169
1170 if (argv_find(argv, argc, "detail", &idx))
1171 detail = true;
1172
1173 for (afi = AFI_IP; afi <= AFI_IP6; afi++)
1174 show_bgp_nexthop_label_afi(vty, afi, bgp, detail);
1175 return CMD_SUCCESS;
1176 }
1177
1178 #if BGP_LABELPOOL_ENABLE_TESTS
1179 /*------------------------------------------------------------------------
1180 * Testing code start
1181 *------------------------------------------------------------------------*/
1182
/* Memory type for per-test control blocks (struct lp_test) */
DEFINE_MTYPE_STATIC(BGPD, LABELPOOL_TEST, "Label pool test");

/* Indices into struct lp_test's counter[] array */
#define LPT_STAT_INSERT_FAIL 0
#define LPT_STAT_DELETE_FAIL 1
#define LPT_STAT_ALLOCATED 2
#define LPT_STAT_DEALLOCATED 3
#define LPT_STAT_MAX 4

/* Human-readable names for the counters above, same order */
const char *lpt_counter_names[] = {
	"sl insert failures",
	"sl delete failures",
	"labels allocated",
	"labels deallocated",
};

/* Monotonically increasing id of the current/most recent test run */
static uint8_t lpt_generation;
/* True while a test run is active */
static bool lpt_inprogress;
/* Registry of test control blocks, keyed by generation */
static struct skiplist *lp_tests;
/* Diagnostic counters: failed tcb lookups, per call site */
static unsigned int lpt_test_cb_tcb_lookup_fails;
static unsigned int lpt_release_tcb_lookup_fails;
static unsigned int lpt_test_event_tcb_lookup_fails;
static unsigned int lpt_stop_tcb_lookup_fails;

/* Per-test-run control block */
struct lp_test {
	uint8_t generation;		 /* which test run this is */
	unsigned int request_maximum;	 /* total labels to request */
	unsigned int request_blocksize;	 /* labels per event-loop pass */
	uintptr_t request_count; /* match type of labelid */
	int label_type;			 /* LP_TYPE_* used for requests */
	struct skiplist *labels;	 /* labelid -> allocated label */
	struct timeval starttime;	 /* for elapsed-time timestamps */
	struct skiplist *timestamps_alloc;   /* alloc count -> elapsed ms */
	struct skiplist *timestamps_dealloc; /* dealloc count -> elapsed ms */
	struct event *event_thread;	 /* pending request-batch event */
	unsigned int counter[LPT_STAT_MAX]; /* LPT_STAT_* counters */
};

/* test parameters */
#define LPT_MAX_COUNT 500000 /* get this many labels in all */
#define LPT_BLKSIZE 10000 /* this many at a time, then yield */
#define LPT_TS_INTERVAL 10000 /* timestamp every this many labels */
1225
1226 static int test_cb(mpls_label_t label, void *labelid, bool allocated)
1227 {
1228 uintptr_t generation;
1229 struct lp_test *tcb;
1230
1231 generation = ((uintptr_t)labelid >> 24) & 0xff;
1232
1233 if (skiplist_search(lp_tests, (void *)generation, (void **)&tcb)) {
1234
1235 /* couldn't find current test in progress */
1236 ++lpt_test_cb_tcb_lookup_fails;
1237 return -1; /* reject allocation */
1238 }
1239
1240 if (allocated) {
1241 ++tcb->counter[LPT_STAT_ALLOCATED];
1242 if (!(tcb->counter[LPT_STAT_ALLOCATED] % LPT_TS_INTERVAL)) {
1243 uintptr_t time_ms;
1244
1245 time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
1246 skiplist_insert(tcb->timestamps_alloc,
1247 (void *)(uintptr_t)tcb
1248 ->counter[LPT_STAT_ALLOCATED],
1249 (void *)time_ms);
1250 }
1251 if (skiplist_insert(tcb->labels, labelid,
1252 (void *)(uintptr_t)label)) {
1253 ++tcb->counter[LPT_STAT_INSERT_FAIL];
1254 return -1;
1255 }
1256 } else {
1257 ++tcb->counter[LPT_STAT_DEALLOCATED];
1258 if (!(tcb->counter[LPT_STAT_DEALLOCATED] % LPT_TS_INTERVAL)) {
1259 uintptr_t time_ms;
1260
1261 time_ms = monotime_since(&tcb->starttime, NULL) / 1000;
1262 skiplist_insert(tcb->timestamps_dealloc,
1263 (void *)(uintptr_t)tcb
1264 ->counter[LPT_STAT_ALLOCATED],
1265 (void *)time_ms);
1266 }
1267 if (skiplist_delete(tcb->labels, labelid, 0)) {
1268 ++tcb->counter[LPT_STAT_DELETE_FAIL];
1269 return -1;
1270 }
1271 }
1272 return 0;
1273 }
1274
1275 static void labelpool_test_event_handler(struct event *thread)
1276 {
1277 struct lp_test *tcb;
1278
1279 if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1280 (void **)&tcb)) {
1281
1282 /* couldn't find current test in progress */
1283 ++lpt_test_event_tcb_lookup_fails;
1284 return;
1285 }
1286
1287 /*
1288 * request a bunch of labels
1289 */
1290 for (unsigned int i = 0; (i < tcb->request_blocksize) &&
1291 (tcb->request_count < tcb->request_maximum);
1292 ++i) {
1293
1294 uintptr_t id;
1295
1296 ++tcb->request_count;
1297
1298 /*
1299 * construct 32-bit id from request_count and generation
1300 */
1301 id = ((uintptr_t)tcb->generation << 24) |
1302 (tcb->request_count & 0x00ffffff);
1303 bgp_lp_get(LP_TYPE_VRF, (void *)id, test_cb);
1304 }
1305
1306 if (tcb->request_count < tcb->request_maximum)
1307 thread_add_event(bm->master, labelpool_test_event_handler, NULL,
1308 0, &tcb->event_thread);
1309 }
1310
1311 static void lptest_stop(void)
1312 {
1313 struct lp_test *tcb;
1314
1315 if (!lpt_inprogress)
1316 return;
1317
1318 if (skiplist_search(lp_tests, (void *)(uintptr_t)(lpt_generation),
1319 (void **)&tcb)) {
1320
1321 /* couldn't find current test in progress */
1322 ++lpt_stop_tcb_lookup_fails;
1323 return;
1324 }
1325
1326 if (tcb->event_thread)
1327 event_cancel(&tcb->event_thread);
1328
1329 lpt_inprogress = false;
1330 }
1331
/*
 * Start a new label pool performance test run.
 *
 * Allocates and registers a fresh test control block under the next
 * generation number and schedules the first request batch.  Refuses to
 * start when a test is already running, when the generation space is
 * exhausted, or when LPT_MAX_COUNT does not fit in the labelid bits
 * left over after the generation byte.
 *
 * Returns 0 on success, -1 on refusal (error text goes to the vty).
 */
static int lptest_start(struct vty *vty)
{
	struct lp_test *tcb;

	if (lpt_inprogress) {
		vty_out(vty, "test already in progress\n");
		return -1;
	}

	/* generation is a uint8_t key; 255 distinct runs maximum */
	if (skiplist_count(lp_tests) >=
	    (1 << (8 * sizeof(lpt_generation))) - 1) {
		/*
		 * Too many test runs
		 */
		vty_out(vty, "too many tests: clear first\n");
		return -1;
	}

	/*
	 * We pack the generation and request number into the labelid;
	 * make sure they fit.
	 */
	unsigned int n1 = LPT_MAX_COUNT;
	unsigned int sh = 0;
	unsigned int label_bits;

	/* bits available for the request number after the generation byte */
	label_bits = 8 * (sizeof(tcb->request_count) - sizeof(lpt_generation));

	/* n1 should be same type as tcb->request_maximum */
	assert(sizeof(n1) == sizeof(tcb->request_maximum));

	/* count bits needed to represent LPT_MAX_COUNT */
	while (n1 >>= 1)
		++sh;
	sh += 1; /* number of bits needed to hold LPT_MAX_COUNT */

	if (sh > label_bits) {
		vty_out(vty,
			"Sorry, test iteration count too big on this platform (LPT_MAX_COUNT %u, need %u bits, but label_bits is only %u)\n",
			LPT_MAX_COUNT, sh, label_bits);
		return -1;
	}

	lpt_inprogress = true;
	++lpt_generation;

	tcb = XCALLOC(MTYPE_LABELPOOL_TEST, sizeof(*tcb));

	tcb->generation = lpt_generation;
	tcb->label_type = LP_TYPE_VRF;
	tcb->request_maximum = LPT_MAX_COUNT;
	tcb->request_blocksize = LPT_BLKSIZE;
	tcb->labels = skiplist_new(0, NULL, NULL);
	tcb->timestamps_alloc = skiplist_new(0, NULL, NULL);
	tcb->timestamps_dealloc = skiplist_new(0, NULL, NULL);
	/* kick off the first batch of label requests */
	thread_add_event(bm->master, labelpool_test_event_handler, NULL, 0,
			 &tcb->event_thread);
	monotime(&tcb->starttime);

	skiplist_insert(lp_tests, (void *)(uintptr_t)tcb->generation, tcb);
	return 0;
}
1393
1394 DEFPY(start_labelpool_perf_test, start_labelpool_perf_test_cmd,
1395 "debug bgp lptest start",
1396 DEBUG_STR BGP_STR
1397 "label pool test\n"
1398 "start\n")
1399 {
1400 lptest_start(vty);
1401 return CMD_SUCCESS;
1402 }
1403
1404 static void lptest_print_stats(struct vty *vty, struct lp_test *tcb)
1405 {
1406 unsigned int i;
1407
1408 vty_out(vty, "Global Lookup Failures in test_cb: %5u\n",
1409 lpt_test_cb_tcb_lookup_fails);
1410 vty_out(vty, "Global Lookup Failures in release: %5u\n",
1411 lpt_release_tcb_lookup_fails);
1412 vty_out(vty, "Global Lookup Failures in event: %5u\n",
1413 lpt_test_event_tcb_lookup_fails);
1414 vty_out(vty, "Global Lookup Failures in stop: %5u\n",
1415 lpt_stop_tcb_lookup_fails);
1416 vty_out(vty, "\n");
1417
1418 if (!tcb) {
1419 if (skiplist_search(lp_tests, (void *)(uintptr_t)lpt_generation,
1420 (void **)&tcb)) {
1421 vty_out(vty, "Error: can't find test %u\n",
1422 lpt_generation);
1423 return;
1424 }
1425 }
1426
1427 vty_out(vty, "Test Generation %u:\n", tcb->generation);
1428
1429 vty_out(vty, "Counter Value\n");
1430 for (i = 0; i < LPT_STAT_MAX; ++i) {
1431 vty_out(vty, "%20s: %10u\n", lpt_counter_names[i],
1432 tcb->counter[i]);
1433 }
1434 vty_out(vty, "\n");
1435
1436 if (tcb->timestamps_alloc) {
1437 void *Key;
1438 void *Value;
1439 void *cursor;
1440
1441 float elapsed;
1442
1443 vty_out(vty, "%10s %10s\n", "Count", "Seconds");
1444
1445 cursor = NULL;
1446 while (!skiplist_next(tcb->timestamps_alloc, &Key, &Value,
1447 &cursor)) {
1448
1449 elapsed = ((float)(uintptr_t)Value) / 1000;
1450
1451 vty_out(vty, "%10llu %10.3f\n",
1452 (unsigned long long)(uintptr_t)Key, elapsed);
1453 }
1454 vty_out(vty, "\n");
1455 }
1456 }
1457
1458 DEFPY(show_labelpool_perf_test, show_labelpool_perf_test_cmd,
1459 "debug bgp lptest show",
1460 DEBUG_STR BGP_STR
1461 "label pool test\n"
1462 "show\n")
1463 {
1464
1465 if (lp_tests) {
1466 void *Key;
1467 void *Value;
1468 void *cursor;
1469
1470 cursor = NULL;
1471 while (!skiplist_next(lp_tests, &Key, &Value, &cursor)) {
1472 lptest_print_stats(vty, (struct lp_test *)Value);
1473 }
1474 } else {
1475 vty_out(vty, "no test results\n");
1476 }
1477 return CMD_SUCCESS;
1478 }
1479
1480 DEFPY(stop_labelpool_perf_test, stop_labelpool_perf_test_cmd,
1481 "debug bgp lptest stop",
1482 DEBUG_STR BGP_STR
1483 "label pool test\n"
1484 "stop\n")
1485 {
1486
1487 if (lpt_inprogress) {
1488 lptest_stop();
1489 lptest_print_stats(vty, NULL);
1490 } else {
1491 vty_out(vty, "no test in progress\n");
1492 }
1493 return CMD_SUCCESS;
1494 }
1495
1496 DEFPY(clear_labelpool_perf_test, clear_labelpool_perf_test_cmd,
1497 "debug bgp lptest clear",
1498 DEBUG_STR BGP_STR
1499 "label pool test\n"
1500 "clear\n")
1501 {
1502
1503 if (lpt_inprogress) {
1504 lptest_stop();
1505 }
1506 if (lp_tests) {
1507 while (!skiplist_first(lp_tests, NULL, NULL))
1508 /* del function of skiplist cleans up tcbs */
1509 skiplist_delete_first(lp_tests);
1510 }
1511 return CMD_SUCCESS;
1512 }
1513
/*
 * With the "release" command, we can release labels at intervals through
 * the ID space. Thus we can exercise the bitfield-wrapping behavior
 * of the allocator in a subsequent test.
 */
/* clang-format off */
DEFPY(release_labelpool_perf_test, release_labelpool_perf_test_cmd,
      "debug bgp lptest release test GENERATION$generation every (1-5)$every_nth",
      DEBUG_STR
      BGP_STR
      "label pool test\n"
      "release labels\n"
      "\"test\"\n"
      "test number\n"
      "\"every\"\n"
      "label fraction denominator\n")
{
	/* clang-format on */

	unsigned long testnum;
	char *end;
	struct lp_test *tcb;

	/* parse and validate the requested test generation number */
	testnum = strtoul(generation, &end, 0);
	if (*end) {
		vty_out(vty, "Invalid test number: \"%s\"\n", generation);
		return CMD_SUCCESS;
	}
	if (lpt_inprogress && (testnum == lpt_generation)) {
		vty_out(vty,
			"Error: Test %lu is still in progress (stop first)\n",
			testnum);
		return CMD_SUCCESS;
	}

	if (skiplist_search(lp_tests, (void *)(uintptr_t)testnum,
			    (void **)&tcb)) {

		/* couldn't find current test in progress */
		vty_out(vty, "Error: Can't look up test number: \"%lu\"\n",
			testnum);
		++lpt_release_tcb_lookup_fails;
		return CMD_SUCCESS;
	}

	void *Key, *cKey;
	void *Value, *cValue;
	void *cursor;
	unsigned int iteration;
	int rc;

	cursor = NULL;
	iteration = 0;
	rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

	/* walk the held labels, releasing every every_nth-th one */
	while (!rc) {
		cKey = Key;
		cValue = Value;

		/* find next item before we delete this one */
		rc = skiplist_next(tcb->labels, &Key, &Value, &cursor);

		if (!(iteration % every_nth)) {
			bgp_lp_release(tcb->label_type, cKey,
				       (mpls_label_t)(uintptr_t)cValue);
			skiplist_delete(tcb->labels, cKey, NULL);
			++tcb->counter[LPT_STAT_DEALLOCATED];
		}
		++iteration;
	}

	return CMD_SUCCESS;
}
1587
1588 static void lptest_delete(void *val)
1589 {
1590 struct lp_test *tcb = (struct lp_test *)val;
1591 void *Key;
1592 void *Value;
1593 void *cursor;
1594
1595 if (tcb->labels) {
1596 cursor = NULL;
1597 while (!skiplist_next(tcb->labels, &Key, &Value, &cursor))
1598 bgp_lp_release(tcb->label_type, Key,
1599 (mpls_label_t)(uintptr_t)Value);
1600 skiplist_free(tcb->labels);
1601 tcb->labels = NULL;
1602 }
1603 if (tcb->timestamps_alloc) {
1604 cursor = NULL;
1605 skiplist_free(tcb->timestamps_alloc);
1606 tcb->timestamps_alloc = NULL;
1607 }
1608
1609 if (tcb->timestamps_dealloc) {
1610 cursor = NULL;
1611 skiplist_free(tcb->timestamps_dealloc);
1612 tcb->timestamps_dealloc = NULL;
1613 }
1614
1615 if (tcb->event_thread)
1616 event_cancel(&tcb->event_thread);
1617
1618 memset(tcb, 0, sizeof(*tcb));
1619
1620 XFREE(MTYPE_LABELPOOL_TEST, tcb);
1621 }
1622
/* Create the registry of test control blocks, keyed by generation;
 * lptest_delete() is installed as the per-entry cleanup function.
 */
static void lptest_init(void)
{
	lp_tests = skiplist_new(0, NULL, lptest_delete);
}
1627
1628 static void lptest_finish(void)
1629 {
1630 if (lp_tests) {
1631 skiplist_free(lp_tests);
1632 lp_tests = NULL;
1633 }
1634 }
1635
1636 /*------------------------------------------------------------------------
1637 * Testing code end
1638 *------------------------------------------------------------------------*/
1639 #endif /* BGP_LABELPOOL_ENABLE_TESTS */
1640
/* Register the label pool "show" commands and, when the test code is
 * compiled in, the performance-test debug commands.
 */
void bgp_lp_vty_init(void)
{
	install_element(VIEW_NODE, &show_bgp_labelpool_summary_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_ledger_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_inuse_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_requests_cmd);
	install_element(VIEW_NODE, &show_bgp_labelpool_chunks_cmd);

#if BGP_LABELPOOL_ENABLE_TESTS
	install_element(ENABLE_NODE, &start_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &show_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &stop_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &release_labelpool_perf_test_cmd);
	install_element(ENABLE_NODE, &clear_labelpool_perf_test_cmd);
#endif /* BGP_LABELPOOL_ENABLE_TESTS */
}
1657
1658 DEFINE_MTYPE_STATIC(BGPD, LABEL_PER_NEXTHOP_CACHE,
1659 "BGP Label Per Nexthop entry");
1660
/* The nexthops values are compared to
 * find in the tree the appropriate cache entry
 */
int bgp_label_per_nexthop_cache_cmp(const struct bgp_label_per_nexthop_cache *a,
				    const struct bgp_label_per_nexthop_cache *b)
{
	/* order cache entries by their nexthop prefix */
	return prefix_cmp(&a->nexthop, &b->nexthop);
}
1669
1670 struct bgp_label_per_nexthop_cache *
1671 bgp_label_per_nexthop_new(struct bgp_label_per_nexthop_cache_head *tree,
1672 struct prefix *nexthop)
1673 {
1674 struct bgp_label_per_nexthop_cache *blnc;
1675
1676 blnc = XCALLOC(MTYPE_LABEL_PER_NEXTHOP_CACHE,
1677 sizeof(struct bgp_label_per_nexthop_cache));
1678 blnc->tree = tree;
1679 blnc->label = MPLS_INVALID_LABEL;
1680 prefix_copy(&blnc->nexthop, nexthop);
1681 LIST_INIT(&(blnc->paths));
1682 bgp_label_per_nexthop_cache_add(tree, blnc);
1683
1684 return blnc;
1685 }
1686
1687 struct bgp_label_per_nexthop_cache *
1688 bgp_label_per_nexthop_find(struct bgp_label_per_nexthop_cache_head *tree,
1689 struct prefix *nexthop)
1690 {
1691 struct bgp_label_per_nexthop_cache blnc = {};
1692
1693 if (!tree)
1694 return NULL;
1695
1696 memcpy(&blnc.nexthop, nexthop, sizeof(struct prefix));
1697 return bgp_label_per_nexthop_cache_find(tree, &blnc);
1698 }
1699
/*
 * Free a per-nexthop label cache entry: withdraw the label from zebra
 * and return it to the pool (if one was allocated), unlink the entry
 * from its tree, free its nexthop, and free the entry itself.
 */
void bgp_label_per_nexthop_free(struct bgp_label_per_nexthop_cache *blnc)
{
	if (blnc->label != MPLS_INVALID_LABEL) {
		/*
		 * NOTE(review): blnc->nh is dereferenced here without a
		 * NULL check, while the code below does guard it.
		 * Presumably a valid label implies nh was set by the
		 * allocation path - confirm that invariant.
		 */
		bgp_zebra_send_nexthop_label(ZEBRA_MPLS_LABELS_DELETE,
					     blnc->label, blnc->nh->ifindex,
					     blnc->nh->vrf_id, ZEBRA_LSP_BGP,
					     &blnc->nexthop);
		bgp_lp_release(LP_TYPE_NEXTHOP, blnc, blnc->label);
	}
	bgp_label_per_nexthop_cache_del(blnc->tree, blnc);
	if (blnc->nh)
		nexthop_free(blnc->nh);
	blnc->nh = NULL;
	XFREE(MTYPE_LABEL_PER_NEXTHOP_CACHE, blnc);
}
1715
/* Register the per-nexthop label show command. */
void bgp_label_per_nexthop_init(void)
{
	install_element(VIEW_NODE, &show_bgp_nexthop_label_cmd);
}