]> git.proxmox.com Git - mirror_frr.git/blob - bgpd/bgp_labelpool.c
bgpd: add peer description for each afi/safi line in show summary
[mirror_frr.git] / bgpd / bgp_labelpool.c
1 /*
2 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
3 *
4 * Copyright (C) 2018 LabN Consulting, L.L.C.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <zebra.h>
22
23 #include "log.h"
24 #include "memory.h"
25 #include "stream.h"
26 #include "mpls.h"
27 #include "vty.h"
28 #include "linklist.h"
29 #include "skiplist.h"
30 #include "workqueue.h"
31 #include "zclient.h"
32 #include "mpls.h"
33
34 #include "bgpd/bgpd.h"
35 #include "bgpd/bgp_labelpool.h"
36 #include "bgpd/bgp_debug.h"
37 #include "bgpd/bgp_errors.h"
38 #include "bgpd/bgp_route.h"
39
40 /*
41 * Definitions and external declarations.
42 */
43 extern struct zclient *zclient;
44
45 /*
46 * Remember where pool data are kept
47 */
48 static struct labelpool *lp;
49
50 /* request this many labels at a time from zebra */
51 #define LP_CHUNK_SIZE 50
52
53 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
54 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
55 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
56 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")
57
/*
 * One contiguous range of labels [first, last] received from zebra's
 * label manager; individual labels are handed out from these ranges.
 */
struct lp_chunk {
	uint32_t first;
	uint32_t last;
};
62
63 /*
64 * label control block
65 */
/*
 * label control block
 *
 * One per outstanding label request; stored in the module's "ledger"
 * skiplist keyed by labelid.
 */
struct lp_lcb {
	mpls_label_t label; /* MPLS_LABEL_NONE = not allocated */
	int type;	    /* requestor category (e.g. LP_TYPE_BGP_LU) */
	void *labelid; /* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
77
/*
 * Queue element for label requests that could not be satisfied from the
 * local chunk pool; holds a copy of the LCB until zebra supplies labels.
 */
struct lp_fifo {
	struct lp_fifo_item fifo; /* embedded list linkage */
	struct lp_lcb lcb;	  /* snapshot of the request state */
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)
84
/*
 * Work-queue element carrying one allocation (or loss) notification to a
 * requestor's callback; processed by lp_cbq_docallback().
 */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated; /* false = lost */
};
92
/*
 * Work-queue handler: deliver one label assignment/loss notification to
 * the requestor's callback.
 *
 * If the callback returns nonzero for an allocation, the requestor no
 * longer wants the label, so the label is returned to the pool and any
 * matching ledger entry is removed.  Always returns WQ_SUCCESS so the
 * work queue never retries an item.
 */
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			__func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
					(void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
							labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
150
151 static void lp_cbq_item_free(struct work_queue *wq, void *data)
152 {
153 XFREE(MTYPE_BGP_LABEL_CBQ, data);
154 }
155
156 static void lp_lcb_free(void *goner)
157 {
158 XFREE(MTYPE_BGP_LABEL_CB, goner);
159 }
160
161 static void lp_chunk_free(void *goner)
162 {
163 XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
164 }
165
166 void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
167 {
168 if (BGP_DEBUG(labelpool, LABELPOOL))
169 zlog_debug("%s: entry", __func__);
170
171 lp = pool; /* Set module pointer to pool data */
172
173 lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
174 lp->inuse = skiplist_new(0, NULL, NULL);
175 lp->chunks = list_new();
176 lp->chunks->del = lp_chunk_free;
177 lp_fifo_init(&lp->requests);
178 lp->callback_q = work_queue_new(master, "label callbacks");
179
180 lp->callback_q->spec.workfunc = lp_cbq_docallback;
181 lp->callback_q->spec.del_item_data = lp_cbq_item_free;
182 lp->callback_q->spec.max_retries = 0;
183 }
184
185 /* check if a label callback was for a BGP LU path, and if so, unlock it */
186 static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
187 {
188 if (lcb->type == LP_TYPE_BGP_LU)
189 bgp_path_info_unlock(lcb->labelid);
190 }
191
192 /* check if a label callback was for a BGP LU path, and if so, lock it */
193 static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
194 {
195 if (lcb->type == LP_TYPE_BGP_LU)
196 bgp_path_info_lock(lcb->labelid);
197 }
198
/*
 * Tear down all labelpool state (shutdown path).  Safe to call even if
 * bgp_lp_init() never ran (lp == NULL).
 */
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	/* drain pending requests, dropping the LU path-info reference
	 * taken when each was queued */
	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}
234
235 static mpls_label_t get_label_from_pool(void *labelid)
236 {
237 struct listnode *node;
238 struct lp_chunk *chunk;
239 int debug = BGP_DEBUG(labelpool, LABELPOOL);
240
241 /*
242 * Find a free label
243 * Linear search is not efficient but should be executed infrequently.
244 */
245 for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
246 uintptr_t lbl;
247
248 if (debug)
249 zlog_debug("%s: chunk first=%u last=%u",
250 __func__, chunk->first, chunk->last);
251
252 for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
253 /* labelid is key to all-request "ledger" list */
254 if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
255 /*
256 * Success
257 */
258 return lbl;
259 }
260 }
261 }
262 return MPLS_LABEL_NONE;
263 }
264
265 /*
266 * Success indicated by value of "label" field in returned LCB
267 */
268 static struct lp_lcb *lcb_alloc(
269 int type,
270 void *labelid,
271 int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
272 {
273 /*
274 * Set up label control block
275 */
276 struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
277 sizeof(struct lp_lcb));
278
279 new->label = get_label_from_pool(labelid);
280 new->type = type;
281 new->labelid = labelid;
282 new->cbfunc = cbfunc;
283
284 return new;
285 }
286
287 /*
288 * Callers who need labels must supply a type, labelid, and callback.
289 * The type is a value defined in bgp_labelpool.h (add types as needed).
290 * The callback is for asynchronous notification of label allocation.
291 * The labelid is passed as an argument to the callback. It should be unique
292 * to the requested label instance.
293 *
294 * If zebra is not connected, callbacks with labels will be delayed
295 * until connection is established. If zebra connection is lost after
296 * labels have been assigned, existing assignments via this labelpool
297 * module will continue until reconnection.
298 *
299 * When connection to zebra is reestablished, previous label assignments
300 * will be invalidated (via callbacks having the "allocated" parameter unset)
301 * and new labels will be automatically reassigned by this labelpool module
302 * (that is, a requestor does not need to call lp_get() again if it is
303 * notified via callback that its label has been lost: it will eventually
304 * get another callback with a new label assignment).
305 *
306 * Prior requests for a given labelid are detected so that requests and
307 * assignments are not duplicated.
308 */
309 void bgp_lp_get(
310 int type,
311 void *labelid,
312 int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
313 {
314 struct lp_lcb *lcb;
315 int requested = 0;
316 int debug = BGP_DEBUG(labelpool, LABELPOOL);
317
318 if (debug)
319 zlog_debug("%s: labelid=%p", __func__, labelid);
320
321 /*
322 * Have we seen this request before?
323 */
324 if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
325 requested = 1;
326 } else {
327 lcb = lcb_alloc(type, labelid, cbfunc);
328 if (debug)
329 zlog_debug("%s: inserting lcb=%p label=%u",
330 __func__, lcb, lcb->label);
331 int rc = skiplist_insert(lp->ledger, labelid, lcb);
332
333 if (rc) {
334 /* shouldn't happen */
335 flog_err(EC_BGP_LABEL,
336 "%s: can't insert new LCB into ledger list",
337 __func__);
338 XFREE(MTYPE_BGP_LABEL_CB, lcb);
339 return;
340 }
341 }
342
343 if (lcb->label != MPLS_LABEL_NONE) {
344 /*
345 * Fast path: we filled the request from local pool (or
346 * this is a duplicate request that we filled already).
347 * Enqueue response work item with new label.
348 */
349 struct lp_cbq_item *q;
350
351 q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));
352
353 q->cbfunc = lcb->cbfunc;
354 q->type = lcb->type;
355 q->label = lcb->label;
356 q->labelid = lcb->labelid;
357 q->allocated = true;
358
359 /* if this is a LU request, lock path info before queueing */
360 check_bgp_lu_cb_lock(lcb);
361
362 work_queue_add(lp->callback_q, q);
363
364 return;
365 }
366
367 if (requested)
368 return;
369
370 if (debug)
371 zlog_debug("%s: slow path. lcb=%p label=%u",
372 __func__, lcb, lcb->label);
373
374 /*
375 * Slow path: we are out of labels in the local pool,
376 * so remember the request and also get another chunk from
377 * the label manager.
378 *
379 * We track number of outstanding label requests: don't
380 * need to get a chunk for each one.
381 */
382
383 struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
384 sizeof(struct lp_fifo));
385
386 lf->lcb = *lcb;
387 /* if this is a LU request, lock path info before queueing */
388 check_bgp_lu_cb_lock(lcb);
389
390 lp_fifo_add_tail(&lp->requests, lf);
391
392 if (lp_fifo_count(&lp->requests) > lp->pending_count) {
393 if (!zclient || zclient->sock < 0)
394 return;
395 if (zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE,
396 MPLS_LABEL_BASE_ANY)
397 == ZCLIENT_SEND_FAILURE)
398 lp->pending_count += LP_CHUNK_SIZE;
399 }
400 }
401
402 void bgp_lp_release(
403 int type,
404 void *labelid,
405 mpls_label_t label)
406 {
407 struct lp_lcb *lcb;
408
409 if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
410 if (label == lcb->label && type == lcb->type) {
411 uintptr_t lbl = label;
412
413 /* no longer in use */
414 skiplist_delete(lp->inuse, (void *)lbl, NULL);
415
416 /* no longer requested */
417 skiplist_delete(lp->ledger, labelid, NULL);
418 }
419 }
420 }
421
422 /*
423 * zebra response giving us a chunk of labels
424 */
/*
 * Handle a label chunk [first, last] received from zebra, then satisfy
 * as many queued requests as the new labels allow, enqueueing a callback
 * work item for each one filled.
 *
 * NOTE(review): the "keep" parameter is currently unused here — confirm
 * whether zebra's keep semantics need handling.
 * NOTE(review): pending_count is unsigned arithmetic territory — if zebra
 * ever sends a chunk larger than what was requested, the subtraction
 * below could wrap; verify against the label-manager contract.
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;

	/* reject an inverted range from zebra */
	if (last < first) {
		flog_err(EC_BGP_LABEL,
			"%s: zebra label chunk invalid: first=%u, last=%u",
			__func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	/* these labels are no longer outstanding at zebra */
	lp->pending_count -= (last - first + 1);

	if (debug) {
		zlog_debug("%s: %zu pending requests", __func__,
			lp_fifo_count(&lp->requests));
	}

	/* drain the request FIFO until labels run out again */
	while ((lf = lp_fifo_first(&lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug) {
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);
			}
			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug) {
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);
			}
			/* if this was a BGP_LU request, unlock path info node
			 */
			check_bgp_lu_cb_unlock(lcb);

			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug) {
				zlog_debug("%s: out of labels, await more",
					__func__);
			}
			break;
		}

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		/* dequeue and free the FIFO element in all cases except
		 * label exhaustion (the break above keeps it queued) */
		lp_fifo_del(&lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}
519
/*
 * Zebra connection went away: nothing to do right now.  Previously
 * allocated labels remain in use until the connection is reestablished,
 * at which point bgp_lp_event_zebra_up() invalidates and reassigns them.
 */
void bgp_lp_event_zebra_down(void)
{
	/* intentionally empty */
}
527
528 /*
529 * Inform owners of previously-allocated labels that their labels
530 * are not valid. Request chunk from zebra large enough to satisfy
531 * previously-allocated labels plus any outstanding requests.
532 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	/*
	 * Get label chunk allocation request dispatched to zebra.
	 * Size the request for everything currently in use plus all
	 * outstanding requests.
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* round up */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;

	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	/* NOTE(review): send result is not checked here; on failure
	 * pending_count is still set and no retry occurs — confirm this
	 * is recovered elsewhere (e.g. next zebra-up event). */
	zclient_send_get_label_chunk(zclient, 0, labels_needed,
					MPLS_LABEL_BASE_ANY);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate: notify the owner that this
				 * label assignment is gone (allocated=false)
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue: re-request a label for this
			 * labelid once new chunks arrive
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}
609 }