bgpd/bgp_labelpool.c
/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"

/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where the pool data are kept
 */
static struct labelpool *lp;

/* Request this many labels at a time from zebra */
#define LP_CHUNK_SIZE 50

DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")

struct lp_chunk {
        uint32_t first;
        uint32_t last;
};

/*
 * Label control block
 */
struct lp_lcb {
        mpls_label_t label;     /* MPLS_LABEL_NONE = not allocated */
        int type;
        void *labelid;          /* unique ID */
        /*
         * Callback for label allocation and loss
         *
         * allocated: false = lost
         */
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated);
};
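
/*
 * A minimal sketch of a conforming callback (hypothetical requestor code,
 * not part of this file). Returning nonzero from a callback invoked with
 * allocated=true rejects the label, and the pool reclaims it (see
 * lp_cbq_docallback below).
 *
 *      static int my_label_cb(mpls_label_t label, void *labelid,
 *                             bool allocated)
 *      {
 *              struct my_state *s = labelid;
 *
 *              if (!allocated) {
 *                      // label lost (e.g., zebra restart); a replacement
 *                      // arrives via a later callback
 *                      s->label = MPLS_LABEL_NONE;
 *                      return 0;
 *              }
 *              if (!s->still_needs_label)
 *                      return -1;      // reject; pool releases the label
 *              s->label = label;
 *              return 0;
 *      }
 */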

struct lp_fifo {
        struct lp_fifo_item fifo;
        struct lp_lcb lcb;
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)

struct lp_cbq_item {
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated);
        int type;
        mpls_label_t label;
        void *labelid;
        bool allocated;         /* false = lost */
};

static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
        struct lp_cbq_item *lcbq = data;
        int rc;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        if (debug)
                zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
                           __func__, lcbq->labelid, lcbq->label,
                           lcbq->allocated);

        if (lcbq->label == MPLS_LABEL_NONE) {
                /* shouldn't happen */
                flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
                         __func__);
                return WQ_SUCCESS;
        }

        rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

        if (lcbq->allocated && rc) {
                /*
                 * Callback rejected the allocation. This situation could
                 * arise if there was a label request followed by the
                 * requestor deciding it didn't need the assignment (e.g.,
                 * config change) while the reply to the original request
                 * (with label) was in the work queue.
                 */
                if (debug)
                        zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
                                   __func__, lcbq->labelid, lcbq->label);

                uintptr_t lbl = lcbq->label;
                void *labelid;
                struct lp_lcb *lcb;

                /*
                 * If the rejected label was marked in use by this labelid,
                 * release the label back to the pool.
                 *
                 * Further, if the rejected label was still assigned to
                 * this labelid in the LCB, delete the LCB.
                 */
                if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
                        if (labelid == lcbq->labelid) {
                                if (!skiplist_search(lp->ledger, labelid,
                                                     (void **)&lcb)) {
                                        if (lcbq->label == lcb->label)
                                                skiplist_delete(lp->ledger,
                                                                labelid,
                                                                NULL);
                                }
                                skiplist_delete(lp->inuse, (void *)lbl,
                                                NULL);
                        }
                }
        }

        return WQ_SUCCESS;
}

static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
        XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
        XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
        XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}

void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
        if (BGP_DEBUG(labelpool, LABELPOOL))
                zlog_debug("%s: entry", __func__);

        lp = pool;      /* Set module pointer to pool data */

        lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
        lp->inuse = skiplist_new(0, NULL, NULL);
        lp->chunks = list_new();
        lp->chunks->del = lp_chunk_free;
        lp_fifo_init(&lp->requests);
        lp->callback_q = work_queue_new(master, "label callbacks");

        lp->callback_q->spec.workfunc = lp_cbq_docallback;
        lp->callback_q->spec.del_item_data = lp_cbq_item_free;
        lp->callback_q->spec.max_retries = 0;
}
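
/*
 * Startup wiring sketch (assumed caller, not part of this file; in bgpd
 * the pool is expected to live in the bgp_master structure and to be
 * initialized from the main setup path):
 *
 *      bgp_lp_init(bm->master, &bm->labelpool);
 *
 * After this, requests queue locally until the zebra label manager
 * connection comes up (see bgp_lp_event_zebra_up below).
 */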

/* If a label callback was for a BGP LU path, unlock the path info */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
        if (lcb->type == LP_TYPE_BGP_LU)
                bgp_path_info_unlock(lcb->labelid);
}

/* If a label callback was for a BGP LU path, lock the path info */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
        if (lcb->type == LP_TYPE_BGP_LU)
                bgp_path_info_lock(lcb->labelid);
}

void bgp_lp_finish(void)
{
        struct lp_fifo *lf;
        struct work_queue_item *item, *titem;

        if (!lp)
                return;

        skiplist_free(lp->ledger);
        lp->ledger = NULL;

        skiplist_free(lp->inuse);
        lp->inuse = NULL;

        list_delete(&lp->chunks);

        while ((lf = lp_fifo_pop(&lp->requests))) {
                check_bgp_lu_cb_unlock(&lf->lcb);
                XFREE(MTYPE_BGP_LABEL_FIFO, lf);
        }
        lp_fifo_fini(&lp->requests);

        /*
         * We must unlock path infos for LU callbacks, but we cannot do
         * that in the deletion callback of the workqueue, as that is also
         * called to remove an element from the queue after it has been
         * run, resulting in a double unlock. Hence we need to iterate
         * over our queues and lists and manually perform the unlocking
         * (ugh).
         */
        STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
                check_bgp_lu_cb_unlock(item->data);

        work_queue_free_and_null(&lp->callback_q);

        lp = NULL;
}

static mpls_label_t get_label_from_pool(void *labelid)
{
        struct listnode *node;
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        /*
         * Find a free label.
         * Linear search is not efficient but should be executed
         * infrequently.
         */
        for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
                uintptr_t lbl;

                if (debug)
                        zlog_debug("%s: chunk first=%u last=%u",
                                   __func__, chunk->first, chunk->last);

                for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
                        /* labelid is the key to the all-request "ledger" list */
                        if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
                                /*
                                 * Success
                                 */
                                return lbl;
                        }
                }
        }
        return MPLS_LABEL_NONE;
}
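
/*
 * Note on the scan above: because lp->inuse is created without duplicate
 * support, skiplist_insert() fails for a label that is already present,
 * so the skiplist doubles as the allocation map. For example, with a
 * single chunk [80, 129] and labels 80-82 already in use, the scan
 * returns 83.
 */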

/*
 * Success is indicated by the value of the "label" field in the
 * returned LCB.
 */
static struct lp_lcb *lcb_alloc(
        int type,
        void *labelid,
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
        /*
         * Set up label control block
         */
        struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
                                     sizeof(struct lp_lcb));

        new->label = get_label_from_pool(labelid);
        new->type = type;
        new->labelid = labelid;
        new->cbfunc = cbfunc;

        return new;
}

/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be
 * unique to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until the connection is established. If the zebra connection is lost
 * after labels have been assigned, existing assignments via this
 * labelpool module will continue until reconnection.
 *
 * When the connection to zebra is reestablished, previous label
 * assignments will be invalidated (via callbacks having the "allocated"
 * parameter unset) and new labels will be automatically reassigned by
 * this labelpool module (that is, a requestor does not need to call
 * bgp_lp_get() again if it is notified via callback that its label has
 * been lost: it will eventually get another callback with a new label
 * assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
void bgp_lp_get(
        int type,
        void *labelid,
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
        struct lp_lcb *lcb;
        int requested = 0;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        if (debug)
                zlog_debug("%s: labelid=%p", __func__, labelid);

        /*
         * Have we seen this request before?
         */
        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                requested = 1;
        } else {
                lcb = lcb_alloc(type, labelid, cbfunc);
                if (debug)
                        zlog_debug("%s: inserting lcb=%p label=%u",
                                   __func__, lcb, lcb->label);
                int rc = skiplist_insert(lp->ledger, labelid, lcb);

                if (rc) {
                        /* shouldn't happen */
                        flog_err(EC_BGP_LABEL,
                                 "%s: can't insert new LCB into ledger list",
                                 __func__);
                        XFREE(MTYPE_BGP_LABEL_CB, lcb);
                        return;
                }
        }

        if (lcb->label != MPLS_LABEL_NONE) {
                /*
                 * Fast path: we filled the request from the local pool (or
                 * this is a duplicate request that we filled already).
                 * Enqueue a response work item with the new label.
                 */
                struct lp_cbq_item *q;

                q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

                q->cbfunc = lcb->cbfunc;
                q->type = lcb->type;
                q->label = lcb->label;
                q->labelid = lcb->labelid;
                q->allocated = true;

                /* if this is an LU request, lock the path info before
                 * queueing
                 */
                check_bgp_lu_cb_lock(lcb);

                work_queue_add(lp->callback_q, q);

                return;
        }

        if (requested)
                return;

        if (debug)
                zlog_debug("%s: slow path. lcb=%p label=%u",
                           __func__, lcb, lcb->label);

        /*
         * Slow path: we are out of labels in the local pool, so remember
         * the request and get another chunk from the label manager.
         *
         * We track the number of outstanding label requests so that we
         * don't need to request a chunk for each one.
         */

        struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
                                     sizeof(struct lp_fifo));

        lf->lcb = *lcb;
        /* if this is an LU request, lock the path info before queueing */
        check_bgp_lu_cb_lock(lcb);

        lp_fifo_add_tail(&lp->requests, lf);

        if (lp_fifo_count(&lp->requests) > lp->pending_count) {
                if (!zclient || zclient->sock < 0)
                        return;
                if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE,
                                                  MPLS_LABEL_BASE_ANY))
                        lp->pending_count += LP_CHUNK_SIZE;
        }
}
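
/*
 * Minimal usage sketch (hypothetical requestor; my_label_cb as in the
 * earlier callback example; LP_TYPE_VRF is one of the types defined in
 * bgp_labelpool.h):
 *
 *      bgp_lp_get(LP_TYPE_VRF, &my_state, my_label_cb);
 *
 * The call returns immediately; my_label_cb runs later from the
 * "label callbacks" work queue with allocated=true once a label is
 * available.
 */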

void bgp_lp_release(
        int type,
        void *labelid,
        mpls_label_t label)
{
        struct lp_lcb *lcb;

        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                if (label == lcb->label && type == lcb->type) {
                        uintptr_t lbl = label;

                        /* no longer in use */
                        skiplist_delete(lp->inuse, (void *)lbl, NULL);

                        /* no longer requested */
                        skiplist_delete(lp->ledger, labelid, NULL);
                }
        }
}
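
/*
 * A release pairs with an earlier get on the same (type, labelid, label)
 * triple; continuing the sketch above (assuming my_state.label holds the
 * label delivered by the callback):
 *
 *      bgp_lp_release(LP_TYPE_VRF, &my_state, my_state.label);
 */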

/*
 * Zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);
        struct lp_fifo *lf;

        if (last < first) {
                flog_err(EC_BGP_LABEL,
                         "%s: zebra label chunk invalid: first=%u, last=%u",
                         __func__, first, last);
                return;
        }

        chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

        chunk->first = first;
        chunk->last = last;

        listnode_add(lp->chunks, chunk);

        lp->pending_count -= (last - first + 1);

        if (debug) {
                zlog_debug("%s: %zu pending requests", __func__,
                           lp_fifo_count(&lp->requests));
        }

        while ((lf = lp_fifo_first(&lp->requests))) {

                struct lp_lcb *lcb;
                void *labelid = lf->lcb.labelid;

                if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                        /* request no longer in effect */

                        if (debug) {
                                zlog_debug("%s: labelid %p: request no longer in effect",
                                           __func__, labelid);
                        }
                        goto finishedrequest;
                }

                /* have LCB */
                if (lcb->label != MPLS_LABEL_NONE) {
                        /* request already has a label */
                        if (debug) {
                                zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
                                           __func__, labelid,
                                           lcb->label, lcb->label, lcb);
                        }
                        /* if this was a BGP_LU request, unlock the path
                         * info node
                         */
                        check_bgp_lu_cb_unlock(lcb);

                        goto finishedrequest;
                }

                lcb->label = get_label_from_pool(lcb->labelid);

                if (lcb->label == MPLS_LABEL_NONE) {
                        /*
                         * Out of labels in the local pool, await next chunk
                         */
                        if (debug) {
                                zlog_debug("%s: out of labels, await more",
                                           __func__);
                        }
                        break;
                }

                /*
                 * We filled the request from the local pool.
                 * Enqueue a response work item with the new label.
                 */
                struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
                                                sizeof(struct lp_cbq_item));

                q->cbfunc = lcb->cbfunc;
                q->type = lcb->type;
                q->label = lcb->label;
                q->labelid = lcb->labelid;
                q->allocated = true;

                if (debug)
                        zlog_debug("%s: assigning label %u to labelid %p",
                                   __func__, q->label, q->labelid);

                work_queue_add(lp->callback_q, q);

finishedrequest:
                lp_fifo_del(&lp->requests, lf);
                XFREE(MTYPE_BGP_LABEL_FIFO, lf);
        }
}
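
/*
 * Chunk request/reply flow with the zebra label manager (sketch; the
 * dispatch of the reply to bgp_lp_event_chunk() happens in bgpd's zebra
 * glue code, outside this file):
 *
 *      bgpd                                        zebra
 *        zclient_send_get_label_chunk()  ---->
 *                                                  reserves [first, last]
 *                                        <----     label chunk reply
 *        bgp_lp_event_chunk(keep, first, last)
 */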

/*
 * Continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
        /* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are no longer valid. Request a chunk from zebra large enough to
 * satisfy previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
        int labels_needed;
        int chunks_needed;
        void *labelid;
        struct lp_lcb *lcb;
        int lm_init_ok;

        /*
         * Get a label chunk allocation request dispatched to zebra
         */
        labels_needed = lp_fifo_count(&lp->requests) +
                        skiplist_count(lp->inuse);

        /* round up to a whole number of chunks, always at least one */
        chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
        labels_needed = chunks_needed * LP_CHUNK_SIZE;

        lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

        if (!lm_init_ok) {
                zlog_err("%s: label manager connection error", __func__);
                return;
        }

        zclient_send_get_label_chunk(zclient, 0, labels_needed,
                                     MPLS_LABEL_BASE_ANY);
        lp->pending_count = labels_needed;

        /*
         * Invalidate the current list of chunks
         */
        list_delete_all_node(lp->chunks);

        /*
         * Invalidate any existing labels and requeue them as requests
         */
        while (!skiplist_first(lp->inuse, NULL, &labelid)) {

                /*
                 * Get LCB
                 */
                if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

                        if (lcb->label != MPLS_LABEL_NONE) {
                                /*
                                 * invalidate
                                 */
                                struct lp_cbq_item *q;

                                q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
                                            sizeof(struct lp_cbq_item));
                                q->cbfunc = lcb->cbfunc;
                                q->type = lcb->type;
                                q->label = lcb->label;
                                q->labelid = lcb->labelid;
                                q->allocated = false;
                                check_bgp_lu_cb_lock(lcb);
                                work_queue_add(lp->callback_q, q);

                                lcb->label = MPLS_LABEL_NONE;
                        }

                        /*
                         * requeue as a request
                         */
                        struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
                                                     sizeof(struct lp_fifo));

                        lf->lcb = *lcb;
                        check_bgp_lu_cb_lock(lcb);
                        lp_fifo_add_tail(&lp->requests, lf);
                }

                skiplist_delete_first(lp->inuse);
        }
}