/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"

/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE	50

DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")

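/*
 * A chunk is a contiguous, inclusive range [first, last] of labels
 * received from zebra's label manager.
 */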
struct lp_chunk {
	uint32_t first;
	uint32_t last;
};

/*
 * label control block
 */
struct lp_lcb {
	mpls_label_t label;	/* MPLS_LABEL_NONE = not allocated */
	int type;
	void *labelid;		/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};

struct lp_fifo {
	struct lp_fifo_item fifo;
	struct lp_lcb lcb;
};

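/*
 * Instantiates the typesafe list API for struct lp_fifo (lp_fifo_init,
 * lp_fifo_add_tail, lp_fifo_first, lp_fifo_pop, lp_fifo_del,
 * lp_fifo_count, lp_fifo_fini), used below as the pending-request queue.
 */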
DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)

struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated;		/* false = lost */
};

static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
						     (void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
								labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}

static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}

void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;
}

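/*
 * For LP_TYPE_BGP_LU requests the labelid is a struct bgp_path_info
 * pointer; it is reference-counted while it sits on the request FIFO
 * or the callback queue so the path info cannot be freed out from
 * under a pending callback.
 */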
/* check if a label callback was for a BGP LU path, and if so, unlock it */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_path_info_unlock(lcb->labelid);
}

/* check if a label callback was for a BGP LU path, and if so, lock it */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_path_info_lock(lcb->labelid);
}

void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}

static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 * Linear search is not efficient but should be executed infrequently.
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
			/* labelid is key to all-request "ledger" list */
			if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
				/*
				 * Success
				 */
				return lbl;
			}
		}
	}
	return MPLS_LABEL_NONE;
}

/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
	int	type,
	void	*labelid,
	int	(*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	/*
	 * Set up label control block
	 */
	struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
		sizeof(struct lp_lcb));

	new->label = get_label_from_pool(labelid);
	new->type = type;
	new->labelid = labelid;
	new->cbfunc = cbfunc;

	return new;
}

/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
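/*
 * Illustrative sketch of a requestor (not part of this module; the
 * "my_ldata" and "my_label_cb" names are hypothetical):
 *
 *	static int my_label_cb(mpls_label_t label, void *labelid,
 *			       bool allocated)
 *	{
 *		struct my_ldata *d = labelid;
 *
 *		if (!allocated) {
 *			// label lost; a new one arrives via later callback
 *			d->label = MPLS_LABEL_NONE;
 *			return 0;
 *		}
 *		d->label = label;
 *		return 0;	// nonzero would reject this assignment
 *	}
 *
 *	...
 *	bgp_lp_get(LP_TYPE_VRF, d, my_label_cb);
 */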
void bgp_lp_get(
	int	type,
	void	*labelid,
	int	(*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock path info before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track the number of outstanding label requests: we don't
	 * need to request a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock path info before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

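	/*
	 * lp->pending_count is the number of labels already on order
	 * from zebra but not yet delivered; only ask for another chunk
	 * when the queued requests outnumber the labels on order.
	 */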
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE))
			lp->pending_count += LP_CHUNK_SIZE;
	}
}

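/*
 * Return a label to the pool and forget the request. The caller must
 * pass the same type, labelid, and label that were granted; otherwise
 * the call is a no-op.
 */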
void bgp_lp_release(
	int		type,
	void		*labelid,
	mpls_label_t	label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			uintptr_t lbl = label;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);
		}
	}
}

/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;

	if (last < first) {
		flog_err(EC_BGP_LABEL,
			 "%s: zebra label chunk invalid: first=%u, last=%u",
			 __func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	lp->pending_count -= (last - first + 1);

	if (debug) {
		zlog_debug("%s: %zu pending requests", __func__,
			lp_fifo_count(&lp->requests));
	}

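	/*
	 * Now that the local pool has grown, satisfy as many of the
	 * queued requests as the new chunk allows.
	 */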
	while ((lf = lp_fifo_first(&lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug) {
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);
			}
			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug) {
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);
			}
			/* if this was a BGP_LU request, unlock path info node */
			check_bgp_lu_cb_unlock(lcb);

			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug) {
				zlog_debug("%s: out of labels, await more",
					__func__);
			}
			break;
		}

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		lp_fifo_del(&lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}

/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* round up to whole chunks (an exact multiple still gets one
	 * extra chunk)
	 */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;
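	/*
	 * e.g., with LP_CHUNK_SIZE of 50: 75 labels outstanding gives
	 * chunks_needed = 2, so 100 labels are requested from zebra.
	 */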

	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}