/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "fifo.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"

/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE 50

DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")

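/*
 * Count-tracking wrappers around the plain FIFO_* list macros from
 * lib/fifo.h. The queue head and its elements must start with the same
 * next/prev links as "struct fifo" (see "struct lp_fifo" below).
 */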
#define LABEL_FIFO_ADD(F, N)                                                   \
        do {                                                                   \
                FIFO_ADD((F), (N));                                            \
                (F)->count++;                                                  \
        } while (0)

#define LABEL_FIFO_DEL(F, N)                                                   \
        do {                                                                   \
                FIFO_DEL((N));                                                 \
                (F)->count--;                                                  \
        } while (0)

#define LABEL_FIFO_INIT(F)                                                     \
        do {                                                                   \
                FIFO_INIT((F));                                                \
                (F)->count = 0;                                                \
        } while (0)

#define LABEL_FIFO_COUNT(F) ((F)->count)

#define LABEL_FIFO_EMPTY(F) FIFO_EMPTY(F)

#define LABEL_FIFO_HEAD(F) ((F)->next == (F) ? NULL : (F)->next)

struct lp_chunk {
        uint32_t first;
        uint32_t last;
};

/*
 * label control block
 */
struct lp_lcb {
        mpls_label_t label;     /* MPLS_LABEL_NONE = not allocated */
        int type;
        void *labelid;          /* unique ID */
        /*
         * callback for label allocation and loss
         *
         * allocated: false = lost
         */
        int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};

/* XXX same first elements as "struct fifo" */
struct lp_fifo {
        struct lp_fifo *next;
        struct lp_fifo *prev;

        uint32_t count;
        struct lp_lcb lcb;
};

struct lp_cbq_item {
        int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
        int type;
        mpls_label_t label;
        void *labelid;
        bool allocated;         /* false = lost */
};

static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
        struct lp_cbq_item *lcbq = data;
        int rc;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        if (debug)
                zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
                           __func__, lcbq->labelid, lcbq->label, lcbq->allocated);

        if (lcbq->label == MPLS_LABEL_NONE) {
                /* shouldn't happen */
                flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
                         __func__);
                return WQ_SUCCESS;
        }

        rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

        if (lcbq->allocated && rc) {
                /*
                 * Callback rejected allocation. This situation could arise
                 * if there was a label request followed by the requestor
                 * deciding it didn't need the assignment (e.g., config
                 * change) while the reply to the original request (with
                 * label) was in the work queue.
                 */
                if (debug)
                        zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
                                   __func__, lcbq->labelid, lcbq->label);

                uintptr_t lbl = lcbq->label;
                void *labelid;
                struct lp_lcb *lcb;

                /*
                 * If the rejected label was marked inuse by this labelid,
                 * release the label back to the pool.
                 *
                 * Further, if the rejected label was still assigned to
                 * this labelid in the LCB, delete the LCB.
                 */
                if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
                        if (labelid == lcbq->labelid) {
                                if (!skiplist_search(lp->ledger, labelid,
                                                     (void **)&lcb)) {
                                        if (lcbq->label == lcb->label)
                                                skiplist_delete(lp->ledger,
                                                                labelid, NULL);
                                }
                                skiplist_delete(lp->inuse, (void *)lbl, NULL);
                        }
                }
        }

        return WQ_SUCCESS;
}

static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
        XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
        if (goner)
                XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
        if (goner)
                XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}

void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
        if (BGP_DEBUG(labelpool, LABELPOOL))
                zlog_debug("%s: entry", __func__);

        lp = pool;      /* Set module pointer to pool data */

        lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
        lp->inuse = skiplist_new(0, NULL, NULL);
        lp->chunks = list_new();
        lp->chunks->del = lp_chunk_free;
        lp->requests = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
        LABEL_FIFO_INIT(lp->requests);
        lp->callback_q = work_queue_new(master, "label callbacks");

        lp->callback_q->spec.workfunc = lp_cbq_docallback;
        lp->callback_q->spec.del_item_data = lp_cbq_item_free;
        lp->callback_q->spec.max_retries = 0;
}

void bgp_lp_finish(void)
{
        struct lp_fifo *lf;

        if (!lp)
                return;

        skiplist_free(lp->ledger);
        lp->ledger = NULL;

        skiplist_free(lp->inuse);
        lp->inuse = NULL;

        list_delete(&lp->chunks);

        while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

                LABEL_FIFO_DEL(lp->requests, lf);
                XFREE(MTYPE_BGP_LABEL_FIFO, lf);
        }
        XFREE(MTYPE_BGP_LABEL_FIFO, lp->requests);
        lp->requests = NULL;

        work_queue_free_and_null(&lp->callback_q);

        lp = NULL;
}

static mpls_label_t get_label_from_pool(void *labelid)
{
        struct listnode *node;
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        /*
         * Find a free label
         * Linear search is not efficient but should be executed infrequently.
         */
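        /*
         * A label is free if it lies within some chunk and is not yet a
         * key in the "inuse" skiplist; skiplist_insert() below fails for
         * a key that is already present, which is how in-use labels are
         * skipped.
         */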
        for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
                uintptr_t lbl;

                if (debug)
                        zlog_debug("%s: chunk first=%u last=%u",
                                   __func__, chunk->first, chunk->last);

                for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
                        /* labelid is key to all-request "ledger" list */
                        if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
                                /*
                                 * Success
                                 */
                                return lbl;
                        }
                }
        }
        return MPLS_LABEL_NONE;
}

/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
        int type,
        void *labelid,
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
        /*
         * Set up label control block
         */
        struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
                                     sizeof(struct lp_lcb));

        new->label = get_label_from_pool(labelid);
        new->type = type;
        new->labelid = labelid;
        new->cbfunc = cbfunc;

        return new;
}

/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
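/*
 * Illustrative sketch of the calling convention described above. The type
 * constant LP_TYPE_EXAMPLE, the callback, and the caller functions below
 * are hypothetical and not part of this module; they are kept under
 * "#if 0" so they are never compiled.
 */
#if 0
static int example_label_cb(mpls_label_t label, void *labelid, bool allocated)
{
        if (!allocated) {
                /*
                 * Label lost (e.g. zebra reconnected); a replacement
                 * arrives via a later callback, no new bgp_lp_get() needed.
                 */
                return 0;
        }
        /* record/install "label" for the object identified by labelid */
        return 0;       /* returning nonzero would hand the label back */
}

static void example_request_label(void *my_labelid)
{
        /* answer arrives asynchronously via example_label_cb() */
        bgp_lp_get(LP_TYPE_EXAMPLE, my_labelid, example_label_cb);
}

static void example_release_label(void *my_labelid, mpls_label_t label)
{
        /* return the label when it is no longer needed */
        bgp_lp_release(LP_TYPE_EXAMPLE, my_labelid, label);
}
#endif
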
void bgp_lp_get(
        int type,
        void *labelid,
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
        struct lp_lcb *lcb;
        int requested = 0;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        if (debug)
                zlog_debug("%s: labelid=%p", __func__, labelid);

        /*
         * Have we seen this request before?
         */
        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                requested = 1;
        } else {
                lcb = lcb_alloc(type, labelid, cbfunc);
                if (debug)
                        zlog_debug("%s: inserting lcb=%p label=%u",
                                   __func__, lcb, lcb->label);
                int rc = skiplist_insert(lp->ledger, labelid, lcb);

                if (rc) {
                        /* shouldn't happen */
                        flog_err(EC_BGP_LABEL,
                                 "%s: can't insert new LCB into ledger list",
                                 __func__);
                        XFREE(MTYPE_BGP_LABEL_CB, lcb);
                        return;
                }
        }

        if (lcb->label != MPLS_LABEL_NONE) {
                /*
                 * Fast path: we filled the request from local pool (or
                 * this is a duplicate request that we filled already).
                 * Enqueue response work item with new label.
                 */
                struct lp_cbq_item *q;

                q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

                q->cbfunc = lcb->cbfunc;
                q->type = lcb->type;
                q->label = lcb->label;
                q->labelid = lcb->labelid;
                q->allocated = true;

                work_queue_add(lp->callback_q, q);

                return;
        }

        if (requested)
                return;

        if (debug)
                zlog_debug("%s: slow path. lcb=%p label=%u",
                           __func__, lcb, lcb->label);

        /*
         * Slow path: we are out of labels in the local pool,
         * so remember the request and also get another chunk from
         * the label manager.
         *
         * We track number of outstanding label requests: don't
         * need to get a chunk for each one.
         */

        struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
                                     sizeof(struct lp_fifo));

        lf->lcb = *lcb;
        LABEL_FIFO_ADD(lp->requests, lf);

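        /*
         * Ask zebra for another chunk only when the queued requests
         * outnumber the labels already on order (pending_count), so a
         * single chunk request can cover up to LP_CHUNK_SIZE queued
         * requests.
         */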
        if (LABEL_FIFO_COUNT(lp->requests) > lp->pending_count) {
                if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
                        lp->pending_count += LP_CHUNK_SIZE;
                        return;
                }
        }
}

void bgp_lp_release(
        int type,
        void *labelid,
        mpls_label_t label)
{
        struct lp_lcb *lcb;

        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                if (label == lcb->label && type == lcb->type) {
                        uintptr_t lbl = label;

                        /* no longer in use */
                        skiplist_delete(lp->inuse, (void *)lbl, NULL);

                        /* no longer requested */
                        skiplist_delete(lp->ledger, labelid, NULL);
                }
        }
}

/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);
        struct lp_fifo *lf;

        if (last < first) {
                flog_err(EC_BGP_LABEL,
                         "%s: zebra label chunk invalid: first=%u, last=%u",
                         __func__, first, last);
                return;
        }

        chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

        chunk->first = first;
        chunk->last = last;

        listnode_add(lp->chunks, chunk);

        lp->pending_count -= (last - first + 1);

        if (debug) {
                zlog_debug("%s: %u pending requests", __func__,
                           LABEL_FIFO_COUNT(lp->requests));
        }

        while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

                struct lp_lcb *lcb;
                void *labelid = lf->lcb.labelid;

                if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                        /* request no longer in effect */

                        if (debug) {
                                zlog_debug("%s: labelid %p: request no longer in effect",
                                           __func__, labelid);
                        }
                        goto finishedrequest;
                }

                /* have LCB */
                if (lcb->label != MPLS_LABEL_NONE) {
                        /* request already has a label */
                        if (debug) {
                                zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
                                           __func__, labelid,
                                           lcb->label, lcb->label, lcb);
                        }
                        goto finishedrequest;
                }

                lcb->label = get_label_from_pool(lcb->labelid);

                if (lcb->label == MPLS_LABEL_NONE) {
                        /*
                         * Out of labels in local pool, await next chunk
                         */
                        if (debug) {
                                zlog_debug("%s: out of labels, await more",
                                           __func__);
                        }
                        break;
                }

                /*
                 * we filled the request from local pool.
                 * Enqueue response work item with new label.
                 */
                struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
                                                sizeof(struct lp_cbq_item));

                q->cbfunc = lcb->cbfunc;
                q->type = lcb->type;
                q->label = lcb->label;
                q->labelid = lcb->labelid;
                q->allocated = true;

                if (debug)
                        zlog_debug("%s: assigning label %u to labelid %p",
                                   __func__, q->label, q->labelid);

                work_queue_add(lp->callback_q, q);

finishedrequest:
                LABEL_FIFO_DEL(lp->requests, lf);
                XFREE(MTYPE_BGP_LABEL_FIFO, lf);
        }
}

/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
        /* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
        int labels_needed;
        int chunks_needed;
        void *labelid;
        struct lp_lcb *lcb;
        int lm_init_ok;

        /*
         * Get label chunk allocation request dispatched to zebra
         */
        labels_needed = LABEL_FIFO_COUNT(lp->requests) +
                        skiplist_count(lp->inuse);

        /* round up */
        chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
        labels_needed = chunks_needed * LP_CHUNK_SIZE;
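        /*
         * The above rounds up to whole chunks, e.g. 120 labels in use or
         * requested -> (120 / 50) + 1 = 3 chunks -> 150 labels requested.
         */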

        lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

        if (!lm_init_ok)
                zlog_err("%s: label manager connection error", __func__);

        zclient_send_get_label_chunk(zclient, 0, labels_needed);
        lp->pending_count = labels_needed;

        /*
         * Invalidate current list of chunks
         */
        list_delete_all_node(lp->chunks);

        /*
         * Invalidate any existing labels and requeue them as requests
         */
        while (!skiplist_first(lp->inuse, NULL, &labelid)) {

                /*
                 * Get LCB
                 */
                if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

                        if (lcb->label != MPLS_LABEL_NONE) {
                                /*
                                 * invalidate
                                 */
                                struct lp_cbq_item *q;

                                q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
                                            sizeof(struct lp_cbq_item));
                                q->cbfunc = lcb->cbfunc;
                                q->type = lcb->type;
                                q->label = lcb->label;
                                q->labelid = lcb->labelid;
                                q->allocated = false;
                                work_queue_add(lp->callback_q, q);

                                lcb->label = MPLS_LABEL_NONE;
                        }

                        /*
                         * request queue
                         */
                        struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
                                                     sizeof(struct lp_fifo));

                        lf->lcb = *lcb;
                        LABEL_FIFO_ADD(lp->requests, lf);
                }

                skiplist_delete_first(lp->inuse);
        }
}
598}