/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "fifo.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"

/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE	50

DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")

#define LABEL_FIFO_ADD(F, N)		\
	do {				\
		FIFO_ADD((F), (N));	\
		(F)->count++;		\
	} while (0)

#define LABEL_FIFO_DEL(F, N)		\
	do {				\
		FIFO_DEL((N));		\
		(F)->count--;		\
	} while (0)

#define LABEL_FIFO_INIT(F)		\
	do {				\
		FIFO_INIT((F));		\
		(F)->count = 0;		\
	} while (0)

#define LABEL_FIFO_COUNT(F) ((F)->count)

#define LABEL_FIFO_EMPTY(F) FIFO_EMPTY(F)

#define LABEL_FIFO_HEAD(F) ((F)->next == (F) ? NULL : (F)->next)

struct lp_chunk {
	uint32_t	first;
	uint32_t	last;
};

/*
 * label control block
 */
struct lp_lcb {
	mpls_label_t	label;		/* MPLS_LABEL_NONE = not allocated */
	int		type;
	void		*labelid;	/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int		(*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};

/* XXX same first elements as "struct fifo" */
struct lp_fifo {
	struct lp_fifo	*next;
	struct lp_fifo	*prev;

	uint32_t	count;
	struct lp_lcb	lcb;
};

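/*
 * Illustrative only: a minimal sketch of how "struct lp_fifo" and the
 * LABEL_FIFO_* wrappers above fit together, mirroring the enqueue/drain
 * pattern used later in this file.  "example_fifo_usage" is hypothetical
 * and not part of this module.
 */
#if 0
static void example_fifo_usage(struct lp_fifo *requests)
{
	/* "requests" is a queue head already set up with LABEL_FIFO_INIT() */
	struct lp_fifo *node;

	/* enqueue one request node; the wrapper maintains the count */
	node = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
	LABEL_FIFO_ADD(requests, node);

	/* drain the queue */
	while ((node = LABEL_FIFO_HEAD(requests))) {
		LABEL_FIFO_DEL(requests, node);
		XFREE(MTYPE_BGP_LABEL_FIFO, node);
	}
}
#endif
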
struct lp_cbq_item {
	int		(*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int		type;
	mpls_label_t	label;
	void		*labelid;
	bool		allocated;	/* false = lost */
};

static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
						     (void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
								labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}

static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}

void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp->requests = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
	LABEL_FIFO_INIT(lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;
}

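/*
 * Illustrative only: a rough sketch of bringing the pool up at daemon
 * startup.  The "example_*" names are hypothetical; in bgpd the pool
 * storage is owned by the daemon (this module only keeps a pointer to it).
 */
#if 0
static struct labelpool example_pool;

static void example_startup(struct thread_master *example_master)
{
	/* point the module at its storage and create its data structures */
	bgp_lp_init(example_master, &example_pool);
}
#endif
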
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;

	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

		LABEL_FIFO_DEL(lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	XFREE(MTYPE_BGP_LABEL_FIFO, lp->requests);
	lp->requests = NULL;

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}

static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label. The linear search is not efficient, but it
	 * should be executed infrequently.
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
			/* labelid is key to all-request "ledger" list */
			if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
				/*
				 * Success
				 */
				return lbl;
			}
		}
	}
	return MPLS_LABEL_NONE;
}

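/*
 * Worked example (illustrative numbers): with a single chunk covering
 * 80000..80049, the first request is assigned 80000, the next 80001, and
 * so on.  A label freed by bgp_lp_release() becomes available again
 * because the skiplist_insert() above succeeds once its "inuse" entry
 * has been deleted.
 */
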
/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
	int	type,
	void	*labelid,
	int	(*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	/*
	 * Set up label control block
	 */
	struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
		sizeof(struct lp_lcb));

	new->label = get_label_from_pool(labelid);
	new->type = type;
	new->labelid = labelid;
	new->cbfunc = cbfunc;

	return new;
}

/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until the connection is established. If the zebra connection is lost
 * after labels have been assigned, existing assignments made via this
 * labelpool module remain in effect until reconnection.
 *
 * When the connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks with the "allocated" parameter set to
 * false) and new labels will be automatically reassigned by this labelpool
 * module (that is, a requestor does not need to call bgp_lp_get() again if
 * it is notified via callback that its label has been lost: it will
 * eventually get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
void bgp_lp_get(
	int	type,
	void	*labelid,
	int	(*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from the local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue a response work item with the new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		work_queue_add(lp->callback_q, q);

		return;
	}

	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool, so remember
	 * the request and also get another chunk from the label manager.
	 *
	 * We track the number of outstanding label requests: we don't
	 * need to request a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	LABEL_FIFO_ADD(lp->requests, lf);

	if (LABEL_FIFO_COUNT(lp->requests) > lp->pending_count) {
		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
			lp->pending_count += LP_CHUNK_SIZE;
			return;
		}
	}
}

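/*
 * Illustrative only: a minimal sketch of a requestor, assuming a
 * caller-defined context structure (the "example_*" names below are
 * hypothetical, not part of this module).  The callback may be invoked
 * more than once over the lifetime of a request: with allocated=true when
 * a label is (re)assigned, and with allocated=false if the label is lost.
 * Returning nonzero from an allocated=true callback rejects the label and
 * releases it back to the pool.
 */
#if 0
struct example_ctx {
	mpls_label_t label;
};

static int example_label_cb(mpls_label_t label, void *labelid, bool allocated)
{
	struct example_ctx *ctx = labelid;

	if (!allocated) {
		/* label lost (e.g., zebra reconnect); a new one will follow */
		ctx->label = MPLS_LABEL_NONE;
		return 0;
	}

	ctx->label = label;	/* accept the assignment */
	return 0;		/* nonzero would reject/release it */
}

static void example_usage(struct example_ctx *ctx)
{
	/* ask for a label; the answer arrives via example_label_cb() */
	bgp_lp_get(LP_TYPE_VRF, ctx, example_label_cb);

	/* ... later, when the label is no longer needed ... */
	bgp_lp_release(LP_TYPE_VRF, ctx, ctx->label);
}
#endif
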
void bgp_lp_release(
	int		type,
	void		*labelid,
	mpls_label_t	label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			uintptr_t lbl = label;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);
		}
	}
}

/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;

	if (last < first) {
		flog_err(EC_BGP_LABEL,
			 "%s: zebra label chunk invalid: first=%u, last=%u",
			 __func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	lp->pending_count -= (last - first + 1);

	if (debug) {
		zlog_debug("%s: %u pending requests", __func__,
			LABEL_FIFO_COUNT(lp->requests));
	}

	while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug) {
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);
			}
			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug) {
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);
			}
			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in the local pool; await the
			 * next chunk.
			 */
			if (debug) {
				zlog_debug("%s: out of labels, await more",
					__func__);
			}
			break;
		}

		/*
		 * We filled the request from the local pool.
		 * Enqueue a response work item with the new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		LABEL_FIFO_DEL(lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}

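/*
 * Illustrative only: this function is expected to be driven from the
 * daemon's zclient code once a ZEBRA_GET_LABEL_CHUNK reply has been
 * decoded (the decoding itself lives outside this file).  For example,
 * a grant of labels 80000..80049 would arrive here as
 *
 *	bgp_lp_event_chunk(keep, 80000, 80049);
 *
 * reducing pending_count by 50 and allowing queued requests to be
 * serviced from the new chunk.
 */
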
/*
 * Continue using previously-allocated labels until zebra reconnects.
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request a chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = LABEL_FIFO_COUNT(lp->requests) +
		skiplist_count(lp->inuse);

	/* round up to a whole number of chunks (always at least one) */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;
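	/*
	 * Worked example (illustrative numbers): with 60 labels in use and
	 * 3 queued requests, labels_needed starts at 63; 63/50 + 1 = 2
	 * chunks, so 100 labels are requested.  An exact multiple still
	 * rounds up by one extra chunk; the surplus is harmless.
	 */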

	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok)
		zlog_err("%s: label manager connection error", __func__);

	zclient_send_get_label_chunk(zclient, 0, labels_needed);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * requeue as a request
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			LABEL_FIFO_ADD(lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}
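
/*
 * Illustrative only: the two event hooks above are expected to be called
 * from the daemon's zclient connection-state handlers (which live outside
 * this file); the handler names below are hypothetical.
 */
#if 0
static void example_zebra_connected(void)
{
	bgp_lp_event_zebra_up();	/* re-request chunks, invalidate labels */
}

static void example_zebra_connection_lost(void)
{
	bgp_lp_event_zebra_down();	/* keep using labels until reconnect */
}
#endif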