bgpd/bgp_labelpool.c
/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "fifo.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"

/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE 50

DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")

#define LABEL_FIFO_ADD(F, N)            \
        do {                            \
                FIFO_ADD((F), (N));     \
                (F)->count++;           \
        } while (0)

#define LABEL_FIFO_DEL(F, N)            \
        do {                            \
                FIFO_DEL((N));          \
                (F)->count--;           \
        } while (0)

#define LABEL_FIFO_INIT(F)              \
        do {                            \
                FIFO_INIT((F));         \
                (F)->count = 0;         \
        } while (0)

#define LABEL_FIFO_COUNT(F) ((F)->count)

#define LABEL_FIFO_EMPTY(F) FIFO_EMPTY(F)

#define LABEL_FIFO_HEAD(F) ((F)->next == (F) ? NULL : (F)->next)

struct lp_chunk {
        uint32_t first;
        uint32_t last;
};

/*
 * label control block
 */
struct lp_lcb {
        mpls_label_t label;     /* MPLS_LABEL_NONE = not allocated */
        int type;
        void *labelid;          /* unique ID */
        /*
         * callback for label allocation and loss
         *
         * allocated: false = lost
         */
        int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
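
/*
 * A minimal sketch of a conforming callback; the "example_" names are
 * hypothetical and not part of this module.  Returning nonzero from an
 * allocation callback tells the pool the label is unwanted and it will be
 * released; allocated == false reports that a previously-assigned label
 * has been lost.
 */
struct example_owner {
        bool wants_label;
        mpls_label_t label;
};

static int example_label_cb(mpls_label_t label, void *labelid, bool allocated)
{
        struct example_owner *owner = labelid;

        if (!allocated) {
                /* label lost (e.g., zebra reconnect); stop using it */
                owner->label = MPLS_LABEL_NONE;
                return 0;
        }
        if (!owner->wants_label)
                return -1;      /* reject; the pool releases the label */

        owner->label = label;
        return 0;
}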

/* XXX same first elements as "struct fifo" */
struct lp_fifo {
        struct lp_fifo *next;
        struct lp_fifo *prev;

        uint32_t count;
        struct lp_lcb lcb;
};
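
/*
 * The FIFO_ADD/FIFO_DEL/FIFO_INIT macros from lib/fifo.h operate only on
 * the leading next/prev pointers, so struct lp_fifo above must keep them
 * as its first members; the LABEL_FIFO_* wrappers additionally maintain
 * the count.
 */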

struct lp_cbq_item {
        int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
        int type;
        mpls_label_t label;
        void *labelid;
        bool allocated;         /* false = lost */
};

static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
        struct lp_cbq_item *lcbq = data;
        int rc;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        if (debug)
                zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
                        __func__, lcbq->labelid, lcbq->label, lcbq->allocated);

        if (lcbq->label == MPLS_LABEL_NONE) {
                /* shouldn't happen */
                zlog_err("%s: error: label==MPLS_LABEL_NONE", __func__);
                return WQ_SUCCESS;
        }

        rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

        if (lcbq->allocated && rc) {
                /*
                 * Callback rejected allocation. This situation could arise
                 * if there was a label request followed by the requestor
                 * deciding it didn't need the assignment (e.g., config
                 * change) while the reply to the original request (with
                 * label) was in the work queue.
                 */
                if (debug)
                        zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
                                __func__, lcbq->labelid, lcbq->label);

                uintptr_t lbl = lcbq->label;
                void *labelid;
                struct lp_lcb *lcb;

                /*
                 * If the rejected label was marked inuse by this labelid,
                 * release the label back to the pool.
                 *
                 * Further, if the rejected label was still assigned to
                 * this labelid in the LCB, delete the LCB.
                 */
                if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
                        if (labelid == lcbq->labelid) {
                                if (!skiplist_search(lp->ledger, labelid,
                                        (void **)&lcb)) {
                                        if (lcbq->label == lcb->label)
                                                skiplist_delete(lp->ledger,
                                                        labelid, NULL);
                                }
                                skiplist_delete(lp->inuse, (void *)lbl, NULL);
                        }
                }
        }

        return WQ_SUCCESS;
}

static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
        XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
        if (goner)
                XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
        if (goner)
                XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}

void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
        if (BGP_DEBUG(labelpool, LABELPOOL))
                zlog_debug("%s: entry", __func__);

        lp = pool;      /* Set module pointer to pool data */

        lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
        lp->inuse = skiplist_new(0, NULL, NULL);
        lp->chunks = list_new();
        lp->chunks->del = lp_chunk_free;
        lp->requests = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
        LABEL_FIFO_INIT(lp->requests);
        lp->callback_q = work_queue_new(master, "label callbacks");
        if (!lp->callback_q) {
                zlog_err("%s: Failed to allocate work queue", __func__);
                exit(1);
        }

        lp->callback_q->spec.workfunc = lp_cbq_docallback;
        lp->callback_q->spec.del_item_data = lp_cbq_item_free;
        lp->callback_q->spec.max_retries = 0;
}

void bgp_lp_finish(void)
{
        struct lp_fifo *lf;

        if (!lp)
                return;

        skiplist_free(lp->ledger);
        lp->ledger = NULL;

        skiplist_free(lp->inuse);
        lp->inuse = NULL;

        list_delete_and_null(&lp->chunks);

        while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

                LABEL_FIFO_DEL(lp->requests, lf);
                XFREE(MTYPE_BGP_LABEL_FIFO, lf);
        }
        XFREE(MTYPE_BGP_LABEL_FIFO, lp->requests);
        lp->requests = NULL;

        work_queue_free_and_null(&lp->callback_q);

        lp = NULL;
}

static mpls_label_t get_label_from_pool(void *labelid)
{
        struct listnode *node;
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        /*
         * Find a free label
         * Linear search is not efficient but should be executed infrequently.
         */
        for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
                uintptr_t lbl;

                if (debug)
                        zlog_debug("%s: chunk first=%u last=%u",
                                __func__, chunk->first, chunk->last);

                for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
                        /* labelid is key to all-request "ledger" list */
                        if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
                                /*
                                 * Success
                                 */
                                return lbl;
                        }
                }
        }
        return MPLS_LABEL_NONE;
}

/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
        int type,
        void *labelid,
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
        /*
         * Set up label control block
         */
        struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
                sizeof(struct lp_lcb));

        new->label = get_label_from_pool(labelid);
        new->type = type;
        new->labelid = labelid;
        new->cbfunc = cbfunc;

        return new;
}

/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
void bgp_lp_get(
        int type,
        void *labelid,
        int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
        struct lp_lcb *lcb;
        int requested = 0;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);

        if (debug)
                zlog_debug("%s: labelid=%p", __func__, labelid);

        /*
         * Have we seen this request before?
         */
        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                requested = 1;
        } else {
                lcb = lcb_alloc(type, labelid, cbfunc);
                if (debug)
                        zlog_debug("%s: inserting lcb=%p label=%u",
                                __func__, lcb, lcb->label);
                int rc = skiplist_insert(lp->ledger, labelid, lcb);

                if (rc) {
                        /* shouldn't happen */
                        zlog_err("%s: can't insert new LCB into ledger list",
                                __func__);
                        XFREE(MTYPE_BGP_LABEL_CB, lcb);
                        return;
                }
        }

        if (lcb->label != MPLS_LABEL_NONE) {
                /*
                 * Fast path: we filled the request from local pool (or
                 * this is a duplicate request that we filled already).
                 * Enqueue response work item with new label.
                 */
                struct lp_cbq_item *q;

                q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

                q->cbfunc = lcb->cbfunc;
                q->type = lcb->type;
                q->label = lcb->label;
                q->labelid = lcb->labelid;
                q->allocated = true;

                work_queue_add(lp->callback_q, q);

                return;
        }

        if (requested)
                return;

        if (debug)
                zlog_debug("%s: slow path. lcb=%p label=%u",
                        __func__, lcb, lcb->label);

        /*
         * Slow path: we are out of labels in the local pool,
         * so remember the request and also get another chunk from
         * the label manager.
         *
         * We track number of outstanding label requests: don't
         * need to get a chunk for each one.
         */

        struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
                sizeof(struct lp_fifo));

        lf->lcb = *lcb;
        LABEL_FIFO_ADD(lp->requests, lf);

        if (LABEL_FIFO_COUNT(lp->requests) > lp->pending_count) {
                if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
                        lp->pending_count += LP_CHUNK_SIZE;
                        return;
                }
        }
}
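
/*
 * Usage sketch for the API above, reusing the hypothetical example_owner
 * and example_label_cb sketched near the top of this file.  LP_TYPE_EXAMPLE
 * stands in for a type value that a real caller would define in
 * bgp_labelpool.h.
 */
#define LP_TYPE_EXAMPLE 0x1000

static void example_owner_start(struct example_owner *owner)
{
        owner->wants_label = true;
        /* the label arrives asynchronously via example_label_cb() */
        bgp_lp_get(LP_TYPE_EXAMPLE, owner, example_label_cb);
}

static void example_owner_stop(struct example_owner *owner)
{
        owner->wants_label = false;
        if (owner->label != MPLS_LABEL_NONE) {
                bgp_lp_release(LP_TYPE_EXAMPLE, owner, owner->label);
                owner->label = MPLS_LABEL_NONE;
        }
        /*
         * If a request is still pending, the eventual allocation callback
         * returns nonzero (wants_label is false) and the pool releases
         * the label itself.
         */
}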

void bgp_lp_release(
        int type,
        void *labelid,
        mpls_label_t label)
{
        struct lp_lcb *lcb;

        if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                if (label == lcb->label && type == lcb->type) {
                        uintptr_t lbl = label;

                        /* no longer in use */
                        skiplist_delete(lp->inuse, (void *)lbl, NULL);

                        /* no longer requested */
                        skiplist_delete(lp->ledger, labelid, NULL);
                }
        }
}

/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
        struct lp_chunk *chunk;
        int debug = BGP_DEBUG(labelpool, LABELPOOL);
        struct lp_fifo *lf;

        if (last < first) {
                zlog_err("%s: zebra label chunk invalid: first=%u, last=%u",
                        __func__, first, last);
                return;
        }

        chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

        chunk->first = first;
        chunk->last = last;

        listnode_add(lp->chunks, chunk);

        lp->pending_count -= (last - first + 1);

        if (debug) {
                zlog_debug("%s: %u pending requests", __func__,
                        LABEL_FIFO_COUNT(lp->requests));
        }

        while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

                struct lp_lcb *lcb;
                void *labelid = lf->lcb.labelid;

                if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
                        /* request no longer in effect */

                        if (debug) {
                                zlog_debug("%s: labelid %p: request no longer in effect",
                                        __func__, labelid);
                        }
                        goto finishedrequest;
                }

                /* have LCB */
                if (lcb->label != MPLS_LABEL_NONE) {
                        /* request already has a label */
                        if (debug) {
                                zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
                                        __func__, labelid,
                                        lcb->label, lcb->label, lcb);
                        }
                        goto finishedrequest;
                }

                lcb->label = get_label_from_pool(lcb->labelid);

                if (lcb->label == MPLS_LABEL_NONE) {
                        /*
                         * Out of labels in local pool, await next chunk
                         */
                        if (debug) {
                                zlog_debug("%s: out of labels, await more",
                                        __func__);
                        }
                        break;
                }

                /*
                 * we filled the request from local pool.
                 * Enqueue response work item with new label.
                 */
                struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
                        sizeof(struct lp_cbq_item));

                q->cbfunc = lcb->cbfunc;
                q->type = lcb->type;
                q->label = lcb->label;
                q->labelid = lcb->labelid;
                q->allocated = true;

                if (debug)
                        zlog_debug("%s: assigning label %u to labelid %p",
                                __func__, q->label, q->labelid);

                work_queue_add(lp->callback_q, q);

finishedrequest:
                LABEL_FIFO_DEL(lp->requests, lf);
                XFREE(MTYPE_BGP_LABEL_FIFO, lf);
        }
}
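
/*
 * Worked example: with three requests queued and pending_count == 50, a
 * reply such as bgp_lp_event_chunk(keep, 80, 129) adds the chunk [80, 129],
 * reduces pending_count by 50, and the loop above pops the three queued
 * requests, assigning labels 80, 81 and 82 via the callback queue (label
 * values are illustrative and assume this is the only chunk with no labels
 * already in use; zebra chooses the actual range).
 */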

/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
        /* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
        int labels_needed;
        int chunks_needed;
        void *labelid;
        struct lp_lcb *lcb;

        /*
         * Get label chunk allocation request dispatched to zebra
         */
        labels_needed = LABEL_FIFO_COUNT(lp->requests) +
                skiplist_count(lp->inuse);

        /* round up */
        chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
        labels_needed = chunks_needed * LP_CHUNK_SIZE;
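        /*
         * e.g., 60 outstanding labels -> chunks_needed = 2, so 100 labels
         * are requested; an exact multiple such as 50 also rounds up to
         * two chunks, which slightly over-requests.
         */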

        zclient_send_get_label_chunk(zclient, 0, labels_needed);
        lp->pending_count = labels_needed;

        /*
         * Invalidate current list of chunks
         */
        list_delete_all_node(lp->chunks);

        /*
         * Invalidate any existing labels and requeue them as requests
         */
        while (!skiplist_first(lp->inuse, NULL, &labelid)) {

                /*
                 * Get LCB
                 */
                if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

                        if (lcb->label != MPLS_LABEL_NONE) {
                                /*
                                 * invalidate
                                 */
                                struct lp_cbq_item *q;

                                q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
                                        sizeof(struct lp_cbq_item));
                                q->cbfunc = lcb->cbfunc;
                                q->type = lcb->type;
                                q->label = lcb->label;
                                q->labelid = lcb->labelid;
                                q->allocated = false;
                                work_queue_add(lp->callback_q, q);

                                lcb->label = MPLS_LABEL_NONE;
                        }

                        /*
                         * request queue
                         */
                        struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
                                sizeof(struct lp_fifo));

                        lf->lcb = *lcb;
                        LABEL_FIFO_ADD(lp->requests, lf);
                }

                skiplist_delete_first(lp->inuse);
        }
}