/* bgpd/bgp_labelpool.c — from mirror_frr.git (includes merge of PR #4550, fix_bgp_label_cb) */
1 /*
2 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
3 *
4 * Copyright (C) 2018 LabN Consulting, L.L.C.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <zebra.h>
22
23 #include "log.h"
24 #include "memory.h"
25 #include "stream.h"
26 #include "mpls.h"
27 #include "vty.h"
28 #include "linklist.h"
29 #include "skiplist.h"
30 #include "workqueue.h"
31 #include "zclient.h"
32
33 #include "bgpd/bgpd.h"
34 #include "bgpd/bgp_labelpool.h"
35 #include "bgpd/bgp_debug.h"
36 #include "bgpd/bgp_errors.h"
37
38 /*
39 * Definitions and external declarations.
40 */
41 extern struct zclient *zclient;
42
43 /*
44 * Remember where pool data are kept
45 */
46 static struct labelpool *lp;
47
48 /* request this many labels at a time from zebra */
49 #define LP_CHUNK_SIZE 50
50
51 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
52 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
53 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
54 DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")
55
/*
 * One contiguous range of labels received from zebra's label manager.
 * Entries live on lp->chunks; both bounds are inclusive.
 */
struct lp_chunk {
	uint32_t first;
	uint32_t last;
};
60
/*
 * label control block: one per label request, stored in lp->ledger
 * keyed by labelid.  The authoritative record of an assignment.
 */
struct lp_lcb {
	mpls_label_t label; /* MPLS_LABEL_NONE = not allocated */
	int type;           /* requestor type, e.g. LP_TYPE_BGP_LU */
	void *labelid;      /* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
75
/*
 * Pending-request FIFO entry.  Holds a *copy* of the LCB taken at
 * request time; the ledger entry remains the authoritative one.
 */
struct lp_fifo {
	struct lp_fifo_item fifo;
	struct lp_lcb lcb;
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)
82
/*
 * Work-queue payload: one allocation (or loss) event to deliver to a
 * requestor's callback.  Note the layout differs from struct lp_lcb.
 */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated; /* false = lost */
};
90
91 static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
92 {
93 struct lp_cbq_item *lcbq = data;
94 int rc;
95 int debug = BGP_DEBUG(labelpool, LABELPOOL);
96
97 if (debug)
98 zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
99 __func__, lcbq->labelid, lcbq->label, lcbq->allocated);
100
101 if (lcbq->label == MPLS_LABEL_NONE) {
102 /* shouldn't happen */
103 flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
104 __func__);
105 return WQ_SUCCESS;
106 }
107
108 rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);
109
110 if (lcbq->allocated && rc) {
111 /*
112 * Callback rejected allocation. This situation could arise
113 * if there was a label request followed by the requestor
114 * deciding it didn't need the assignment (e.g., config
115 * change) while the reply to the original request (with
116 * label) was in the work queue.
117 */
118 if (debug)
119 zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
120 __func__, lcbq->labelid, lcbq->label);
121
122 uintptr_t lbl = lcbq->label;
123 void *labelid;
124 struct lp_lcb *lcb;
125
126 /*
127 * If the rejected label was marked inuse by this labelid,
128 * release the label back to the pool.
129 *
130 * Further, if the rejected label was still assigned to
131 * this labelid in the LCB, delete the LCB.
132 */
133 if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
134 if (labelid == lcbq->labelid) {
135 if (!skiplist_search(lp->ledger, labelid,
136 (void **)&lcb)) {
137 if (lcbq->label == lcb->label)
138 skiplist_delete(lp->ledger,
139 labelid, NULL);
140 }
141 skiplist_delete(lp->inuse, (void *)lbl, NULL);
142 }
143 }
144 }
145
146 return WQ_SUCCESS;
147 }
148
149 static void lp_cbq_item_free(struct work_queue *wq, void *data)
150 {
151 XFREE(MTYPE_BGP_LABEL_CBQ, data);
152 }
153
154 static void lp_lcb_free(void *goner)
155 {
156 XFREE(MTYPE_BGP_LABEL_CB, goner);
157 }
158
159 static void lp_chunk_free(void *goner)
160 {
161 XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
162 }
163
164 void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
165 {
166 if (BGP_DEBUG(labelpool, LABELPOOL))
167 zlog_debug("%s: entry", __func__);
168
169 lp = pool; /* Set module pointer to pool data */
170
171 lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
172 lp->inuse = skiplist_new(0, NULL, NULL);
173 lp->chunks = list_new();
174 lp->chunks->del = lp_chunk_free;
175 lp_fifo_init(&lp->requests);
176 lp->callback_q = work_queue_new(master, "label callbacks");
177
178 lp->callback_q->spec.workfunc = lp_cbq_docallback;
179 lp->callback_q->spec.del_item_data = lp_cbq_item_free;
180 lp->callback_q->spec.max_retries = 0;
181 }
182
183 /* check if a label callback was for a BGP LU path, and if so, unlock it */
184 static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
185 {
186 if (lcb->type == LP_TYPE_BGP_LU)
187 bgp_path_info_unlock(lcb->labelid);
188 }
189
190 /* check if a label callback was for a BGP LU path, and if so, lock it */
191 static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
192 {
193 if (lcb->type == LP_TYPE_BGP_LU)
194 bgp_path_info_lock(lcb->labelid);
195 }
196
197 void bgp_lp_finish(void)
198 {
199 struct lp_fifo *lf;
200 struct work_queue_item *item, *titem;
201
202 if (!lp)
203 return;
204
205 skiplist_free(lp->ledger);
206 lp->ledger = NULL;
207
208 skiplist_free(lp->inuse);
209 lp->inuse = NULL;
210
211 list_delete(&lp->chunks);
212
213 while ((lf = lp_fifo_pop(&lp->requests))) {
214 check_bgp_lu_cb_unlock(&lf->lcb);
215 XFREE(MTYPE_BGP_LABEL_FIFO, lf);
216 }
217 lp_fifo_fini(&lp->requests);
218
219 /* we must unlock path infos for LU callbacks; but we cannot do that
220 * in the deletion callback of the workqueue, as that is also called
221 * to remove an element from the queue after it has been run, resulting
222 * in a double unlock. Hence we need to iterate over our queues and
223 * lists and manually perform the unlocking (ugh)
224 */
225 STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
226 check_bgp_lu_cb_unlock(item->data);
227
228 work_queue_free_and_null(&lp->callback_q);
229
230 lp = NULL;
231 }
232
233 static mpls_label_t get_label_from_pool(void *labelid)
234 {
235 struct listnode *node;
236 struct lp_chunk *chunk;
237 int debug = BGP_DEBUG(labelpool, LABELPOOL);
238
239 /*
240 * Find a free label
241 * Linear search is not efficient but should be executed infrequently.
242 */
243 for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
244 uintptr_t lbl;
245
246 if (debug)
247 zlog_debug("%s: chunk first=%u last=%u",
248 __func__, chunk->first, chunk->last);
249
250 for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
251 /* labelid is key to all-request "ledger" list */
252 if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
253 /*
254 * Success
255 */
256 return lbl;
257 }
258 }
259 }
260 return MPLS_LABEL_NONE;
261 }
262
263 /*
264 * Success indicated by value of "label" field in returned LCB
265 */
266 static struct lp_lcb *lcb_alloc(
267 int type,
268 void *labelid,
269 int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
270 {
271 /*
272 * Set up label control block
273 */
274 struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
275 sizeof(struct lp_lcb));
276
277 new->label = get_label_from_pool(labelid);
278 new->type = type;
279 new->labelid = labelid;
280 new->cbfunc = cbfunc;
281
282 return new;
283 }
284
285 /*
286 * Callers who need labels must supply a type, labelid, and callback.
287 * The type is a value defined in bgp_labelpool.h (add types as needed).
288 * The callback is for asynchronous notification of label allocation.
289 * The labelid is passed as an argument to the callback. It should be unique
290 * to the requested label instance.
291 *
292 * If zebra is not connected, callbacks with labels will be delayed
293 * until connection is established. If zebra connection is lost after
294 * labels have been assigned, existing assignments via this labelpool
295 * module will continue until reconnection.
296 *
297 * When connection to zebra is reestablished, previous label assignments
298 * will be invalidated (via callbacks having the "allocated" parameter unset)
299 * and new labels will be automatically reassigned by this labelpool module
300 * (that is, a requestor does not need to call lp_get() again if it is
301 * notified via callback that its label has been lost: it will eventually
302 * get another callback with a new label assignment).
303 *
304 * Prior requests for a given labelid are detected so that requests and
305 * assignments are not duplicated.
306 */
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		/* new request: lcb_alloc tries the local pool right away,
		 * so lcb->label may already be valid below
		 */
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				"%s: can't insert new LCB into ledger list",
				__func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock path info before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	/* duplicate request that is still label-less: it is already on
	 * the request FIFO, nothing more to do
	 */
	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	/* the FIFO entry is a copy of the LCB; the ledger entry stays
	 * authoritative (bgp_lp_event_chunk re-checks the ledger)
	 */
	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock path info before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	/* only ask zebra for another chunk when queued requests exceed
	 * the number of labels already on order (pending_count); skip
	 * silently if zebra is not connected — bgp_lp_event_zebra_up
	 * re-requests everything on reconnect
	 */
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE))
			lp->pending_count += LP_CHUNK_SIZE;
	}
}
397
398 void bgp_lp_release(
399 int type,
400 void *labelid,
401 mpls_label_t label)
402 {
403 struct lp_lcb *lcb;
404
405 if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
406 if (label == lcb->label && type == lcb->type) {
407 uintptr_t lbl = label;
408
409 /* no longer in use */
410 skiplist_delete(lp->inuse, (void *)lbl, NULL);
411
412 /* no longer requested */
413 skiplist_delete(lp->ledger, labelid, NULL);
414 }
415 }
416 }
417
/*
 * zebra response giving us a chunk of labels
 *
 * Records the chunk on lp->chunks, then replays as many queued
 * requests as the enlarged pool can satisfy, scheduling a callback
 * for each.
 *
 * NOTE(review): the "keep" argument is not examined in this body —
 * presumably zebra echoes the value from the request; confirm with
 * the zclient API before relying on it.
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;

	/* reject a malformed range from zebra */
	if (last < first) {
		flog_err(EC_BGP_LABEL,
			"%s: zebra label chunk invalid: first=%u, last=%u",
			__func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	/* these labels are no longer outstanding at zebra */
	lp->pending_count -= (last - first + 1);

	if (debug) {
		zlog_debug("%s: %zu pending requests", __func__,
			lp_fifo_count(&lp->requests));
	}

	/* drain the request FIFO while free labels remain */
	while ((lf = lp_fifo_first(&lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		/* the FIFO entry is only a copy; the authoritative LCB
		 * lives in the ledger and may have been released since
		 */
		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug) {
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);
			}
			/* NOTE(review): if this was a BGP_LU request, the
			 * path-info lock taken at enqueue time does not
			 * appear to be dropped on this path — verify
			 * against bgp_lp_release/callers
			 */
			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug) {
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);
			}
			/* if this was a BGP_LU request, unlock path info node
			 */
			check_bgp_lu_cb_unlock(lcb);

			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug) {
				zlog_debug("%s: out of labels, await more",
					__func__);
			}
			break;
		}

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		lp_fifo_del(&lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}
515
/*
 * Zebra connection lost: deliberately do nothing.  Labels already
 * assigned continue to be used until zebra returns; on reconnect,
 * bgp_lp_event_zebra_up() invalidates and re-requests everything.
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}
523
/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* round up */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;

	/* synchronous (re-)connect to zebra's label manager */
	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate: tell the owner via callback
				 * with allocated=false
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				/* the queued callback item holds its own
				 * LU path-info reference
				 */
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue: requeue a copy of the LCB; the
			 * copy carries a second, separate LU reference
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}
604 }