bgpd/bgp_labelpool.c
/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "fifo.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"

/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE	50

DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")

#define LABEL_FIFO_ADD(F, N) \
	do { \
		FIFO_ADD((F), (N)); \
		(F)->count++; \
	} while (0)

#define LABEL_FIFO_DEL(F, N) \
	do { \
		FIFO_DEL((N)); \
		(F)->count--; \
	} while (0)

#define LABEL_FIFO_INIT(F) \
	do { \
		FIFO_INIT((F)); \
		(F)->count = 0; \
	} while (0)

#define LABEL_FIFO_COUNT(F) ((F)->count)

#define LABEL_FIFO_EMPTY(F) FIFO_EMPTY(F)

#define LABEL_FIFO_HEAD(F) ((F)->next == (F) ? NULL : (F)->next)

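/*
 * Usage sketch for the counting FIFO wrappers above (hypothetical names,
 * not code from this file). The queue head and its nodes are both
 * "struct lp_fifo"; the wrappers keep the head's count field in step with
 * FIFO_ADD/FIFO_DEL from lib/fifo.h.
 *
 *	struct lp_fifo head;
 *	struct lp_fifo *node;
 *
 *	LABEL_FIFO_INIT(&head);
 *	node = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
 *	LABEL_FIFO_ADD(&head, node);	// LABEL_FIFO_COUNT(&head) == 1
 *	node = LABEL_FIFO_HEAD(&head);	// oldest entry, NULL if empty
 *	LABEL_FIFO_DEL(&head, node);	// LABEL_FIFO_COUNT(&head) == 0
 *	XFREE(MTYPE_BGP_LABEL_FIFO, node);
 */
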
struct lp_chunk {
	uint32_t first;
	uint32_t last;
};

/*
 * label control block
 */
struct lp_lcb {
	mpls_label_t label;	/* MPLS_LABEL_NONE = not allocated */
	int type;
	void *labelid;		/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};

/* XXX same first elements as "struct fifo" */
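/*
 * The FIFO_* macros in lib/fifo.h cast their arguments to "struct fifo",
 * so the next/prev pointers below must remain the first two members.
 */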
struct lp_fifo {
	struct lp_fifo *next;
	struct lp_fifo *prev;

	uint32_t count;
	struct lp_lcb lcb;
};

struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated;	/* false = lost */
};

static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		zlog_err("%s: error: label==MPLS_LABEL_NONE", __func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
						(void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
							labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}

static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
	if (goner)
		XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
	if (goner)
		XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}

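/*
 * Initialize the label pool: remember where the pool data live, create
 * the ledger and in-use skiplists, the chunk list, the request FIFO,
 * and the work queue used to deliver label callbacks.
 */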
void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp->requests = XCALLOC(MTYPE_BGP_LABEL_FIFO, sizeof(struct lp_fifo));
	LABEL_FIFO_INIT(lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;
}

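/*
 * Tear the label pool down: free the ledger and in-use skiplists, the
 * chunk list, any queued requests, and the callback work queue.
 */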
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;

	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete_and_null(&lp->chunks);

	while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

		LABEL_FIFO_DEL(lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	XFREE(MTYPE_BGP_LABEL_FIFO, lp->requests);
	lp->requests = NULL;

	work_queue_free_and_null(&lp->callback_q);

	lp = NULL;
}

static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 * Linear search is not efficient but should be executed infrequently.
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
			/* labelid is key to all-request "ledger" list */
			if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
				/*
				 * Success
				 */
				return lbl;
			}
		}
	}
	return MPLS_LABEL_NONE;
}

/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	/*
	 * Set up label control block
	 */
	struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
		sizeof(struct lp_lcb));

	new->label = get_label_from_pool(labelid);
	new->type = type;
	new->labelid = labelid;
	new->cbfunc = cbfunc;

	return new;
}

/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
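/*
 * Illustrative usage sketch (not code from this file; "struct my_state",
 * "my_label_cb", and its "label" member are invented names, and
 * LP_TYPE_VRF stands in for whichever LP_TYPE_* value from
 * bgp_labelpool.h applies): a requestor passes a unique labelid, here
 * the address of its own state object, plus a callback that records the
 * label or clears it when it is lost.
 *
 *	static int my_label_cb(mpls_label_t label, void *labelid,
 *			       bool allocated)
 *	{
 *		struct my_state *s = labelid;
 *
 *		if (!allocated) {
 *			// label lost (e.g., zebra reconnect); a replacement
 *			// will arrive later via this same callback
 *			s->label = MPLS_LABEL_NONE;
 *			return 0;
 *		}
 *
 *		s->label = label;	// accept the assignment
 *		return 0;		// nonzero would reject it, and the
 *					// pool would release the label
 *	}
 *
 *	bgp_lp_get(LP_TYPE_VRF, s, my_label_cb);
 *	...
 *	bgp_lp_release(LP_TYPE_VRF, s, s->label);
 */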
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			zlog_err("%s: can't insert new LCB into ledger list",
				__func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		work_queue_add(lp->callback_q, q);

		return;
	}

	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * the label manager.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */

	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	LABEL_FIFO_ADD(lp->requests, lf);

	if (LABEL_FIFO_COUNT(lp->requests) > lp->pending_count) {
		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE)) {
			lp->pending_count += LP_CHUNK_SIZE;
			return;
		}
	}
}

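/*
 * Return a label obtained via bgp_lp_get() to the pool and forget the
 * request: if the given label and type match the ledger entry for
 * labelid, the label is removed from the in-use list and the ledger
 * entry is deleted.
 */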
void bgp_lp_release(
	int type,
	void *labelid,
	mpls_label_t label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			uintptr_t lbl = label;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);
		}
	}
}

/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;

	if (last < first) {
		zlog_err("%s: zebra label chunk invalid: first=%u, last=%u",
			__func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	lp->pending_count -= (last - first + 1);

	if (debug) {
		zlog_debug("%s: %u pending requests", __func__,
			LABEL_FIFO_COUNT(lp->requests));
	}

	while ((lf = LABEL_FIFO_HEAD(lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug) {
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);
			}
			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug) {
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);
			}
			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug) {
				zlog_debug("%s: out of labels, await more",
					__func__);
			}
			break;
		}

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		LABEL_FIFO_DEL(lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}

/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;

	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = LABEL_FIFO_COUNT(lp->requests) +
		skiplist_count(lp->inuse);

	/* round up */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;
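	/*
	 * Worked example with the default LP_CHUNK_SIZE of 50: 70 labels
	 * in use plus 40 queued requests gives labels_needed = 110, so
	 * chunks_needed = (110 / 50) + 1 = 3 and 150 labels are requested.
	 */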

	zclient_send_get_label_chunk(zclient, 0, labels_needed);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			LABEL_FIFO_ADD(lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}