/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "stream.h"
#include "mpls.h"
#include "vty.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"
/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;

/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE	50
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")
/*
 * A chunk of contiguous labels granted by zebra.
 */
struct lp_chunk {
	uint32_t	first;
	uint32_t	last;
};

/*
 * Label control block: one per requested labelid.
 */
struct lp_lcb {
	mpls_label_t	label;		/* MPLS_LABEL_NONE = not allocated */
	int		type;
	void		*labelid;	/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int		(*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};

struct lp_fifo {
	struct lp_fifo_item fifo;
	struct lp_lcb	lcb;
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)

/*
 * Work-queue item carrying one allocation (or loss) notification.
 */
struct lp_cbq_item {
	int		(*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int		type;
	mpls_label_t	label;
	void		*labelid;
	bool		allocated;	/* false = lost */
};
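/*
 * How the pieces fit together (summary derived from the code below):
 *
 *   lp->ledger      skiplist, labelid -> lcb; one entry per request
 *   lp->inuse       skiplist, label -> labelid; one entry per live label
 *   lp->chunks      list of lp_chunk ranges granted by zebra
 *   lp->requests    FIFO of requests awaiting labels from a future chunk
 *   lp->callback_q  work queue delivering allocation/loss callbacks
 */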
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
					(void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
							labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;
}
/* check if a label callback was for a BGP LU path, and if so, unlock it */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_path_info_unlock(lcb->labelid);
}

/* check if a label callback was for a BGP LU path, and if so, lock it */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_path_info_lock(lcb->labelid);
}
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

	if (!lp)
		return;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);
}
/*
 * Hand out a label from the local pool, or MPLS_LABEL_NONE if empty.
 */
static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 * Linear search is not efficient but should be executed infrequently.
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
			/* labelid is key to all-request "ledger" list */
			if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
				/*
				 * Success
				 */
				return lbl;
			}
		}
	}
	return MPLS_LABEL_NONE;
}
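/*
 * Note (added for clarity): lp->inuse is created without the
 * allow-duplicates flag, so skiplist_insert() fails when the label key
 * is already present. The insert in the loop above therefore serves as
 * both the "is this label free?" test and the reservation itself.
 */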
/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	/*
	 * Set up label control block
	 */
	struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
		sizeof(struct lp_lcb));

	new->label = get_label_from_pool(labelid);
	new->type = type;
	new->labelid = labelid;
	new->cbfunc = cbfunc;

	return new;
}
/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated.
 */
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock path info before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * zebra.
	 *
	 * We track number of outstanding label requests: don't
	 * need to get a chunk for each one.
	 */
	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock path info before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE))
			lp->pending_count += LP_CHUNK_SIZE;
	}
}
void bgp_lp_release(
	int type,
	void *labelid,
	mpls_label_t label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			uintptr_t lbl = label;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);
		}
	}
}
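/*
 * Note (added for clarity): bgp_lp_release() only clears this module's
 * bookkeeping. The label is not handed back to zebra; it stays within
 * its chunk on lp->chunks and becomes available again to
 * get_label_from_pool() for the next request.
 */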
/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);
	struct lp_fifo *lf;

	if (last < first) {
		flog_err(EC_BGP_LABEL,
			 "%s: zebra label chunk invalid: first=%u, last=%u",
			 __func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	lp->pending_count -= (last - first + 1);

	if (debug)
		zlog_debug("%s: %zu pending requests", __func__,
			lp_fifo_count(&lp->requests));

	while ((lf = lp_fifo_first(&lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */

			if (debug)
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);

			goto finishedrequest;
		}

		/* have LCB */
		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug)
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);

			/* if this was a BGP_LU request, unlock path info node
			 */
			check_bgp_lu_cb_unlock(lcb);

			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug)
				zlog_debug("%s: out of labels, await more",
					__func__);
			break;
		}

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		lp_fifo_del(&lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}
/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
	/* rats. */
}

/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* round up to a whole number of chunks */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;
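	/*
	 * Worked example (added for clarity): with LP_CHUNK_SIZE 50 and,
	 * say, 60 labels in use plus queued requests, chunks_needed is
	 * (60 / 50) + 1 = 2, so labels_needed is rounded up to 100 and
	 * the single chunk request below covers everything that must be
	 * reassigned.
	 */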
	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * request queue
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}