/*
 * BGP Label Pool - Manage label chunk allocations from zebra asynchronously
 *
 * Copyright (C) 2018 LabN Consulting, L.L.C.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <zebra.h>

#include "log.h"
#include "memory.h"
#include "mpls.h"
#include "linklist.h"
#include "skiplist.h"
#include "workqueue.h"
#include "zclient.h"

#include "bgpd/bgpd.h"
#include "bgpd/bgp_labelpool.h"
#include "bgpd/bgp_debug.h"
#include "bgpd/bgp_errors.h"
#include "bgpd/bgp_route.h"
/*
 * Definitions and external declarations.
 */
extern struct zclient *zclient;
/*
 * Remember where pool data are kept
 */
static struct labelpool *lp;

/* request this many labels at a time from zebra */
#define LP_CHUNK_SIZE 50
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CHUNK, "BGP Label Chunk")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_FIFO, "BGP Label FIFO item")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CB, "BGP Dynamic Label Assignment")
DEFINE_MTYPE_STATIC(BGPD, BGP_LABEL_CBQ, "BGP Dynamic Label Callback")
/*
 * A chunk of consecutive labels granted to us by zebra
 */
struct lp_chunk {
	uint32_t first;
	uint32_t last;
};

/*
 * Label control block: one per label requested from this pool
 */
struct lp_lcb {
	mpls_label_t label;		/* MPLS_LABEL_NONE = not allocated */
	int type;
	void *labelid;			/* unique ID */
	/*
	 * callback for label allocation and loss
	 *
	 * allocated: false = lost
	 */
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
};
/*
 * FIFO entry for a request awaiting labels from the next zebra chunk
 */
struct lp_fifo {
	struct lp_fifo_item fifo;
	struct lp_lcb lcb;
};

DECLARE_LIST(lp_fifo, struct lp_fifo, fifo)
/*
 * Work queue item: carries one allocation (or loss) notification
 */
struct lp_cbq_item {
	int (*cbfunc)(mpls_label_t label, void *lblid, bool alloc);
	int type;
	mpls_label_t label;
	void *labelid;
	bool allocated;		/* false = lost */
};
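
/*
 * Pool bookkeeping at a glance (a summary of the structures used below,
 * inferred from their call sites in this file):
 *
 *   lp->ledger      skiplist: labelid -> lcb, one entry per live request
 *   lp->inuse       skiplist: label -> labelid, marks allocated labels
 *   lp->chunks      list of lp_chunk label ranges received from zebra
 *   lp->requests    FIFO of requests awaiting labels from the next chunk
 *   lp->callback_q  work queue delivering allocation/loss callbacks
 */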
static wq_item_status lp_cbq_docallback(struct work_queue *wq, void *data)
{
	struct lp_cbq_item *lcbq = data;
	int rc;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: calling callback with labelid=%p label=%u allocated=%d",
			__func__, lcbq->labelid, lcbq->label, lcbq->allocated);

	if (lcbq->label == MPLS_LABEL_NONE) {
		/* shouldn't happen */
		flog_err(EC_BGP_LABEL, "%s: error: label==MPLS_LABEL_NONE",
			 __func__);
		return WQ_SUCCESS;
	}

	rc = (*(lcbq->cbfunc))(lcbq->label, lcbq->labelid, lcbq->allocated);

	if (lcbq->allocated && rc) {
		/*
		 * Callback rejected allocation. This situation could arise
		 * if there was a label request followed by the requestor
		 * deciding it didn't need the assignment (e.g., config
		 * change) while the reply to the original request (with
		 * label) was in the work queue.
		 */
		if (debug)
			zlog_debug("%s: callback rejected allocation, releasing labelid=%p label=%u",
				__func__, lcbq->labelid, lcbq->label);

		uintptr_t lbl = lcbq->label;
		void *labelid;
		struct lp_lcb *lcb;

		/*
		 * If the rejected label was marked inuse by this labelid,
		 * release the label back to the pool.
		 *
		 * Further, if the rejected label was still assigned to
		 * this labelid in the LCB, delete the LCB.
		 */
		if (!skiplist_search(lp->inuse, (void *)lbl, &labelid)) {
			if (labelid == lcbq->labelid) {
				if (!skiplist_search(lp->ledger, labelid,
						     (void **)&lcb)) {
					if (lcbq->label == lcb->label)
						skiplist_delete(lp->ledger,
								labelid, NULL);
				}
				skiplist_delete(lp->inuse, (void *)lbl, NULL);
			}
		}
	}

	return WQ_SUCCESS;
}
static void lp_cbq_item_free(struct work_queue *wq, void *data)
{
	XFREE(MTYPE_BGP_LABEL_CBQ, data);
}

static void lp_lcb_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CB, goner);
}

static void lp_chunk_free(void *goner)
{
	XFREE(MTYPE_BGP_LABEL_CHUNK, goner);
}
void bgp_lp_init(struct thread_master *master, struct labelpool *pool)
{
	if (BGP_DEBUG(labelpool, LABELPOOL))
		zlog_debug("%s: entry", __func__);

	lp = pool;	/* Set module pointer to pool data */

	lp->ledger = skiplist_new(0, NULL, lp_lcb_free);
	lp->inuse = skiplist_new(0, NULL, NULL);
	lp->chunks = list_new();
	lp->chunks->del = lp_chunk_free;
	lp_fifo_init(&lp->requests);
	lp->callback_q = work_queue_new(master, "label callbacks");

	lp->callback_q->spec.workfunc = lp_cbq_docallback;
	lp->callback_q->spec.del_item_data = lp_cbq_item_free;
	lp->callback_q->spec.max_retries = 0;
}
/* check if a label callback was for a BGP LU path, and if so, unlock it */
static void check_bgp_lu_cb_unlock(struct lp_lcb *lcb)
{
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_path_info_unlock(lcb->labelid);
}

/* check if a label callback was for a BGP LU path, and if so, lock it */
static void check_bgp_lu_cb_lock(struct lp_lcb *lcb)
{
	if (lcb->type == LP_TYPE_BGP_LU)
		bgp_path_info_lock(lcb->labelid);
}
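
/*
 * Reference-count note (a sketch-level summary inferred from the lock/unlock
 * call sites in this file, not an authoritative statement of the locking
 * design): a path-info reference is taken via check_bgp_lu_cb_lock()
 * whenever a BGP-LU item enters the callback work queue or the request
 * FIFO, and is dropped when that item is consumed; bgp_lp_finish() below
 * manually drops the references of items still queued at shutdown.
 */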
void bgp_lp_finish(void)
{
	struct lp_fifo *lf;
	struct work_queue_item *item, *titem;

	skiplist_free(lp->ledger);
	lp->ledger = NULL;

	skiplist_free(lp->inuse);
	lp->inuse = NULL;

	list_delete(&lp->chunks);

	while ((lf = lp_fifo_pop(&lp->requests))) {
		check_bgp_lu_cb_unlock(&lf->lcb);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
	lp_fifo_fini(&lp->requests);

	/* we must unlock path infos for LU callbacks; but we cannot do that
	 * in the deletion callback of the workqueue, as that is also called
	 * to remove an element from the queue after it has been run, resulting
	 * in a double unlock. Hence we need to iterate over our queues and
	 * lists and manually perform the unlocking (ugh)
	 */
	STAILQ_FOREACH_SAFE (item, &lp->callback_q->items, wq, titem)
		check_bgp_lu_cb_unlock(item->data);

	work_queue_free_and_null(&lp->callback_q);
}
static mpls_label_t get_label_from_pool(void *labelid)
{
	struct listnode *node;
	struct lp_chunk *chunk;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	/*
	 * Find a free label
	 * Linear search is not efficient but should be executed infrequently.
	 */
	for (ALL_LIST_ELEMENTS_RO(lp->chunks, node, chunk)) {
		uintptr_t lbl;

		if (debug)
			zlog_debug("%s: chunk first=%u last=%u",
				__func__, chunk->first, chunk->last);

		for (lbl = chunk->first; lbl <= chunk->last; ++lbl) {
			/* labelid is key to all-request "ledger" list */
			if (!skiplist_insert(lp->inuse, (void *)lbl, labelid)) {
				/*
				 * insert succeeded, so the label was not
				 * already in use: take it
				 */
				return lbl;
			}
		}
	}
	return MPLS_LABEL_NONE;
}
/*
 * Success indicated by value of "label" field in returned LCB
 */
static struct lp_lcb *lcb_alloc(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	/*
	 * Set up label control block
	 */
	struct lp_lcb *new = XCALLOC(MTYPE_BGP_LABEL_CB,
		sizeof(struct lp_lcb));

	new->label = get_label_from_pool(labelid);
	new->type = type;
	new->labelid = labelid;
	new->cbfunc = cbfunc;

	return new;
}
/*
 * Callers who need labels must supply a type, labelid, and callback.
 * The type is a value defined in bgp_labelpool.h (add types as needed).
 * The callback is for asynchronous notification of label allocation.
 * The labelid is passed as an argument to the callback. It should be unique
 * to the requested label instance.
 *
 * If zebra is not connected, callbacks with labels will be delayed
 * until connection is established. If zebra connection is lost after
 * labels have been assigned, existing assignments via this labelpool
 * module will continue until reconnection.
 *
 * When connection to zebra is reestablished, previous label assignments
 * will be invalidated (via callbacks having the "allocated" parameter unset)
 * and new labels will be automatically reassigned by this labelpool module
 * (that is, a requestor does not need to call lp_get() again if it is
 * notified via callback that its label has been lost: it will eventually
 * get another callback with a new label assignment).
 *
 * Prior requests for a given labelid are detected so that requests and
 * assignments are not duplicated. A usage sketch follows below.
 */
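
/*
 * Hypothetical usage sketch. Everything here except bgp_lp_get(),
 * bgp_lp_release(), and LP_TYPE_VRF is an illustrative assumption
 * (struct my_thing and my_label_cb are not part of this module):
 *
 *	static int my_label_cb(mpls_label_t label, void *labelid,
 *			       bool allocated)
 *	{
 *		struct my_thing *thing = labelid;
 *
 *		if (!allocated) {
 *			// label lost (e.g., zebra restart); the pool will
 *			// deliver a replacement via a later callback
 *			thing->label = MPLS_LABEL_NONE;
 *			return 0;
 *		}
 *		if (!thing->wants_label)
 *			return -1;	// nonzero: reject, pool reclaims label
 *		thing->label = label;
 *		return 0;		// accept the assignment
 *	}
 *
 *	// request a label:  bgp_lp_get(LP_TYPE_VRF, thing, my_label_cb);
 *	// release it later: bgp_lp_release(LP_TYPE_VRF, thing, thing->label);
 */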
void bgp_lp_get(
	int type,
	void *labelid,
	int (*cbfunc)(mpls_label_t label, void *labelid, bool allocated))
{
	struct lp_lcb *lcb;
	int requested = 0;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (debug)
		zlog_debug("%s: labelid=%p", __func__, labelid);

	/*
	 * Have we seen this request before?
	 */
	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		requested = 1;
	} else {
		lcb = lcb_alloc(type, labelid, cbfunc);
		if (debug)
			zlog_debug("%s: inserting lcb=%p label=%u",
				__func__, lcb, lcb->label);
		int rc = skiplist_insert(lp->ledger, labelid, lcb);

		if (rc) {
			/* shouldn't happen */
			flog_err(EC_BGP_LABEL,
				 "%s: can't insert new LCB into ledger list",
				 __func__);
			XFREE(MTYPE_BGP_LABEL_CB, lcb);
			return;
		}
	}

	if (lcb->label != MPLS_LABEL_NONE) {
		/*
		 * Fast path: we filled the request from local pool (or
		 * this is a duplicate request that we filled already).
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q;

		q = XCALLOC(MTYPE_BGP_LABEL_CBQ, sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		/* if this is a LU request, lock path info before queueing */
		check_bgp_lu_cb_lock(lcb);

		work_queue_add(lp->callback_q, q);

		return;
	}

	if (requested)
		return;

	if (debug)
		zlog_debug("%s: slow path. lcb=%p label=%u",
			__func__, lcb, lcb->label);

	/*
	 * Slow path: we are out of labels in the local pool,
	 * so remember the request and also get another chunk from
	 * zebra.
	 *
	 * We track the number of outstanding label requests: we don't
	 * need to get a chunk for each one.
	 */
	struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
		sizeof(struct lp_fifo));

	lf->lcb = *lcb;
	/* if this is a LU request, lock path info before queueing */
	check_bgp_lu_cb_lock(lcb);

	lp_fifo_add_tail(&lp->requests, lf);

	/*
	 * only ask zebra for another chunk when the queued requests
	 * outnumber the labels already on order
	 */
	if (lp_fifo_count(&lp->requests) > lp->pending_count) {
		if (!zclient || zclient->sock < 0)
			return;
		if (!zclient_send_get_label_chunk(zclient, 0, LP_CHUNK_SIZE,
						  MPLS_LABEL_BASE_ANY))
			lp->pending_count += LP_CHUNK_SIZE;
	}
}
/*
 * Release a label previously assigned via bgp_lp_get()
 */
void bgp_lp_release(
	int type,
	void *labelid,
	mpls_label_t label)
{
	struct lp_lcb *lcb;

	if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
		if (label == lcb->label && type == lcb->type) {
			uintptr_t lbl = label;

			/* no longer in use */
			skiplist_delete(lp->inuse, (void *)lbl, NULL);

			/* no longer requested */
			skiplist_delete(lp->ledger, labelid, NULL);
		}
	}
}
/*
 * zebra response giving us a chunk of labels
 */
void bgp_lp_event_chunk(uint8_t keep, uint32_t first, uint32_t last)
{
	struct lp_chunk *chunk;
	struct lp_fifo *lf;
	int debug = BGP_DEBUG(labelpool, LABELPOOL);

	if (last < first) {
		flog_err(EC_BGP_LABEL,
			 "%s: zebra label chunk invalid: first=%u, last=%u",
			 __func__, first, last);
		return;
	}

	chunk = XCALLOC(MTYPE_BGP_LABEL_CHUNK, sizeof(struct lp_chunk));

	chunk->first = first;
	chunk->last = last;

	listnode_add(lp->chunks, chunk);

	lp->pending_count -= (last - first + 1);

	if (debug)
		zlog_debug("%s: %zu pending requests", __func__,
			lp_fifo_count(&lp->requests));

	while ((lf = lp_fifo_first(&lp->requests))) {

		struct lp_lcb *lcb;
		void *labelid = lf->lcb.labelid;

		if (skiplist_search(lp->ledger, labelid, (void **)&lcb)) {
			/* request no longer in effect */
			if (debug)
				zlog_debug("%s: labelid %p: request no longer in effect",
					__func__, labelid);
			goto finishedrequest;
		}

		if (lcb->label != MPLS_LABEL_NONE) {
			/* request already has a label */
			if (debug)
				zlog_debug("%s: labelid %p: request already has a label: %u=0x%x, lcb=%p",
					__func__, labelid,
					lcb->label, lcb->label, lcb);
			/* if this was a BGP_LU request, unlock path info node
			 */
			check_bgp_lu_cb_unlock(lcb);

			goto finishedrequest;
		}

		lcb->label = get_label_from_pool(lcb->labelid);

		if (lcb->label == MPLS_LABEL_NONE) {
			/*
			 * Out of labels in local pool, await next chunk
			 */
			if (debug)
				zlog_debug("%s: out of labels, await more",
					__func__);
			break;
		}

		/*
		 * we filled the request from local pool.
		 * Enqueue response work item with new label.
		 */
		struct lp_cbq_item *q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
			sizeof(struct lp_cbq_item));

		q->cbfunc = lcb->cbfunc;
		q->type = lcb->type;
		q->label = lcb->label;
		q->labelid = lcb->labelid;
		q->allocated = true;

		if (debug)
			zlog_debug("%s: assigning label %u to labelid %p",
				__func__, q->label, q->labelid);

		work_queue_add(lp->callback_q, q);

finishedrequest:
		lp_fifo_del(&lp->requests, lf);
		XFREE(MTYPE_BGP_LABEL_FIFO, lf);
	}
}
/*
 * continue using allocated labels until zebra returns
 */
void bgp_lp_event_zebra_down(void)
{
	/* nothing to do: previously-allocated labels remain usable */
}
/*
 * Inform owners of previously-allocated labels that their labels
 * are not valid. Request chunk from zebra large enough to satisfy
 * previously-allocated labels plus any outstanding requests.
 */
void bgp_lp_event_zebra_up(void)
{
	int labels_needed;
	int chunks_needed;
	void *labelid;
	struct lp_lcb *lcb;
	int lm_init_ok;

	/*
	 * Get label chunk allocation request dispatched to zebra
	 */
	labels_needed = lp_fifo_count(&lp->requests) +
		skiplist_count(lp->inuse);

	/* round up to a whole number of chunks */
	chunks_needed = (labels_needed / LP_CHUNK_SIZE) + 1;
	labels_needed = chunks_needed * LP_CHUNK_SIZE;
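
	/*
	 * Illustrative arithmetic for the round-up above: with
	 * LP_CHUNK_SIZE 50 and 120 labels outstanding, chunks_needed is
	 * (120 / 50) + 1 = 3, so labels_needed becomes 150. An exact
	 * multiple (e.g., 100) still rounds up to one extra chunk.
	 */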
	lm_init_ok = lm_label_manager_connect(zclient, 1) == 0;

	if (!lm_init_ok) {
		zlog_err("%s: label manager connection error", __func__);
		return;
	}

	zclient_send_get_label_chunk(zclient, 0, labels_needed,
				     MPLS_LABEL_BASE_ANY);
	lp->pending_count = labels_needed;

	/*
	 * Invalidate current list of chunks
	 */
	list_delete_all_node(lp->chunks);

	/*
	 * Invalidate any existing labels and requeue them as requests
	 */
	while (!skiplist_first(lp->inuse, NULL, &labelid)) {

		/*
		 * Get LCB
		 */
		if (!skiplist_search(lp->ledger, labelid, (void **)&lcb)) {

			if (lcb->label != MPLS_LABEL_NONE) {
				/*
				 * invalidate: tell the owner the label is lost
				 */
				struct lp_cbq_item *q;

				q = XCALLOC(MTYPE_BGP_LABEL_CBQ,
					sizeof(struct lp_cbq_item));
				q->cbfunc = lcb->cbfunc;
				q->type = lcb->type;
				q->label = lcb->label;
				q->labelid = lcb->labelid;
				q->allocated = false;
				check_bgp_lu_cb_lock(lcb);
				work_queue_add(lp->callback_q, q);

				lcb->label = MPLS_LABEL_NONE;
			}

			/*
			 * requeue: will be filled when the new chunk arrives
			 */
			struct lp_fifo *lf = XCALLOC(MTYPE_BGP_LABEL_FIFO,
				sizeof(struct lp_fifo));

			lf->lcb = *lcb;
			check_bgp_lu_cb_lock(lcb);
			lp_fifo_add_tail(&lp->requests, lf);
		}

		skiplist_delete_first(lp->inuse);
	}
}