1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2020 NetDEF, Inc.
6 /* TODOS AND KNOWN ISSUES:
7 - Delete mapping from NB keys to PLSPID when an LSP is deleted either
9 - Revert the hacks to work around ODL requiring a report with
10 operational status DOWN when an LSP is activated.
11 - Enforce only the PCE a policy has been delegated to can update it.
12 - If the router-id is used because the PCC IP is not specified
13 (either IPv4 or IPv6), the connection to the PCE is not reset
14 when the router-id changes.
23 #include "northbound.h"
24 #include "frr_pthread.h"
27 #include "pathd/pathd.h"
28 #include "pathd/path_zebra.h"
29 #include "pathd/path_errors.h"
30 #include "pathd/path_pcep.h"
31 #include "pathd/path_pcep_controller.h"
32 #include "pathd/path_pcep_lib.h"
33 #include "pathd/path_pcep_config.h"
34 #include "pathd/path_pcep_debug.h"
/* The number of times we will skip connecting if we are missing the PCC
 * address for an inet family different from the selected transport one. */
39 #define OTHER_FAMILY_MAX_RETRIES 4
40 #define MAX_ERROR_MSG_SIZE 256
41 #define MAX_COMPREQ_TRIES 3
43 pthread_mutex_t g_pcc_info_mtx
= PTHREAD_MUTEX_INITIALIZER
;
45 /* PCEP Event Handler */
46 static void handle_pcep_open(struct ctrl_state
*ctrl_state
,
47 struct pcc_state
*pcc_state
,
48 struct pcep_message
*msg
);
49 static void handle_pcep_message(struct ctrl_state
*ctrl_state
,
50 struct pcc_state
*pcc_state
,
51 struct pcep_message
*msg
);
52 static void handle_pcep_lsp_initiate(struct ctrl_state
*ctrl_state
,
53 struct pcc_state
*pcc_state
,
54 struct pcep_message
*msg
);
55 static void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
56 struct pcc_state
*pcc_state
,
57 struct pcep_message
*msg
);
58 static void continue_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
59 struct pcc_state
*pcc_state
,
60 struct path
*path
, void *payload
);
61 static void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
62 struct pcc_state
*pcc_state
,
63 struct pcep_message
*msg
);
65 /* Internal Functions */
66 static const char *ipaddr_type_name(struct ipaddr
*addr
);
67 static bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
);
68 static void select_pcc_addresses(struct pcc_state
*pcc_state
);
69 static void select_transport_address(struct pcc_state
*pcc_state
);
70 static void update_tag(struct pcc_state
*pcc_state
);
71 static void update_originator(struct pcc_state
*pcc_state
);
72 static void schedule_reconnect(struct ctrl_state
*ctrl_state
,
73 struct pcc_state
*pcc_state
);
74 static void schedule_session_timeout(struct ctrl_state
*ctrl_state
,
75 struct pcc_state
*pcc_state
);
76 static void cancel_session_timeout(struct ctrl_state
*ctrl_state
,
77 struct pcc_state
*pcc_state
);
78 static void send_pcep_message(struct pcc_state
*pcc_state
,
79 struct pcep_message
*msg
);
80 static void send_pcep_error(struct pcc_state
*pcc_state
,
81 enum pcep_error_type error_type
,
82 enum pcep_error_value error_value
,
83 struct path
*trigger_path
);
84 static void send_report(struct pcc_state
*pcc_state
, struct path
*path
);
85 static void send_comp_request(struct ctrl_state
*ctrl_state
,
86 struct pcc_state
*pcc_state
,
87 struct req_entry
*req
);
88 static void cancel_comp_requests(struct ctrl_state
*ctrl_state
,
89 struct pcc_state
*pcc_state
);
90 static void cancel_comp_request(struct ctrl_state
*ctrl_state
,
91 struct pcc_state
*pcc_state
,
92 struct req_entry
*req
);
93 static void specialize_outgoing_path(struct pcc_state
*pcc_state
,
95 static void specialize_incoming_path(struct pcc_state
*pcc_state
,
97 static bool validate_incoming_path(struct pcc_state
*pcc_state
,
98 struct path
*path
, char *errbuff
,
100 static void set_pcc_address(struct pcc_state
*pcc_state
,
101 struct lsp_nb_key
*nbkey
, struct ipaddr
*addr
);
102 static int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
);
103 static int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
);
104 static int get_previous_best_pce(struct pcc_state
**pcc
);
105 static int get_best_pce(struct pcc_state
**pcc
);
106 static int get_pce_count_connected(struct pcc_state
**pcc
);
107 static bool update_best_pce(struct pcc_state
**pcc
, int best
);
109 /* Data Structure Helper Functions */
110 static void lookup_plspid(struct pcc_state
*pcc_state
, struct path
*path
);
111 static void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
);
112 static void free_req_entry(struct req_entry
*req
);
113 static struct req_entry
*push_new_req(struct pcc_state
*pcc_state
,
115 static void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
);
116 static struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
);
117 static struct req_entry
*pop_req_no_reqid(struct pcc_state
*pcc_state
,
119 static bool add_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
);
120 static void remove_reqid_mapping(struct pcc_state
*pcc_state
,
122 static uint32_t lookup_reqid(struct pcc_state
*pcc_state
, struct path
*path
);
123 static bool has_pending_req_for(struct pcc_state
*pcc_state
, struct path
*path
);
125 /* Data Structure Callbacks */
126 static int plspid_map_cmp(const struct plspid_map_data
*a
,
127 const struct plspid_map_data
*b
);
128 static uint32_t plspid_map_hash(const struct plspid_map_data
*e
);
129 static int nbkey_map_cmp(const struct nbkey_map_data
*a
,
130 const struct nbkey_map_data
*b
);
131 static uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
);
132 static int req_map_cmp(const struct req_map_data
*a
,
133 const struct req_map_data
*b
);
134 static uint32_t req_map_hash(const struct req_map_data
*e
);
136 /* Data Structure Declarations */
137 DECLARE_HASH(plspid_map
, struct plspid_map_data
, mi
, plspid_map_cmp
,
139 DECLARE_HASH(nbkey_map
, struct nbkey_map_data
, mi
, nbkey_map_cmp
,
141 DECLARE_HASH(req_map
, struct req_map_data
, mi
, req_map_cmp
, req_map_hash
);
143 static inline int req_entry_compare(const struct req_entry
*a
,
144 const struct req_entry
*b
)
146 return a
->path
->req_id
- b
->path
->req_id
;
148 RB_GENERATE(req_entry_head
, req_entry
, entry
, req_entry_compare
)
151 /* ------------ API Functions ------------ */
153 struct pcc_state
*pcep_pcc_initialize(struct ctrl_state
*ctrl_state
, int index
)
155 struct pcc_state
*pcc_state
= XCALLOC(MTYPE_PCEP
, sizeof(*pcc_state
));
157 pcc_state
->id
= index
;
158 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
159 pcc_state
->next_reqid
= 1;
160 pcc_state
->next_plspid
= 1;
162 RB_INIT(req_entry_head
, &pcc_state
->requests
);
164 update_tag(pcc_state
);
165 update_originator(pcc_state
);
167 PCEP_DEBUG("%s PCC initialized", pcc_state
->tag
);
172 void pcep_pcc_finalize(struct ctrl_state
*ctrl_state
,
173 struct pcc_state
*pcc_state
)
175 PCEP_DEBUG("%s PCC finalizing...", pcc_state
->tag
);
177 pcep_pcc_disable(ctrl_state
, pcc_state
);
179 if (pcc_state
->pcc_opts
!= NULL
) {
180 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
181 pcc_state
->pcc_opts
= NULL
;
183 if (pcc_state
->pce_opts
!= NULL
) {
184 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
185 pcc_state
->pce_opts
= NULL
;
187 if (pcc_state
->originator
!= NULL
) {
188 XFREE(MTYPE_PCEP
, pcc_state
->originator
);
189 pcc_state
->originator
= NULL
;
192 if (pcc_state
->t_reconnect
!= NULL
) {
193 event_cancel(&pcc_state
->t_reconnect
);
194 pcc_state
->t_reconnect
= NULL
;
197 if (pcc_state
->t_update_best
!= NULL
) {
198 event_cancel(&pcc_state
->t_update_best
);
199 pcc_state
->t_update_best
= NULL
;
202 if (pcc_state
->t_session_timeout
!= NULL
) {
203 event_cancel(&pcc_state
->t_session_timeout
);
204 pcc_state
->t_session_timeout
= NULL
;
207 XFREE(MTYPE_PCEP
, pcc_state
);
210 int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
)
222 retval
= lhs
->port
- rhs
->port
;
227 retval
= lhs
->msd
- rhs
->msd
;
232 if (IS_IPADDR_V4(&lhs
->addr
)) {
233 retval
= memcmp(&lhs
->addr
.ipaddr_v4
, &rhs
->addr
.ipaddr_v4
,
234 sizeof(lhs
->addr
.ipaddr_v4
));
238 } else if (IS_IPADDR_V6(&lhs
->addr
)) {
239 retval
= memcmp(&lhs
->addr
.ipaddr_v6
, &rhs
->addr
.ipaddr_v6
,
240 sizeof(lhs
->addr
.ipaddr_v6
));
249 int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
)
259 int retval
= lhs
->port
- rhs
->port
;
264 retval
= strcmp(lhs
->pce_name
, rhs
->pce_name
);
269 retval
= lhs
->precedence
- rhs
->precedence
;
274 retval
= memcmp(&lhs
->addr
, &rhs
->addr
, sizeof(lhs
->addr
));
282 int pcep_pcc_update(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
,
283 struct pcc_opts
*pcc_opts
, struct pce_opts
*pce_opts
)
287 // If the options did not change, then there is nothing to do
288 if ((compare_pce_opts(pce_opts
, pcc_state
->pce_opts
) == 0)
289 && (compare_pcc_opts(pcc_opts
, pcc_state
->pcc_opts
) == 0)) {
293 if ((ret
= pcep_pcc_disable(ctrl_state
, pcc_state
))) {
294 XFREE(MTYPE_PCEP
, pcc_opts
);
295 XFREE(MTYPE_PCEP
, pce_opts
);
299 if (pcc_state
->pcc_opts
!= NULL
) {
300 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
302 if (pcc_state
->pce_opts
!= NULL
) {
303 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
306 pcc_state
->pcc_opts
= pcc_opts
;
307 pcc_state
->pce_opts
= pce_opts
;
309 if (IS_IPADDR_V4(&pcc_opts
->addr
)) {
310 pcc_state
->pcc_addr_v4
= pcc_opts
->addr
.ipaddr_v4
;
311 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
313 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
316 if (IS_IPADDR_V6(&pcc_opts
->addr
)) {
317 memcpy(&pcc_state
->pcc_addr_v6
, &pcc_opts
->addr
.ipaddr_v6
,
318 sizeof(struct in6_addr
));
319 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
321 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
324 update_tag(pcc_state
);
325 update_originator(pcc_state
);
327 return pcep_pcc_enable(ctrl_state
, pcc_state
);
330 void pcep_pcc_reconnect(struct ctrl_state
*ctrl_state
,
331 struct pcc_state
*pcc_state
)
333 if (pcc_state
->status
== PCEP_PCC_DISCONNECTED
)
334 pcep_pcc_enable(ctrl_state
, pcc_state
);
337 int pcep_pcc_enable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
339 assert(pcc_state
->status
== PCEP_PCC_DISCONNECTED
);
340 assert(pcc_state
->sess
== NULL
);
342 if (pcc_state
->t_reconnect
!= NULL
) {
343 event_cancel(&pcc_state
->t_reconnect
);
344 pcc_state
->t_reconnect
= NULL
;
347 select_transport_address(pcc_state
);
349 /* Even though we are connecting using IPv6. we want to have an IPv4
350 * address so we can handle candidate path with IPv4 endpoints */
351 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
352 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
353 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
354 "skipping connection to PCE %pIA:%d due to missing PCC IPv4 address",
355 &pcc_state
->pce_opts
->addr
,
356 pcc_state
->pce_opts
->port
);
357 schedule_reconnect(ctrl_state
, pcc_state
);
360 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
361 "missing IPv4 PCC address, IPv4 candidate paths will be ignored");
365 /* Even though we are connecting using IPv4. we want to have an IPv6
366 * address so we can handle candidate path with IPv6 endpoints */
367 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
368 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
369 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
370 "skipping connection to PCE %pIA:%d due to missing PCC IPv6 address",
371 &pcc_state
->pce_opts
->addr
,
372 pcc_state
->pce_opts
->port
);
373 schedule_reconnect(ctrl_state
, pcc_state
);
376 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
377 "missing IPv6 PCC address, IPv6 candidate paths will be ignored");
381 /* Even if the maximum retries to try to have all the familly addresses
382 * have been spent, we still need the one for the transport familly */
383 if (pcc_state
->pcc_addr_tr
.ipa_type
== IPADDR_NONE
) {
384 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
385 "skipping connection to PCE %pIA:%d due to missing PCC address",
386 &pcc_state
->pce_opts
->addr
,
387 pcc_state
->pce_opts
->port
);
388 schedule_reconnect(ctrl_state
, pcc_state
);
392 PCEP_DEBUG("%s PCC connecting", pcc_state
->tag
);
393 pcc_state
->sess
= pcep_lib_connect(
394 &pcc_state
->pcc_addr_tr
, pcc_state
->pcc_opts
->port
,
395 &pcc_state
->pce_opts
->addr
, pcc_state
->pce_opts
->port
,
396 pcc_state
->pcc_opts
->msd
, &pcc_state
->pce_opts
->config_opts
);
398 if (pcc_state
->sess
== NULL
) {
399 flog_warn(EC_PATH_PCEP_LIB_CONNECT
,
400 "failed to connect to PCE %pIA:%d from %pIA:%d",
401 &pcc_state
->pce_opts
->addr
,
402 pcc_state
->pce_opts
->port
,
403 &pcc_state
->pcc_addr_tr
,
404 pcc_state
->pcc_opts
->port
);
405 schedule_reconnect(ctrl_state
, pcc_state
);
409 // In case some best pce alternative were waiting to activate
410 if (pcc_state
->t_update_best
!= NULL
) {
411 event_cancel(&pcc_state
->t_update_best
);
412 pcc_state
->t_update_best
= NULL
;
415 pcc_state
->status
= PCEP_PCC_CONNECTING
;
420 int pcep_pcc_disable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
422 switch (pcc_state
->status
) {
423 case PCEP_PCC_DISCONNECTED
:
425 case PCEP_PCC_CONNECTING
:
426 case PCEP_PCC_SYNCHRONIZING
:
427 case PCEP_PCC_OPERATING
:
428 PCEP_DEBUG("%s Disconnecting PCC...", pcc_state
->tag
);
429 cancel_comp_requests(ctrl_state
, pcc_state
);
430 pcep_lib_disconnect(pcc_state
->sess
);
431 /* No need to remove if any PCEs is connected */
432 if (get_pce_count_connected(ctrl_state
->pcc
) == 0) {
433 pcep_thread_remove_candidate_path_segments(ctrl_state
,
436 pcc_state
->sess
= NULL
;
437 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
439 case PCEP_PCC_INITIALIZED
:
443 assert(!"Reached end of function where we are not expecting to");
446 void pcep_pcc_sync_path(struct ctrl_state
*ctrl_state
,
447 struct pcc_state
*pcc_state
, struct path
*path
)
449 if (pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
450 path
->is_synching
= true;
451 } else if (pcc_state
->status
== PCEP_PCC_OPERATING
)
452 path
->is_synching
= false;
456 path
->go_active
= true;
458 /* Accumulate the dynamic paths without any LSP so computation
459 * requests can be performed after synchronization */
460 if ((path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)
461 && (path
->first_hop
== NULL
)
462 && !has_pending_req_for(pcc_state
, path
)) {
463 PCEP_DEBUG("%s Scheduling computation request for path %s",
464 pcc_state
->tag
, path
->name
);
465 push_new_req(pcc_state
, path
);
469 /* Synchronize the path if the PCE supports LSP updates and the
470 * endpoint address familly is supported */
471 if (pcc_state
->caps
.is_stateful
) {
472 if (filter_path(pcc_state
, path
)) {
473 PCEP_DEBUG("%s Synchronizing path %s", pcc_state
->tag
,
475 send_report(pcc_state
, path
);
478 "%s Skipping %s candidate path %s synchronization",
480 ipaddr_type_name(&path
->nbkey
.endpoint
),
486 void pcep_pcc_sync_done(struct ctrl_state
*ctrl_state
,
487 struct pcc_state
*pcc_state
)
489 struct req_entry
*req
;
491 if (pcc_state
->status
!= PCEP_PCC_SYNCHRONIZING
492 && pcc_state
->status
!= PCEP_PCC_OPERATING
)
495 if (pcc_state
->caps
.is_stateful
496 && pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
497 struct path
*path
= pcep_new_path();
498 *path
= (struct path
){.name
= NULL
,
501 .status
= PCEP_LSP_OPERATIONAL_DOWN
,
504 .was_created
= false,
505 .was_removed
= false,
506 .is_synching
= false,
507 .is_delegated
= false,
509 .first_metric
= NULL
};
510 send_report(pcc_state
, path
);
511 pcep_free_path(path
);
514 pcc_state
->synchronized
= true;
515 pcc_state
->status
= PCEP_PCC_OPERATING
;
517 PCEP_DEBUG("%s Synchronization done", pcc_state
->tag
);
519 /* Start the computation request accumulated during synchronization */
520 RB_FOREACH (req
, req_entry_head
, &pcc_state
->requests
) {
521 send_comp_request(ctrl_state
, pcc_state
, req
);
525 void pcep_pcc_send_report(struct ctrl_state
*ctrl_state
,
526 struct pcc_state
*pcc_state
, struct path
*path
,
529 if ((pcc_state
->status
!= PCEP_PCC_OPERATING
)
530 || (!pcc_state
->caps
.is_stateful
)) {
531 pcep_free_path(path
);
535 PCEP_DEBUG("(%s)%s Send report for candidate path %s", __func__
,
536 pcc_state
->tag
, path
->name
);
538 /* ODL and Cisco requires the first reported
539 * LSP to have a DOWN status, the later status changes
540 * will be comunicated through hook calls.
542 enum pcep_lsp_operational_status real_status
= path
->status
;
543 path
->status
= PCEP_LSP_OPERATIONAL_DOWN
;
544 send_report(pcc_state
, path
);
546 /* If no update is expected and the real status wasn't down, we need to
547 * send a second report with the real status */
548 if (is_stable
&& (real_status
!= PCEP_LSP_OPERATIONAL_DOWN
)) {
549 PCEP_DEBUG("(%s)%s Send report for candidate path (!DOWN) %s",
550 __func__
, pcc_state
->tag
, path
->name
);
551 path
->status
= real_status
;
552 send_report(pcc_state
, path
);
555 pcep_free_path(path
);
559 void pcep_pcc_send_error(struct ctrl_state
*ctrl_state
,
560 struct pcc_state
*pcc_state
, struct pcep_error
*error
,
564 PCEP_DEBUG("(%s) Send error after PcInitiated ", __func__
);
567 send_pcep_error(pcc_state
, error
->error_type
, error
->error_value
,
569 pcep_free_path(error
->path
);
570 XFREE(MTYPE_PCEP
, error
);
572 /* ------------ Timeout handler ------------ */
574 void pcep_pcc_timeout_handler(struct ctrl_state
*ctrl_state
,
575 struct pcc_state
*pcc_state
,
576 enum pcep_ctrl_timeout_type type
, void *param
)
578 struct req_entry
*req
;
581 case TO_COMPUTATION_REQUEST
:
582 assert(param
!= NULL
);
583 req
= (struct req_entry
*)param
;
584 pop_req(pcc_state
, req
->path
->req_id
);
585 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT
,
586 "Computation request %d timeout", req
->path
->req_id
);
587 cancel_comp_request(ctrl_state
, pcc_state
, req
);
588 if (req
->retry_count
++ < MAX_COMPREQ_TRIES
) {
589 repush_req(pcc_state
, req
);
590 send_comp_request(ctrl_state
, pcc_state
, req
);
593 if (pcc_state
->caps
.is_stateful
) {
596 "%s Delegating undefined dynamic path %s to PCE %s",
597 pcc_state
->tag
, req
->path
->name
,
598 pcc_state
->originator
);
599 path
= pcep_copy_path(req
->path
);
600 path
->is_delegated
= true;
601 send_report(pcc_state
, path
);
612 /* ------------ Pathd event handler ------------ */
614 void pcep_pcc_pathd_event_handler(struct ctrl_state
*ctrl_state
,
615 struct pcc_state
*pcc_state
,
616 enum pcep_pathd_event_type type
,
619 struct req_entry
*req
;
621 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
624 /* Skipping candidate path with endpoint that do not match the
625 * configured or deduced PCC IP version */
626 if (!filter_path(pcc_state
, path
)) {
627 PCEP_DEBUG("%s Skipping %s candidate path %s event",
629 ipaddr_type_name(&path
->nbkey
.endpoint
), path
->name
);
634 case PCEP_PATH_CREATED
:
635 if (has_pending_req_for(pcc_state
, path
)) {
637 "%s Candidate path %s created, computation request already sent",
638 pcc_state
->tag
, path
->name
);
641 PCEP_DEBUG("%s Candidate path %s created", pcc_state
->tag
,
643 if ((path
->first_hop
== NULL
)
644 && (path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)) {
645 req
= push_new_req(pcc_state
, path
);
646 send_comp_request(ctrl_state
, pcc_state
, req
);
647 } else if (pcc_state
->caps
.is_stateful
)
648 send_report(pcc_state
, path
);
650 case PCEP_PATH_UPDATED
:
651 PCEP_DEBUG("%s Candidate path %s updated", pcc_state
->tag
,
653 if (pcc_state
->caps
.is_stateful
)
654 send_report(pcc_state
, path
);
656 case PCEP_PATH_REMOVED
:
657 PCEP_DEBUG("%s Candidate path %s removed", pcc_state
->tag
,
659 path
->was_removed
= true;
660 /* Removed as response to a PcInitiated 'R'emove*/
661 /* RFC 8281 #5.4 LSP Deletion*/
662 path
->do_remove
= path
->was_removed
;
663 if (pcc_state
->caps
.is_stateful
)
664 send_report(pcc_state
, path
);
666 case PCEP_PATH_UNDEFINED
:
667 flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR
,
668 "Unexpected pathd event received by pcc %s: %u",
669 pcc_state
->tag
, type
);
675 /* ------------ PCEP event handler ------------ */
677 void pcep_pcc_pcep_event_handler(struct ctrl_state
*ctrl_state
,
678 struct pcc_state
*pcc_state
, pcep_event
*event
)
680 PCEP_DEBUG("%s Received PCEP event: %s", pcc_state
->tag
,
681 pcep_event_type_name(event
->event_type
));
682 switch (event
->event_type
) {
683 case PCC_CONNECTED_TO_PCE
:
684 assert(PCEP_PCC_CONNECTING
== pcc_state
->status
);
685 PCEP_DEBUG("%s Connection established", pcc_state
->tag
);
686 pcc_state
->status
= PCEP_PCC_SYNCHRONIZING
;
687 pcc_state
->retry_count
= 0;
688 pcc_state
->synchronized
= false;
689 PCEP_DEBUG("%s Starting PCE synchronization", pcc_state
->tag
);
690 cancel_session_timeout(ctrl_state
, pcc_state
);
691 pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
692 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
694 case PCC_SENT_INVALID_OPEN
:
695 PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state
->tag
);
697 "%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
699 pcc_state
->sess
->pcc_config
700 .keep_alive_pce_negotiated_timer_seconds
,
701 pcc_state
->sess
->pcc_config
702 .dead_timer_pce_negotiated_seconds
);
703 pcc_state
->pce_opts
->config_opts
.keep_alive_seconds
=
704 pcc_state
->sess
->pcc_config
705 .keep_alive_pce_negotiated_timer_seconds
;
706 pcc_state
->pce_opts
->config_opts
.dead_timer_seconds
=
707 pcc_state
->sess
->pcc_config
708 .dead_timer_pce_negotiated_seconds
;
711 case PCC_RCVD_INVALID_OPEN
:
712 PCEP_DEBUG("%s Received invalid OPEN message", pcc_state
->tag
);
713 PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state
->tag
,
714 format_pcep_message(event
->message
));
716 case PCE_DEAD_TIMER_EXPIRED
:
717 case PCE_CLOSED_SOCKET
:
718 case PCE_SENT_PCEP_CLOSE
:
719 case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED
:
720 case PCC_PCEP_SESSION_CLOSED
:
721 case PCC_RCVD_MAX_INVALID_MSGS
:
722 case PCC_RCVD_MAX_UNKOWN_MSGS
:
723 pcep_pcc_disable(ctrl_state
, pcc_state
);
724 schedule_reconnect(ctrl_state
, pcc_state
);
725 schedule_session_timeout(ctrl_state
, pcc_state
);
727 case MESSAGE_RECEIVED
:
728 PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state
->tag
,
729 format_pcep_message(event
->message
));
730 if (pcc_state
->status
== PCEP_PCC_CONNECTING
) {
731 if (event
->message
->msg_header
->type
== PCEP_TYPE_OPEN
)
732 handle_pcep_open(ctrl_state
, pcc_state
,
736 assert(pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
737 || pcc_state
->status
== PCEP_PCC_OPERATING
);
738 handle_pcep_message(ctrl_state
, pcc_state
, event
->message
);
740 case PCC_CONNECTION_FAILURE
:
741 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT
,
742 "Unexpected event from pceplib: %s",
743 format_pcep_event(event
));
749 /*------------------ Multi-PCE --------------------- */
751 /* Internal util function, returns true if sync is necessary, false otherwise */
752 bool update_best_pce(struct pcc_state
**pcc
, int best
)
754 PCEP_DEBUG(" recalculating pce precedence ");
756 struct pcc_state
*best_pcc_state
=
757 pcep_pcc_get_pcc_by_id(pcc
, best
);
758 if (best_pcc_state
->previous_best
!= best_pcc_state
->is_best
) {
759 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
760 best_pcc_state
->tag
, best_pcc_state
->id
,
761 best_pcc_state
->previous_best
);
765 " %s No Resynch best (%i) previous best (%i)",
766 best_pcc_state
->tag
, best_pcc_state
->id
,
767 best_pcc_state
->previous_best
);
770 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
776 int get_best_pce(struct pcc_state
**pcc
)
778 for (int i
= 0; i
< MAX_PCC
; i
++) {
779 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
780 if (pcc
[i
]->is_best
== true) {
788 int get_pce_count_connected(struct pcc_state
**pcc
)
791 for (int i
= 0; i
< MAX_PCC
; i
++) {
792 if (pcc
[i
] && pcc
[i
]->pce_opts
793 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
800 int get_previous_best_pce(struct pcc_state
**pcc
)
802 int previous_best_pce
= -1;
804 for (int i
= 0; i
< MAX_PCC
; i
++) {
805 if (pcc
[i
] && pcc
[i
]->pce_opts
&& pcc
[i
]->previous_best
== true
806 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
807 previous_best_pce
= i
;
811 return previous_best_pce
!= -1 ? pcc
[previous_best_pce
]->id
: 0;
814 /* Called by path_pcep_controller EV_REMOVE_PCC
815 * Event handler when a PCC is removed. */
816 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state
*ctrl_state
,
817 struct pcc_state
**pcc
)
819 int new_best_pcc_id
= -1;
820 new_best_pcc_id
= pcep_pcc_calculate_best_pce(pcc
);
821 if (new_best_pcc_id
) {
822 if (update_best_pce(ctrl_state
->pcc
, new_best_pcc_id
) == true) {
823 pcep_thread_start_sync(ctrl_state
, new_best_pcc_id
);
830 /* Called by path_pcep_controller EV_SYNC_PATH
831 * Event handler when a path is sync'd. */
832 int pcep_pcc_multi_pce_sync_path(struct ctrl_state
*ctrl_state
, int pcc_id
,
833 struct pcc_state
**pcc
)
835 int previous_best_pcc_id
= -1;
837 if (pcc_id
== get_best_pce(pcc
)) {
838 previous_best_pcc_id
= get_previous_best_pce(pcc
);
839 if (previous_best_pcc_id
!= 0) {
840 /* while adding new pce, path has to resync to the
841 * previous best. pcep_thread_start_sync() will be
842 * called by the calling function */
843 if (update_best_pce(ctrl_state
->pcc
,
844 previous_best_pcc_id
)
846 cancel_comp_requests(
848 pcep_pcc_get_pcc_by_id(
849 pcc
, previous_best_pcc_id
));
850 pcep_thread_start_sync(ctrl_state
,
851 previous_best_pcc_id
);
859 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
861 int pcep_pcc_timer_update_best_pce(struct ctrl_state
*ctrl_state
, int pcc_id
)
864 /* resync whatever was the new best */
865 int prev_best
= get_best_pce(ctrl_state
->pcc
);
866 int best_id
= pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
867 if (best_id
&& prev_best
!= best_id
) { // Avoid Multiple call
868 struct pcc_state
*pcc_state
=
869 pcep_pcc_get_pcc_by_id(ctrl_state
->pcc
, best_id
);
870 if (update_best_pce(ctrl_state
->pcc
, pcc_state
->id
) == true) {
871 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
878 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
879 * Returns the best PCE id */
880 int pcep_pcc_calculate_best_pce(struct pcc_state
**pcc
)
882 int best_precedence
= 255; // DEFAULT_PCE_PRECEDENCE;
884 int one_connected_pce
= -1;
885 int previous_best_pce
= -1;
886 int step_0_best
= -1;
887 int step_0_previous
= -1;
891 for (int i
= 0; i
< MAX_PCC
; i
++) {
892 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
894 "multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
895 i
, pcc
[i
]->is_best
, pcc
[i
]->previous_best
);
898 if (pcc
[i
]->is_best
== true) {
901 if (pcc
[i
]->previous_best
== true) {
912 for (int i
= 0; i
< MAX_PCC
; i
++) {
913 if (pcc
[i
] && pcc
[i
]->pce_opts
914 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
915 one_connected_pce
= i
; // In case none better
916 if (pcc
[i
]->pce_opts
->precedence
<= best_precedence
) {
918 && pcc
[best_pce
]->pce_opts
->precedence
922 &pcc
[i
]->pce_opts
->addr
,
926 // collide of precedences so
930 if (!pcc
[i
]->previous_best
) {
942 "multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
943 step_0_best
, step_0_previous
, one_connected_pce
, best_pce
);
945 // Changed of state so ...
946 if (step_0_best
!= best_pce
) {
947 pthread_mutex_lock(&g_pcc_info_mtx
);
948 // Calculate previous
949 previous_best_pce
= step_0_best
;
951 if (step_0_best
!= -1) {
952 pcc
[step_0_best
]->is_best
= false;
954 if (step_0_previous
!= -1) {
955 pcc
[step_0_previous
]->previous_best
= false;
959 if (previous_best_pce
!= -1
960 && pcc
[previous_best_pce
]->status
961 == PCEP_PCC_DISCONNECTED
) {
962 pcc
[previous_best_pce
]->previous_best
= true;
963 zlog_debug("multi-pce: previous best pce (%i) ",
964 previous_best_pce
+ 1);
969 if (best_pce
!= -1) {
970 pcc
[best_pce
]->is_best
= true;
971 zlog_debug("multi-pce: best pce (%i) ", best_pce
+ 1);
973 if (one_connected_pce
!= -1) {
974 best_pce
= one_connected_pce
;
975 pcc
[one_connected_pce
]->is_best
= true;
977 "multi-pce: one connected best pce (default) (%i) ",
978 one_connected_pce
+ 1);
980 for (int i
= 0; i
< MAX_PCC
; i
++) {
981 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
983 pcc
[i
]->is_best
= true;
985 "(disconnected) best pce (default) (%i) ",
992 pthread_mutex_unlock(&g_pcc_info_mtx
);
995 return ((best_pce
== -1) ? 0 : pcc
[best_pce
]->id
);
998 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state
**pcc
,
999 struct pce_opts
*pce_opts
)
1005 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1007 if ((ipaddr_cmp((const struct ipaddr
*)&pcc
[idx
]
1009 (const struct ipaddr
*)&pce_opts
->addr
)
1011 && pcc
[idx
]->pce_opts
->port
== pce_opts
->port
) {
1012 zlog_debug("found pcc_id (%d) idx (%d)",
1014 return pcc
[idx
]->id
;
1021 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state
**pcc
, int idx
)
1023 if (pcc
== NULL
|| idx
< 0) {
1027 return pcc
[idx
] ? pcc
[idx
]->id
: 0;
1030 struct pcc_state
*pcep_pcc_get_pcc_by_id(struct pcc_state
**pcc
, int id
)
1032 if (pcc
== NULL
|| id
< 0) {
1036 for (int i
= 0; i
< MAX_PCC
; i
++) {
1038 if (pcc
[i
]->id
== id
) {
1039 zlog_debug("found id (%d) pcc_idx (%d)",
1049 struct pcc_state
*pcep_pcc_get_pcc_by_name(struct pcc_state
**pcc
,
1050 const char *pce_name
)
1052 if (pcc
== NULL
|| pce_name
== NULL
) {
1056 for (int i
= 0; i
< MAX_PCC
; i
++) {
1057 if (pcc
[i
] == NULL
) {
1061 if (strcmp(pcc
[i
]->pce_opts
->pce_name
, pce_name
) == 0) {
1069 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state
**pcc
, int id
)
1075 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1077 if (pcc
[idx
]->id
== id
) {
1078 zlog_debug("found pcc_id (%d) array_idx (%d)",
1088 int pcep_pcc_get_free_pcc_idx(struct pcc_state
**pcc
)
1090 assert(pcc
!= NULL
);
1092 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1093 if (pcc
[idx
] == NULL
) {
1094 zlog_debug("new pcc_idx (%d)", idx
);
1102 int pcep_pcc_get_pcc_id(struct pcc_state
*pcc
)
1104 return ((pcc
== NULL
) ? 0 : pcc
->id
);
1107 void pcep_pcc_copy_pcc_info(struct pcc_state
**pcc
,
1108 struct pcep_pcc_info
*pcc_info
)
1110 struct pcc_state
*pcc_state
=
1111 pcep_pcc_get_pcc_by_name(pcc
, pcc_info
->pce_name
);
1116 pcc_info
->ctrl_state
= NULL
;
1117 if(pcc_state
->pcc_opts
){
1118 pcc_info
->msd
= pcc_state
->pcc_opts
->msd
;
1119 pcc_info
->pcc_port
= pcc_state
->pcc_opts
->port
;
1121 pcc_info
->next_plspid
= pcc_state
->next_plspid
;
1122 pcc_info
->next_reqid
= pcc_state
->next_reqid
;
1123 pcc_info
->status
= pcc_state
->status
;
1124 pcc_info
->pcc_id
= pcc_state
->id
;
1125 pthread_mutex_lock(&g_pcc_info_mtx
);
1126 pcc_info
->is_best_multi_pce
= pcc_state
->is_best
;
1127 pcc_info
->previous_best
= pcc_state
->previous_best
;
1128 pthread_mutex_unlock(&g_pcc_info_mtx
);
1129 pcc_info
->precedence
=
1130 pcc_state
->pce_opts
? pcc_state
->pce_opts
->precedence
: 0;
1131 if(pcc_state
->pcc_addr_tr
.ipa_type
!= IPADDR_NONE
){
1132 memcpy(&pcc_info
->pcc_addr
, &pcc_state
->pcc_addr_tr
,
1133 sizeof(struct ipaddr
));
1138 /*------------------ PCEP Message handlers --------------------- */
1140 void handle_pcep_open(struct ctrl_state
*ctrl_state
,
1141 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1143 assert(msg
->msg_header
->type
== PCEP_TYPE_OPEN
);
1144 pcep_lib_parse_capabilities(msg
, &pcc_state
->caps
);
1145 PCEP_DEBUG("PCE capabilities: %s, %s%s",
1146 pcc_state
->caps
.is_stateful
? "stateful" : "stateless",
1147 pcc_state
->caps
.supported_ofs_are_known
1148 ? (pcc_state
->caps
.supported_ofs
== 0
1149 ? "no objective functions supported"
1150 : "supported objective functions are ")
1151 : "supported objective functions are unknown",
1152 format_objfun_set(pcc_state
->caps
.supported_ofs
));
/* Dispatch an incoming PCEP message to the proper handler.
 * Messages received before the session reaches PCEP_PCC_OPERATING are
 * silently dropped; message types the PCC never expects from the PCE
 * (or from pceplib) are logged as a warning. */
void handle_pcep_message(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct pcep_message *msg)
{
	if (pcc_state->status != PCEP_PCC_OPERATING)
		return;

	switch (msg->msg_header->type) {
	case PCEP_TYPE_INITIATE:
		handle_pcep_lsp_initiate(ctrl_state, pcc_state, msg);
		break;
	case PCEP_TYPE_UPDATE:
		handle_pcep_lsp_update(ctrl_state, pcc_state, msg);
		break;
	case PCEP_TYPE_PCREP:
		handle_pcep_comp_reply(ctrl_state, pcc_state, msg);
		break;
	/* These are either handled by pceplib itself or never valid
	 * PCE -> PCC messages at this layer. */
	case PCEP_TYPE_OPEN:
	case PCEP_TYPE_KEEPALIVE:
	case PCEP_TYPE_PCREQ:
	case PCEP_TYPE_PCNOTF:
	case PCEP_TYPE_ERROR:
	case PCEP_TYPE_CLOSE:
	case PCEP_TYPE_REPORT:
	case PCEP_TYPE_START_TLS:
	default:
		flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE,
			  "Unexpected pcep message from pceplib: %s",
			  format_pcep_message(msg));
		break;
	}
}
/* Handle a PCUpd (LSP update) message: parse it into a path, resolve the
 * northbound key from the PLSP-ID, then hand the path to the controller
 * thread for refinement; continue_pcep_lsp_update() is invoked once the
 * path has been refined.  Ownership of `path` passes to the controller. */
void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state,
			    struct pcep_message *msg)
{
	struct path *path;
	path = pcep_lib_parse_path(msg);
	/* Fill path->nbkey from the PLSP-ID -> nbkey mapping */
	lookup_nbkey(pcc_state, path);
	pcep_thread_refine_path(ctrl_state, pcc_state->id,
				&continue_pcep_lsp_update, path, NULL);
}
/* Continuation of handle_pcep_lsp_update(), called after the controller
 * refined the path.  Validates the path and applies the update; on
 * validation failure the path is logged and freed (on success ownership
 * passes to pcep_thread_update_path()).  `payload` is unused. */
void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state, struct path *path,
			      void *payload)
{
	char err[MAX_ERROR_MSG_SIZE] = {0};

	specialize_incoming_path(pcc_state, path);
	PCEP_DEBUG("%s Received LSP update", pcc_state->tag);
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (validate_incoming_path(pcc_state, path, err, sizeof(err)))
		pcep_thread_update_path(ctrl_state, pcc_state->id, path);
	else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
		pcep_free_path(path);
	}
}
/* Handle a PCInitiate message (RFC 8281).  Rejects the request with the
 * appropriate PCErr when PCE-initiated LSPs are disabled or when the
 * message violates the RFC (missing ERO, non-zero PLSP-ID, missing
 * symbolic name).  For a removal request (R flag), the nbkey is resolved
 * by a sequential scan of the PLSP-ID map since no endpoint is present. */
void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state,
			      struct pcep_message *msg)
{
	char err[MAX_ERROR_MSG_SIZE] = "";
	struct path *path;

	path = pcep_lib_parse_path(msg);

	if (!pcc_state->pce_opts->config_opts.pce_initiated) {
		/* PCE Initiated is not enabled */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Not allowed PCE initiated path received: %s",
			  format_pcep_message(msg));
		send_pcep_error(pcc_state, PCEP_ERRT_LSP_INSTANTIATE_ERROR,
				PCEP_ERRV_UNACCEPTABLE_INSTANTIATE_ERROR, path);
		return;
	}

	if (path->do_remove) {
		// lookup in nbkey sequential as no endpoint
		struct nbkey_map_data *key;
		char endpoint[46];

		frr_each (nbkey_map, &pcc_state->nbkey_map, key) {
			ipaddr2str(&key->nbkey.endpoint, endpoint,
				   sizeof(endpoint));
			flog_warn(
				EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
				"FOR_EACH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
				key->nbkey.color, endpoint, path->plsp_id);
			if (path->plsp_id == key->plspid) {
				flog_warn(
					EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
					"FOR_EACH MATCH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
					key->nbkey.color, endpoint,
					path->plsp_id);
				path->nbkey = key->nbkey;
				break;
			}
		}
	} else {
		if (path->first_hop == NULL /*ero sets first_hop*/) {
			/* If the PCC receives a PCInitiate message without an
			 * ERO and the R flag in the SRP object != zero, then it
			 * MUST send a PCErr message with Error-type=6
			 * (Mandatory Object missing) and Error-value=9 (ERO
			 * object missing). */
			flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
				  "ERO object missing or incomplete : %s",
				  format_pcep_message(msg));
			send_pcep_error(pcc_state,
					PCEP_ERRT_LSP_INSTANTIATE_ERROR,
					PCEP_ERRV_INTERNAL_ERROR, path);
			return;
		}

		if (path->plsp_id != 0) {
			/* If the PCC receives a PCInitiate message with a
			 * non-zero PLSP-ID and the R flag in the SRP object set
			 * to zero, then it MUST send a PCErr message with
			 * Error-type=19 (Invalid Operation) and Error-value=8
			 * (Non-zero PLSP-ID in the LSP Initiate Request) */
			flog_warn(
				EC_PATH_PCEP_PROTOCOL_ERROR,
				"PCE initiated path with non-zero PLSP ID: %s",
				format_pcep_message(msg));
			send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
					PCEP_ERRV_LSP_INIT_NON_ZERO_PLSP_ID,
					path);
			return;
		}

		if (path->name == NULL) {
			/* If the PCC receives a PCInitiate message without a
			 * SYMBOLIC-PATH-NAME TLV, then it MUST send a PCErr
			 * message with Error-type=10 (Reception of an invalid
			 * object) and Error-value=8 (SYMBOLIC-PATH-NAME TLV
			 * missing) */
			flog_warn(
				EC_PATH_PCEP_PROTOCOL_ERROR,
				"PCE initiated path without symbolic name: %s",
				format_pcep_message(msg));
			send_pcep_error(
				pcc_state, PCEP_ERRT_RECEPTION_OF_INV_OBJECT,
				PCEP_ERRV_SYMBOLIC_PATH_NAME_TLV_MISSING, path);
			return;
		}
	}

	/* TODO: If there is a conflict with the symbolic path name of an
	 * existing LSP, the PCC MUST send a PCErr message with Error-type=23
	 * (Bad Parameter value) and Error-value=1 (SYMBOLIC-PATH-NAME in
	 * use) */

	specialize_incoming_path(pcc_state, path);
	/* TODO: Validate the PCC address received from the PCE is valid */
	PCEP_DEBUG("%s Received LSP initiate", pcc_state->tag);
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
		pcep_thread_initiate_path(ctrl_state, pcc_state->id, path);
	} else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
		send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
				PCEP_ERRV_LSP_NOT_PCE_INITIATED, path);
		pcep_free_path(path);
	}
}
/* Handle a PCRep (computation reply) message.  Matches the reply to a
 * pending request, transfers the request metadata to the reply path, and
 * either applies the computed path, drops a no-path result, or rejects an
 * invalid one.  When the reply is unusable but the PCE is stateful, the
 * original requested path is still delegated via a report. */
void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state,
			    struct pcep_message *msg)
{
	char err[MAX_ERROR_MSG_SIZE] = "";
	struct req_entry *req;
	struct path *path;

	path = pcep_lib_parse_path(msg);
	if (path->no_path) {
		/* No-path replies keep the reqid mapping so it can be
		 * purged later, see has_pending_req_for() */
		req = pop_req_no_reqid(pcc_state, path->req_id);
	} else {
		req = pop_req(pcc_state, path->req_id);
	}
	if (req == NULL) {
		/* TODO: check the rate of bad computation reply and close
		 * the connection if more that a given rate. */
		PCEP_DEBUG(
			"%s Received computation reply for unknown request %d",
			pcc_state->tag, path->req_id);
		PCEP_DEBUG_PATH("%s", format_path(path));
		send_pcep_error(pcc_state, PCEP_ERRT_UNKNOWN_REQ_REF,
				PCEP_ERRV_UNASSIGNED, NULL);
		return;
	}

	/* Cancel the computation request timeout */
	pcep_thread_cancel_timer(&req->t_retry);

	/* Transfer relevant metadata from the request to the response */
	path->nbkey = req->path->nbkey;
	path->plsp_id = req->path->plsp_id;
	path->type = req->path->type;
	path->name = XSTRDUP(MTYPE_PCEP, req->path->name);
	specialize_incoming_path(pcc_state, path);

	PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
		   pcc_state->tag, path->req_id,
		   path->no_path ? "true" : "false");
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (path->no_path) {
		PCEP_DEBUG("%s Computation for path %s did not find any result",
			   pcc_state->tag, path->name);
		free_req_entry(req);
		pcep_free_path(path);
		return;
	} else if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
		/* Updating a dynamic path will automatically delegate it */
		pcep_thread_update_path(ctrl_state, pcc_state->id, path);
		free_req_entry(req);
		return;
	}

	/* FIXME: Monitor the amount of errors from the PCE and
	 * possibly disconnect and blacklist */
	flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
		  "Unsupported PCEP protocol feature: %s", err);

	pcep_free_path(path);

	/* Delegate the path regardless of the outcome */
	/* TODO: For now we are using the path from the request, when
	 * pathd API is thread safe, we could get a new path */
	if (pcc_state->caps.is_stateful) {
		PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
			   pcc_state->tag, req->path->name,
			   pcc_state->originator);
		path = pcep_copy_path(req->path);
		path->is_delegated = true;
		send_report(pcc_state, path);
		pcep_free_path(path);
	}

	free_req_entry(req);
}
1411 /* ------------ Internal Functions ------------ */
/* Return a human-readable name for the address family of `addr`,
 * used in log messages. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	if (IS_IPADDR_V4(addr))
		return "IPv4";
	if (IS_IPADDR_V6(addr))
		return "IPv6";
	return "Unknown";
}
1422 bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
)
1424 return (IS_IPADDR_V4(&path
->nbkey
.endpoint
)
1425 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
))
1426 || (IS_IPADDR_V6(&path
->nbkey
.endpoint
)
1427 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
));
/* Ensure the PCC has source addresses for both families, falling back to
 * the zebra router-id when none was explicitly configured.  Sets the
 * F_PCC_STATE_HAS_IPV4/V6 flags on success. */
void select_pcc_addresses(struct pcc_state *pcc_state)
{
	/* If no IPv4 address was specified, try to get one from zebra */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
		if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) {
			SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
		}
	}

	/* If no IPv6 address was specified, try to get one from zebra */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
		if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) {
			SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
		}
	}
}
/* Select the transport source address (pcc_addr_tr) matching the address
 * family of the configured PCE address.  Left as IPADDR_NONE when no
 * suitable local address is available for that family. */
void select_transport_address(struct pcc_state *pcc_state)
{
	struct ipaddr *taddr = &pcc_state->pcc_addr_tr;

	select_pcc_addresses(pcc_state);

	taddr->ipa_type = IPADDR_NONE;

	/* Select a transport source address in function of the configured PCE
	 * address */
	if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) {
		if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
			taddr->ipaddr_v4 = pcc_state->pcc_addr_v4;
			taddr->ipa_type = IPADDR_V4;
		}
	} else {
		if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
			taddr->ipaddr_v6 = pcc_state->pcc_addr_v6;
			taddr->ipa_type = IPADDR_V6;
		}
	}
}
/* Rebuild pcc_state->tag, the "addr:port (id)" prefix used by all log
 * messages for this PCC session.  When no PCE is configured the tag is
 * just "(id)". */
void update_tag(struct pcc_state *pcc_state)
{
	if (pcc_state->pce_opts != NULL) {
		assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
		if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   "%pI6:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v6,
				   pcc_state->pce_opts->port, pcc_state->id);
		} else {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   "%pI4:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v4,
				   pcc_state->pce_opts->port, pcc_state->id);
		}
	} else {
		snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)",
			   pcc_state->id);
	}
}
/* Rebuild pcc_state->originator, the "addr:port" string identifying the
 * configured PCE; paths received from this PCE are stamped with it.
 * Frees any previous value; leaves it NULL when no PCE is configured.
 * NOTE(review): 52 is a magic buffer size (fits an IPv6 address, a colon
 * and a port) — consider a named constant. */
void update_originator(struct pcc_state *pcc_state)
{
	char *originator;
	if (pcc_state->originator != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->originator);
		pcc_state->originator = NULL;
	}
	if (pcc_state->pce_opts == NULL)
		return;
	originator = XCALLOC(MTYPE_PCEP, 52);
	assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
	if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
		snprintfrr(originator, 52, "%pI6:%i",
			   &pcc_state->pce_opts->addr.ipaddr_v6,
			   pcc_state->pce_opts->port);
	} else {
		snprintfrr(originator, 52, "%pI4:%i",
			   &pcc_state->pce_opts->addr.ipaddr_v4,
			   pcc_state->pce_opts->port);
	}
	pcc_state->originator = originator;
}
/* Schedule a reconnection attempt to the PCE with an attempt counter.
 * On the first retry also schedule a best-PCE re-election after the
 * configured delegation timeout. */
void schedule_reconnect(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	pcc_state->retry_count++;
	pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id,
				       pcc_state->retry_count,
				       &pcc_state->t_reconnect);
	if (pcc_state->retry_count == 1) {
		pcep_thread_schedule_sync_best_pce(
			ctrl_state, pcc_state->id,
			pcc_state->pce_opts->config_opts
				.delegation_timeout_seconds,
			&pcc_state->t_update_best);
	}
}
/* Schedule the session timeout for this PCC, unless at least one other
 * PCE is still connected (multi-PCE mode needs no timeout).
 * NOTE: "inteval" below is the actual (misspelled) field name. */
void schedule_session_timeout(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state)
{
	/* No need to schedule timeout if multiple PCEs are connected */
	if (get_pce_count_connected(ctrl_state->pcc)) {
		PCEP_DEBUG_PCEP(
			"schedule_session_timeout not setting timer for multi-pce mode");

		return;
	}

	pcep_thread_schedule_session_timeout(
		ctrl_state, pcep_pcc_get_pcc_id(pcc_state),
		pcc_state->pce_opts->config_opts
			.session_timeout_inteval_seconds,
		&pcc_state->t_session_timeout);
}
/* Cancel a pending session timeout timer, if any, and clear the
 * timer handle. */
void cancel_session_timeout(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state)
{
	/* No need to cancel anything when no timer was scheduled */
	if (pcc_state->t_session_timeout == NULL) {
		PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
		return;
	}

	PCEP_DEBUG_PCEP("Cancel session_timeout timer");
	pcep_thread_cancel_timer(&pcc_state->t_session_timeout);
	pcc_state->t_session_timeout = NULL;
}
/* Send a PCEP message on the active session; silently dropped when no
 * session is established.  The `true` flag hands message ownership to
 * pceplib — presumably "free after send"; verify against pceplib docs. */
void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg)
{
	if (pcc_state->sess != NULL) {
		PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag,
				format_pcep_message(msg));
		send_message(pcc_state->sess, msg, true);
	}
}
/* Format and send a PCErr message with the given error type/value.
 * `trigger_path` (may be NULL) is the path that triggered the error and
 * is included in the formatted message. */
void send_pcep_error(struct pcc_state *pcc_state,
		     enum pcep_error_type error_type,
		     enum pcep_error_value error_value,
		     struct path *trigger_path)
{
	struct pcep_message *msg;
	PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
		   pcc_state->tag, pcep_error_type_name(error_type), error_type,
		   pcep_error_value_name(error_type, error_value), error_value);
	msg = pcep_lib_format_error(error_type, error_value, trigger_path);
	send_pcep_message(pcc_state, msg);
}
/* Send a PCRpt (path report) for `path` to the PCE, after specializing
 * it for outgoing use (PLSP-ID, delegation flags, PCC address). */
void send_report(struct pcc_state *pcc_state, struct path *path)
{
	struct pcep_message *report;

	/* Reports are not tied to a computation request */
	path->req_id = 0;
	specialize_outgoing_path(pcc_state, path);
	PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name,
			format_path(path));
	report = pcep_lib_format_report(&pcc_state->caps, path);
	send_pcep_message(pcc_state, report);
}
/* Updates the path for the PCE, updating the delegation and creation flags */
void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path)
{
	bool is_delegated = false;
	bool was_created = false;

	/* Assign/recover the PLSP-ID for this path's nbkey */
	lookup_plspid(pcc_state, path);

	set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pcc_addr_tr;

	/* TODO: When the pathd API have a way to mark a path as
	 * delegated, use it instead of considering all dynamic path
	 * delegated. We need to disable the originator check for now,
	 * because path could be delegated without having any originator yet */
	// if ((path->originator == NULL)
	//     || (strcmp(path->originator, pcc_state->originator) == 0)) {
	//	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	//		       && (path->first_hop != NULL);
	//	/* it seems the PCE consider updating an LSP a creation ?!?
	//	   at least Cisco does... */
	//	was_created = path->update_origin == SRTE_ORIGIN_PCEP;
	// }
	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC);
	was_created = path->update_origin == SRTE_ORIGIN_PCEP;

	path->pcc_id = pcc_state->id;
	/* Only delegate to the currently-best PCE */
	path->go_active = is_delegated && pcc_state->is_best;
	path->is_delegated = is_delegated && pcc_state->is_best;
	path->was_created = was_created;
}
/* Updates the path for the PCC: fills in the PCC address (if missing),
 * the sending PCE's address, the PCC id, and marks the path as
 * originating from PCEP with this PCE as originator. */
void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path)
{
	if (IS_IPADDR_NONE(&path->pcc_addr))
		set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pce_opts->addr;
	path->pcc_id = pcc_state->id;
	path->update_origin = SRTE_ORIGIN_PCEP;
	path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator);
}
/* Ensure the path can be handled by the PCC and if not, sends an error.
 * Returns true when the path is acceptable; on failure a PCErr is sent,
 * the reason is written into errbuff, and false is returned. */
bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path,
			    char *errbuff, size_t buffsize)
{
	struct path_hop *hop;
	enum pcep_error_type err_type = 0;
	enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED;

	for (hop = path->first_hop; hop != NULL; hop = hop->next) {
		/* Hops without SID are not supported */
		if (!hop->has_sid) {
			snprintfrr(errbuff, buffsize, "SR segment without SID");
			err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
			err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING;
			break;
		}
		/* Hops with non-MPLS SID are not supported */
		if (!hop->is_mpls) {
			snprintfrr(errbuff, buffsize,
				   "SR segment with non-MPLS SID");
			err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
			err_value = PCEP_ERRV_UNSUPPORTED_NAI;
			break;
		}
	}

	if (err_type != 0) {
		send_pcep_error(pcc_state, err_type, err_value, NULL);
		return false;
	}

	return true;
}
/* Send a PCReq (computation request) for the queued request `req` and
 * schedule its retry/timeout timer.  Requests are only sent to the
 * currently-best PCE; a request with a retry timer already pending is
 * left alone. */
void send_comp_request(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state, struct req_entry *req)
{
	assert(req != NULL);

	/* A pending retry timer means this request is already in flight */
	if (req->t_retry)
		return;

	assert(req->path != NULL);
	assert(req->path->req_id > 0);
	assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req);
	assert(lookup_reqid(pcc_state, req->path) == req->path->req_id);

	int timeout;
	struct pcep_message *msg;

	if (!pcc_state->is_best) {
		return;
	}

	specialize_outgoing_path(pcc_state, req->path);
	PCEP_DEBUG(
		"%s Sending computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag,
			req->path->name, format_path(req->path));

	msg = pcep_lib_format_request(&pcc_state->caps, req->path);
	send_pcep_message(pcc_state, msg);
	req->was_sent = true;

	timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
	pcep_thread_schedule_timeout(ctrl_state, pcc_state->id,
				     TO_COMPUTATION_REQUEST, timeout,
				     (void *)req, &req->t_retry);
}
/* Cancel and free all pending computation requests for this PCC,
 * removing their entries from the request tree and reqid map. */
void cancel_comp_requests(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state)
{
	struct req_entry *req, *safe_req;

	RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) {
		cancel_comp_request(ctrl_state, pcc_state, req);
		RB_REMOVE(req_entry_head, &pcc_state->requests, req);
		remove_reqid_mapping(pcc_state, req->path);
		free_req_entry(req);
	}
}
/* Cancel a single computation request: stop its retry timer (when it was
 * already sent) and notify the PCE with a request-cancelled message.
 * The caller remains owner of `req` and is responsible for freeing it. */
void cancel_comp_request(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct req_entry *req)
{
	struct pcep_message *msg;

	if (req->was_sent) {
		/* TODO: Send a computation request cancelation
		 * notification to the PCE */
		pcep_thread_cancel_timer(&req->t_retry);
	}

	PCEP_DEBUG(
		"%s Canceling computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
			pcc_state->tag, req->path->name,
			format_path(req->path));

	msg = pcep_lib_format_request_cancelled(req->path->req_id);
	send_pcep_message(pcc_state, msg);
}
/* Fill `addr` with the PCC source address matching the address family of
 * the path endpoint in `nbkey`; IPADDR_NONE when the endpoint family is
 * unknown.  Asserts that an address of the required family exists. */
void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey,
		     struct ipaddr *addr)
{
	select_pcc_addresses(pcc_state);
	if (IS_IPADDR_V6(&nbkey->endpoint)) {
		assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
		addr->ipa_type = IPADDR_V6;
		addr->ipaddr_v6 = pcc_state->pcc_addr_v6;
	} else if (IS_IPADDR_V4(&nbkey->endpoint)) {
		assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4));
		addr->ipa_type = IPADDR_V4;
		addr->ipaddr_v4 = pcc_state->pcc_addr_v4;
	} else {
		addr->ipa_type = IPADDR_NONE;
	}
}
1765 /* ------------ Data Structure Helper Functions ------------ */
/* Resolve (or allocate) the PLSP-ID for the path's northbound key and
 * store it in path->plsp_id.  A new allocation registers both the
 * nbkey -> plspid and plspid -> nbkey mappings.  Paths with color 0 are
 * left untouched. */
void lookup_plspid(struct pcc_state *pcc_state, struct path *path)
{
	struct plspid_map_data key, *plspid_mapping;
	struct nbkey_map_data *nbkey_mapping;

	if (path->nbkey.color != 0) {
		key.nbkey = path->nbkey;
		plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key);
		if (plspid_mapping == NULL) {
			plspid_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping));
			plspid_mapping->nbkey = key.nbkey;
			plspid_mapping->plspid = pcc_state->next_plspid;
			plspid_map_add(&pcc_state->plspid_map, plspid_mapping);
			nbkey_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping));
			nbkey_mapping->nbkey = key.nbkey;
			nbkey_mapping->plspid = pcc_state->next_plspid;
			nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping);
			pcc_state->next_plspid++;
			// FIXME: Send some error to the PCE instead of crashing
			/* PLSP-ID is a 20-bit field, so 2^20 is the limit */
			assert(pcc_state->next_plspid <= 1048576);
		}
		path->plsp_id = plspid_mapping->plspid;
	}
}
/* Resolve the northbound key for path->plsp_id and store it in
 * path->nbkey.  The mapping must exist and the PLSP-ID must be non-zero
 * (asserted). */
void lookup_nbkey(struct pcc_state *pcc_state, struct path *path)
{
	struct nbkey_map_data key, *mapping;
	// TODO: Should give an error to the PCE instead of crashing
	assert(path->plsp_id != 0);
	key.plspid = path->plsp_id;
	mapping = nbkey_map_find(&pcc_state->nbkey_map, &key);
	assert(mapping != NULL);
	path->nbkey = mapping->nbkey;
}
/* Free a request entry and the path it owns. */
void free_req_entry(struct req_entry *req)
{
	pcep_free_path(req->path);
	XFREE(MTYPE_PCEP, req);
}
/* Create a request entry for a copy of `path`, assign it a request id
 * and register it in the request tree and reqid map.  Returns the new
 * entry (owned by pcc_state->requests until popped). */
struct req_entry *push_new_req(struct pcc_state *pcc_state, struct path *path)
{
	struct req_entry *req;

	req = XCALLOC(MTYPE_PCEP, sizeof(*req));
	req->retry_count = 0;
	req->path = pcep_copy_path(path);
	repush_req(pcc_state, req);

	return req;
}
1823 void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
)
1825 uint32_t reqid
= pcc_state
->next_reqid
;
1828 req
->was_sent
= false;
1829 req
->path
->req_id
= reqid
;
1830 res
= RB_INSERT(req_entry_head
, &pcc_state
->requests
, req
);
1831 assert(res
== NULL
);
1832 assert(add_reqid_mapping(pcc_state
, req
->path
) == true);
1834 pcc_state
->next_reqid
+= 1;
1835 /* Wrapping is allowed, but 0 is not a valid id */
1836 if (pcc_state
->next_reqid
== 0)
1837 pcc_state
->next_reqid
= 1;
/* Remove and return the pending request with the given request id,
 * also dropping its reqid mapping.  Returns NULL when no such request
 * exists.  The caller takes ownership of the returned entry. */
struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid)
{
	struct path path = {.req_id = reqid};
	struct req_entry key = {.path = &path};
	struct req_entry *req;

	req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
	if (req == NULL)
		return NULL;
	RB_REMOVE(req_entry_head, &pcc_state->requests, req);
	remove_reqid_mapping(pcc_state, req->path);

	return req;
}
/* Like pop_req(), but keeps the reqid mapping in place — used for
 * no-path replies where the mapping is purged later (see
 * has_pending_req_for()).  Returns NULL when no such request exists. */
struct req_entry *pop_req_no_reqid(struct pcc_state *pcc_state, uint32_t reqid)
{
	struct path path = {.req_id = reqid};
	struct req_entry key = {.path = &path};
	struct req_entry *req;

	req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
	if (req == NULL)
		return NULL;
	RB_REMOVE(req_entry_head, &pcc_state->requests, req);

	return req;
}
/* Register the nbkey -> reqid mapping for `path`.  Returns false (and
 * frees the new entry) when a mapping for that nbkey already exists. */
bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
{
	struct req_map_data *mapping;
	mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping));
	mapping->nbkey = path->nbkey;
	mapping->reqid = path->req_id;
	if (req_map_add(&pcc_state->req_map, mapping) != NULL) {
		/* An entry with this nbkey is already present */
		XFREE(MTYPE_PCEP, mapping);
		return false;
	}
	return true;
}
/* Remove and free the nbkey -> reqid mapping for `path`, if present. */
void remove_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
{
	struct req_map_data key, *mapping;
	key.nbkey = path->nbkey;
	mapping = req_map_find(&pcc_state->req_map, &key);
	if (mapping != NULL) {
		req_map_del(&pcc_state->req_map, mapping);
		XFREE(MTYPE_PCEP, mapping);
	}
}
/* Return the request id mapped to the path's nbkey, or 0 when no
 * request is pending for it (0 is never a valid request id). */
uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path)
{
	struct req_map_data key, *mapping;
	key.nbkey = path->nbkey;
	mapping = req_map_find(&pcc_state->req_map, &key);
	if (mapping != NULL)
		return mapping->reqid;
	return 0;
}
/* Tell whether a computation request is still pending for this path.
 * For a path without a result (no_path or no first hop) whose request
 * entry was already consumed, any leftover reqid mapping is purged here
 * (it was kept by pop_req_no_reqid()); in that case true is returned so
 * the caller still treats the path as request-related. */
bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path)
{
	struct req_entry key = {.path = path};
	struct req_entry *req;


	PCEP_DEBUG_PATH("(%s) %s", format_path(path), __func__);
	/* Looking for request without result */
	if (path->no_path || !path->first_hop) {
		PCEP_DEBUG_PATH("%s Path : no_path|!first_hop", __func__);
		/* ...and already was handle */
		req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
		if (!req) {
			/* we must purge remaining reqid */
			PCEP_DEBUG_PATH("%s Purge pending reqid: no_path(%s)",
					__func__,
					path->no_path ? "TRUE" : "FALSE");
			if (lookup_reqid(pcc_state, path) != 0) {
				PCEP_DEBUG_PATH("%s Purge pending reqid: DONE ",
						__func__);
				remove_reqid_mapping(pcc_state, path);
				return true;
			} else {
				return false;
			}
		}
	}

	return lookup_reqid(pcc_state, path) != 0;
}
1936 /* ------------ Data Structure Callbacks ------------ */
/* Return -1/1 from the enclosing comparison function when A and B
 * differ; fall through when they are equal. */
#define CMP_RETURN(A, B)                                                       \
	if (A != B)                                                            \
		return (A < B) ? -1 : 1
/* Hash a northbound key (color, preference and endpoint address) for
 * the hash-map containers. */
static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey)
{
	uint32_t hash;
	hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a);
	switch (nbkey->endpoint.ipa_type) {
	case IPADDR_V4:
		return jhash(&nbkey->endpoint.ipaddr_v4,
			     sizeof(nbkey->endpoint.ipaddr_v4), hash);
	case IPADDR_V6:
		return jhash(&nbkey->endpoint.ipaddr_v6,
			     sizeof(nbkey->endpoint.ipaddr_v6), hash);
	case IPADDR_NONE:
		return hash;
	}

	/* All enum values handled above; reaching here is a bug */
	assert(!"Reached end of function where we were not expecting to");
}
/* Three-way comparison of northbound keys: by color, then endpoint
 * address, then preference. */
static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b)
{
	CMP_RETURN(a->color, b->color);
	int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint);
	if (cmp != 0)
		return cmp;
	CMP_RETURN(a->preference, b->preference);
	return 0;
}
/* Comparison callback for the nbkey -> plspid map (keyed by nbkey). */
int plspid_map_cmp(const struct plspid_map_data *a,
		   const struct plspid_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
/* Hash callback for the nbkey -> plspid map (keyed by nbkey). */
uint32_t plspid_map_hash(const struct plspid_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}
/* Comparison callback for the plspid -> nbkey map (keyed by plspid). */
int nbkey_map_cmp(const struct nbkey_map_data *a,
		  const struct nbkey_map_data *b)
{
	CMP_RETURN(a->plspid, b->plspid);
	return 0;
}
/* Hash callback for the plspid -> nbkey map; the PLSP-ID itself is
 * already a well-distributed small integer key. */
uint32_t nbkey_map_hash(const struct nbkey_map_data *e)
{
	return e->plspid;
}
/* Comparison callback for the nbkey -> reqid map (keyed by nbkey). */
int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
/* Hash callback for the nbkey -> reqid map (keyed by nbkey). */
uint32_t req_map_hash(const struct req_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}