 * Copyright (C) 2020 NetDEF, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
/* TODOS AND KNOWN ISSUES:
	- Delete mapping from NB keys to PLSPID when an LSP is deleted either
	  by the PCE or the NB.
	- Revert the hacks to work around ODL requiring a report with
	  operational status DOWN when an LSP is activated.
	- Enforce only the PCE a policy has been delegated to can update it.
	- If the router-id is used because the PCC IP is not specified
	  (either IPv4 or IPv6), the connection to the PCE is not reset
	  when the router-id changes.
*/
36 #include "lib/version.h"
37 #include "northbound.h"
38 #include "frr_pthread.h"
41 #include "pathd/pathd.h"
42 #include "pathd/path_zebra.h"
43 #include "pathd/path_errors.h"
44 #include "pathd/path_pcep.h"
45 #include "pathd/path_pcep_controller.h"
46 #include "pathd/path_pcep_lib.h"
47 #include "pathd/path_pcep_config.h"
48 #include "pathd/path_pcep_debug.h"
51 /* The number of time we will skip connecting if we are missing the PCC
52 * address for an inet family different from the selected transport one*/
53 #define OTHER_FAMILY_MAX_RETRIES 4
54 #define MAX_ERROR_MSG_SIZE 256
55 #define MAX_COMPREQ_TRIES 3
/* Serializes access to the per-PCC is_best/previous_best bookkeeping, which
 * is written in pcep_pcc_calculate_best_pce() and read in
 * pcep_pcc_copy_pcc_info(). */
pthread_mutex_t g_pcc_info_mtx = PTHREAD_MUTEX_INITIALIZER;
59 /* PCEP Event Handler */
60 static void handle_pcep_open(struct ctrl_state
*ctrl_state
,
61 struct pcc_state
*pcc_state
,
62 struct pcep_message
*msg
);
63 static void handle_pcep_message(struct ctrl_state
*ctrl_state
,
64 struct pcc_state
*pcc_state
,
65 struct pcep_message
*msg
);
66 static void handle_pcep_lsp_initiate(struct ctrl_state
*ctrl_state
,
67 struct pcc_state
*pcc_state
,
68 struct pcep_message
*msg
);
69 static void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
70 struct pcc_state
*pcc_state
,
71 struct pcep_message
*msg
);
72 static void continue_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
73 struct pcc_state
*pcc_state
,
74 struct path
*path
, void *payload
);
75 static void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
76 struct pcc_state
*pcc_state
,
77 struct pcep_message
*msg
);
79 /* Internal Functions */
80 static const char *ipaddr_type_name(struct ipaddr
*addr
);
81 static bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
);
82 static void select_pcc_addresses(struct pcc_state
*pcc_state
);
83 static void select_transport_address(struct pcc_state
*pcc_state
);
84 static void update_tag(struct pcc_state
*pcc_state
);
85 static void update_originator(struct pcc_state
*pcc_state
);
86 static void schedule_reconnect(struct ctrl_state
*ctrl_state
,
87 struct pcc_state
*pcc_state
);
88 static void schedule_session_timeout(struct ctrl_state
*ctrl_state
,
89 struct pcc_state
*pcc_state
);
90 static void cancel_session_timeout(struct ctrl_state
*ctrl_state
,
91 struct pcc_state
*pcc_state
);
92 static void send_pcep_message(struct pcc_state
*pcc_state
,
93 struct pcep_message
*msg
);
94 static void send_pcep_error(struct pcc_state
*pcc_state
,
95 enum pcep_error_type error_type
,
96 enum pcep_error_value error_value
,
97 struct path
*trigger_path
);
98 static void send_report(struct pcc_state
*pcc_state
, struct path
*path
);
99 static void send_comp_request(struct ctrl_state
*ctrl_state
,
100 struct pcc_state
*pcc_state
,
101 struct req_entry
*req
);
102 static void cancel_comp_requests(struct ctrl_state
*ctrl_state
,
103 struct pcc_state
*pcc_state
);
104 static void cancel_comp_request(struct ctrl_state
*ctrl_state
,
105 struct pcc_state
*pcc_state
,
106 struct req_entry
*req
);
107 static void specialize_outgoing_path(struct pcc_state
*pcc_state
,
109 static void specialize_incoming_path(struct pcc_state
*pcc_state
,
111 static bool validate_incoming_path(struct pcc_state
*pcc_state
,
112 struct path
*path
, char *errbuff
,
114 static void set_pcc_address(struct pcc_state
*pcc_state
,
115 struct lsp_nb_key
*nbkey
, struct ipaddr
*addr
);
116 static int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
);
117 static int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
);
118 static int get_previous_best_pce(struct pcc_state
**pcc
);
119 static int get_best_pce(struct pcc_state
**pcc
);
120 static int get_pce_count_connected(struct pcc_state
**pcc
);
121 static bool update_best_pce(struct pcc_state
**pcc
, int best
);
123 /* Data Structure Helper Functions */
124 static void lookup_plspid(struct pcc_state
*pcc_state
, struct path
*path
);
125 static void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
);
126 static void free_req_entry(struct req_entry
*req
);
127 static struct req_entry
*push_new_req(struct pcc_state
*pcc_state
,
129 static void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
);
130 static struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
);
131 static struct req_entry
*pop_req_no_reqid(struct pcc_state
*pcc_state
,
133 static bool add_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
);
134 static void remove_reqid_mapping(struct pcc_state
*pcc_state
,
136 static uint32_t lookup_reqid(struct pcc_state
*pcc_state
, struct path
*path
);
137 static bool has_pending_req_for(struct pcc_state
*pcc_state
, struct path
*path
);
139 /* Data Structure Callbacks */
140 static int plspid_map_cmp(const struct plspid_map_data
*a
,
141 const struct plspid_map_data
*b
);
142 static uint32_t plspid_map_hash(const struct plspid_map_data
*e
);
143 static int nbkey_map_cmp(const struct nbkey_map_data
*a
,
144 const struct nbkey_map_data
*b
);
145 static uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
);
146 static int req_map_cmp(const struct req_map_data
*a
,
147 const struct req_map_data
*b
);
148 static uint32_t req_map_hash(const struct req_map_data
*e
);
150 /* Data Structure Declarations */
151 DECLARE_HASH(plspid_map
, struct plspid_map_data
, mi
, plspid_map_cmp
,
153 DECLARE_HASH(nbkey_map
, struct nbkey_map_data
, mi
, nbkey_map_cmp
,
155 DECLARE_HASH(req_map
, struct req_map_data
, mi
, req_map_cmp
, req_map_hash
);
157 static inline int req_entry_compare(const struct req_entry
*a
,
158 const struct req_entry
*b
)
160 return a
->path
->req_id
- b
->path
->req_id
;
162 RB_GENERATE(req_entry_head
, req_entry
, entry
, req_entry_compare
)
165 /* ------------ API Functions ------------ */
167 struct pcc_state
*pcep_pcc_initialize(struct ctrl_state
*ctrl_state
, int index
)
169 struct pcc_state
*pcc_state
= XCALLOC(MTYPE_PCEP
, sizeof(*pcc_state
));
171 pcc_state
->id
= index
;
172 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
173 pcc_state
->next_reqid
= 1;
174 pcc_state
->next_plspid
= 1;
176 RB_INIT(req_entry_head
, &pcc_state
->requests
);
178 update_tag(pcc_state
);
179 update_originator(pcc_state
);
181 PCEP_DEBUG("%s PCC initialized", pcc_state
->tag
);
186 void pcep_pcc_finalize(struct ctrl_state
*ctrl_state
,
187 struct pcc_state
*pcc_state
)
189 PCEP_DEBUG("%s PCC finalizing...", pcc_state
->tag
);
191 pcep_pcc_disable(ctrl_state
, pcc_state
);
193 if (pcc_state
->pcc_opts
!= NULL
) {
194 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
195 pcc_state
->pcc_opts
= NULL
;
197 if (pcc_state
->pce_opts
!= NULL
) {
198 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
199 pcc_state
->pce_opts
= NULL
;
201 if (pcc_state
->originator
!= NULL
) {
202 XFREE(MTYPE_PCEP
, pcc_state
->originator
);
203 pcc_state
->originator
= NULL
;
206 if (pcc_state
->t_reconnect
!= NULL
) {
207 thread_cancel(&pcc_state
->t_reconnect
);
208 pcc_state
->t_reconnect
= NULL
;
211 if (pcc_state
->t_update_best
!= NULL
) {
212 thread_cancel(&pcc_state
->t_update_best
);
213 pcc_state
->t_update_best
= NULL
;
216 if (pcc_state
->t_session_timeout
!= NULL
) {
217 thread_cancel(&pcc_state
->t_session_timeout
);
218 pcc_state
->t_session_timeout
= NULL
;
221 XFREE(MTYPE_PCEP
, pcc_state
);
224 int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
)
236 retval
= lhs
->port
- rhs
->port
;
241 retval
= lhs
->msd
- rhs
->msd
;
246 if (IS_IPADDR_V4(&lhs
->addr
)) {
247 retval
= memcmp(&lhs
->addr
.ipaddr_v4
, &rhs
->addr
.ipaddr_v4
,
248 sizeof(lhs
->addr
.ipaddr_v4
));
252 } else if (IS_IPADDR_V6(&lhs
->addr
)) {
253 retval
= memcmp(&lhs
->addr
.ipaddr_v6
, &rhs
->addr
.ipaddr_v6
,
254 sizeof(lhs
->addr
.ipaddr_v6
));
263 int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
)
273 int retval
= lhs
->port
- rhs
->port
;
278 retval
= strcmp(lhs
->pce_name
, rhs
->pce_name
);
283 retval
= lhs
->precedence
- rhs
->precedence
;
288 retval
= memcmp(&lhs
->addr
, &rhs
->addr
, sizeof(lhs
->addr
));
296 int pcep_pcc_update(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
,
297 struct pcc_opts
*pcc_opts
, struct pce_opts
*pce_opts
)
301 // If the options did not change, then there is nothing to do
302 if ((compare_pce_opts(pce_opts
, pcc_state
->pce_opts
) == 0)
303 && (compare_pcc_opts(pcc_opts
, pcc_state
->pcc_opts
) == 0)) {
307 if ((ret
= pcep_pcc_disable(ctrl_state
, pcc_state
))) {
308 XFREE(MTYPE_PCEP
, pcc_opts
);
309 XFREE(MTYPE_PCEP
, pce_opts
);
313 if (pcc_state
->pcc_opts
!= NULL
) {
314 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
316 if (pcc_state
->pce_opts
!= NULL
) {
317 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
320 pcc_state
->pcc_opts
= pcc_opts
;
321 pcc_state
->pce_opts
= pce_opts
;
323 if (IS_IPADDR_V4(&pcc_opts
->addr
)) {
324 pcc_state
->pcc_addr_v4
= pcc_opts
->addr
.ipaddr_v4
;
325 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
327 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
330 if (IS_IPADDR_V6(&pcc_opts
->addr
)) {
331 memcpy(&pcc_state
->pcc_addr_v6
, &pcc_opts
->addr
.ipaddr_v6
,
332 sizeof(struct in6_addr
));
333 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
335 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
338 update_tag(pcc_state
);
339 update_originator(pcc_state
);
341 return pcep_pcc_enable(ctrl_state
, pcc_state
);
344 void pcep_pcc_reconnect(struct ctrl_state
*ctrl_state
,
345 struct pcc_state
*pcc_state
)
347 if (pcc_state
->status
== PCEP_PCC_DISCONNECTED
)
348 pcep_pcc_enable(ctrl_state
, pcc_state
);
351 int pcep_pcc_enable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
353 assert(pcc_state
->status
== PCEP_PCC_DISCONNECTED
);
354 assert(pcc_state
->sess
== NULL
);
356 if (pcc_state
->t_reconnect
!= NULL
) {
357 thread_cancel(&pcc_state
->t_reconnect
);
358 pcc_state
->t_reconnect
= NULL
;
361 select_transport_address(pcc_state
);
363 /* Even though we are connecting using IPv6. we want to have an IPv4
364 * address so we can handle candidate path with IPv4 endpoints */
365 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
366 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
367 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
368 "skipping connection to PCE %pIA:%d due to missing PCC IPv4 address",
369 &pcc_state
->pce_opts
->addr
,
370 pcc_state
->pce_opts
->port
);
371 schedule_reconnect(ctrl_state
, pcc_state
);
374 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
375 "missing IPv4 PCC address, IPv4 candidate paths will be ignored");
379 /* Even though we are connecting using IPv4. we want to have an IPv6
380 * address so we can handle candidate path with IPv6 endpoints */
381 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
382 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
383 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
384 "skipping connection to PCE %pIA:%d due to missing PCC IPv6 address",
385 &pcc_state
->pce_opts
->addr
,
386 pcc_state
->pce_opts
->port
);
387 schedule_reconnect(ctrl_state
, pcc_state
);
390 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
391 "missing IPv6 PCC address, IPv6 candidate paths will be ignored");
395 /* Even if the maximum retries to try to have all the familly addresses
396 * have been spent, we still need the one for the transport familly */
397 if (pcc_state
->pcc_addr_tr
.ipa_type
== IPADDR_NONE
) {
398 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
399 "skipping connection to PCE %pIA:%d due to missing PCC address",
400 &pcc_state
->pce_opts
->addr
,
401 pcc_state
->pce_opts
->port
);
402 schedule_reconnect(ctrl_state
, pcc_state
);
406 PCEP_DEBUG("%s PCC connecting", pcc_state
->tag
);
407 pcc_state
->sess
= pcep_lib_connect(
408 &pcc_state
->pcc_addr_tr
, pcc_state
->pcc_opts
->port
,
409 &pcc_state
->pce_opts
->addr
, pcc_state
->pce_opts
->port
,
410 pcc_state
->pcc_opts
->msd
, &pcc_state
->pce_opts
->config_opts
);
412 if (pcc_state
->sess
== NULL
) {
413 flog_warn(EC_PATH_PCEP_LIB_CONNECT
,
414 "failed to connect to PCE %pIA:%d from %pIA:%d",
415 &pcc_state
->pce_opts
->addr
,
416 pcc_state
->pce_opts
->port
,
417 &pcc_state
->pcc_addr_tr
,
418 pcc_state
->pcc_opts
->port
);
419 schedule_reconnect(ctrl_state
, pcc_state
);
423 // In case some best pce alternative were waiting to activate
424 if (pcc_state
->t_update_best
!= NULL
) {
425 thread_cancel(&pcc_state
->t_update_best
);
426 pcc_state
->t_update_best
= NULL
;
429 pcc_state
->status
= PCEP_PCC_CONNECTING
;
434 int pcep_pcc_disable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
436 switch (pcc_state
->status
) {
437 case PCEP_PCC_DISCONNECTED
:
439 case PCEP_PCC_CONNECTING
:
440 case PCEP_PCC_SYNCHRONIZING
:
441 case PCEP_PCC_OPERATING
:
442 PCEP_DEBUG("%s Disconnecting PCC...", pcc_state
->tag
);
443 cancel_comp_requests(ctrl_state
, pcc_state
);
444 pcep_lib_disconnect(pcc_state
->sess
);
445 /* No need to remove if any PCEs is connected */
446 if (get_pce_count_connected(ctrl_state
->pcc
) == 0) {
447 pcep_thread_remove_candidate_path_segments(ctrl_state
,
450 pcc_state
->sess
= NULL
;
451 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
458 void pcep_pcc_sync_path(struct ctrl_state
*ctrl_state
,
459 struct pcc_state
*pcc_state
, struct path
*path
)
461 if (pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
462 path
->is_synching
= true;
463 } else if (pcc_state
->status
== PCEP_PCC_OPERATING
)
464 path
->is_synching
= false;
468 path
->go_active
= true;
470 /* Accumulate the dynamic paths without any LSP so computation
471 * requests can be performed after synchronization */
472 if ((path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)
473 && (path
->first_hop
== NULL
)
474 && !has_pending_req_for(pcc_state
, path
)) {
475 PCEP_DEBUG("%s Scheduling computation request for path %s",
476 pcc_state
->tag
, path
->name
);
477 push_new_req(pcc_state
, path
);
481 /* Synchronize the path if the PCE supports LSP updates and the
482 * endpoint address familly is supported */
483 if (pcc_state
->caps
.is_stateful
) {
484 if (filter_path(pcc_state
, path
)) {
485 PCEP_DEBUG("%s Synchronizing path %s", pcc_state
->tag
,
487 send_report(pcc_state
, path
);
490 "%s Skipping %s candidate path %s synchronization",
492 ipaddr_type_name(&path
->nbkey
.endpoint
),
498 void pcep_pcc_sync_done(struct ctrl_state
*ctrl_state
,
499 struct pcc_state
*pcc_state
)
501 struct req_entry
*req
;
503 if (pcc_state
->status
!= PCEP_PCC_SYNCHRONIZING
504 && pcc_state
->status
!= PCEP_PCC_OPERATING
)
507 if (pcc_state
->caps
.is_stateful
508 && pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
509 struct path
*path
= pcep_new_path();
510 *path
= (struct path
){.name
= NULL
,
513 .status
= PCEP_LSP_OPERATIONAL_DOWN
,
516 .was_created
= false,
517 .was_removed
= false,
518 .is_synching
= false,
519 .is_delegated
= false,
521 .first_metric
= NULL
};
522 send_report(pcc_state
, path
);
523 pcep_free_path(path
);
526 pcc_state
->synchronized
= true;
527 pcc_state
->status
= PCEP_PCC_OPERATING
;
529 PCEP_DEBUG("%s Synchronization done", pcc_state
->tag
);
531 /* Start the computation request accumulated during synchronization */
532 RB_FOREACH (req
, req_entry_head
, &pcc_state
->requests
) {
533 send_comp_request(ctrl_state
, pcc_state
, req
);
537 void pcep_pcc_send_report(struct ctrl_state
*ctrl_state
,
538 struct pcc_state
*pcc_state
, struct path
*path
,
541 if ((pcc_state
->status
!= PCEP_PCC_OPERATING
)
542 || (!pcc_state
->caps
.is_stateful
)) {
543 pcep_free_path(path
);
547 PCEP_DEBUG("(%s)%s Send report for candidate path %s", __func__
,
548 pcc_state
->tag
, path
->name
);
550 /* ODL and Cisco requires the first reported
551 * LSP to have a DOWN status, the later status changes
552 * will be comunicated through hook calls.
554 enum pcep_lsp_operational_status real_status
= path
->status
;
555 path
->status
= PCEP_LSP_OPERATIONAL_DOWN
;
556 send_report(pcc_state
, path
);
558 /* If no update is expected and the real status wasn't down, we need to
559 * send a second report with the real status */
560 if (is_stable
&& (real_status
!= PCEP_LSP_OPERATIONAL_DOWN
)) {
561 PCEP_DEBUG("(%s)%s Send report for candidate path (!DOWN) %s",
562 __func__
, pcc_state
->tag
, path
->name
);
563 path
->status
= real_status
;
564 send_report(pcc_state
, path
);
567 pcep_free_path(path
);
571 void pcep_pcc_send_error(struct ctrl_state
*ctrl_state
,
572 struct pcc_state
*pcc_state
, struct pcep_error
*error
,
576 PCEP_DEBUG("(%s) Send error after PcInitiated ", __func__
);
579 send_pcep_error(pcc_state
, error
->error_type
, error
->error_value
,
581 pcep_free_path(error
->path
);
582 XFREE(MTYPE_PCEP
, error
);
584 /* ------------ Timeout handler ------------ */
586 void pcep_pcc_timeout_handler(struct ctrl_state
*ctrl_state
,
587 struct pcc_state
*pcc_state
,
588 enum pcep_ctrl_timeout_type type
, void *param
)
590 struct req_entry
*req
;
593 case TO_COMPUTATION_REQUEST
:
594 assert(param
!= NULL
);
595 req
= (struct req_entry
*)param
;
596 pop_req(pcc_state
, req
->path
->req_id
);
597 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT
,
598 "Computation request %d timeout", req
->path
->req_id
);
599 cancel_comp_request(ctrl_state
, pcc_state
, req
);
600 if (req
->retry_count
++ < MAX_COMPREQ_TRIES
) {
601 repush_req(pcc_state
, req
);
602 send_comp_request(ctrl_state
, pcc_state
, req
);
605 if (pcc_state
->caps
.is_stateful
) {
608 "%s Delegating undefined dynamic path %s to PCE %s",
609 pcc_state
->tag
, req
->path
->name
,
610 pcc_state
->originator
);
611 path
= pcep_copy_path(req
->path
);
612 path
->is_delegated
= true;
613 send_report(pcc_state
, path
);
623 /* ------------ Pathd event handler ------------ */
625 void pcep_pcc_pathd_event_handler(struct ctrl_state
*ctrl_state
,
626 struct pcc_state
*pcc_state
,
627 enum pcep_pathd_event_type type
,
630 struct req_entry
*req
;
632 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
635 /* Skipping candidate path with endpoint that do not match the
636 * configured or deduced PCC IP version */
637 if (!filter_path(pcc_state
, path
)) {
638 PCEP_DEBUG("%s Skipping %s candidate path %s event",
640 ipaddr_type_name(&path
->nbkey
.endpoint
), path
->name
);
645 case PCEP_PATH_CREATED
:
646 if (has_pending_req_for(pcc_state
, path
)) {
648 "%s Candidate path %s created, computation request already sent",
649 pcc_state
->tag
, path
->name
);
652 PCEP_DEBUG("%s Candidate path %s created", pcc_state
->tag
,
654 if ((path
->first_hop
== NULL
)
655 && (path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)) {
656 req
= push_new_req(pcc_state
, path
);
657 send_comp_request(ctrl_state
, pcc_state
, req
);
658 } else if (pcc_state
->caps
.is_stateful
)
659 send_report(pcc_state
, path
);
661 case PCEP_PATH_UPDATED
:
662 PCEP_DEBUG("%s Candidate path %s updated", pcc_state
->tag
,
664 if (pcc_state
->caps
.is_stateful
)
665 send_report(pcc_state
, path
);
667 case PCEP_PATH_REMOVED
:
668 PCEP_DEBUG("%s Candidate path %s removed", pcc_state
->tag
,
670 path
->was_removed
= true;
671 /* Removed as response to a PcInitiated 'R'emove*/
672 /* RFC 8281 #5.4 LSP Deletion*/
673 path
->do_remove
= path
->was_removed
;
674 if (pcc_state
->caps
.is_stateful
)
675 send_report(pcc_state
, path
);
678 flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR
,
679 "Unexpected pathd event received by pcc %s: %u",
680 pcc_state
->tag
, type
);
686 /* ------------ PCEP event handler ------------ */
688 void pcep_pcc_pcep_event_handler(struct ctrl_state
*ctrl_state
,
689 struct pcc_state
*pcc_state
, pcep_event
*event
)
691 PCEP_DEBUG("%s Received PCEP event: %s", pcc_state
->tag
,
692 pcep_event_type_name(event
->event_type
));
693 switch (event
->event_type
) {
694 case PCC_CONNECTED_TO_PCE
:
695 assert(PCEP_PCC_CONNECTING
== pcc_state
->status
);
696 PCEP_DEBUG("%s Connection established", pcc_state
->tag
);
697 pcc_state
->status
= PCEP_PCC_SYNCHRONIZING
;
698 pcc_state
->retry_count
= 0;
699 pcc_state
->synchronized
= false;
700 PCEP_DEBUG("%s Starting PCE synchronization", pcc_state
->tag
);
701 cancel_session_timeout(ctrl_state
, pcc_state
);
702 pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
703 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
705 case PCC_SENT_INVALID_OPEN
:
706 PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state
->tag
);
708 "%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
710 pcc_state
->sess
->pcc_config
711 .keep_alive_pce_negotiated_timer_seconds
,
712 pcc_state
->sess
->pcc_config
713 .dead_timer_pce_negotiated_seconds
);
714 pcc_state
->pce_opts
->config_opts
.keep_alive_seconds
=
715 pcc_state
->sess
->pcc_config
716 .keep_alive_pce_negotiated_timer_seconds
;
717 pcc_state
->pce_opts
->config_opts
.dead_timer_seconds
=
718 pcc_state
->sess
->pcc_config
719 .dead_timer_pce_negotiated_seconds
;
722 case PCC_RCVD_INVALID_OPEN
:
723 PCEP_DEBUG("%s Received invalid OPEN message", pcc_state
->tag
);
724 PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state
->tag
,
725 format_pcep_message(event
->message
));
727 case PCE_DEAD_TIMER_EXPIRED
:
728 case PCE_CLOSED_SOCKET
:
729 case PCE_SENT_PCEP_CLOSE
:
730 case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED
:
731 case PCC_PCEP_SESSION_CLOSED
:
732 case PCC_RCVD_MAX_INVALID_MSGS
:
733 case PCC_RCVD_MAX_UNKOWN_MSGS
:
734 pcep_pcc_disable(ctrl_state
, pcc_state
);
735 schedule_reconnect(ctrl_state
, pcc_state
);
736 schedule_session_timeout(ctrl_state
, pcc_state
);
738 case MESSAGE_RECEIVED
:
739 PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state
->tag
,
740 format_pcep_message(event
->message
));
741 if (pcc_state
->status
== PCEP_PCC_CONNECTING
) {
742 if (event
->message
->msg_header
->type
== PCEP_TYPE_OPEN
)
743 handle_pcep_open(ctrl_state
, pcc_state
,
747 assert(pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
748 || pcc_state
->status
== PCEP_PCC_OPERATING
);
749 handle_pcep_message(ctrl_state
, pcc_state
, event
->message
);
752 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT
,
753 "Unexpected event from pceplib: %s",
754 format_pcep_event(event
));
760 /*------------------ Multi-PCE --------------------- */
762 /* Internal util function, returns true if sync is necessary, false otherwise */
763 bool update_best_pce(struct pcc_state
**pcc
, int best
)
765 PCEP_DEBUG(" recalculating pce precedence ");
767 struct pcc_state
*best_pcc_state
=
768 pcep_pcc_get_pcc_by_id(pcc
, best
);
769 if (best_pcc_state
->previous_best
!= best_pcc_state
->is_best
) {
770 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
771 best_pcc_state
->tag
, best_pcc_state
->id
,
772 best_pcc_state
->previous_best
);
776 " %s No Resynch best (%i) previous best (%i)",
777 best_pcc_state
->tag
, best_pcc_state
->id
,
778 best_pcc_state
->previous_best
);
781 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
787 int get_best_pce(struct pcc_state
**pcc
)
789 for (int i
= 0; i
< MAX_PCC
; i
++) {
790 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
791 if (pcc
[i
]->is_best
== true) {
799 int get_pce_count_connected(struct pcc_state
**pcc
)
802 for (int i
= 0; i
< MAX_PCC
; i
++) {
803 if (pcc
[i
] && pcc
[i
]->pce_opts
804 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
811 int get_previous_best_pce(struct pcc_state
**pcc
)
813 int previous_best_pce
= -1;
815 for (int i
= 0; i
< MAX_PCC
; i
++) {
816 if (pcc
[i
] && pcc
[i
]->pce_opts
&& pcc
[i
]->previous_best
== true
817 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
818 previous_best_pce
= i
;
822 return previous_best_pce
!= -1 ? pcc
[previous_best_pce
]->id
: 0;
825 /* Called by path_pcep_controller EV_REMOVE_PCC
826 * Event handler when a PCC is removed. */
827 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state
*ctrl_state
,
828 struct pcc_state
**pcc
)
830 int new_best_pcc_id
= -1;
831 new_best_pcc_id
= pcep_pcc_calculate_best_pce(pcc
);
832 if (new_best_pcc_id
) {
833 if (update_best_pce(ctrl_state
->pcc
, new_best_pcc_id
) == true) {
834 pcep_thread_start_sync(ctrl_state
, new_best_pcc_id
);
841 /* Called by path_pcep_controller EV_SYNC_PATH
842 * Event handler when a path is sync'd. */
843 int pcep_pcc_multi_pce_sync_path(struct ctrl_state
*ctrl_state
, int pcc_id
,
844 struct pcc_state
**pcc
)
846 int previous_best_pcc_id
= -1;
848 if (pcc_id
== get_best_pce(pcc
)) {
849 previous_best_pcc_id
= get_previous_best_pce(pcc
);
850 if (previous_best_pcc_id
!= 0) {
851 /* while adding new pce, path has to resync to the
852 * previous best. pcep_thread_start_sync() will be
853 * called by the calling function */
854 if (update_best_pce(ctrl_state
->pcc
,
855 previous_best_pcc_id
)
857 cancel_comp_requests(
859 pcep_pcc_get_pcc_by_id(
860 pcc
, previous_best_pcc_id
));
861 pcep_thread_start_sync(ctrl_state
,
862 previous_best_pcc_id
);
870 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
872 int pcep_pcc_timer_update_best_pce(struct ctrl_state
*ctrl_state
, int pcc_id
)
875 /* resync whatever was the new best */
876 int prev_best
= get_best_pce(ctrl_state
->pcc
);
877 int best_id
= pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
878 if (best_id
&& prev_best
!= best_id
) { // Avoid Multiple call
879 struct pcc_state
*pcc_state
=
880 pcep_pcc_get_pcc_by_id(ctrl_state
->pcc
, best_id
);
881 if (update_best_pce(ctrl_state
->pcc
, pcc_state
->id
) == true) {
882 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
889 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
890 * Returns the best PCE id */
891 int pcep_pcc_calculate_best_pce(struct pcc_state
**pcc
)
893 int best_precedence
= 255; // DEFAULT_PCE_PRECEDENCE;
895 int one_connected_pce
= -1;
896 int previous_best_pce
= -1;
897 int step_0_best
= -1;
898 int step_0_previous
= -1;
902 for (int i
= 0; i
< MAX_PCC
; i
++) {
903 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
905 "multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
906 i
, pcc
[i
]->is_best
, pcc
[i
]->previous_best
);
909 if (pcc
[i
]->is_best
== true) {
912 if (pcc
[i
]->previous_best
== true) {
923 for (int i
= 0; i
< MAX_PCC
; i
++) {
924 if (pcc
[i
] && pcc
[i
]->pce_opts
925 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
926 one_connected_pce
= i
; // In case none better
927 if (pcc
[i
]->pce_opts
->precedence
<= best_precedence
) {
929 && pcc
[best_pce
]->pce_opts
->precedence
933 &pcc
[i
]->pce_opts
->addr
,
937 // collide of precedences so
941 if (!pcc
[i
]->previous_best
) {
953 "multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
954 step_0_best
, step_0_previous
, one_connected_pce
, best_pce
);
956 // Changed of state so ...
957 if (step_0_best
!= best_pce
) {
958 pthread_mutex_lock(&g_pcc_info_mtx
);
959 // Calculate previous
960 previous_best_pce
= step_0_best
;
962 if (step_0_best
!= -1) {
963 pcc
[step_0_best
]->is_best
= false;
965 if (step_0_previous
!= -1) {
966 pcc
[step_0_previous
]->previous_best
= false;
970 if (previous_best_pce
!= -1
971 && pcc
[previous_best_pce
]->status
972 == PCEP_PCC_DISCONNECTED
) {
973 pcc
[previous_best_pce
]->previous_best
= true;
974 zlog_debug("multi-pce: previous best pce (%i) ",
975 previous_best_pce
+ 1);
980 if (best_pce
!= -1) {
981 pcc
[best_pce
]->is_best
= true;
982 zlog_debug("multi-pce: best pce (%i) ", best_pce
+ 1);
984 if (one_connected_pce
!= -1) {
985 best_pce
= one_connected_pce
;
986 pcc
[one_connected_pce
]->is_best
= true;
988 "multi-pce: one connected best pce (default) (%i) ",
989 one_connected_pce
+ 1);
991 for (int i
= 0; i
< MAX_PCC
; i
++) {
992 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
994 pcc
[i
]->is_best
= true;
996 "(disconnected) best pce (default) (%i) ",
1003 pthread_mutex_unlock(&g_pcc_info_mtx
);
1006 return ((best_pce
== -1) ? 0 : pcc
[best_pce
]->id
);
1009 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state
**pcc
,
1010 struct pce_opts
*pce_opts
)
1016 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1018 if ((ipaddr_cmp((const struct ipaddr
*)&pcc
[idx
]
1020 (const struct ipaddr
*)&pce_opts
->addr
)
1022 && pcc
[idx
]->pce_opts
->port
== pce_opts
->port
) {
1023 zlog_debug("found pcc_id (%d) idx (%d)",
1025 return pcc
[idx
]->id
;
1032 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state
**pcc
, int idx
)
1034 if (pcc
== NULL
|| idx
< 0) {
1038 return pcc
[idx
] ? pcc
[idx
]->id
: 0;
1041 struct pcc_state
*pcep_pcc_get_pcc_by_id(struct pcc_state
**pcc
, int id
)
1043 if (pcc
== NULL
|| id
< 0) {
1047 for (int i
= 0; i
< MAX_PCC
; i
++) {
1049 if (pcc
[i
]->id
== id
) {
1050 zlog_debug("found id (%d) pcc_idx (%d)",
1060 struct pcc_state
*pcep_pcc_get_pcc_by_name(struct pcc_state
**pcc
,
1061 const char *pce_name
)
1063 if (pcc
== NULL
|| pce_name
== NULL
) {
1067 for (int i
= 0; i
< MAX_PCC
; i
++) {
1068 if (pcc
[i
] == NULL
) {
1072 if (strcmp(pcc
[i
]->pce_opts
->pce_name
, pce_name
) == 0) {
1080 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state
**pcc
, int id
)
1086 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1088 if (pcc
[idx
]->id
== id
) {
1089 zlog_debug("found pcc_id (%d) array_idx (%d)",
1099 int pcep_pcc_get_free_pcc_idx(struct pcc_state
**pcc
)
1101 assert(pcc
!= NULL
);
1103 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1104 if (pcc
[idx
] == NULL
) {
1105 zlog_debug("new pcc_idx (%d)", idx
);
1113 int pcep_pcc_get_pcc_id(struct pcc_state
*pcc
)
1115 return ((pcc
== NULL
) ? 0 : pcc
->id
);
1118 void pcep_pcc_copy_pcc_info(struct pcc_state
**pcc
,
1119 struct pcep_pcc_info
*pcc_info
)
1121 struct pcc_state
*pcc_state
=
1122 pcep_pcc_get_pcc_by_name(pcc
, pcc_info
->pce_name
);
1127 pcc_info
->ctrl_state
= NULL
;
1128 if(pcc_state
->pcc_opts
){
1129 pcc_info
->msd
= pcc_state
->pcc_opts
->msd
;
1130 pcc_info
->pcc_port
= pcc_state
->pcc_opts
->port
;
1132 pcc_info
->next_plspid
= pcc_state
->next_plspid
;
1133 pcc_info
->next_reqid
= pcc_state
->next_reqid
;
1134 pcc_info
->status
= pcc_state
->status
;
1135 pcc_info
->pcc_id
= pcc_state
->id
;
1136 pthread_mutex_lock(&g_pcc_info_mtx
);
1137 pcc_info
->is_best_multi_pce
= pcc_state
->is_best
;
1138 pcc_info
->previous_best
= pcc_state
->previous_best
;
1139 pthread_mutex_unlock(&g_pcc_info_mtx
);
1140 pcc_info
->precedence
=
1141 pcc_state
->pce_opts
? pcc_state
->pce_opts
->precedence
: 0;
1142 if(pcc_state
->pcc_addr_tr
.ipa_type
!= IPADDR_NONE
){
1143 memcpy(&pcc_info
->pcc_addr
, &pcc_state
->pcc_addr_tr
,
1144 sizeof(struct ipaddr
));
1149 /*------------------ PCEP Message handlers --------------------- */
1151 void handle_pcep_open(struct ctrl_state
*ctrl_state
,
1152 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1154 assert(msg
->msg_header
->type
== PCEP_TYPE_OPEN
);
1155 pcep_lib_parse_capabilities(msg
, &pcc_state
->caps
);
1156 PCEP_DEBUG("PCE capabilities: %s, %s%s",
1157 pcc_state
->caps
.is_stateful
? "stateful" : "stateless",
1158 pcc_state
->caps
.supported_ofs_are_known
1159 ? (pcc_state
->caps
.supported_ofs
== 0
1160 ? "no objective functions supported"
1161 : "supported objective functions are ")
1162 : "supported objective functions are unknown",
1163 format_objfun_set(pcc_state
->caps
.supported_ofs
));
1166 void handle_pcep_message(struct ctrl_state
*ctrl_state
,
1167 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1169 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
1172 switch (msg
->msg_header
->type
) {
1173 case PCEP_TYPE_INITIATE
:
1174 handle_pcep_lsp_initiate(ctrl_state
, pcc_state
, msg
);
1176 case PCEP_TYPE_UPDATE
:
1177 handle_pcep_lsp_update(ctrl_state
, pcc_state
, msg
);
1179 case PCEP_TYPE_PCREP
:
1180 handle_pcep_comp_reply(ctrl_state
, pcc_state
, msg
);
1183 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE
,
1184 "Unexpected pcep message from pceplib: %s",
1185 format_pcep_message(msg
));
1190 void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
1191 struct pcc_state
*pcc_state
,
1192 struct pcep_message
*msg
)
1195 path
= pcep_lib_parse_path(msg
);
1196 lookup_nbkey(pcc_state
, path
);
1197 pcep_thread_refine_path(ctrl_state
, pcc_state
->id
,
1198 &continue_pcep_lsp_update
, path
, NULL
);
1201 void continue_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
1202 struct pcc_state
*pcc_state
, struct path
*path
,
1205 char err
[MAX_ERROR_MSG_SIZE
] = {0};
1207 specialize_incoming_path(pcc_state
, path
);
1208 PCEP_DEBUG("%s Received LSP update", pcc_state
->tag
);
1209 PCEP_DEBUG_PATH("%s", format_path(path
));
1211 if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
)))
1212 pcep_thread_update_path(ctrl_state
, pcc_state
->id
, path
);
1214 /* FIXME: Monitor the amount of errors from the PCE and
1215 * possibly disconnect and blacklist */
1216 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1217 "Unsupported PCEP protocol feature: %s", err
);
1218 pcep_free_path(path
);
1222 void handle_pcep_lsp_initiate(struct ctrl_state
*ctrl_state
,
1223 struct pcc_state
*pcc_state
,
1224 struct pcep_message
*msg
)
1226 char err
[MAX_ERROR_MSG_SIZE
] = "";
1229 path
= pcep_lib_parse_path(msg
);
1231 if (!pcc_state
->pce_opts
->config_opts
.pce_initiated
) {
1232 /* PCE Initiated is not enabled */
1233 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1234 "Not allowed PCE initiated path received: %s",
1235 format_pcep_message(msg
));
1236 send_pcep_error(pcc_state
, PCEP_ERRT_LSP_INSTANTIATE_ERROR
,
1237 PCEP_ERRV_UNACCEPTABLE_INSTANTIATE_ERROR
, path
);
1241 if (path
->do_remove
) {
1242 // lookup in nbkey sequential as no endpoint
1243 struct nbkey_map_data
*key
;
1246 frr_each (nbkey_map
, &pcc_state
->nbkey_map
, key
) {
1247 ipaddr2str(&key
->nbkey
.endpoint
, endpoint
,
1250 EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1251 "FOR_EACH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
1252 key
->nbkey
.color
, endpoint
, path
->plsp_id
);
1253 if (path
->plsp_id
== key
->plspid
) {
1255 EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1256 "FOR_EACH MATCH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
1257 key
->nbkey
.color
, endpoint
,
1259 path
->nbkey
= key
->nbkey
;
1264 if (path
->first_hop
== NULL
/*ero sets first_hop*/) {
1265 /* If the PCC receives a PCInitiate message without an
1266 * ERO and the R flag in the SRP object != zero, then it
1267 * MUST send a PCErr message with Error-type=6
1268 * (Mandatory Object missing) and Error-value=9 (ERO
1269 * object missing). */
1270 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1271 "ERO object missing or incomplete : %s",
1272 format_pcep_message(msg
));
1273 send_pcep_error(pcc_state
,
1274 PCEP_ERRT_LSP_INSTANTIATE_ERROR
,
1275 PCEP_ERRV_INTERNAL_ERROR
, path
);
1279 if (path
->plsp_id
!= 0) {
1280 /* If the PCC receives a PCInitiate message with a
1281 * non-zero PLSP-ID and the R flag in the SRP object set
1282 * to zero, then it MUST send a PCErr message with
1283 * Error-type=19 (Invalid Operation) and Error-value=8
1284 * (Non-zero PLSP-ID in the LSP Initiate Request) */
1286 EC_PATH_PCEP_PROTOCOL_ERROR
,
1287 "PCE initiated path with non-zero PLSP ID: %s",
1288 format_pcep_message(msg
));
1289 send_pcep_error(pcc_state
, PCEP_ERRT_INVALID_OPERATION
,
1290 PCEP_ERRV_LSP_INIT_NON_ZERO_PLSP_ID
,
1295 if (path
->name
== NULL
) {
1296 /* If the PCC receives a PCInitiate message without a
1297 * SYMBOLIC-PATH-NAME TLV, then it MUST send a PCErr
1298 * message with Error-type=10 (Reception of an invalid
1299 * object) and Error-value=8 (SYMBOLIC-PATH-NAME TLV
1302 EC_PATH_PCEP_PROTOCOL_ERROR
,
1303 "PCE initiated path without symbolic name: %s",
1304 format_pcep_message(msg
));
1306 pcc_state
, PCEP_ERRT_RECEPTION_OF_INV_OBJECT
,
1307 PCEP_ERRV_SYMBOLIC_PATH_NAME_TLV_MISSING
, path
);
1312 /* TODO: If there is a conflict with the symbolic path name of an
1313 * existing LSP, the PCC MUST send a PCErr message with Error-type=23
1314 * (Bad Parameter value) and Error-value=1 (SYMBOLIC-PATH-NAME in
1317 specialize_incoming_path(pcc_state
, path
);
1318 /* TODO: Validate the PCC address received from the PCE is valid */
1319 PCEP_DEBUG("%s Received LSP initiate", pcc_state
->tag
);
1320 PCEP_DEBUG_PATH("%s", format_path(path
));
1322 if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
))) {
1323 pcep_thread_initiate_path(ctrl_state
, pcc_state
->id
, path
);
1325 /* FIXME: Monitor the amount of errors from the PCE and
1326 * possibly disconnect and blacklist */
1327 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1328 "Unsupported PCEP protocol feature: %s", err
);
1329 pcep_free_path(path
);
1330 send_pcep_error(pcc_state
, PCEP_ERRT_INVALID_OPERATION
,
1331 PCEP_ERRV_LSP_NOT_PCE_INITIATED
, path
);
1335 void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
1336 struct pcc_state
*pcc_state
,
1337 struct pcep_message
*msg
)
1339 char err
[MAX_ERROR_MSG_SIZE
] = "";
1340 struct req_entry
*req
;
1343 path
= pcep_lib_parse_path(msg
);
1344 if (path
->no_path
) {
1345 req
= pop_req_no_reqid(pcc_state
, path
->req_id
);
1347 req
= pop_req(pcc_state
, path
->req_id
);
1350 /* TODO: check the rate of bad computation reply and close
1351 * the connection if more that a given rate.
1354 "%s Received computation reply for unknown request %d",
1355 pcc_state
->tag
, path
->req_id
);
1356 PCEP_DEBUG_PATH("%s", format_path(path
));
1357 send_pcep_error(pcc_state
, PCEP_ERRT_UNKNOWN_REQ_REF
,
1358 PCEP_ERRV_UNASSIGNED
, NULL
);
1362 /* Cancel the computation request timeout */
1363 pcep_thread_cancel_timer(&req
->t_retry
);
1365 /* Transfer relevent metadata from the request to the response */
1366 path
->nbkey
= req
->path
->nbkey
;
1367 path
->plsp_id
= req
->path
->plsp_id
;
1368 path
->type
= req
->path
->type
;
1369 path
->name
= XSTRDUP(MTYPE_PCEP
, req
->path
->name
);
1370 specialize_incoming_path(pcc_state
, path
);
1372 PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
1373 pcc_state
->tag
, path
->req_id
,
1374 path
->no_path
? "true" : "false");
1375 PCEP_DEBUG_PATH("%s", format_path(path
));
1377 if (path
->no_path
) {
1378 PCEP_DEBUG("%s Computation for path %s did not find any result",
1379 pcc_state
->tag
, path
->name
);
1380 free_req_entry(req
);
1381 pcep_free_path(path
);
1383 } else if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
))) {
1384 /* Updating a dynamic path will automatically delegate it */
1385 pcep_thread_update_path(ctrl_state
, pcc_state
->id
, path
);
1386 free_req_entry(req
);
1389 /* FIXME: Monitor the amount of errors from the PCE and
1390 * possibly disconnect and blacklist */
1391 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1392 "Unsupported PCEP protocol feature: %s", err
);
1395 pcep_free_path(path
);
1397 /* Delegate the path regardless of the outcome */
1398 /* TODO: For now we are using the path from the request, when
1399 * pathd API is thread safe, we could get a new path */
1400 if (pcc_state
->caps
.is_stateful
) {
1401 PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
1402 pcc_state
->tag
, req
->path
->name
,
1403 pcc_state
->originator
);
1404 path
= pcep_copy_path(req
->path
);
1405 path
->is_delegated
= true;
1406 send_report(pcc_state
, path
);
1407 pcep_free_path(path
);
1410 free_req_entry(req
);
1414 /* ------------ Internal Functions ------------ */
/* Return a printable name for the address family of addr.
 * NOTE(review): the literal strings were reconstructed — confirm
 * against callers' expected output. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	if (IS_IPADDR_V4(addr))
		return "IPv4";
	if (IS_IPADDR_V6(addr))
		return "IPv6";
	return "undefined";
}
1425 bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
)
1427 return (IS_IPADDR_V4(&path
->nbkey
.endpoint
)
1428 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
))
1429 || (IS_IPADDR_V6(&path
->nbkey
.endpoint
)
1430 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
));
1433 void select_pcc_addresses(struct pcc_state
*pcc_state
)
1435 /* If no IPv4 address was specified, try to get one from zebra */
1436 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
1437 if (get_ipv4_router_id(&pcc_state
->pcc_addr_v4
)) {
1438 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
1442 /* If no IPv6 address was specified, try to get one from zebra */
1443 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
1444 if (get_ipv6_router_id(&pcc_state
->pcc_addr_v6
)) {
1445 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
1450 void select_transport_address(struct pcc_state
*pcc_state
)
1452 struct ipaddr
*taddr
= &pcc_state
->pcc_addr_tr
;
1454 select_pcc_addresses(pcc_state
);
1456 taddr
->ipa_type
= IPADDR_NONE
;
1458 /* Select a transport source address in function of the configured PCE
1460 if (IS_IPADDR_V4(&pcc_state
->pce_opts
->addr
)) {
1461 if (CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
1462 taddr
->ipaddr_v4
= pcc_state
->pcc_addr_v4
;
1463 taddr
->ipa_type
= IPADDR_V4
;
1466 if (CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
1467 taddr
->ipaddr_v6
= pcc_state
->pcc_addr_v6
;
1468 taddr
->ipa_type
= IPADDR_V6
;
1473 void update_tag(struct pcc_state
*pcc_state
)
1475 if (pcc_state
->pce_opts
!= NULL
) {
1476 assert(!IS_IPADDR_NONE(&pcc_state
->pce_opts
->addr
));
1477 if (IS_IPADDR_V6(&pcc_state
->pce_opts
->addr
)) {
1478 snprintfrr(pcc_state
->tag
, sizeof(pcc_state
->tag
),
1480 &pcc_state
->pce_opts
->addr
.ipaddr_v6
,
1481 pcc_state
->pce_opts
->port
, pcc_state
->id
);
1483 snprintfrr(pcc_state
->tag
, sizeof(pcc_state
->tag
),
1485 &pcc_state
->pce_opts
->addr
.ipaddr_v4
,
1486 pcc_state
->pce_opts
->port
, pcc_state
->id
);
1489 snprintfrr(pcc_state
->tag
, sizeof(pcc_state
->tag
), "(%u)",
1494 void update_originator(struct pcc_state
*pcc_state
)
1497 if (pcc_state
->originator
!= NULL
) {
1498 XFREE(MTYPE_PCEP
, pcc_state
->originator
);
1499 pcc_state
->originator
= NULL
;
1501 if (pcc_state
->pce_opts
== NULL
)
1503 originator
= XCALLOC(MTYPE_PCEP
, 52);
1504 assert(!IS_IPADDR_NONE(&pcc_state
->pce_opts
->addr
));
1505 if (IS_IPADDR_V6(&pcc_state
->pce_opts
->addr
)) {
1506 snprintfrr(originator
, 52, "%pI6:%i",
1507 &pcc_state
->pce_opts
->addr
.ipaddr_v6
,
1508 pcc_state
->pce_opts
->port
);
1510 snprintfrr(originator
, 52, "%pI4:%i",
1511 &pcc_state
->pce_opts
->addr
.ipaddr_v4
,
1512 pcc_state
->pce_opts
->port
);
1514 pcc_state
->originator
= originator
;
1517 void schedule_reconnect(struct ctrl_state
*ctrl_state
,
1518 struct pcc_state
*pcc_state
)
1520 pcc_state
->retry_count
++;
1521 pcep_thread_schedule_reconnect(ctrl_state
, pcc_state
->id
,
1522 pcc_state
->retry_count
,
1523 &pcc_state
->t_reconnect
);
1524 if (pcc_state
->retry_count
== 1) {
1525 pcep_thread_schedule_sync_best_pce(
1526 ctrl_state
, pcc_state
->id
,
1527 pcc_state
->pce_opts
->config_opts
1528 .delegation_timeout_seconds
,
1529 &pcc_state
->t_update_best
);
1533 void schedule_session_timeout(struct ctrl_state
*ctrl_state
,
1534 struct pcc_state
*pcc_state
)
1536 /* No need to schedule timeout if multiple PCEs are connected */
1537 if (get_pce_count_connected(ctrl_state
->pcc
)) {
1539 "schedule_session_timeout not setting timer for multi-pce mode");
1544 pcep_thread_schedule_session_timeout(
1545 ctrl_state
, pcep_pcc_get_pcc_id(pcc_state
),
1546 pcc_state
->pce_opts
->config_opts
1547 .session_timeout_inteval_seconds
,
1548 &pcc_state
->t_session_timeout
);
1551 void cancel_session_timeout(struct ctrl_state
*ctrl_state
,
1552 struct pcc_state
*pcc_state
)
1554 /* No need to schedule timeout if multiple PCEs are connected */
1555 if (pcc_state
->t_session_timeout
== NULL
) {
1556 PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
1560 PCEP_DEBUG_PCEP("Cancel session_timeout timer");
1561 pcep_thread_cancel_timer(&pcc_state
->t_session_timeout
);
1562 pcc_state
->t_session_timeout
= NULL
;
1565 void send_pcep_message(struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1567 if (pcc_state
->sess
!= NULL
) {
1568 PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state
->tag
,
1569 format_pcep_message(msg
));
1570 send_message(pcc_state
->sess
, msg
, true);
1574 void send_pcep_error(struct pcc_state
*pcc_state
,
1575 enum pcep_error_type error_type
,
1576 enum pcep_error_value error_value
,
1577 struct path
*trigger_path
)
1579 struct pcep_message
*msg
;
1580 PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
1581 pcc_state
->tag
, pcep_error_type_name(error_type
), error_type
,
1582 pcep_error_value_name(error_type
, error_value
), error_value
);
1583 msg
= pcep_lib_format_error(error_type
, error_value
, trigger_path
);
1584 send_pcep_message(pcc_state
, msg
);
1587 void send_report(struct pcc_state
*pcc_state
, struct path
*path
)
1589 struct pcep_message
*report
;
1592 specialize_outgoing_path(pcc_state
, path
);
1593 PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state
->tag
, path
->name
,
1595 report
= pcep_lib_format_report(&pcc_state
->caps
, path
);
1596 send_pcep_message(pcc_state
, report
);
1599 /* Updates the path for the PCE, updating the delegation and creation flags */
1600 void specialize_outgoing_path(struct pcc_state
*pcc_state
, struct path
*path
)
1602 bool is_delegated
= false;
1603 bool was_created
= false;
1605 lookup_plspid(pcc_state
, path
);
1607 set_pcc_address(pcc_state
, &path
->nbkey
, &path
->pcc_addr
);
1608 path
->sender
= pcc_state
->pcc_addr_tr
;
1610 /* TODO: When the pathd API have a way to mark a path as
1611 * delegated, use it instead of considering all dynamic path
1612 * delegated. We need to disable the originator check for now,
1613 * because path could be delegated without having any originator yet */
1614 // if ((path->originator == NULL)
1615 // || (strcmp(path->originator, pcc_state->originator) == 0)) {
1616 // is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
1617 // && (path->first_hop != NULL);
1618 // /* it seems the PCE consider updating an LSP a creation ?!?
1619 // at least Cisco does... */
1620 // was_created = path->update_origin == SRTE_ORIGIN_PCEP;
1622 is_delegated
= (path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
);
1623 was_created
= path
->update_origin
== SRTE_ORIGIN_PCEP
;
1625 path
->pcc_id
= pcc_state
->id
;
1626 path
->go_active
= is_delegated
&& pcc_state
->is_best
;
1627 path
->is_delegated
= is_delegated
&& pcc_state
->is_best
;
1628 path
->was_created
= was_created
;
1631 /* Updates the path for the PCC */
1632 void specialize_incoming_path(struct pcc_state
*pcc_state
, struct path
*path
)
1634 if (IS_IPADDR_NONE(&path
->pcc_addr
))
1635 set_pcc_address(pcc_state
, &path
->nbkey
, &path
->pcc_addr
);
1636 path
->sender
= pcc_state
->pce_opts
->addr
;
1637 path
->pcc_id
= pcc_state
->id
;
1638 path
->update_origin
= SRTE_ORIGIN_PCEP
;
1639 path
->originator
= XSTRDUP(MTYPE_PCEP
, pcc_state
->originator
);
1642 /* Ensure the path can be handled by the PCC and if not, sends an error */
1643 bool validate_incoming_path(struct pcc_state
*pcc_state
, struct path
*path
,
1644 char *errbuff
, size_t buffsize
)
1646 struct path_hop
*hop
;
1647 enum pcep_error_type err_type
= 0;
1648 enum pcep_error_value err_value
= PCEP_ERRV_UNASSIGNED
;
1650 for (hop
= path
->first_hop
; hop
!= NULL
; hop
= hop
->next
) {
1651 /* Hops without SID are not supported */
1652 if (!hop
->has_sid
) {
1653 snprintfrr(errbuff
, buffsize
, "SR segment without SID");
1654 err_type
= PCEP_ERRT_RECEPTION_OF_INV_OBJECT
;
1655 err_value
= PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING
;
1658 /* Hops with non-MPLS SID are not supported */
1659 if (!hop
->is_mpls
) {
1660 snprintfrr(errbuff
, buffsize
,
1661 "SR segment with non-MPLS SID");
1662 err_type
= PCEP_ERRT_RECEPTION_OF_INV_OBJECT
;
1663 err_value
= PCEP_ERRV_UNSUPPORTED_NAI
;
1668 if (err_type
!= 0) {
1669 send_pcep_error(pcc_state
, err_type
, err_value
, NULL
);
1676 void send_comp_request(struct ctrl_state
*ctrl_state
,
1677 struct pcc_state
*pcc_state
, struct req_entry
*req
)
1679 assert(req
!= NULL
);
1684 assert(req
->path
!= NULL
);
1685 assert(req
->path
->req_id
> 0);
1686 assert(RB_FIND(req_entry_head
, &pcc_state
->requests
, req
) == req
);
1687 assert(lookup_reqid(pcc_state
, req
->path
) == req
->path
->req_id
);
1690 struct pcep_message
*msg
;
1692 if (!pcc_state
->is_best
) {
1696 specialize_outgoing_path(pcc_state
, req
->path
);
1699 "%s Sending computation request %d for path %s to %pIA (retry %d)",
1700 pcc_state
->tag
, req
->path
->req_id
, req
->path
->name
,
1701 &req
->path
->nbkey
.endpoint
, req
->retry_count
);
1702 PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state
->tag
,
1703 req
->path
->name
, format_path(req
->path
));
1705 msg
= pcep_lib_format_request(&pcc_state
->caps
, req
->path
);
1706 send_pcep_message(pcc_state
, msg
);
1707 req
->was_sent
= true;
1709 timeout
= pcc_state
->pce_opts
->config_opts
.pcep_request_time_seconds
;
1710 pcep_thread_schedule_timeout(ctrl_state
, pcc_state
->id
,
1711 TO_COMPUTATION_REQUEST
, timeout
,
1712 (void *)req
, &req
->t_retry
);
1715 void cancel_comp_requests(struct ctrl_state
*ctrl_state
,
1716 struct pcc_state
*pcc_state
)
1718 struct req_entry
*req
, *safe_req
;
1720 RB_FOREACH_SAFE (req
, req_entry_head
, &pcc_state
->requests
, safe_req
) {
1721 cancel_comp_request(ctrl_state
, pcc_state
, req
);
1722 RB_REMOVE(req_entry_head
, &pcc_state
->requests
, req
);
1723 remove_reqid_mapping(pcc_state
, req
->path
);
1724 free_req_entry(req
);
1728 void cancel_comp_request(struct ctrl_state
*ctrl_state
,
1729 struct pcc_state
*pcc_state
, struct req_entry
*req
)
1731 struct pcep_message
*msg
;
1733 if (req
->was_sent
) {
1734 /* TODO: Send a computation request cancelation
1735 * notification to the PCE */
1736 pcep_thread_cancel_timer(&req
->t_retry
);
1740 "%s Canceling computation request %d for path %s to %pIA (retry %d)",
1741 pcc_state
->tag
, req
->path
->req_id
, req
->path
->name
,
1742 &req
->path
->nbkey
.endpoint
, req
->retry_count
);
1743 PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
1744 pcc_state
->tag
, req
->path
->name
,
1745 format_path(req
->path
));
1747 msg
= pcep_lib_format_request_cancelled(req
->path
->req_id
);
1748 send_pcep_message(pcc_state
, msg
);
1751 void set_pcc_address(struct pcc_state
*pcc_state
, struct lsp_nb_key
*nbkey
,
1752 struct ipaddr
*addr
)
1754 select_pcc_addresses(pcc_state
);
1755 if (IS_IPADDR_V6(&nbkey
->endpoint
)) {
1756 assert(CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
));
1757 addr
->ipa_type
= IPADDR_V6
;
1758 addr
->ipaddr_v6
= pcc_state
->pcc_addr_v6
;
1759 } else if (IS_IPADDR_V4(&nbkey
->endpoint
)) {
1760 assert(CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
));
1761 addr
->ipa_type
= IPADDR_V4
;
1762 addr
->ipaddr_v4
= pcc_state
->pcc_addr_v4
;
1764 addr
->ipa_type
= IPADDR_NONE
;
1768 /* ------------ Data Structure Helper Functions ------------ */
1770 void lookup_plspid(struct pcc_state
*pcc_state
, struct path
*path
)
1772 struct plspid_map_data key
, *plspid_mapping
;
1773 struct nbkey_map_data
*nbkey_mapping
;
1775 if (path
->nbkey
.color
!= 0) {
1776 key
.nbkey
= path
->nbkey
;
1777 plspid_mapping
= plspid_map_find(&pcc_state
->plspid_map
, &key
);
1778 if (plspid_mapping
== NULL
) {
1780 XCALLOC(MTYPE_PCEP
, sizeof(*plspid_mapping
));
1781 plspid_mapping
->nbkey
= key
.nbkey
;
1782 plspid_mapping
->plspid
= pcc_state
->next_plspid
;
1783 plspid_map_add(&pcc_state
->plspid_map
, plspid_mapping
);
1785 XCALLOC(MTYPE_PCEP
, sizeof(*nbkey_mapping
));
1786 nbkey_mapping
->nbkey
= key
.nbkey
;
1787 nbkey_mapping
->plspid
= pcc_state
->next_plspid
;
1788 nbkey_map_add(&pcc_state
->nbkey_map
, nbkey_mapping
);
1789 pcc_state
->next_plspid
++;
1790 // FIXME: Send some error to the PCE isntead of crashing
1791 assert(pcc_state
->next_plspid
<= 1048576);
1793 path
->plsp_id
= plspid_mapping
->plspid
;
1797 void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
)
1799 struct nbkey_map_data key
, *mapping
;
1800 // TODO: Should give an error to the PCE instead of crashing
1801 assert(path
->plsp_id
!= 0);
1802 key
.plspid
= path
->plsp_id
;
1803 mapping
= nbkey_map_find(&pcc_state
->nbkey_map
, &key
);
1804 assert(mapping
!= NULL
);
1805 path
->nbkey
= mapping
->nbkey
;
1808 void free_req_entry(struct req_entry
*req
)
1810 pcep_free_path(req
->path
);
1811 XFREE(MTYPE_PCEP
, req
);
1814 struct req_entry
*push_new_req(struct pcc_state
*pcc_state
, struct path
*path
)
1816 struct req_entry
*req
;
1818 req
= XCALLOC(MTYPE_PCEP
, sizeof(*req
));
1819 req
->retry_count
= 0;
1820 req
->path
= pcep_copy_path(path
);
1821 repush_req(pcc_state
, req
);
1826 void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
)
1828 uint32_t reqid
= pcc_state
->next_reqid
;
1831 req
->was_sent
= false;
1832 req
->path
->req_id
= reqid
;
1833 res
= RB_INSERT(req_entry_head
, &pcc_state
->requests
, req
);
1834 assert(res
== NULL
);
1835 assert(add_reqid_mapping(pcc_state
, req
->path
) == true);
1837 pcc_state
->next_reqid
+= 1;
1838 /* Wrapping is allowed, but 0 is not a valid id */
1839 if (pcc_state
->next_reqid
== 0)
1840 pcc_state
->next_reqid
= 1;
1843 struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
)
1845 struct path path
= {.req_id
= reqid
};
1846 struct req_entry key
= {.path
= &path
};
1847 struct req_entry
*req
;
1849 req
= RB_FIND(req_entry_head
, &pcc_state
->requests
, &key
);
1852 RB_REMOVE(req_entry_head
, &pcc_state
->requests
, req
);
1853 remove_reqid_mapping(pcc_state
, req
->path
);
1858 struct req_entry
*pop_req_no_reqid(struct pcc_state
*pcc_state
, uint32_t reqid
)
1860 struct path path
= {.req_id
= reqid
};
1861 struct req_entry key
= {.path
= &path
};
1862 struct req_entry
*req
;
1864 req
= RB_FIND(req_entry_head
, &pcc_state
->requests
, &key
);
1867 RB_REMOVE(req_entry_head
, &pcc_state
->requests
, req
);
1872 bool add_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
)
1874 struct req_map_data
*mapping
;
1875 mapping
= XCALLOC(MTYPE_PCEP
, sizeof(*mapping
));
1876 mapping
->nbkey
= path
->nbkey
;
1877 mapping
->reqid
= path
->req_id
;
1878 if (req_map_add(&pcc_state
->req_map
, mapping
) != NULL
) {
1879 XFREE(MTYPE_PCEP
, mapping
);
1885 void remove_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
)
1887 struct req_map_data key
, *mapping
;
1888 key
.nbkey
= path
->nbkey
;
1889 mapping
= req_map_find(&pcc_state
->req_map
, &key
);
1890 if (mapping
!= NULL
) {
1891 req_map_del(&pcc_state
->req_map
, mapping
);
1892 XFREE(MTYPE_PCEP
, mapping
);
1896 uint32_t lookup_reqid(struct pcc_state
*pcc_state
, struct path
*path
)
1898 struct req_map_data key
, *mapping
;
1899 key
.nbkey
= path
->nbkey
;
1900 mapping
= req_map_find(&pcc_state
->req_map
, &key
);
1901 if (mapping
!= NULL
)
1902 return mapping
->reqid
;
1906 bool has_pending_req_for(struct pcc_state
*pcc_state
, struct path
*path
)
1908 struct req_entry key
= {.path
= path
};
1909 struct req_entry
*req
;
1912 PCEP_DEBUG_PATH("(%s) %s", format_path(path
), __func__
);
1913 /* Looking for request without result */
1914 if (path
->no_path
|| !path
->first_hop
) {
1915 PCEP_DEBUG_PATH("%s Path : no_path|!first_hop", __func__
);
1916 /* ...and already was handle */
1917 req
= RB_FIND(req_entry_head
, &pcc_state
->requests
, &key
);
1919 /* we must purge remaining reqid */
1920 PCEP_DEBUG_PATH("%s Purge pending reqid: no_path(%s)",
1922 path
->no_path
? "TRUE" : "FALSE");
1923 if (lookup_reqid(pcc_state
, path
) != 0) {
1924 PCEP_DEBUG_PATH("%s Purge pending reqid: DONE ",
1926 remove_reqid_mapping(pcc_state
, path
);
1935 return lookup_reqid(pcc_state
, path
) != 0;
1939 /* ------------ Data Structure Callbacks ------------ */
/* Return -1/1 from the enclosing function when A and B differ; fall
 * through when they are equal. NOTE(review): the middle line of this
 * macro was missing from the extraction and was reconstructed. */
#define CMP_RETURN(A, B)                                                       \
	if (A != B)                                                            \
		return (A < B) ? -1 : 1
1945 static uint32_t hash_nbkey(const struct lsp_nb_key
*nbkey
)
1948 hash
= jhash_2words(nbkey
->color
, nbkey
->preference
, 0x55aa5a5a);
1949 switch (nbkey
->endpoint
.ipa_type
) {
1951 return jhash(&nbkey
->endpoint
.ipaddr_v4
,
1952 sizeof(nbkey
->endpoint
.ipaddr_v4
), hash
);
1954 return jhash(&nbkey
->endpoint
.ipaddr_v6
,
1955 sizeof(nbkey
->endpoint
.ipaddr_v6
), hash
);
1961 static int cmp_nbkey(const struct lsp_nb_key
*a
, const struct lsp_nb_key
*b
)
1963 CMP_RETURN(a
->color
, b
->color
);
1964 int cmp
= ipaddr_cmp(&a
->endpoint
, &b
->endpoint
);
1967 CMP_RETURN(a
->preference
, b
->preference
);
1971 int plspid_map_cmp(const struct plspid_map_data
*a
,
1972 const struct plspid_map_data
*b
)
1974 return cmp_nbkey(&a
->nbkey
, &b
->nbkey
);
1977 uint32_t plspid_map_hash(const struct plspid_map_data
*e
)
1979 return hash_nbkey(&e
->nbkey
);
1982 int nbkey_map_cmp(const struct nbkey_map_data
*a
,
1983 const struct nbkey_map_data
*b
)
1985 CMP_RETURN(a
->plspid
, b
->plspid
);
1989 uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
)
1994 int req_map_cmp(const struct req_map_data
*a
, const struct req_map_data
*b
)
1996 return cmp_nbkey(&a
->nbkey
, &b
->nbkey
);
1999 uint32_t req_map_hash(const struct req_map_data
*e
)
2001 return hash_nbkey(&e
->nbkey
);