2 * Copyright (C) 2020 NetDEF, Inc.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along
15 * with this program; see the file COPYING; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 /* TODOS AND KNOWN ISSUES:
20 - Delete mapping from NB keys to PLSPID when an LSP is deleted either
22 - Revert the hacks to work around ODL requiring a report with
23 operational status DOWN when an LSP is activated.
24 - Enforce only the PCE a policy has been delegated to can update it.
25 - If the router-id is used because the PCC IP is not specified
26 (either IPv4 or IPv6), the connection to the PCE is not reset
27 when the router-id changes.
36 #include "lib/version.h"
37 #include "northbound.h"
38 #include "frr_pthread.h"
41 #include "pathd/pathd.h"
42 #include "pathd/path_zebra.h"
43 #include "pathd/path_errors.h"
44 #include "pathd/path_pcep.h"
45 #include "pathd/path_pcep_controller.h"
46 #include "pathd/path_pcep_lib.h"
47 #include "pathd/path_pcep_config.h"
48 #include "pathd/path_pcep_debug.h"
51 /* The number of times we will skip connecting if we are missing the PCC
52 * address for an inet family different from the selected transport one */
53 #define OTHER_FAMILY_MAX_RETRIES 4
54 #define MAX_ERROR_MSG_SIZE 256
55 #define MAX_COMPREQ_TRIES 3
57 pthread_mutex_t g_pcc_info_mtx
= PTHREAD_MUTEX_INITIALIZER
;
59 /* PCEP Event Handler */
60 static void handle_pcep_open(struct ctrl_state
*ctrl_state
,
61 struct pcc_state
*pcc_state
,
62 struct pcep_message
*msg
);
63 static void handle_pcep_message(struct ctrl_state
*ctrl_state
,
64 struct pcc_state
*pcc_state
,
65 struct pcep_message
*msg
);
66 static void handle_pcep_lsp_initiate(struct ctrl_state
*ctrl_state
,
67 struct pcc_state
*pcc_state
,
68 struct pcep_message
*msg
);
69 static void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
70 struct pcc_state
*pcc_state
,
71 struct pcep_message
*msg
);
72 static void continue_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
73 struct pcc_state
*pcc_state
,
74 struct path
*path
, void *payload
);
75 static void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
76 struct pcc_state
*pcc_state
,
77 struct pcep_message
*msg
);
79 /* Internal Functions */
80 static const char *ipaddr_type_name(struct ipaddr
*addr
);
81 static bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
);
82 static void select_pcc_addresses(struct pcc_state
*pcc_state
);
83 static void select_transport_address(struct pcc_state
*pcc_state
);
84 static void update_tag(struct pcc_state
*pcc_state
);
85 static void update_originator(struct pcc_state
*pcc_state
);
86 static void schedule_reconnect(struct ctrl_state
*ctrl_state
,
87 struct pcc_state
*pcc_state
);
88 static void schedule_session_timeout(struct ctrl_state
*ctrl_state
,
89 struct pcc_state
*pcc_state
);
90 static void cancel_session_timeout(struct ctrl_state
*ctrl_state
,
91 struct pcc_state
*pcc_state
);
92 static void send_pcep_message(struct pcc_state
*pcc_state
,
93 struct pcep_message
*msg
);
94 static void send_pcep_error(struct pcc_state
*pcc_state
,
95 enum pcep_error_type error_type
,
96 enum pcep_error_value error_value
);
97 static void send_report(struct pcc_state
*pcc_state
, struct path
*path
);
98 static void send_comp_request(struct ctrl_state
*ctrl_state
,
99 struct pcc_state
*pcc_state
,
100 struct req_entry
*req
);
101 static void cancel_comp_requests(struct ctrl_state
*ctrl_state
,
102 struct pcc_state
*pcc_state
);
103 static void cancel_comp_request(struct ctrl_state
*ctrl_state
,
104 struct pcc_state
*pcc_state
,
105 struct req_entry
*req
);
106 static void specialize_outgoing_path(struct pcc_state
*pcc_state
,
108 static void specialize_incoming_path(struct pcc_state
*pcc_state
,
110 static bool validate_incoming_path(struct pcc_state
*pcc_state
,
111 struct path
*path
, char *errbuff
,
113 static void set_pcc_address(struct pcc_state
*pcc_state
,
114 struct lsp_nb_key
*nbkey
, struct ipaddr
*addr
);
115 static int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
);
116 static int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
);
117 static int get_previous_best_pce(struct pcc_state
**pcc
);
118 static int get_best_pce(struct pcc_state
**pcc
);
119 static int get_pce_count_connected(struct pcc_state
**pcc
);
120 static bool update_best_pce(struct pcc_state
**pcc
, int best
);
122 /* Data Structure Helper Functions */
123 static void lookup_plspid(struct pcc_state
*pcc_state
, struct path
*path
);
124 static void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
);
125 static void free_req_entry(struct req_entry
*req
);
126 static struct req_entry
*push_new_req(struct pcc_state
*pcc_state
,
128 static void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
);
129 static struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
);
130 static bool add_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
);
131 static void remove_reqid_mapping(struct pcc_state
*pcc_state
,
133 static uint32_t lookup_reqid(struct pcc_state
*pcc_state
, struct path
*path
);
134 static bool has_pending_req_for(struct pcc_state
*pcc_state
, struct path
*path
);
136 /* Data Structure Callbacks */
137 static int plspid_map_cmp(const struct plspid_map_data
*a
,
138 const struct plspid_map_data
*b
);
139 static uint32_t plspid_map_hash(const struct plspid_map_data
*e
);
140 static int nbkey_map_cmp(const struct nbkey_map_data
*a
,
141 const struct nbkey_map_data
*b
);
142 static uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
);
143 static int req_map_cmp(const struct req_map_data
*a
,
144 const struct req_map_data
*b
);
145 static uint32_t req_map_hash(const struct req_map_data
*e
);
147 /* Data Structure Declarations */
148 DECLARE_HASH(plspid_map
, struct plspid_map_data
, mi
, plspid_map_cmp
,
150 DECLARE_HASH(nbkey_map
, struct nbkey_map_data
, mi
, nbkey_map_cmp
,
152 DECLARE_HASH(req_map
, struct req_map_data
, mi
, req_map_cmp
, req_map_hash
);
154 static inline int req_entry_compare(const struct req_entry
*a
,
155 const struct req_entry
*b
)
157 return a
->path
->req_id
- b
->path
->req_id
;
159 RB_GENERATE(req_entry_head
, req_entry
, entry
, req_entry_compare
)
162 /* ------------ API Functions ------------ */
164 struct pcc_state
*pcep_pcc_initialize(struct ctrl_state
*ctrl_state
, int index
)
166 struct pcc_state
*pcc_state
= XCALLOC(MTYPE_PCEP
, sizeof(*pcc_state
));
168 pcc_state
->id
= index
;
169 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
170 pcc_state
->next_reqid
= 1;
171 pcc_state
->next_plspid
= 1;
173 RB_INIT(req_entry_head
, &pcc_state
->requests
);
175 update_tag(pcc_state
);
176 update_originator(pcc_state
);
178 PCEP_DEBUG("%s PCC initialized", pcc_state
->tag
);
183 void pcep_pcc_finalize(struct ctrl_state
*ctrl_state
,
184 struct pcc_state
*pcc_state
)
186 PCEP_DEBUG("%s PCC finalizing...", pcc_state
->tag
);
188 pcep_pcc_disable(ctrl_state
, pcc_state
);
190 if (pcc_state
->pcc_opts
!= NULL
) {
191 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
192 pcc_state
->pcc_opts
= NULL
;
194 if (pcc_state
->pce_opts
!= NULL
) {
195 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
196 pcc_state
->pce_opts
= NULL
;
198 if (pcc_state
->originator
!= NULL
) {
199 XFREE(MTYPE_PCEP
, pcc_state
->originator
);
200 pcc_state
->originator
= NULL
;
203 if (pcc_state
->t_reconnect
!= NULL
) {
204 thread_cancel(&pcc_state
->t_reconnect
);
205 pcc_state
->t_reconnect
= NULL
;
208 if (pcc_state
->t_update_best
!= NULL
) {
209 thread_cancel(&pcc_state
->t_update_best
);
210 pcc_state
->t_update_best
= NULL
;
213 if (pcc_state
->t_session_timeout
!= NULL
) {
214 thread_cancel(&pcc_state
->t_session_timeout
);
215 pcc_state
->t_session_timeout
= NULL
;
218 XFREE(MTYPE_PCEP
, pcc_state
);
221 int compare_pcc_opts(struct pcc_opts
*lhs
, struct pcc_opts
*rhs
)
233 retval
= lhs
->port
- rhs
->port
;
238 retval
= lhs
->msd
- rhs
->msd
;
243 if (IS_IPADDR_V4(&lhs
->addr
)) {
244 retval
= memcmp(&lhs
->addr
.ipaddr_v4
, &rhs
->addr
.ipaddr_v4
,
245 sizeof(lhs
->addr
.ipaddr_v4
));
249 } else if (IS_IPADDR_V6(&lhs
->addr
)) {
250 retval
= memcmp(&lhs
->addr
.ipaddr_v6
, &rhs
->addr
.ipaddr_v6
,
251 sizeof(lhs
->addr
.ipaddr_v6
));
260 int compare_pce_opts(struct pce_opts
*lhs
, struct pce_opts
*rhs
)
270 int retval
= lhs
->port
- rhs
->port
;
275 retval
= strcmp(lhs
->pce_name
, rhs
->pce_name
);
280 retval
= lhs
->precedence
- rhs
->precedence
;
285 retval
= memcmp(&lhs
->addr
, &rhs
->addr
, sizeof(lhs
->addr
));
293 int pcep_pcc_update(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
,
294 struct pcc_opts
*pcc_opts
, struct pce_opts
*pce_opts
)
298 // If the options did not change, then there is nothing to do
299 if ((compare_pce_opts(pce_opts
, pcc_state
->pce_opts
) == 0)
300 && (compare_pcc_opts(pcc_opts
, pcc_state
->pcc_opts
) == 0)) {
304 if ((ret
= pcep_pcc_disable(ctrl_state
, pcc_state
))) {
305 XFREE(MTYPE_PCEP
, pcc_opts
);
306 XFREE(MTYPE_PCEP
, pce_opts
);
310 if (pcc_state
->pcc_opts
!= NULL
) {
311 XFREE(MTYPE_PCEP
, pcc_state
->pcc_opts
);
313 if (pcc_state
->pce_opts
!= NULL
) {
314 XFREE(MTYPE_PCEP
, pcc_state
->pce_opts
);
317 pcc_state
->pcc_opts
= pcc_opts
;
318 pcc_state
->pce_opts
= pce_opts
;
320 if (IS_IPADDR_V4(&pcc_opts
->addr
)) {
321 pcc_state
->pcc_addr_v4
= pcc_opts
->addr
.ipaddr_v4
;
322 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
324 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
327 if (IS_IPADDR_V6(&pcc_opts
->addr
)) {
328 memcpy(&pcc_state
->pcc_addr_v6
, &pcc_opts
->addr
.ipaddr_v6
,
329 sizeof(struct in6_addr
));
330 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
332 UNSET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
335 update_tag(pcc_state
);
336 update_originator(pcc_state
);
338 return pcep_pcc_enable(ctrl_state
, pcc_state
);
341 void pcep_pcc_reconnect(struct ctrl_state
*ctrl_state
,
342 struct pcc_state
*pcc_state
)
344 if (pcc_state
->status
== PCEP_PCC_DISCONNECTED
)
345 pcep_pcc_enable(ctrl_state
, pcc_state
);
348 int pcep_pcc_enable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
350 assert(pcc_state
->status
== PCEP_PCC_DISCONNECTED
);
351 assert(pcc_state
->sess
== NULL
);
353 if (pcc_state
->t_reconnect
!= NULL
) {
354 thread_cancel(&pcc_state
->t_reconnect
);
355 pcc_state
->t_reconnect
= NULL
;
358 select_transport_address(pcc_state
);
360 /* Even though we are connecting using IPv6. we want to have an IPv4
361 * address so we can handle candidate path with IPv4 endpoints */
362 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
363 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
364 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
365 "skipping connection to PCE %pIA:%d due to missing PCC IPv4 address",
366 &pcc_state
->pce_opts
->addr
,
367 pcc_state
->pce_opts
->port
);
368 schedule_reconnect(ctrl_state
, pcc_state
);
371 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
372 "missing IPv4 PCC address, IPv4 candidate paths will be ignored");
376 /* Even though we are connecting using IPv4. we want to have an IPv6
377 * address so we can handle candidate path with IPv6 endpoints */
378 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
379 if (pcc_state
->retry_count
< OTHER_FAMILY_MAX_RETRIES
) {
380 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
381 "skipping connection to PCE %pIA:%d due to missing PCC IPv6 address",
382 &pcc_state
->pce_opts
->addr
,
383 pcc_state
->pce_opts
->port
);
384 schedule_reconnect(ctrl_state
, pcc_state
);
387 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
388 "missing IPv6 PCC address, IPv6 candidate paths will be ignored");
392 /* Even if the maximum retries to try to have all the family addresses
393 * have been spent, we still need the one for the transport family */
394 if (pcc_state
->pcc_addr_tr
.ipa_type
== IPADDR_NONE
) {
395 flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS
,
396 "skipping connection to PCE %pIA:%d due to missing PCC address",
397 &pcc_state
->pce_opts
->addr
,
398 pcc_state
->pce_opts
->port
);
399 schedule_reconnect(ctrl_state
, pcc_state
);
403 PCEP_DEBUG("%s PCC connecting", pcc_state
->tag
);
404 pcc_state
->sess
= pcep_lib_connect(
405 &pcc_state
->pcc_addr_tr
, pcc_state
->pcc_opts
->port
,
406 &pcc_state
->pce_opts
->addr
, pcc_state
->pce_opts
->port
,
407 pcc_state
->pcc_opts
->msd
, &pcc_state
->pce_opts
->config_opts
);
409 if (pcc_state
->sess
== NULL
) {
410 flog_warn(EC_PATH_PCEP_LIB_CONNECT
,
411 "failed to connect to PCE %pIA:%d from %pIA:%d",
412 &pcc_state
->pce_opts
->addr
,
413 pcc_state
->pce_opts
->port
,
414 &pcc_state
->pcc_addr_tr
,
415 pcc_state
->pcc_opts
->port
);
416 schedule_reconnect(ctrl_state
, pcc_state
);
420 // In case some best pce alternative were waiting to activate
421 if (pcc_state
->t_update_best
!= NULL
) {
422 thread_cancel(&pcc_state
->t_update_best
);
423 pcc_state
->t_update_best
= NULL
;
426 pcc_state
->status
= PCEP_PCC_CONNECTING
;
431 int pcep_pcc_disable(struct ctrl_state
*ctrl_state
, struct pcc_state
*pcc_state
)
433 switch (pcc_state
->status
) {
434 case PCEP_PCC_DISCONNECTED
:
436 case PCEP_PCC_CONNECTING
:
437 case PCEP_PCC_SYNCHRONIZING
:
438 case PCEP_PCC_OPERATING
:
439 PCEP_DEBUG("%s Disconnecting PCC...", pcc_state
->tag
);
440 cancel_comp_requests(ctrl_state
, pcc_state
);
441 pcep_lib_disconnect(pcc_state
->sess
);
442 /* No need to remove if any PCEs is connected */
443 if (get_pce_count_connected(ctrl_state
->pcc
) == 0) {
444 pcep_thread_remove_candidate_path_segments(ctrl_state
,
447 pcc_state
->sess
= NULL
;
448 pcc_state
->status
= PCEP_PCC_DISCONNECTED
;
455 void pcep_pcc_sync_path(struct ctrl_state
*ctrl_state
,
456 struct pcc_state
*pcc_state
, struct path
*path
)
458 if (pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
459 path
->is_synching
= true;
460 } else if (pcc_state
->status
== PCEP_PCC_OPERATING
)
461 path
->is_synching
= false;
465 path
->go_active
= true;
467 /* Accumulate the dynamic paths without any LSP so computation
468 * requests can be performed after synchronization */
469 if ((path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)
470 && (path
->first_hop
== NULL
)
471 && !has_pending_req_for(pcc_state
, path
)) {
472 PCEP_DEBUG("%s Scheduling computation request for path %s",
473 pcc_state
->tag
, path
->name
);
474 push_new_req(pcc_state
, path
);
478 /* Synchronize the path if the PCE supports LSP updates and the
479 * endpoint address family is supported */
480 if (pcc_state
->caps
.is_stateful
) {
481 if (filter_path(pcc_state
, path
)) {
482 PCEP_DEBUG("%s Synchronizing path %s", pcc_state
->tag
,
484 send_report(pcc_state
, path
);
487 "%s Skipping %s candidate path %s synchronization",
489 ipaddr_type_name(&path
->nbkey
.endpoint
),
495 void pcep_pcc_sync_done(struct ctrl_state
*ctrl_state
,
496 struct pcc_state
*pcc_state
)
498 struct req_entry
*req
;
500 if (pcc_state
->status
!= PCEP_PCC_SYNCHRONIZING
501 && pcc_state
->status
!= PCEP_PCC_OPERATING
)
504 if (pcc_state
->caps
.is_stateful
505 && pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
) {
506 struct path
*path
= pcep_new_path();
507 *path
= (struct path
){.name
= NULL
,
510 .status
= PCEP_LSP_OPERATIONAL_DOWN
,
513 .was_created
= false,
514 .was_removed
= false,
515 .is_synching
= false,
516 .is_delegated
= false,
518 .first_metric
= NULL
};
519 send_report(pcc_state
, path
);
520 pcep_free_path(path
);
523 pcc_state
->synchronized
= true;
524 pcc_state
->status
= PCEP_PCC_OPERATING
;
526 PCEP_DEBUG("%s Synchronization done", pcc_state
->tag
);
528 /* Start the computation request accumulated during synchronization */
529 RB_FOREACH (req
, req_entry_head
, &pcc_state
->requests
) {
530 send_comp_request(ctrl_state
, pcc_state
, req
);
534 void pcep_pcc_send_report(struct ctrl_state
*ctrl_state
,
535 struct pcc_state
*pcc_state
, struct path
*path
,
538 if ((pcc_state
->status
!= PCEP_PCC_OPERATING
)
539 || (!pcc_state
->caps
.is_stateful
)) {
540 pcep_free_path(path
);
544 PCEP_DEBUG("%s Send report for candidate path %s", pcc_state
->tag
,
547 /* ODL and Cisco requires the first reported
548 * LSP to have a DOWN status, the later status changes
549 * will be communicated through hook calls.
551 enum pcep_lsp_operational_status real_status
= path
->status
;
552 path
->status
= PCEP_LSP_OPERATIONAL_DOWN
;
553 send_report(pcc_state
, path
);
555 /* If no update is expected and the real status wasn't down, we need to
556 * send a second report with the real status */
557 if (is_stable
&& (real_status
!= PCEP_LSP_OPERATIONAL_DOWN
)) {
559 path
->status
= real_status
;
560 send_report(pcc_state
, path
);
563 pcep_free_path(path
);
567 /* ------------ Timeout handler ------------ */
569 void pcep_pcc_timeout_handler(struct ctrl_state
*ctrl_state
,
570 struct pcc_state
*pcc_state
,
571 enum pcep_ctrl_timeout_type type
, void *param
)
573 struct req_entry
*req
;
576 case TO_COMPUTATION_REQUEST
:
577 assert(param
!= NULL
);
578 req
= (struct req_entry
*)param
;
579 pop_req(pcc_state
, req
->path
->req_id
);
580 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT
,
581 "Computation request %d timeout", req
->path
->req_id
);
582 cancel_comp_request(ctrl_state
, pcc_state
, req
);
583 if (req
->retry_count
++ < MAX_COMPREQ_TRIES
) {
584 repush_req(pcc_state
, req
);
585 send_comp_request(ctrl_state
, pcc_state
, req
);
588 if (pcc_state
->caps
.is_stateful
) {
591 "%s Delegating undefined dynamic path %s to PCE %s",
592 pcc_state
->tag
, req
->path
->name
,
593 pcc_state
->originator
);
594 path
= pcep_copy_path(req
->path
);
595 path
->is_delegated
= true;
596 send_report(pcc_state
, path
);
606 /* ------------ Pathd event handler ------------ */
608 void pcep_pcc_pathd_event_handler(struct ctrl_state
*ctrl_state
,
609 struct pcc_state
*pcc_state
,
610 enum pcep_pathd_event_type type
,
613 struct req_entry
*req
;
615 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
618 /* Skipping candidate path with endpoint that do not match the
619 * configured or deduced PCC IP version */
620 if (!filter_path(pcc_state
, path
)) {
621 PCEP_DEBUG("%s Skipping %s candidate path %s event",
623 ipaddr_type_name(&path
->nbkey
.endpoint
), path
->name
);
628 case PCEP_PATH_CREATED
:
629 if (has_pending_req_for(pcc_state
, path
)) {
631 "%s Candidate path %s created, computation request already sent",
632 pcc_state
->tag
, path
->name
);
635 PCEP_DEBUG("%s Candidate path %s created", pcc_state
->tag
,
637 if ((path
->first_hop
== NULL
)
638 && (path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
)) {
639 req
= push_new_req(pcc_state
, path
);
640 send_comp_request(ctrl_state
, pcc_state
, req
);
641 } else if (pcc_state
->caps
.is_stateful
)
642 send_report(pcc_state
, path
);
644 case PCEP_PATH_UPDATED
:
645 PCEP_DEBUG("%s Candidate path %s updated", pcc_state
->tag
,
647 if (pcc_state
->caps
.is_stateful
)
648 send_report(pcc_state
, path
);
650 case PCEP_PATH_REMOVED
:
651 PCEP_DEBUG("%s Candidate path %s removed", pcc_state
->tag
,
653 path
->was_removed
= true;
654 if (pcc_state
->caps
.is_stateful
)
655 send_report(pcc_state
, path
);
658 flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR
,
659 "Unexpected pathd event received by pcc %s: %u",
660 pcc_state
->tag
, type
);
666 /* ------------ PCEP event handler ------------ */
668 void pcep_pcc_pcep_event_handler(struct ctrl_state
*ctrl_state
,
669 struct pcc_state
*pcc_state
, pcep_event
*event
)
671 PCEP_DEBUG("%s Received PCEP event: %s", pcc_state
->tag
,
672 pcep_event_type_name(event
->event_type
));
673 switch (event
->event_type
) {
674 case PCC_CONNECTED_TO_PCE
:
675 assert(PCEP_PCC_CONNECTING
== pcc_state
->status
);
676 PCEP_DEBUG("%s Connection established", pcc_state
->tag
);
677 pcc_state
->status
= PCEP_PCC_SYNCHRONIZING
;
678 pcc_state
->retry_count
= 0;
679 pcc_state
->synchronized
= false;
680 PCEP_DEBUG("%s Starting PCE synchronization", pcc_state
->tag
);
681 cancel_session_timeout(ctrl_state
, pcc_state
);
682 pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
683 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
685 case PCC_SENT_INVALID_OPEN
:
686 PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state
->tag
);
688 "%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
690 pcc_state
->sess
->pcc_config
691 .keep_alive_pce_negotiated_timer_seconds
,
692 pcc_state
->sess
->pcc_config
693 .dead_timer_pce_negotiated_seconds
);
694 pcc_state
->pce_opts
->config_opts
.keep_alive_seconds
=
695 pcc_state
->sess
->pcc_config
696 .keep_alive_pce_negotiated_timer_seconds
;
697 pcc_state
->pce_opts
->config_opts
.dead_timer_seconds
=
698 pcc_state
->sess
->pcc_config
699 .dead_timer_pce_negotiated_seconds
;
702 case PCC_RCVD_INVALID_OPEN
:
703 PCEP_DEBUG("%s Received invalid OPEN message", pcc_state
->tag
);
704 PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state
->tag
,
705 format_pcep_message(event
->message
));
707 case PCE_DEAD_TIMER_EXPIRED
:
708 case PCE_CLOSED_SOCKET
:
709 case PCE_SENT_PCEP_CLOSE
:
710 case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED
:
711 case PCC_PCEP_SESSION_CLOSED
:
712 case PCC_RCVD_MAX_INVALID_MSGS
:
713 case PCC_RCVD_MAX_UNKOWN_MSGS
:
714 pcep_pcc_disable(ctrl_state
, pcc_state
);
715 schedule_reconnect(ctrl_state
, pcc_state
);
716 schedule_session_timeout(ctrl_state
, pcc_state
);
718 case MESSAGE_RECEIVED
:
719 PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state
->tag
,
720 format_pcep_message(event
->message
));
721 if (pcc_state
->status
== PCEP_PCC_CONNECTING
) {
722 if (event
->message
->msg_header
->type
== PCEP_TYPE_OPEN
)
723 handle_pcep_open(ctrl_state
, pcc_state
,
727 assert(pcc_state
->status
== PCEP_PCC_SYNCHRONIZING
728 || pcc_state
->status
== PCEP_PCC_OPERATING
);
729 handle_pcep_message(ctrl_state
, pcc_state
, event
->message
);
732 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT
,
733 "Unexpected event from pceplib: %s",
734 format_pcep_event(event
));
740 /*------------------ Multi-PCE --------------------- */
742 /* Internal util function, returns true if sync is necessary, false otherwise */
743 bool update_best_pce(struct pcc_state
**pcc
, int best
)
745 PCEP_DEBUG(" recalculating pce precedence ");
747 struct pcc_state
*best_pcc_state
=
748 pcep_pcc_get_pcc_by_id(pcc
, best
);
749 if (best_pcc_state
->previous_best
!= best_pcc_state
->is_best
) {
750 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
751 best_pcc_state
->tag
, best_pcc_state
->id
,
752 best_pcc_state
->previous_best
);
756 " %s No Resynch best (%i) previous best (%i)",
757 best_pcc_state
->tag
, best_pcc_state
->id
,
758 best_pcc_state
->previous_best
);
761 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
767 int get_best_pce(struct pcc_state
**pcc
)
769 for (int i
= 0; i
< MAX_PCC
; i
++) {
770 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
771 if (pcc
[i
]->is_best
== true) {
779 int get_pce_count_connected(struct pcc_state
**pcc
)
782 for (int i
= 0; i
< MAX_PCC
; i
++) {
783 if (pcc
[i
] && pcc
[i
]->pce_opts
784 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
791 int get_previous_best_pce(struct pcc_state
**pcc
)
793 int previous_best_pce
= -1;
795 for (int i
= 0; i
< MAX_PCC
; i
++) {
796 if (pcc
[i
] && pcc
[i
]->pce_opts
&& pcc
[i
]->previous_best
== true
797 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
798 previous_best_pce
= i
;
802 return previous_best_pce
!= -1 ? pcc
[previous_best_pce
]->id
: 0;
805 /* Called by path_pcep_controller EV_REMOVE_PCC
806 * Event handler when a PCC is removed. */
807 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state
*ctrl_state
,
808 struct pcc_state
**pcc
)
810 int new_best_pcc_id
= -1;
811 new_best_pcc_id
= pcep_pcc_calculate_best_pce(pcc
);
812 if (new_best_pcc_id
) {
813 if (update_best_pce(ctrl_state
->pcc
, new_best_pcc_id
) == true) {
814 pcep_thread_start_sync(ctrl_state
, new_best_pcc_id
);
821 /* Called by path_pcep_controller EV_SYNC_PATH
822 * Event handler when a path is sync'd. */
823 int pcep_pcc_multi_pce_sync_path(struct ctrl_state
*ctrl_state
, int pcc_id
,
824 struct pcc_state
**pcc
)
826 int previous_best_pcc_id
= -1;
828 if (pcc_id
== get_best_pce(pcc
)) {
829 previous_best_pcc_id
= get_previous_best_pce(pcc
);
830 if (previous_best_pcc_id
!= 0) {
831 /* while adding new pce, path has to resync to the
832 * previous best. pcep_thread_start_sync() will be
833 * called by the calling function */
834 if (update_best_pce(ctrl_state
->pcc
,
835 previous_best_pcc_id
)
837 cancel_comp_requests(
839 pcep_pcc_get_pcc_by_id(
840 pcc
, previous_best_pcc_id
));
841 pcep_thread_start_sync(ctrl_state
,
842 previous_best_pcc_id
);
850 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
852 int pcep_pcc_timer_update_best_pce(struct ctrl_state
*ctrl_state
, int pcc_id
)
855 /* resync whatever was the new best */
856 int prev_best
= get_best_pce(ctrl_state
->pcc
);
857 int best_id
= pcep_pcc_calculate_best_pce(ctrl_state
->pcc
);
858 if (best_id
&& prev_best
!= best_id
) { // Avoid Multiple call
859 struct pcc_state
*pcc_state
=
860 pcep_pcc_get_pcc_by_id(ctrl_state
->pcc
, best_id
);
861 if (update_best_pce(ctrl_state
->pcc
, pcc_state
->id
) == true) {
862 pcep_thread_start_sync(ctrl_state
, pcc_state
->id
);
869 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
870 * Returns the best PCE id */
871 int pcep_pcc_calculate_best_pce(struct pcc_state
**pcc
)
873 int best_precedence
= 255; // DEFAULT_PCE_PRECEDENCE;
875 int one_connected_pce
= -1;
876 int previous_best_pce
= -1;
877 int step_0_best
= -1;
878 int step_0_previous
= -1;
882 for (int i
= 0; i
< MAX_PCC
; i
++) {
883 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
885 "multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
886 i
, pcc
[i
]->is_best
, pcc
[i
]->previous_best
);
889 if (pcc
[i
]->is_best
== true) {
892 if (pcc
[i
]->previous_best
== true) {
903 for (int i
= 0; i
< MAX_PCC
; i
++) {
904 if (pcc
[i
] && pcc
[i
]->pce_opts
905 && pcc
[i
]->status
!= PCEP_PCC_DISCONNECTED
) {
906 one_connected_pce
= i
; // In case none better
907 if (pcc
[i
]->pce_opts
->precedence
<= best_precedence
) {
909 && pcc
[best_pce
]->pce_opts
->precedence
913 &pcc
[i
]->pce_opts
->addr
,
917 // collide of precedences so
921 if (!pcc
[i
]->previous_best
) {
933 "multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
934 step_0_best
, step_0_previous
, one_connected_pce
, best_pce
);
936 // Changed of state so ...
937 if (step_0_best
!= best_pce
) {
938 pthread_mutex_lock(&g_pcc_info_mtx
);
939 // Calculate previous
940 previous_best_pce
= step_0_best
;
942 if (step_0_best
!= -1) {
943 pcc
[step_0_best
]->is_best
= false;
945 if (step_0_previous
!= -1) {
946 pcc
[step_0_previous
]->previous_best
= false;
950 if (previous_best_pce
!= -1
951 && pcc
[previous_best_pce
]->status
952 == PCEP_PCC_DISCONNECTED
) {
953 pcc
[previous_best_pce
]->previous_best
= true;
954 zlog_debug("multi-pce: previous best pce (%i) ",
955 previous_best_pce
+ 1);
960 if (best_pce
!= -1) {
961 pcc
[best_pce
]->is_best
= true;
962 zlog_debug("multi-pce: best pce (%i) ", best_pce
+ 1);
964 if (one_connected_pce
!= -1) {
965 best_pce
= one_connected_pce
;
966 pcc
[one_connected_pce
]->is_best
= true;
968 "multi-pce: one connected best pce (default) (%i) ",
969 one_connected_pce
+ 1);
971 for (int i
= 0; i
< MAX_PCC
; i
++) {
972 if (pcc
[i
] && pcc
[i
]->pce_opts
) {
974 pcc
[i
]->is_best
= true;
976 "(disconnected) best pce (default) (%i) ",
983 pthread_mutex_unlock(&g_pcc_info_mtx
);
986 return ((best_pce
== -1) ? 0 : pcc
[best_pce
]->id
);
989 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state
**pcc
,
990 struct pce_opts
*pce_opts
)
996 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
998 if ((ipaddr_cmp((const struct ipaddr
*)&pcc
[idx
]
1000 (const struct ipaddr
*)&pce_opts
->addr
)
1002 && pcc
[idx
]->pce_opts
->port
== pce_opts
->port
) {
1003 zlog_debug("found pcc_id (%d) idx (%d)",
1005 return pcc
[idx
]->id
;
1012 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state
**pcc
, int idx
)
1014 if (pcc
== NULL
|| idx
< 0) {
1018 return pcc
[idx
] ? pcc
[idx
]->id
: 0;
1021 struct pcc_state
*pcep_pcc_get_pcc_by_id(struct pcc_state
**pcc
, int id
)
1023 if (pcc
== NULL
|| id
< 0) {
1027 for (int i
= 0; i
< MAX_PCC
; i
++) {
1029 if (pcc
[i
]->id
== id
) {
1030 zlog_debug("found id (%d) pcc_idx (%d)",
1040 struct pcc_state
*pcep_pcc_get_pcc_by_name(struct pcc_state
**pcc
,
1041 const char *pce_name
)
1043 if (pcc
== NULL
|| pce_name
== NULL
) {
1047 for (int i
= 0; i
< MAX_PCC
; i
++) {
1048 if (pcc
[i
] == NULL
) {
1052 if (strcmp(pcc
[i
]->pce_opts
->pce_name
, pce_name
) == 0) {
1060 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state
**pcc
, int id
)
1066 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1068 if (pcc
[idx
]->id
== id
) {
1069 zlog_debug("found pcc_id (%d) array_idx (%d)",
1079 int pcep_pcc_get_free_pcc_idx(struct pcc_state
**pcc
)
1081 assert(pcc
!= NULL
);
1083 for (int idx
= 0; idx
< MAX_PCC
; idx
++) {
1084 if (pcc
[idx
] == NULL
) {
1085 zlog_debug("new pcc_idx (%d)", idx
);
1093 int pcep_pcc_get_pcc_id(struct pcc_state
*pcc
)
1095 return ((pcc
== NULL
) ? 0 : pcc
->id
);
1098 void pcep_pcc_copy_pcc_info(struct pcc_state
**pcc
,
1099 struct pcep_pcc_info
*pcc_info
)
1101 struct pcc_state
*pcc_state
=
1102 pcep_pcc_get_pcc_by_name(pcc
, pcc_info
->pce_name
);
1107 pcc_info
->ctrl_state
= NULL
;
1108 if(pcc_state
->pcc_opts
){
1109 pcc_info
->msd
= pcc_state
->pcc_opts
->msd
;
1110 pcc_info
->pcc_port
= pcc_state
->pcc_opts
->port
;
1112 pcc_info
->next_plspid
= pcc_state
->next_plspid
;
1113 pcc_info
->next_reqid
= pcc_state
->next_reqid
;
1114 pcc_info
->status
= pcc_state
->status
;
1115 pcc_info
->pcc_id
= pcc_state
->id
;
1116 pthread_mutex_lock(&g_pcc_info_mtx
);
1117 pcc_info
->is_best_multi_pce
= pcc_state
->is_best
;
1118 pcc_info
->previous_best
= pcc_state
->previous_best
;
1119 pthread_mutex_unlock(&g_pcc_info_mtx
);
1120 pcc_info
->precedence
=
1121 pcc_state
->pce_opts
? pcc_state
->pce_opts
->precedence
: 0;
1122 if(pcc_state
->pcc_addr_tr
.ipa_type
!= IPADDR_NONE
){
1123 memcpy(&pcc_info
->pcc_addr
, &pcc_state
->pcc_addr_tr
,
1124 sizeof(struct ipaddr
));
1129 /*------------------ PCEP Message handlers --------------------- */
1131 void handle_pcep_open(struct ctrl_state
*ctrl_state
,
1132 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1134 assert(msg
->msg_header
->type
== PCEP_TYPE_OPEN
);
1135 pcep_lib_parse_capabilities(msg
, &pcc_state
->caps
);
1136 PCEP_DEBUG("PCE capabilities: %s, %s%s",
1137 pcc_state
->caps
.is_stateful
? "stateful" : "stateless",
1138 pcc_state
->caps
.supported_ofs_are_known
1139 ? (pcc_state
->caps
.supported_ofs
== 0
1140 ? "no objective functions supported"
1141 : "supported objective functions are ")
1142 : "supported objective functions are unknown",
1143 format_objfun_set(pcc_state
->caps
.supported_ofs
));
1146 void handle_pcep_message(struct ctrl_state
*ctrl_state
,
1147 struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1149 if (pcc_state
->status
!= PCEP_PCC_OPERATING
)
1152 switch (msg
->msg_header
->type
) {
1153 case PCEP_TYPE_INITIATE
:
1154 handle_pcep_lsp_initiate(ctrl_state
, pcc_state
, msg
);
1156 case PCEP_TYPE_UPDATE
:
1157 handle_pcep_lsp_update(ctrl_state
, pcc_state
, msg
);
1159 case PCEP_TYPE_PCREP
:
1160 handle_pcep_comp_reply(ctrl_state
, pcc_state
, msg
);
1163 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE
,
1164 "Unexpected pcep message from pceplib: %s",
1165 format_pcep_message(msg
));
1170 void handle_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
1171 struct pcc_state
*pcc_state
,
1172 struct pcep_message
*msg
)
1175 path
= pcep_lib_parse_path(msg
);
1176 lookup_nbkey(pcc_state
, path
);
1177 pcep_thread_refine_path(ctrl_state
, pcc_state
->id
,
1178 &continue_pcep_lsp_update
, path
, NULL
);
1181 void continue_pcep_lsp_update(struct ctrl_state
*ctrl_state
,
1182 struct pcc_state
*pcc_state
, struct path
*path
,
1185 char err
[MAX_ERROR_MSG_SIZE
] = {0};
1187 specialize_incoming_path(pcc_state
, path
);
1188 PCEP_DEBUG("%s Received LSP update", pcc_state
->tag
);
1189 PCEP_DEBUG_PATH("%s", format_path(path
));
1191 if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
)))
1192 pcep_thread_update_path(ctrl_state
, pcc_state
->id
, path
);
1194 /* FIXME: Monitor the amount of errors from the PCE and
1195 * possibly disconnect and blacklist */
1196 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1197 "Unsupported PCEP protocol feature: %s", err
);
1198 pcep_free_path(path
);
1202 void handle_pcep_lsp_initiate(struct ctrl_state
*ctrl_state
,
1203 struct pcc_state
*pcc_state
,
1204 struct pcep_message
*msg
)
1206 PCEP_DEBUG("%s Received LSP initiate, not supported yet",
1209 /* TODO when we support both PCC and PCE initiated sessions,
1210 * we should first check the session type before
1211 * rejecting this message. */
1212 send_pcep_error(pcc_state
, PCEP_ERRT_INVALID_OPERATION
,
1213 PCEP_ERRV_LSP_NOT_PCE_INITIATED
);
1216 void handle_pcep_comp_reply(struct ctrl_state
*ctrl_state
,
1217 struct pcc_state
*pcc_state
,
1218 struct pcep_message
*msg
)
1220 char err
[MAX_ERROR_MSG_SIZE
] = "";
1221 struct req_entry
*req
;
1224 path
= pcep_lib_parse_path(msg
);
1225 req
= pop_req(pcc_state
, path
->req_id
);
1227 /* TODO: check the rate of bad computation reply and close
1228 * the connection if more that a given rate.
1231 "%s Received computation reply for unknown request %d",
1232 pcc_state
->tag
, path
->req_id
);
1233 PCEP_DEBUG_PATH("%s", format_path(path
));
1234 send_pcep_error(pcc_state
, PCEP_ERRT_UNKNOWN_REQ_REF
,
1235 PCEP_ERRV_UNASSIGNED
);
1239 /* Cancel the computation request timeout */
1240 pcep_thread_cancel_timer(&req
->t_retry
);
1242 /* Transfer relevent metadata from the request to the response */
1243 path
->nbkey
= req
->path
->nbkey
;
1244 path
->plsp_id
= req
->path
->plsp_id
;
1245 path
->type
= req
->path
->type
;
1246 path
->name
= XSTRDUP(MTYPE_PCEP
, req
->path
->name
);
1247 specialize_incoming_path(pcc_state
, path
);
1249 PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
1250 pcc_state
->tag
, path
->req_id
,
1251 path
->no_path
? "true" : "false");
1252 PCEP_DEBUG_PATH("%s", format_path(path
));
1254 if (path
->no_path
) {
1255 PCEP_DEBUG("%s Computation for path %s did not find any result",
1256 pcc_state
->tag
, path
->name
);
1257 } else if (validate_incoming_path(pcc_state
, path
, err
, sizeof(err
))) {
1258 /* Updating a dynamic path will automatically delegate it */
1259 pcep_thread_update_path(ctrl_state
, pcc_state
->id
, path
);
1260 free_req_entry(req
);
1263 /* FIXME: Monitor the amount of errors from the PCE and
1264 * possibly disconnect and blacklist */
1265 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE
,
1266 "Unsupported PCEP protocol feature: %s", err
);
1269 pcep_free_path(path
);
1271 /* Delegate the path regardless of the outcome */
1272 /* TODO: For now we are using the path from the request, when
1273 * pathd API is thread safe, we could get a new path */
1274 if (pcc_state
->caps
.is_stateful
) {
1275 PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
1276 pcc_state
->tag
, path
->name
, pcc_state
->originator
);
1277 path
= pcep_copy_path(req
->path
);
1278 path
->is_delegated
= true;
1279 send_report(pcc_state
, path
);
1280 pcep_free_path(path
);
1283 free_req_entry(req
);
1287 /* ------------ Internal Functions ------------ */
/* Return a human-readable name for the address family of addr.
 * NOTE(review): the literal strings were reconstructed (dropped lines) —
 * confirm the exact wording against upstream. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	if (IS_IPADDR_V4(addr))
		return "IPv4";
	if (IS_IPADDR_V6(addr))
		return "IPv6";
	return "undefined";
}
1298 bool filter_path(struct pcc_state
*pcc_state
, struct path
*path
)
1300 return (IS_IPADDR_V4(&path
->nbkey
.endpoint
)
1301 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
))
1302 || (IS_IPADDR_V6(&path
->nbkey
.endpoint
)
1303 && CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
));
1306 void select_pcc_addresses(struct pcc_state
*pcc_state
)
1308 /* If no IPv4 address was specified, try to get one from zebra */
1309 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
1310 if (get_ipv4_router_id(&pcc_state
->pcc_addr_v4
)) {
1311 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
);
1315 /* If no IPv6 address was specified, try to get one from zebra */
1316 if (!CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
1317 if (get_ipv6_router_id(&pcc_state
->pcc_addr_v6
)) {
1318 SET_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
);
1323 void select_transport_address(struct pcc_state
*pcc_state
)
1325 struct ipaddr
*taddr
= &pcc_state
->pcc_addr_tr
;
1327 select_pcc_addresses(pcc_state
);
1329 taddr
->ipa_type
= IPADDR_NONE
;
1331 /* Select a transport source address in function of the configured PCE
1333 if (IS_IPADDR_V4(&pcc_state
->pce_opts
->addr
)) {
1334 if (CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
)) {
1335 taddr
->ipaddr_v4
= pcc_state
->pcc_addr_v4
;
1336 taddr
->ipa_type
= IPADDR_V4
;
1339 if (CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
)) {
1340 taddr
->ipaddr_v6
= pcc_state
->pcc_addr_v6
;
1341 taddr
->ipa_type
= IPADDR_V6
;
1346 void update_tag(struct pcc_state
*pcc_state
)
1348 if (pcc_state
->pce_opts
!= NULL
) {
1349 assert(!IS_IPADDR_NONE(&pcc_state
->pce_opts
->addr
));
1350 if (IS_IPADDR_V6(&pcc_state
->pce_opts
->addr
)) {
1351 snprintfrr(pcc_state
->tag
, sizeof(pcc_state
->tag
),
1353 &pcc_state
->pce_opts
->addr
.ipaddr_v6
,
1354 pcc_state
->pce_opts
->port
, pcc_state
->id
);
1356 snprintfrr(pcc_state
->tag
, sizeof(pcc_state
->tag
),
1358 &pcc_state
->pce_opts
->addr
.ipaddr_v4
,
1359 pcc_state
->pce_opts
->port
, pcc_state
->id
);
1362 snprintfrr(pcc_state
->tag
, sizeof(pcc_state
->tag
), "(%u)",
1367 void update_originator(struct pcc_state
*pcc_state
)
1370 if (pcc_state
->originator
!= NULL
) {
1371 XFREE(MTYPE_PCEP
, pcc_state
->originator
);
1372 pcc_state
->originator
= NULL
;
1374 if (pcc_state
->pce_opts
== NULL
)
1376 originator
= XCALLOC(MTYPE_PCEP
, 52);
1377 assert(!IS_IPADDR_NONE(&pcc_state
->pce_opts
->addr
));
1378 if (IS_IPADDR_V6(&pcc_state
->pce_opts
->addr
)) {
1379 snprintfrr(originator
, 52, "%pI6:%i",
1380 &pcc_state
->pce_opts
->addr
.ipaddr_v6
,
1381 pcc_state
->pce_opts
->port
);
1383 snprintfrr(originator
, 52, "%pI4:%i",
1384 &pcc_state
->pce_opts
->addr
.ipaddr_v4
,
1385 pcc_state
->pce_opts
->port
);
1387 pcc_state
->originator
= originator
;
1390 void schedule_reconnect(struct ctrl_state
*ctrl_state
,
1391 struct pcc_state
*pcc_state
)
1393 pcc_state
->retry_count
++;
1394 pcep_thread_schedule_reconnect(ctrl_state
, pcc_state
->id
,
1395 pcc_state
->retry_count
,
1396 &pcc_state
->t_reconnect
);
1397 if (pcc_state
->retry_count
== 1) {
1398 pcep_thread_schedule_sync_best_pce(
1399 ctrl_state
, pcc_state
->id
,
1400 pcc_state
->pce_opts
->config_opts
1401 .delegation_timeout_seconds
,
1402 &pcc_state
->t_update_best
);
1406 void schedule_session_timeout(struct ctrl_state
*ctrl_state
,
1407 struct pcc_state
*pcc_state
)
1409 /* No need to schedule timeout if multiple PCEs are connected */
1410 if (get_pce_count_connected(ctrl_state
->pcc
)) {
1412 "schedule_session_timeout not setting timer for multi-pce mode");
1417 pcep_thread_schedule_session_timeout(
1418 ctrl_state
, pcep_pcc_get_pcc_id(pcc_state
),
1419 pcc_state
->pce_opts
->config_opts
1420 .session_timeout_inteval_seconds
,
1421 &pcc_state
->t_session_timeout
);
1424 void cancel_session_timeout(struct ctrl_state
*ctrl_state
,
1425 struct pcc_state
*pcc_state
)
1427 /* No need to schedule timeout if multiple PCEs are connected */
1428 if (pcc_state
->t_session_timeout
== NULL
) {
1429 PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
1433 PCEP_DEBUG_PCEP("Cancel session_timeout timer");
1434 pcep_thread_cancel_timer(&pcc_state
->t_session_timeout
);
1435 pcc_state
->t_session_timeout
= NULL
;
1438 void send_pcep_message(struct pcc_state
*pcc_state
, struct pcep_message
*msg
)
1440 if (pcc_state
->sess
!= NULL
) {
1441 PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state
->tag
,
1442 format_pcep_message(msg
));
1443 send_message(pcc_state
->sess
, msg
, true);
1447 void send_pcep_error(struct pcc_state
*pcc_state
,
1448 enum pcep_error_type error_type
,
1449 enum pcep_error_value error_value
)
1451 struct pcep_message
*msg
;
1452 PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
1453 pcc_state
->tag
, pcep_error_type_name(error_type
), error_type
,
1454 pcep_error_value_name(error_type
, error_value
), error_value
);
1455 msg
= pcep_lib_format_error(error_type
, error_value
);
1456 send_pcep_message(pcc_state
, msg
);
1459 void send_report(struct pcc_state
*pcc_state
, struct path
*path
)
1461 struct pcep_message
*report
;
1464 specialize_outgoing_path(pcc_state
, path
);
1465 PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state
->tag
, path
->name
,
1467 report
= pcep_lib_format_report(&pcc_state
->caps
, path
);
1468 send_pcep_message(pcc_state
, report
);
1471 /* Updates the path for the PCE, updating the delegation and creation flags */
1472 void specialize_outgoing_path(struct pcc_state
*pcc_state
, struct path
*path
)
1474 bool is_delegated
= false;
1475 bool was_created
= false;
1477 lookup_plspid(pcc_state
, path
);
1479 set_pcc_address(pcc_state
, &path
->nbkey
, &path
->pcc_addr
);
1480 path
->sender
= pcc_state
->pcc_addr_tr
;
1482 /* TODO: When the pathd API have a way to mark a path as
1483 * delegated, use it instead of considering all dynamic path
1484 * delegated. We need to disable the originator check for now,
1485 * because path could be delegated without having any originator yet */
1486 // if ((path->originator == NULL)
1487 // || (strcmp(path->originator, pcc_state->originator) == 0)) {
1488 // is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
1489 // && (path->first_hop != NULL);
1490 // /* it seems the PCE consider updating an LSP a creation ?!?
1491 // at least Cisco does... */
1492 // was_created = path->update_origin == SRTE_ORIGIN_PCEP;
1494 is_delegated
= (path
->type
== SRTE_CANDIDATE_TYPE_DYNAMIC
);
1495 was_created
= path
->update_origin
== SRTE_ORIGIN_PCEP
;
1497 path
->pcc_id
= pcc_state
->id
;
1498 path
->go_active
= is_delegated
&& pcc_state
->is_best
;
1499 path
->is_delegated
= is_delegated
&& pcc_state
->is_best
;
1500 path
->was_created
= was_created
;
1503 /* Updates the path for the PCC */
1504 void specialize_incoming_path(struct pcc_state
*pcc_state
, struct path
*path
)
1506 set_pcc_address(pcc_state
, &path
->nbkey
, &path
->pcc_addr
);
1507 path
->sender
= pcc_state
->pce_opts
->addr
;
1508 path
->pcc_id
= pcc_state
->id
;
1509 path
->update_origin
= SRTE_ORIGIN_PCEP
;
1510 path
->originator
= XSTRDUP(MTYPE_PCEP
, pcc_state
->originator
);
1513 /* Ensure the path can be handled by the PCC and if not, sends an error */
1514 bool validate_incoming_path(struct pcc_state
*pcc_state
, struct path
*path
,
1515 char *errbuff
, size_t buffsize
)
1517 struct path_hop
*hop
;
1518 enum pcep_error_type err_type
= 0;
1519 enum pcep_error_value err_value
= PCEP_ERRV_UNASSIGNED
;
1521 for (hop
= path
->first_hop
; hop
!= NULL
; hop
= hop
->next
) {
1522 /* Hops without SID are not supported */
1523 if (!hop
->has_sid
) {
1524 snprintfrr(errbuff
, buffsize
, "SR segment without SID");
1525 err_type
= PCEP_ERRT_RECEPTION_OF_INV_OBJECT
;
1526 err_value
= PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING
;
1529 /* Hops with non-MPLS SID are not supported */
1530 if (!hop
->is_mpls
) {
1531 snprintfrr(errbuff
, buffsize
,
1532 "SR segment with non-MPLS SID");
1533 err_type
= PCEP_ERRT_RECEPTION_OF_INV_OBJECT
;
1534 err_value
= PCEP_ERRV_UNSUPPORTED_NAI
;
1539 if (err_type
!= 0) {
1540 send_pcep_error(pcc_state
, err_type
, err_value
);
1547 void send_comp_request(struct ctrl_state
*ctrl_state
,
1548 struct pcc_state
*pcc_state
, struct req_entry
*req
)
1550 assert(req
!= NULL
);
1555 assert(req
->path
!= NULL
);
1556 assert(req
->path
->req_id
> 0);
1557 assert(RB_FIND(req_entry_head
, &pcc_state
->requests
, req
) == req
);
1558 assert(lookup_reqid(pcc_state
, req
->path
) == req
->path
->req_id
);
1561 struct pcep_message
*msg
;
1563 if (!pcc_state
->is_best
) {
1566 /* TODO: Add a timer to retry the computation request ? */
1568 specialize_outgoing_path(pcc_state
, req
->path
);
1571 "%s Sending computation request %d for path %s to %pIA (retry %d)",
1572 pcc_state
->tag
, req
->path
->req_id
, req
->path
->name
,
1573 &req
->path
->nbkey
.endpoint
, req
->retry_count
);
1574 PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state
->tag
,
1575 req
->path
->name
, format_path(req
->path
));
1577 msg
= pcep_lib_format_request(&pcc_state
->caps
, req
->path
);
1578 send_pcep_message(pcc_state
, msg
);
1579 req
->was_sent
= true;
1581 /* TODO: Enable this back when the pcep config changes are merged back
1583 // timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
1585 pcep_thread_schedule_timeout(ctrl_state
, pcc_state
->id
,
1586 TO_COMPUTATION_REQUEST
, timeout
,
1587 (void *)req
, &req
->t_retry
);
1590 void cancel_comp_requests(struct ctrl_state
*ctrl_state
,
1591 struct pcc_state
*pcc_state
)
1593 struct req_entry
*req
, *safe_req
;
1595 RB_FOREACH_SAFE (req
, req_entry_head
, &pcc_state
->requests
, safe_req
) {
1596 cancel_comp_request(ctrl_state
, pcc_state
, req
);
1597 RB_REMOVE(req_entry_head
, &pcc_state
->requests
, req
);
1598 remove_reqid_mapping(pcc_state
, req
->path
);
1599 free_req_entry(req
);
1603 void cancel_comp_request(struct ctrl_state
*ctrl_state
,
1604 struct pcc_state
*pcc_state
, struct req_entry
*req
)
1606 struct pcep_message
*msg
;
1608 if (req
->was_sent
) {
1609 /* TODO: Send a computation request cancelation
1610 * notification to the PCE */
1611 pcep_thread_cancel_timer(&req
->t_retry
);
1615 "%s Canceling computation request %d for path %s to %pIA (retry %d)",
1616 pcc_state
->tag
, req
->path
->req_id
, req
->path
->name
,
1617 &req
->path
->nbkey
.endpoint
, req
->retry_count
);
1618 PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
1619 pcc_state
->tag
, req
->path
->name
,
1620 format_path(req
->path
));
1622 msg
= pcep_lib_format_request_cancelled(req
->path
->req_id
);
1623 send_pcep_message(pcc_state
, msg
);
1626 void set_pcc_address(struct pcc_state
*pcc_state
, struct lsp_nb_key
*nbkey
,
1627 struct ipaddr
*addr
)
1629 select_pcc_addresses(pcc_state
);
1630 if (IS_IPADDR_V6(&nbkey
->endpoint
)) {
1631 assert(CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV6
));
1632 addr
->ipa_type
= IPADDR_V6
;
1633 addr
->ipaddr_v6
= pcc_state
->pcc_addr_v6
;
1634 } else if (IS_IPADDR_V4(&nbkey
->endpoint
)) {
1635 assert(CHECK_FLAG(pcc_state
->flags
, F_PCC_STATE_HAS_IPV4
));
1636 addr
->ipa_type
= IPADDR_V4
;
1637 addr
->ipaddr_v4
= pcc_state
->pcc_addr_v4
;
1639 addr
->ipa_type
= IPADDR_NONE
;
1644 /* ------------ Data Structure Helper Functions ------------ */
1646 void lookup_plspid(struct pcc_state
*pcc_state
, struct path
*path
)
1648 struct plspid_map_data key
, *plspid_mapping
;
1649 struct nbkey_map_data
*nbkey_mapping
;
1651 if (path
->nbkey
.color
!= 0) {
1652 key
.nbkey
= path
->nbkey
;
1653 plspid_mapping
= plspid_map_find(&pcc_state
->plspid_map
, &key
);
1654 if (plspid_mapping
== NULL
) {
1656 XCALLOC(MTYPE_PCEP
, sizeof(*plspid_mapping
));
1657 plspid_mapping
->nbkey
= key
.nbkey
;
1658 plspid_mapping
->plspid
= pcc_state
->next_plspid
;
1659 plspid_map_add(&pcc_state
->plspid_map
, plspid_mapping
);
1661 XCALLOC(MTYPE_PCEP
, sizeof(*nbkey_mapping
));
1662 nbkey_mapping
->nbkey
= key
.nbkey
;
1663 nbkey_mapping
->plspid
= pcc_state
->next_plspid
;
1664 nbkey_map_add(&pcc_state
->nbkey_map
, nbkey_mapping
);
1665 pcc_state
->next_plspid
++;
1666 // FIXME: Send some error to the PCE isntead of crashing
1667 assert(pcc_state
->next_plspid
<= 1048576);
1669 path
->plsp_id
= plspid_mapping
->plspid
;
1673 void lookup_nbkey(struct pcc_state
*pcc_state
, struct path
*path
)
1675 struct nbkey_map_data key
, *mapping
;
1676 // TODO: Should give an error to the PCE instead of crashing
1677 assert(path
->plsp_id
!= 0);
1678 key
.plspid
= path
->plsp_id
;
1679 mapping
= nbkey_map_find(&pcc_state
->nbkey_map
, &key
);
1680 assert(mapping
!= NULL
);
1681 path
->nbkey
= mapping
->nbkey
;
1684 void free_req_entry(struct req_entry
*req
)
1686 pcep_free_path(req
->path
);
1687 XFREE(MTYPE_PCEP
, req
);
1690 struct req_entry
*push_new_req(struct pcc_state
*pcc_state
, struct path
*path
)
1692 struct req_entry
*req
;
1694 req
= XCALLOC(MTYPE_PCEP
, sizeof(*req
));
1695 req
->retry_count
= 0;
1696 req
->path
= pcep_copy_path(path
);
1697 repush_req(pcc_state
, req
);
1702 void repush_req(struct pcc_state
*pcc_state
, struct req_entry
*req
)
1704 uint32_t reqid
= pcc_state
->next_reqid
;
1707 req
->was_sent
= false;
1708 req
->path
->req_id
= reqid
;
1709 res
= RB_INSERT(req_entry_head
, &pcc_state
->requests
, req
);
1710 assert(res
== NULL
);
1711 assert(add_reqid_mapping(pcc_state
, req
->path
) == true);
1713 pcc_state
->next_reqid
+= 1;
1714 /* Wrapping is allowed, but 0 is not a valid id */
1715 if (pcc_state
->next_reqid
== 0)
1716 pcc_state
->next_reqid
= 1;
1719 struct req_entry
*pop_req(struct pcc_state
*pcc_state
, uint32_t reqid
)
1721 struct path path
= {.req_id
= reqid
};
1722 struct req_entry key
= {.path
= &path
};
1723 struct req_entry
*req
;
1725 req
= RB_FIND(req_entry_head
, &pcc_state
->requests
, &key
);
1728 RB_REMOVE(req_entry_head
, &pcc_state
->requests
, req
);
1729 remove_reqid_mapping(pcc_state
, req
->path
);
1734 bool add_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
)
1736 struct req_map_data
*mapping
;
1737 mapping
= XCALLOC(MTYPE_PCEP
, sizeof(*mapping
));
1738 mapping
->nbkey
= path
->nbkey
;
1739 mapping
->reqid
= path
->req_id
;
1740 if (req_map_add(&pcc_state
->req_map
, mapping
) != NULL
) {
1741 XFREE(MTYPE_PCEP
, mapping
);
1747 void remove_reqid_mapping(struct pcc_state
*pcc_state
, struct path
*path
)
1749 struct req_map_data key
, *mapping
;
1750 key
.nbkey
= path
->nbkey
;
1751 mapping
= req_map_find(&pcc_state
->req_map
, &key
);
1752 if (mapping
!= NULL
) {
1753 req_map_del(&pcc_state
->req_map
, mapping
);
1754 XFREE(MTYPE_PCEP
, mapping
);
1758 uint32_t lookup_reqid(struct pcc_state
*pcc_state
, struct path
*path
)
1760 struct req_map_data key
, *mapping
;
1761 key
.nbkey
= path
->nbkey
;
1762 mapping
= req_map_find(&pcc_state
->req_map
, &key
);
1763 if (mapping
!= NULL
)
1764 return mapping
->reqid
;
/* True when a computation request is pending for the path's nbkey. */
bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path)
{
	return lookup_reqid(pcc_state, path) != 0;
}
1774 /* ------------ Data Structure Callbacks ------------ */
/* Return -1/1 from the ENCLOSING function when A and B differ; fall through
 * when equal. Only safe with side-effect-free arguments (they are evaluated
 * up to three times). */
#define CMP_RETURN(A, B)                                                       \
	if ((A) != (B))                                                        \
	return ((A) < (B)) ? -1 : 1
1780 static uint32_t hash_nbkey(const struct lsp_nb_key
*nbkey
)
1783 hash
= jhash_2words(nbkey
->color
, nbkey
->preference
, 0x55aa5a5a);
1784 switch (nbkey
->endpoint
.ipa_type
) {
1786 return jhash(&nbkey
->endpoint
.ipaddr_v4
,
1787 sizeof(nbkey
->endpoint
.ipaddr_v4
), hash
);
1789 return jhash(&nbkey
->endpoint
.ipaddr_v6
,
1790 sizeof(nbkey
->endpoint
.ipaddr_v6
), hash
);
1796 static int cmp_nbkey(const struct lsp_nb_key
*a
, const struct lsp_nb_key
*b
)
1798 CMP_RETURN(a
->color
, b
->color
);
1799 int cmp
= ipaddr_cmp(&a
->endpoint
, &b
->endpoint
);
1802 CMP_RETURN(a
->preference
, b
->preference
);
1806 int plspid_map_cmp(const struct plspid_map_data
*a
,
1807 const struct plspid_map_data
*b
)
1809 return cmp_nbkey(&a
->nbkey
, &b
->nbkey
);
1812 uint32_t plspid_map_hash(const struct plspid_map_data
*e
)
1814 return hash_nbkey(&e
->nbkey
);
1817 int nbkey_map_cmp(const struct nbkey_map_data
*a
,
1818 const struct nbkey_map_data
*b
)
1820 CMP_RETURN(a
->plspid
, b
->plspid
);
1824 uint32_t nbkey_map_hash(const struct nbkey_map_data
*e
)
1829 int req_map_cmp(const struct req_map_data
*a
, const struct req_map_data
*b
)
1831 return cmp_nbkey(&a
->nbkey
, &b
->nbkey
);
1834 uint32_t req_map_hash(const struct req_map_data
*e
)
1836 return hash_nbkey(&e
->nbkey
);