]> git.proxmox.com Git - mirror_frr.git/blob - pathd/path_pcep_pcc.c
Merge pull request #12837 from donaldsharp/unlikely_routemap
[mirror_frr.git] / pathd / path_pcep_pcc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2020 NetDEF, Inc.
4 */
5
6 /* TODOS AND KNOWN ISSUES:
7 - Delete mapping from NB keys to PLSPID when an LSP is deleted either
8 by the PCE or by NB.
9 - Revert the hacks to work around ODL requiring a report with
10 operational status DOWN when an LSP is activated.
11 - Enforce only the PCE a policy has been delegated to can update it.
12 - If the router-id is used because the PCC IP is not specified
13 (either IPv4 or IPv6), the connection to the PCE is not reset
14 when the router-id changes.
15 */
16
17 #include <zebra.h>
18
19 #include "log.h"
20 #include "command.h"
21 #include "libfrr.h"
22 #include "printfrr.h"
23 #include "northbound.h"
24 #include "frr_pthread.h"
25 #include "jhash.h"
26
27 #include "pathd/pathd.h"
28 #include "pathd/path_zebra.h"
29 #include "pathd/path_errors.h"
30 #include "pathd/path_pcep.h"
31 #include "pathd/path_pcep_controller.h"
32 #include "pathd/path_pcep_lib.h"
33 #include "pathd/path_pcep_config.h"
34 #include "pathd/path_pcep_debug.h"
35
36
37 /* The number of time we will skip connecting if we are missing the PCC
38 * address for an inet family different from the selected transport one*/
39 #define OTHER_FAMILY_MAX_RETRIES 4
40 #define MAX_ERROR_MSG_SIZE 256
41 #define MAX_COMPREQ_TRIES 3
42
43 pthread_mutex_t g_pcc_info_mtx = PTHREAD_MUTEX_INITIALIZER;
44
45 /* PCEP Event Handler */
46 static void handle_pcep_open(struct ctrl_state *ctrl_state,
47 struct pcc_state *pcc_state,
48 struct pcep_message *msg);
49 static void handle_pcep_message(struct ctrl_state *ctrl_state,
50 struct pcc_state *pcc_state,
51 struct pcep_message *msg);
52 static void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
53 struct pcc_state *pcc_state,
54 struct pcep_message *msg);
55 static void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
56 struct pcc_state *pcc_state,
57 struct pcep_message *msg);
58 static void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
59 struct pcc_state *pcc_state,
60 struct path *path, void *payload);
61 static void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
62 struct pcc_state *pcc_state,
63 struct pcep_message *msg);
64
65 /* Internal Functions */
66 static const char *ipaddr_type_name(struct ipaddr *addr);
67 static bool filter_path(struct pcc_state *pcc_state, struct path *path);
68 static void select_pcc_addresses(struct pcc_state *pcc_state);
69 static void select_transport_address(struct pcc_state *pcc_state);
70 static void update_tag(struct pcc_state *pcc_state);
71 static void update_originator(struct pcc_state *pcc_state);
72 static void schedule_reconnect(struct ctrl_state *ctrl_state,
73 struct pcc_state *pcc_state);
74 static void schedule_session_timeout(struct ctrl_state *ctrl_state,
75 struct pcc_state *pcc_state);
76 static void cancel_session_timeout(struct ctrl_state *ctrl_state,
77 struct pcc_state *pcc_state);
78 static void send_pcep_message(struct pcc_state *pcc_state,
79 struct pcep_message *msg);
80 static void send_pcep_error(struct pcc_state *pcc_state,
81 enum pcep_error_type error_type,
82 enum pcep_error_value error_value,
83 struct path *trigger_path);
84 static void send_report(struct pcc_state *pcc_state, struct path *path);
85 static void send_comp_request(struct ctrl_state *ctrl_state,
86 struct pcc_state *pcc_state,
87 struct req_entry *req);
88 static void cancel_comp_requests(struct ctrl_state *ctrl_state,
89 struct pcc_state *pcc_state);
90 static void cancel_comp_request(struct ctrl_state *ctrl_state,
91 struct pcc_state *pcc_state,
92 struct req_entry *req);
93 static void specialize_outgoing_path(struct pcc_state *pcc_state,
94 struct path *path);
95 static void specialize_incoming_path(struct pcc_state *pcc_state,
96 struct path *path);
97 static bool validate_incoming_path(struct pcc_state *pcc_state,
98 struct path *path, char *errbuff,
99 size_t buffsize);
100 static void set_pcc_address(struct pcc_state *pcc_state,
101 struct lsp_nb_key *nbkey, struct ipaddr *addr);
102 static int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs);
103 static int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs);
104 static int get_previous_best_pce(struct pcc_state **pcc);
105 static int get_best_pce(struct pcc_state **pcc);
106 static int get_pce_count_connected(struct pcc_state **pcc);
107 static bool update_best_pce(struct pcc_state **pcc, int best);
108
109 /* Data Structure Helper Functions */
110 static void lookup_plspid(struct pcc_state *pcc_state, struct path *path);
111 static void lookup_nbkey(struct pcc_state *pcc_state, struct path *path);
112 static void free_req_entry(struct req_entry *req);
113 static struct req_entry *push_new_req(struct pcc_state *pcc_state,
114 struct path *path);
115 static void repush_req(struct pcc_state *pcc_state, struct req_entry *req);
116 static struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid);
117 static struct req_entry *pop_req_no_reqid(struct pcc_state *pcc_state,
118 uint32_t reqid);
119 static bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path);
120 static void remove_reqid_mapping(struct pcc_state *pcc_state,
121 struct path *path);
122 static uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path);
123 static bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path);
124
125 /* Data Structure Callbacks */
126 static int plspid_map_cmp(const struct plspid_map_data *a,
127 const struct plspid_map_data *b);
128 static uint32_t plspid_map_hash(const struct plspid_map_data *e);
129 static int nbkey_map_cmp(const struct nbkey_map_data *a,
130 const struct nbkey_map_data *b);
131 static uint32_t nbkey_map_hash(const struct nbkey_map_data *e);
132 static int req_map_cmp(const struct req_map_data *a,
133 const struct req_map_data *b);
134 static uint32_t req_map_hash(const struct req_map_data *e);
135
136 /* Data Structure Declarations */
137 DECLARE_HASH(plspid_map, struct plspid_map_data, mi, plspid_map_cmp,
138 plspid_map_hash);
139 DECLARE_HASH(nbkey_map, struct nbkey_map_data, mi, nbkey_map_cmp,
140 nbkey_map_hash);
141 DECLARE_HASH(req_map, struct req_map_data, mi, req_map_cmp, req_map_hash);
142
143 static inline int req_entry_compare(const struct req_entry *a,
144 const struct req_entry *b)
145 {
146 return a->path->req_id - b->path->req_id;
147 }
148 RB_GENERATE(req_entry_head, req_entry, entry, req_entry_compare)
149
150
151 /* ------------ API Functions ------------ */
152
153 struct pcc_state *pcep_pcc_initialize(struct ctrl_state *ctrl_state, int index)
154 {
155 struct pcc_state *pcc_state = XCALLOC(MTYPE_PCEP, sizeof(*pcc_state));
156
157 pcc_state->id = index;
158 pcc_state->status = PCEP_PCC_DISCONNECTED;
159 pcc_state->next_reqid = 1;
160 pcc_state->next_plspid = 1;
161
162 RB_INIT(req_entry_head, &pcc_state->requests);
163
164 update_tag(pcc_state);
165 update_originator(pcc_state);
166
167 PCEP_DEBUG("%s PCC initialized", pcc_state->tag);
168
169 return pcc_state;
170 }
171
/* Tear down and free a PCC state.
 * Disconnects any active session first, then releases the owned option
 * structures, the originator string, any pending timers, and finally the
 * state itself. After this call pcc_state must not be used. */
void pcep_pcc_finalize(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state)
{
	PCEP_DEBUG("%s PCC finalizing...", pcc_state->tag);

	/* Close the session (no-op when already disconnected). */
	pcep_pcc_disable(ctrl_state, pcc_state);

	/* Release owned configuration and originator string. */
	if (pcc_state->pcc_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
		pcc_state->pcc_opts = NULL;
	}
	if (pcc_state->pce_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pce_opts);
		pcc_state->pce_opts = NULL;
	}
	if (pcc_state->originator != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->originator);
		pcc_state->originator = NULL;
	}

	/* Cancel any timer still scheduled against this state so no
	 * callback fires after the state is freed. */
	if (pcc_state->t_reconnect != NULL) {
		event_cancel(&pcc_state->t_reconnect);
		pcc_state->t_reconnect = NULL;
	}

	if (pcc_state->t_update_best != NULL) {
		event_cancel(&pcc_state->t_update_best);
		pcc_state->t_update_best = NULL;
	}

	if (pcc_state->t_session_timeout != NULL) {
		event_cancel(&pcc_state->t_session_timeout);
		pcc_state->t_session_timeout = NULL;
	}

	XFREE(MTYPE_PCEP, pcc_state);
}
209
210 int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs)
211 {
212 int retval;
213
214 if (lhs == NULL) {
215 return 1;
216 }
217
218 if (rhs == NULL) {
219 return -1;
220 }
221
222 retval = lhs->port - rhs->port;
223 if (retval != 0) {
224 return retval;
225 }
226
227 retval = lhs->msd - rhs->msd;
228 if (retval != 0) {
229 return retval;
230 }
231
232 if (IS_IPADDR_V4(&lhs->addr)) {
233 retval = memcmp(&lhs->addr.ipaddr_v4, &rhs->addr.ipaddr_v4,
234 sizeof(lhs->addr.ipaddr_v4));
235 if (retval != 0) {
236 return retval;
237 }
238 } else if (IS_IPADDR_V6(&lhs->addr)) {
239 retval = memcmp(&lhs->addr.ipaddr_v6, &rhs->addr.ipaddr_v6,
240 sizeof(lhs->addr.ipaddr_v6));
241 if (retval != 0) {
242 return retval;
243 }
244 }
245
246 return 0;
247 }
248
249 int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs)
250 {
251 if (lhs == NULL) {
252 return 1;
253 }
254
255 if (rhs == NULL) {
256 return -1;
257 }
258
259 int retval = lhs->port - rhs->port;
260 if (retval != 0) {
261 return retval;
262 }
263
264 retval = strcmp(lhs->pce_name, rhs->pce_name);
265 if (retval != 0) {
266 return retval;
267 }
268
269 retval = lhs->precedence - rhs->precedence;
270 if (retval != 0) {
271 return retval;
272 }
273
274 retval = memcmp(&lhs->addr, &rhs->addr, sizeof(lhs->addr));
275 if (retval != 0) {
276 return retval;
277 }
278
279 return 0;
280 }
281
/* Apply new PCC/PCE options to an existing PCC state.
 *
 * Takes ownership of pcc_opts and pce_opts when they are stored or freed
 * below. Disables the current session before swapping the options, then
 * re-enables with the new configuration.
 *
 * NOTE(review): when the options are unchanged the function returns early
 * WITHOUT freeing the passed-in opts — verify the caller keeps ownership
 * in that case, otherwise this leaks. */
int pcep_pcc_update(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state,
		    struct pcc_opts *pcc_opts, struct pce_opts *pce_opts)
{
	int ret = 0;

	// If the options did not change, then there is nothing to do
	if ((compare_pce_opts(pce_opts, pcc_state->pce_opts) == 0)
	    && (compare_pcc_opts(pcc_opts, pcc_state->pcc_opts) == 0)) {
		return ret;
	}

	/* Failed disable: free the new options since they won't be stored. */
	if ((ret = pcep_pcc_disable(ctrl_state, pcc_state))) {
		XFREE(MTYPE_PCEP, pcc_opts);
		XFREE(MTYPE_PCEP, pce_opts);
		return ret;
	}

	/* Replace the previously owned option structures. */
	if (pcc_state->pcc_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
	}
	if (pcc_state->pce_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pce_opts);
	}

	pcc_state->pcc_opts = pcc_opts;
	pcc_state->pce_opts = pce_opts;

	/* Cache the configured source address per family and track which
	 * families are available via state flags. */
	if (IS_IPADDR_V4(&pcc_opts->addr)) {
		pcc_state->pcc_addr_v4 = pcc_opts->addr.ipaddr_v4;
		SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
	} else {
		UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
	}

	if (IS_IPADDR_V6(&pcc_opts->addr)) {
		memcpy(&pcc_state->pcc_addr_v6, &pcc_opts->addr.ipaddr_v6,
		       sizeof(struct in6_addr));
		SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
	} else {
		UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
	}

	update_tag(pcc_state);
	update_originator(pcc_state);

	return pcep_pcc_enable(ctrl_state, pcc_state);
}
329
330 void pcep_pcc_reconnect(struct ctrl_state *ctrl_state,
331 struct pcc_state *pcc_state)
332 {
333 if (pcc_state->status == PCEP_PCC_DISCONNECTED)
334 pcep_pcc_enable(ctrl_state, pcc_state);
335 }
336
/* Start connecting this PCC to its configured PCE.
 *
 * Must be called with the PCC disconnected and no live session. For the
 * first OTHER_FAMILY_MAX_RETRIES attempts the connection is postponed
 * while a source address for the non-transport address family is still
 * missing (so candidate paths of that family can be handled later);
 * after that, the missing family is simply ignored.
 *
 * Returns 0 in all cases; failures schedule a reconnect instead of
 * propagating an error. */
int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
{
	assert(pcc_state->status == PCEP_PCC_DISCONNECTED);
	assert(pcc_state->sess == NULL);

	/* A manual enable supersedes any pending reconnect timer. */
	if (pcc_state->t_reconnect != NULL) {
		event_cancel(&pcc_state->t_reconnect);
		pcc_state->t_reconnect = NULL;
	}

	select_transport_address(pcc_state);

	/* Even though we are connecting using IPv6, we want to have an IPv4
	 * address so we can handle candidate paths with IPv4 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %pIA:%d due to missing PCC IPv4 address",
				  &pcc_state->pce_opts->addr,
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv4 PCC address, IPv4 candidate paths will be ignored");
		}
	}

	/* Even though we are connecting using IPv4, we want to have an IPv6
	 * address so we can handle candidate paths with IPv6 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %pIA:%d due to missing PCC IPv6 address",
				  &pcc_state->pce_opts->addr,
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv6 PCC address, IPv6 candidate paths will be ignored");
		}
	}

	/* Even if the maximum retries to try to have all the family addresses
	 * have been spent, we still need the one for the transport family */
	if (pcc_state->pcc_addr_tr.ipa_type == IPADDR_NONE) {
		flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
			  "skipping connection to PCE %pIA:%d due to missing PCC address",
			  &pcc_state->pce_opts->addr,
			  pcc_state->pce_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	PCEP_DEBUG("%s PCC connecting", pcc_state->tag);
	pcc_state->sess = pcep_lib_connect(
		&pcc_state->pcc_addr_tr, pcc_state->pcc_opts->port,
		&pcc_state->pce_opts->addr, pcc_state->pce_opts->port,
		pcc_state->pcc_opts->msd, &pcc_state->pce_opts->config_opts);

	if (pcc_state->sess == NULL) {
		flog_warn(EC_PATH_PCEP_LIB_CONNECT,
			  "failed to connect to PCE %pIA:%d from %pIA:%d",
			  &pcc_state->pce_opts->addr,
			  pcc_state->pce_opts->port,
			  &pcc_state->pcc_addr_tr,
			  pcc_state->pcc_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	// In case some best pce alternative were waiting to activate
	if (pcc_state->t_update_best != NULL) {
		event_cancel(&pcc_state->t_update_best);
		pcc_state->t_update_best = NULL;
	}

	pcc_state->status = PCEP_PCC_CONNECTING;

	return 0;
}
419
/* Disconnect a PCC from its PCE.
 *
 * Returns 0 on success (including when already disconnected), 1 when the
 * state is PCEP_PCC_INITIALIZED, in which case there is nothing to
 * disconnect yet. */
int pcep_pcc_disable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
{
	switch (pcc_state->status) {
	case PCEP_PCC_DISCONNECTED:
		return 0;
	case PCEP_PCC_CONNECTING:
	case PCEP_PCC_SYNCHRONIZING:
	case PCEP_PCC_OPERATING:
		PCEP_DEBUG("%s Disconnecting PCC...", pcc_state->tag);
		cancel_comp_requests(ctrl_state, pcc_state);
		pcep_lib_disconnect(pcc_state->sess);
		/* No need to remove if any PCEs is connected */
		if (get_pce_count_connected(ctrl_state->pcc) == 0) {
			pcep_thread_remove_candidate_path_segments(ctrl_state,
								   pcc_state);
		}
		pcc_state->sess = NULL;
		pcc_state->status = PCEP_PCC_DISCONNECTED;
		return 0;
	case PCEP_PCC_INITIALIZED:
		return 1;
	}

	/* All enum values are handled above; reaching here is a bug. */
	assert(!"Reached end of function where we are not expecting to");
}
445
/* Synchronize one candidate path with the PCE.
 *
 * Only meaningful while synchronizing (path reported with is_synching
 * set) or operating (is_synching cleared); any other status is ignored.
 * Dynamic paths without a computed first hop are queued so computation
 * requests can be sent once synchronization completes. */
void pcep_pcc_sync_path(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state, struct path *path)
{
	if (pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		path->is_synching = true;
	} else if (pcc_state->status == PCEP_PCC_OPERATING)
		path->is_synching = false;
	else
		return;

	path->go_active = true;

	/* Accumulate the dynamic paths without any LSP so computation
	 * requests can be performed after synchronization */
	if ((path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	    && (path->first_hop == NULL)
	    && !has_pending_req_for(pcc_state, path)) {
		PCEP_DEBUG("%s Scheduling computation request for path %s",
			   pcc_state->tag, path->name);
		push_new_req(pcc_state, path);
		return;
	}

	/* Synchronize the path if the PCE supports LSP updates and the
	 * endpoint address family is supported */
	if (pcc_state->caps.is_stateful) {
		if (filter_path(pcc_state, path)) {
			PCEP_DEBUG("%s Synchronizing path %s", pcc_state->tag,
				   path->name);
			send_report(pcc_state, path);
		} else {
			PCEP_DEBUG(
				"%s Skipping %s candidate path %s synchronization",
				pcc_state->tag,
				ipaddr_type_name(&path->nbkey.endpoint),
				path->name);
		}
	}
}
485
/* Finish LSP-DB synchronization with the PCE.
 *
 * For stateful sessions still synchronizing, sends the final empty
 * report (plsp_id 0, is_synching false) that marks end-of-sync, then
 * moves to OPERATING and fires the computation requests queued during
 * synchronization. */
void pcep_pcc_sync_done(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_SYNCHRONIZING
	    && pcc_state->status != PCEP_PCC_OPERATING)
		return;

	if (pcc_state->caps.is_stateful
	    && pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		/* End-of-synchronization marker report. */
		struct path *path = pcep_new_path();
		*path = (struct path){.name = NULL,
				      .srp_id = 0,
				      .plsp_id = 0,
				      .status = PCEP_LSP_OPERATIONAL_DOWN,
				      .do_remove = false,
				      .go_active = false,
				      .was_created = false,
				      .was_removed = false,
				      .is_synching = false,
				      .is_delegated = false,
				      .first_hop = NULL,
				      .first_metric = NULL};
		send_report(pcc_state, path);
		pcep_free_path(path);
	}

	pcc_state->synchronized = true;
	pcc_state->status = PCEP_PCC_OPERATING;

	PCEP_DEBUG("%s Synchronization done", pcc_state->tag);

	/* Start the computation request accumulated during synchronization */
	RB_FOREACH (req, req_entry_head, &pcc_state->requests) {
		send_comp_request(ctrl_state, pcc_state, req);
	}
}
524
/* Report a candidate path's state to the PCE.
 *
 * Takes ownership of `path` and frees it before returning. Reports are
 * only sent on an operating, stateful session. `is_stable` indicates no
 * further update is expected, which triggers the second report with the
 * real status (see the workaround comment below). */
void pcep_pcc_send_report(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state, struct path *path,
			  bool is_stable)
{
	if ((pcc_state->status != PCEP_PCC_OPERATING)
	    || (!pcc_state->caps.is_stateful)) {
		pcep_free_path(path);
		return;
	}

	PCEP_DEBUG("(%s)%s Send report for candidate path %s", __func__,
		   pcc_state->tag, path->name);

	/* ODL and Cisco requires the first reported
	 * LSP to have a DOWN status, the later status changes
	 * will be comunicated through hook calls.
	 */
	enum pcep_lsp_operational_status real_status = path->status;
	path->status = PCEP_LSP_OPERATIONAL_DOWN;
	send_report(pcc_state, path);

	/* If no update is expected and the real status wasn't down, we need to
	 * send a second report with the real status */
	if (is_stable && (real_status != PCEP_LSP_OPERATIONAL_DOWN)) {
		PCEP_DEBUG("(%s)%s Send report for candidate path (!DOWN) %s",
			   __func__, pcc_state->tag, path->name);
		path->status = real_status;
		send_report(pcc_state, path);
	}

	pcep_free_path(path);
}
557
558
/* Send a PCEP error (used after a rejected PCInitiate) and release the
 * error container and its path.
 *
 * Takes ownership of `error` and `error->path`; both are freed here.
 * NOTE(review): the `sub_type` parameter is currently unused. */
void pcep_pcc_send_error(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct pcep_error *error,
			 bool sub_type)
{

	PCEP_DEBUG("(%s) Send error after PcInitiated ", __func__);


	send_pcep_error(pcc_state, error->error_type, error->error_value,
			error->path);
	pcep_free_path(error->path);
	XFREE(MTYPE_PCEP, error);
}
572 /* ------------ Timeout handler ------------ */
573
574 void pcep_pcc_timeout_handler(struct ctrl_state *ctrl_state,
575 struct pcc_state *pcc_state,
576 enum pcep_ctrl_timeout_type type, void *param)
577 {
578 struct req_entry *req;
579
580 switch (type) {
581 case TO_COMPUTATION_REQUEST:
582 assert(param != NULL);
583 req = (struct req_entry *)param;
584 pop_req(pcc_state, req->path->req_id);
585 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT,
586 "Computation request %d timeout", req->path->req_id);
587 cancel_comp_request(ctrl_state, pcc_state, req);
588 if (req->retry_count++ < MAX_COMPREQ_TRIES) {
589 repush_req(pcc_state, req);
590 send_comp_request(ctrl_state, pcc_state, req);
591 return;
592 }
593 if (pcc_state->caps.is_stateful) {
594 struct path *path;
595 PCEP_DEBUG(
596 "%s Delegating undefined dynamic path %s to PCE %s",
597 pcc_state->tag, req->path->name,
598 pcc_state->originator);
599 path = pcep_copy_path(req->path);
600 path->is_delegated = true;
601 send_report(pcc_state, path);
602 free_req_entry(req);
603 }
604 break;
605 case TO_UNDEFINED:
606 case TO_MAX:
607 break;
608 }
609 }
610
611
612 /* ------------ Pathd event handler ------------ */
613
/* React to pathd (northbound) candidate-path events while operating.
 *
 * Created dynamic paths without a first hop trigger a computation
 * request; everything else is reported to the PCE when the session is
 * stateful. Events are dropped when the session is not operating or the
 * path's endpoint family does not match this PCC. */
void pcep_pcc_pathd_event_handler(struct ctrl_state *ctrl_state,
				  struct pcc_state *pcc_state,
				  enum pcep_pathd_event_type type,
				  struct path *path)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_OPERATING)
		return;

	/* Skipping candidate path with endpoint that do not match the
	 * configured or deduced PCC IP version */
	if (!filter_path(pcc_state, path)) {
		PCEP_DEBUG("%s Skipping %s candidate path %s event",
			   pcc_state->tag,
			   ipaddr_type_name(&path->nbkey.endpoint), path->name);
		return;
	}

	switch (type) {
	case PCEP_PATH_CREATED:
		/* Avoid duplicate computation requests for the same path. */
		if (has_pending_req_for(pcc_state, path)) {
			PCEP_DEBUG(
				"%s Candidate path %s created, computation request already sent",
				pcc_state->tag, path->name);
			return;
		}
		PCEP_DEBUG("%s Candidate path %s created", pcc_state->tag,
			   path->name);
		if ((path->first_hop == NULL)
		    && (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)) {
			req = push_new_req(pcc_state, path);
			send_comp_request(ctrl_state, pcc_state, req);
		} else if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_UPDATED:
		PCEP_DEBUG("%s Candidate path %s updated", pcc_state->tag,
			   path->name);
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_REMOVED:
		PCEP_DEBUG("%s Candidate path %s removed", pcc_state->tag,
			   path->name);
		path->was_removed = true;
		/* Removed as response to a PcInitiated 'R'emove*/
		/* RFC 8281 #5.4 LSP Deletion*/
		path->do_remove = path->was_removed;
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_UNDEFINED:
		flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR,
			  "Unexpected pathd event received by pcc %s: %u",
			  pcc_state->tag, type);
		return;
	}
}
673
674
675 /* ------------ PCEP event handler ------------ */
676
/* Dispatch pceplib session events for this PCC.
 *
 * Drives the status transitions CONNECTING -> SYNCHRONIZING (on
 * PCC_CONNECTED_TO_PCE) and back to DISCONNECTED on any session-ending
 * event, and routes received PCEP messages to the message handlers. */
void pcep_pcc_pcep_event_handler(struct ctrl_state *ctrl_state,
				 struct pcc_state *pcc_state, pcep_event *event)
{
	PCEP_DEBUG("%s Received PCEP event: %s", pcc_state->tag,
		   pcep_event_type_name(event->event_type));
	switch (event->event_type) {
	case PCC_CONNECTED_TO_PCE:
		assert(PCEP_PCC_CONNECTING == pcc_state->status);
		PCEP_DEBUG("%s Connection established", pcc_state->tag);
		pcc_state->status = PCEP_PCC_SYNCHRONIZING;
		pcc_state->retry_count = 0;
		pcc_state->synchronized = false;
		PCEP_DEBUG("%s Starting PCE synchronization", pcc_state->tag);
		cancel_session_timeout(ctrl_state, pcc_state);
		pcep_pcc_calculate_best_pce(ctrl_state->pcc);
		pcep_thread_start_sync(ctrl_state, pcc_state->id);
		break;
	case PCC_SENT_INVALID_OPEN:
		/* Adopt the timer values the PCE negotiated so the next
		 * OPEN is acceptable. */
		PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG(
			"%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
			pcc_state->tag,
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds,
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds);
		pcc_state->pce_opts->config_opts.keep_alive_seconds =
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds;
		pcc_state->pce_opts->config_opts.dead_timer_seconds =
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds;
		break;

	case PCC_RCVD_INVALID_OPEN:
		PCEP_DEBUG("%s Received invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		break;
	case PCE_DEAD_TIMER_EXPIRED:
	case PCE_CLOSED_SOCKET:
	case PCE_SENT_PCEP_CLOSE:
	case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED:
	case PCC_PCEP_SESSION_CLOSED:
	case PCC_RCVD_MAX_INVALID_MSGS:
	case PCC_RCVD_MAX_UNKOWN_MSGS:
		/* Session is gone: disconnect, then arm both the reconnect
		 * and the session timeout timers. */
		pcep_pcc_disable(ctrl_state, pcc_state);
		schedule_reconnect(ctrl_state, pcc_state);
		schedule_session_timeout(ctrl_state, pcc_state);
		break;
	case MESSAGE_RECEIVED:
		PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		/* While connecting only the OPEN message is meaningful. */
		if (pcc_state->status == PCEP_PCC_CONNECTING) {
			if (event->message->msg_header->type == PCEP_TYPE_OPEN)
				handle_pcep_open(ctrl_state, pcc_state,
						 event->message);
			break;
		}
		assert(pcc_state->status == PCEP_PCC_SYNCHRONIZING
		       || pcc_state->status == PCEP_PCC_OPERATING);
		handle_pcep_message(ctrl_state, pcc_state, event->message);
		break;
	case PCC_CONNECTION_FAILURE:
		flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT,
			  "Unexpected event from pceplib: %s",
			  format_pcep_event(event));
		break;
	}
}
747
748
749 /*------------------ Multi-PCE --------------------- */
750
751 /* Internal util function, returns true if sync is necessary, false otherwise */
752 bool update_best_pce(struct pcc_state **pcc, int best)
753 {
754 PCEP_DEBUG(" recalculating pce precedence ");
755 if (best) {
756 struct pcc_state *best_pcc_state =
757 pcep_pcc_get_pcc_by_id(pcc, best);
758 if (best_pcc_state->previous_best != best_pcc_state->is_best) {
759 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
760 best_pcc_state->tag, best_pcc_state->id,
761 best_pcc_state->previous_best);
762 return true;
763 } else {
764 PCEP_DEBUG(
765 " %s No Resynch best (%i) previous best (%i)",
766 best_pcc_state->tag, best_pcc_state->id,
767 best_pcc_state->previous_best);
768 }
769 } else {
770 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
771 }
772
773 return false;
774 }
775
776 int get_best_pce(struct pcc_state **pcc)
777 {
778 for (int i = 0; i < MAX_PCC; i++) {
779 if (pcc[i] && pcc[i]->pce_opts) {
780 if (pcc[i]->is_best == true) {
781 return pcc[i]->id;
782 }
783 }
784 }
785 return 0;
786 }
787
788 int get_pce_count_connected(struct pcc_state **pcc)
789 {
790 int count = 0;
791 for (int i = 0; i < MAX_PCC; i++) {
792 if (pcc[i] && pcc[i]->pce_opts
793 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
794 count++;
795 }
796 }
797 return count;
798 }
799
800 int get_previous_best_pce(struct pcc_state **pcc)
801 {
802 int previous_best_pce = -1;
803
804 for (int i = 0; i < MAX_PCC; i++) {
805 if (pcc[i] && pcc[i]->pce_opts && pcc[i]->previous_best == true
806 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
807 previous_best_pce = i;
808 break;
809 }
810 }
811 return previous_best_pce != -1 ? pcc[previous_best_pce]->id : 0;
812 }
813
814 /* Called by path_pcep_controller EV_REMOVE_PCC
815 * Event handler when a PCC is removed. */
816 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state *ctrl_state,
817 struct pcc_state **pcc)
818 {
819 int new_best_pcc_id = -1;
820 new_best_pcc_id = pcep_pcc_calculate_best_pce(pcc);
821 if (new_best_pcc_id) {
822 if (update_best_pce(ctrl_state->pcc, new_best_pcc_id) == true) {
823 pcep_thread_start_sync(ctrl_state, new_best_pcc_id);
824 }
825 }
826
827 return 0;
828 }
829
830 /* Called by path_pcep_controller EV_SYNC_PATH
831 * Event handler when a path is sync'd. */
/* Called by path_pcep_controller EV_SYNC_PATH
 * Event handler when a path is sync'd.
 *
 * When the syncing PCC is the current best and a previous best still
 * exists, the previous best is re-elected and resynced (its in-flight
 * computation requests are cancelled first). */
int pcep_pcc_multi_pce_sync_path(struct ctrl_state *ctrl_state, int pcc_id,
				 struct pcc_state **pcc)
{
	int previous_best_pcc_id = -1;

	if (pcc_id == get_best_pce(pcc)) {
		previous_best_pcc_id = get_previous_best_pce(pcc);
		if (previous_best_pcc_id != 0) {
			/* while adding new pce, path has to resync to the
			 * previous best. pcep_thread_start_sync() will be
			 * called by the calling function */
			if (update_best_pce(ctrl_state->pcc,
					    previous_best_pcc_id)
			    == true) {
				cancel_comp_requests(
					ctrl_state,
					pcep_pcc_get_pcc_by_id(
						pcc, previous_best_pcc_id));
				pcep_thread_start_sync(ctrl_state,
						       previous_best_pcc_id);
			}
		}
	}

	return 0;
}
858
859 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
860 * timer expires */
861 int pcep_pcc_timer_update_best_pce(struct ctrl_state *ctrl_state, int pcc_id)
862 {
863 int ret = 0;
864 /* resync whatever was the new best */
865 int prev_best = get_best_pce(ctrl_state->pcc);
866 int best_id = pcep_pcc_calculate_best_pce(ctrl_state->pcc);
867 if (best_id && prev_best != best_id) { // Avoid Multiple call
868 struct pcc_state *pcc_state =
869 pcep_pcc_get_pcc_by_id(ctrl_state->pcc, best_id);
870 if (update_best_pce(ctrl_state->pcc, pcc_state->id) == true) {
871 pcep_thread_start_sync(ctrl_state, pcc_state->id);
872 }
873 }
874
875 return ret;
876 }
877
878 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
879 * Returns the best PCE id */
/* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
 * Returns the best PCE id (0 when no PCE is configured).
 *
 * Election rules, in order:
 *  - lowest precedence among connected PCEs wins;
 *  - on a precedence tie, the higher address (ipaddr_cmp) wins;
 *  - a PCE flagged previous_best is not re-elected while a tie-free
 *    candidate exists;
 *  - with no eligible candidate, fall back to any connected PCE, then
 *    to the first configured (even disconnected) one.
 * When the winner changes, is_best/previous_best flags are updated under
 * g_pcc_info_mtx. */
int pcep_pcc_calculate_best_pce(struct pcc_state **pcc)
{
	int best_precedence = 255; // DEFAULT_PCE_PRECEDENCE;
	int best_pce = -1;
	int one_connected_pce = -1;
	int previous_best_pce = -1;
	int step_0_best = -1;
	int step_0_previous = -1;
	int pcc_count = 0;

	// Get state
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts) {
			zlog_debug(
				"multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
				i, pcc[i]->is_best, pcc[i]->previous_best);
			pcc_count++;

			if (pcc[i]->is_best == true) {
				step_0_best = i;
			}
			if (pcc[i]->previous_best == true) {
				step_0_previous = i;
			}
		}
	}

	/* Nothing configured: no best PCE. */
	if (!pcc_count) {
		return 0;
	}

	// Calculate best
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts
		    && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
			one_connected_pce = i; // In case none better
			if (pcc[i]->pce_opts->precedence <= best_precedence) {
				if (best_pce != -1
				    && pcc[best_pce]->pce_opts->precedence
					       == pcc[i]->pce_opts
							  ->precedence) {
					if (ipaddr_cmp(
						    &pcc[i]->pce_opts->addr,
						    &pcc[best_pce]
							     ->pce_opts->addr)
					    > 0)
						// collide of precedences so
						// compare ip
						best_pce = i;
				} else {
					if (!pcc[i]->previous_best) {
						best_precedence =
							pcc[i]->pce_opts
								->precedence;
						best_pce = i;
					}
				}
			}
		}
	}

	zlog_debug(
		"multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
		step_0_best, step_0_previous, one_connected_pce, best_pce);

	// Changed of state so ...
	if (step_0_best != best_pce) {
		pthread_mutex_lock(&g_pcc_info_mtx);
		// Calculate previous
		previous_best_pce = step_0_best;
		// Clean state
		if (step_0_best != -1) {
			pcc[step_0_best]->is_best = false;
		}
		if (step_0_previous != -1) {
			pcc[step_0_previous]->previous_best = false;
		}

		// Set previous
		if (previous_best_pce != -1
		    && pcc[previous_best_pce]->status
			       == PCEP_PCC_DISCONNECTED) {
			pcc[previous_best_pce]->previous_best = true;
			zlog_debug("multi-pce: previous best pce (%i) ",
				   previous_best_pce + 1);
		}


		// Set best
		if (best_pce != -1) {
			pcc[best_pce]->is_best = true;
			zlog_debug("multi-pce: best pce (%i) ", best_pce + 1);
		} else {
			/* No eligible candidate: prefer any connected PCE,
			 * otherwise the first configured one. */
			if (one_connected_pce != -1) {
				best_pce = one_connected_pce;
				pcc[one_connected_pce]->is_best = true;
				zlog_debug(
					"multi-pce: one connected best pce (default) (%i) ",
					one_connected_pce + 1);
			} else {
				for (int i = 0; i < MAX_PCC; i++) {
					if (pcc[i] && pcc[i]->pce_opts) {
						best_pce = i;
						pcc[i]->is_best = true;
						zlog_debug(
							"(disconnected) best pce (default) (%i) ",
							i + 1);
						break;
					}
				}
			}
		}
		pthread_mutex_unlock(&g_pcc_info_mtx);
	}

	return ((best_pce == -1) ? 0 : pcc[best_pce]->id);
}
997
998 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state **pcc,
999 struct pce_opts *pce_opts)
1000 {
1001 if (pcc == NULL) {
1002 return 0;
1003 }
1004
1005 for (int idx = 0; idx < MAX_PCC; idx++) {
1006 if (pcc[idx]) {
1007 if ((ipaddr_cmp((const struct ipaddr *)&pcc[idx]
1008 ->pce_opts->addr,
1009 (const struct ipaddr *)&pce_opts->addr)
1010 == 0)
1011 && pcc[idx]->pce_opts->port == pce_opts->port) {
1012 zlog_debug("found pcc_id (%d) idx (%d)",
1013 pcc[idx]->id, idx);
1014 return pcc[idx]->id;
1015 }
1016 }
1017 }
1018 return 0;
1019 }
1020
1021 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state **pcc, int idx)
1022 {
1023 if (pcc == NULL || idx < 0) {
1024 return 0;
1025 }
1026
1027 return pcc[idx] ? pcc[idx]->id : 0;
1028 }
1029
1030 struct pcc_state *pcep_pcc_get_pcc_by_id(struct pcc_state **pcc, int id)
1031 {
1032 if (pcc == NULL || id < 0) {
1033 return NULL;
1034 }
1035
1036 for (int i = 0; i < MAX_PCC; i++) {
1037 if (pcc[i]) {
1038 if (pcc[i]->id == id) {
1039 zlog_debug("found id (%d) pcc_idx (%d)",
1040 pcc[i]->id, i);
1041 return pcc[i];
1042 }
1043 }
1044 }
1045
1046 return NULL;
1047 }
1048
1049 struct pcc_state *pcep_pcc_get_pcc_by_name(struct pcc_state **pcc,
1050 const char *pce_name)
1051 {
1052 if (pcc == NULL || pce_name == NULL) {
1053 return NULL;
1054 }
1055
1056 for (int i = 0; i < MAX_PCC; i++) {
1057 if (pcc[i] == NULL) {
1058 continue;
1059 }
1060
1061 if (strcmp(pcc[i]->pce_opts->pce_name, pce_name) == 0) {
1062 return pcc[i];
1063 }
1064 }
1065
1066 return NULL;
1067 }
1068
1069 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state **pcc, int id)
1070 {
1071 if (pcc == NULL) {
1072 return -1;
1073 }
1074
1075 for (int idx = 0; idx < MAX_PCC; idx++) {
1076 if (pcc[idx]) {
1077 if (pcc[idx]->id == id) {
1078 zlog_debug("found pcc_id (%d) array_idx (%d)",
1079 pcc[idx]->id, idx);
1080 return idx;
1081 }
1082 }
1083 }
1084
1085 return -1;
1086 }
1087
1088 int pcep_pcc_get_free_pcc_idx(struct pcc_state **pcc)
1089 {
1090 assert(pcc != NULL);
1091
1092 for (int idx = 0; idx < MAX_PCC; idx++) {
1093 if (pcc[idx] == NULL) {
1094 zlog_debug("new pcc_idx (%d)", idx);
1095 return idx;
1096 }
1097 }
1098
1099 return -1;
1100 }
1101
1102 int pcep_pcc_get_pcc_id(struct pcc_state *pcc)
1103 {
1104 return ((pcc == NULL) ? 0 : pcc->id);
1105 }
1106
1107 void pcep_pcc_copy_pcc_info(struct pcc_state **pcc,
1108 struct pcep_pcc_info *pcc_info)
1109 {
1110 struct pcc_state *pcc_state =
1111 pcep_pcc_get_pcc_by_name(pcc, pcc_info->pce_name);
1112 if (!pcc_state) {
1113 return;
1114 }
1115
1116 pcc_info->ctrl_state = NULL;
1117 if(pcc_state->pcc_opts){
1118 pcc_info->msd = pcc_state->pcc_opts->msd;
1119 pcc_info->pcc_port = pcc_state->pcc_opts->port;
1120 }
1121 pcc_info->next_plspid = pcc_state->next_plspid;
1122 pcc_info->next_reqid = pcc_state->next_reqid;
1123 pcc_info->status = pcc_state->status;
1124 pcc_info->pcc_id = pcc_state->id;
1125 pthread_mutex_lock(&g_pcc_info_mtx);
1126 pcc_info->is_best_multi_pce = pcc_state->is_best;
1127 pcc_info->previous_best = pcc_state->previous_best;
1128 pthread_mutex_unlock(&g_pcc_info_mtx);
1129 pcc_info->precedence =
1130 pcc_state->pce_opts ? pcc_state->pce_opts->precedence : 0;
1131 if(pcc_state->pcc_addr_tr.ipa_type != IPADDR_NONE){
1132 memcpy(&pcc_info->pcc_addr, &pcc_state->pcc_addr_tr,
1133 sizeof(struct ipaddr));
1134 }
1135 }
1136
1137
1138 /*------------------ PCEP Message handlers --------------------- */
1139
/* Handle a PCEP OPEN message from the PCE: parse the capabilities it
 * advertises into pcc_state->caps and log a human-readable summary. */
void handle_pcep_open(struct ctrl_state *ctrl_state,
		      struct pcc_state *pcc_state, struct pcep_message *msg)
{
	assert(msg->msg_header->type == PCEP_TYPE_OPEN);
	pcep_lib_parse_capabilities(msg, &pcc_state->caps);
	/* The nested ternary only picks the debug wording; the actual set
	 * of supported objective functions is formatted by the last
	 * argument (empty-looking when the set is unknown or empty). */
	PCEP_DEBUG("PCE capabilities: %s, %s%s",
		   pcc_state->caps.is_stateful ? "stateful" : "stateless",
		   pcc_state->caps.supported_ofs_are_known
			   ? (pcc_state->caps.supported_ofs == 0
				      ? "no objective functions supported"
				      : "supported objective functions are ")
			   : "supported objective functions are unknown",
		   format_objfun_set(pcc_state->caps.supported_ofs));
}
1154
/* Dispatch an incoming PCEP message to its specific handler.  Messages
 * received before the session reaches the OPERATING state are dropped.
 * The switch deliberately lists every enum value (instead of using
 * default:) so -Wswitch catches new message types. */
void handle_pcep_message(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct pcep_message *msg)
{
	if (pcc_state->status != PCEP_PCC_OPERATING)
		return;

	switch (msg->msg_header->type) {
	case PCEP_TYPE_INITIATE:
		handle_pcep_lsp_initiate(ctrl_state, pcc_state, msg);
		break;
	case PCEP_TYPE_UPDATE:
		handle_pcep_lsp_update(ctrl_state, pcc_state, msg);
		break;
	case PCEP_TYPE_PCREP:
		handle_pcep_comp_reply(ctrl_state, pcc_state, msg);
		break;
	/* All remaining message types are either handled by pceplib itself
	 * (OPEN, KEEPALIVE, ...) or not expected on a PCC, so log them. */
	case PCEP_TYPE_OPEN:
	case PCEP_TYPE_KEEPALIVE:
	case PCEP_TYPE_PCREQ:
	case PCEP_TYPE_PCNOTF:
	case PCEP_TYPE_ERROR:
	case PCEP_TYPE_CLOSE:
	case PCEP_TYPE_REPORT:
	case PCEP_TYPE_START_TLS:
	case PCEP_TYPE_MAX:
		flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE,
			  "Unexpected pcep message from pceplib: %s",
			  format_pcep_message(msg));
		break;
	}
}
1186
/* Handle a PCUpd message: parse it into a path, resolve the northbound
 * key from the PLSP-ID, then hand the path to the controller thread for
 * refinement.  Ownership of the parsed path transfers to the refine
 * callback (continue_pcep_lsp_update). */
void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state,
			    struct pcep_message *msg)
{
	struct path *path;
	path = pcep_lib_parse_path(msg);
	lookup_nbkey(pcc_state, path);
	pcep_thread_refine_path(ctrl_state, pcc_state->id,
				&continue_pcep_lsp_update, path, NULL);
}
1197
/* Continuation of handle_pcep_lsp_update, called once the controller
 * thread has refined the path.  Validates the update and either applies
 * it (ownership of path passes to pcep_thread_update_path) or rejects
 * it and frees the path. */
void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state, struct path *path,
			      void *payload)
{
	char err[MAX_ERROR_MSG_SIZE] = {0};

	specialize_incoming_path(pcc_state, path);
	PCEP_DEBUG("%s Received LSP update", pcc_state->tag);
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (validate_incoming_path(pcc_state, path, err, sizeof(err)))
		pcep_thread_update_path(ctrl_state, pcc_state->id, path);
	else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
		pcep_free_path(path);
	}
}
1218
/* Handle a PCInitiate message (RFC 8281): either a request to remove a
 * previously initiated LSP (path->do_remove) or a request to create a
 * new one.  Invalid requests are answered with the appropriate PCErr.
 * On success, ownership of the parsed path transfers to
 * pcep_thread_initiate_path. */
void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state,
			      struct pcep_message *msg)
{
	char err[MAX_ERROR_MSG_SIZE] = "";
	struct path *path;

	path = pcep_lib_parse_path(msg);

	if (!pcc_state->pce_opts->config_opts.pce_initiated) {
		/* PCE Initiated is not enabled */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Not allowed PCE initiated path received: %s",
			  format_pcep_message(msg));
		send_pcep_error(pcc_state, PCEP_ERRT_LSP_INSTANTIATE_ERROR,
				PCEP_ERRV_UNACCEPTABLE_INSTANTIATE_ERROR, path);
		/* NOTE(review): `path` does not appear to be freed on this
		 * (or the following) early returns — confirm whether
		 * send_pcep_error takes ownership or this leaks. */
		return;
	}

	if (path->do_remove) {
		/* Removal request carries no endpoint, so the northbound
		 * key must be found by scanning the map for the PLSP-ID. */
		// lookup in nbkey sequential as no endpoint
		struct nbkey_map_data *key;
		char endpoint[46];

		frr_each (nbkey_map, &pcc_state->nbkey_map, key) {
			ipaddr2str(&key->nbkey.endpoint, endpoint,
				   sizeof(endpoint));
			flog_warn(
				EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
				"FOR_EACH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
				key->nbkey.color, endpoint, path->plsp_id);
			if (path->plsp_id == key->plspid) {
				flog_warn(
					EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
					"FOR_EACH MATCH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
					key->nbkey.color, endpoint,
					path->plsp_id);
				path->nbkey = key->nbkey;
				break;
			}
		}
	} else {
		if (path->first_hop == NULL /*ero sets first_hop*/) {
			/* If the PCC receives a PCInitiate message without an
			 * ERO and the R flag in the SRP object != zero, then it
			 * MUST send a PCErr message with Error-type=6
			 * (Mandatory Object missing) and Error-value=9 (ERO
			 * object missing). */
			flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
				  "ERO object missing or incomplete : %s",
				  format_pcep_message(msg));
			send_pcep_error(pcc_state,
					PCEP_ERRT_LSP_INSTANTIATE_ERROR,
					PCEP_ERRV_INTERNAL_ERROR, path);
			return;
		}

		if (path->plsp_id != 0) {
			/* If the PCC receives a PCInitiate message with a
			 * non-zero PLSP-ID and the R flag in the SRP object set
			 * to zero, then it MUST send a PCErr message with
			 * Error-type=19 (Invalid Operation) and Error-value=8
			 * (Non-zero PLSP-ID in the LSP Initiate Request) */
			flog_warn(
				EC_PATH_PCEP_PROTOCOL_ERROR,
				"PCE initiated path with non-zero PLSP ID: %s",
				format_pcep_message(msg));
			send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
					PCEP_ERRV_LSP_INIT_NON_ZERO_PLSP_ID,
					path);
			return;
		}

		if (path->name == NULL) {
			/* If the PCC receives a PCInitiate message without a
			 * SYMBOLIC-PATH-NAME TLV, then it MUST send a PCErr
			 * message with Error-type=10 (Reception of an invalid
			 * object) and Error-value=8 (SYMBOLIC-PATH-NAME TLV
			 * missing) */
			flog_warn(
				EC_PATH_PCEP_PROTOCOL_ERROR,
				"PCE initiated path without symbolic name: %s",
				format_pcep_message(msg));
			send_pcep_error(
				pcc_state, PCEP_ERRT_RECEPTION_OF_INV_OBJECT,
				PCEP_ERRV_SYMBOLIC_PATH_NAME_TLV_MISSING, path);
			return;
		}
	}

	/* TODO: If there is a conflict with the symbolic path name of an
	 * existing LSP, the PCC MUST send a PCErr message with Error-type=23
	 * (Bad Parameter value) and Error-value=1 (SYMBOLIC-PATH-NAME in
	 * use) */

	specialize_incoming_path(pcc_state, path);
	/* TODO: Validate the PCC address received from the PCE is valid */
	PCEP_DEBUG("%s Received LSP initiate", pcc_state->tag);
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
		pcep_thread_initiate_path(ctrl_state, pcc_state->id, path);
	} else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
		send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
				PCEP_ERRV_LSP_NOT_PCE_INITIATED, path);
		pcep_free_path(path);
	}
}
1331
/* Handle a PCRep (path computation reply): match it to the pending
 * request by request id, transfer request metadata onto the reply path,
 * then either apply the computed path or, on NO-PATH / validation
 * failure, keep the LSP delegated by re-sending a report of the
 * original (undefined) path. */
void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state,
			    struct pcep_message *msg)
{
	char err[MAX_ERROR_MSG_SIZE] = "";
	struct req_entry *req;
	struct path *path;

	path = pcep_lib_parse_path(msg);
	/* A NO-PATH reply keeps the reqid mapping alive (pop_req_no_reqid)
	 * so a later retry can still be matched; a normal reply removes
	 * the mapping as well. */
	if (path->no_path) {
		req = pop_req_no_reqid(pcc_state, path->req_id);
	} else {
		req = pop_req(pcc_state, path->req_id);
	}
	if (req == NULL) {
		/* TODO: check the rate of bad computation reply and close
		 * the connection if more that a given rate.
		 */
		PCEP_DEBUG(
			"%s Received computation reply for unknown request %d",
			pcc_state->tag, path->req_id);
		PCEP_DEBUG_PATH("%s", format_path(path));
		send_pcep_error(pcc_state, PCEP_ERRT_UNKNOWN_REQ_REF,
				PCEP_ERRV_UNASSIGNED, NULL);
		/* NOTE(review): `path` is not freed on this return — confirm
		 * whether this is an intentional leak-avoidance elsewhere. */
		return;
	}

	/* Cancel the computation request timeout */
	pcep_thread_cancel_timer(&req->t_retry);

	/* Transfer relevent metadata from the request to the response */
	path->nbkey = req->path->nbkey;
	path->plsp_id = req->path->plsp_id;
	path->type = req->path->type;
	path->name = XSTRDUP(MTYPE_PCEP, req->path->name);
	specialize_incoming_path(pcc_state, path);

	PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
		   pcc_state->tag, path->req_id,
		   path->no_path ? "true" : "false");
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (path->no_path) {
		PCEP_DEBUG("%s Computation for path %s did not find any result",
			   pcc_state->tag, path->name);
		free_req_entry(req);
		pcep_free_path(path);
		return;
	} else if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
		/* Updating a dynamic path will automatically delegate it */
		pcep_thread_update_path(ctrl_state, pcc_state->id, path);
		free_req_entry(req);
		return;
	} else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
	}

	pcep_free_path(path);

	/* Delegate the path regardless of the outcome */
	/* TODO: For now we are using the path from the request, when
	 * pathd API is thread safe, we could get a new path */
	if (pcc_state->caps.is_stateful) {
		PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
			   pcc_state->tag, req->path->name,
			   pcc_state->originator);
		path = pcep_copy_path(req->path);
		path->is_delegated = true;
		send_report(pcc_state, path);
		pcep_free_path(path);
	}

	free_req_entry(req);
}
1409
1410
1411 /* ------------ Internal Functions ------------ */
1412
/* Human-readable name of the address family of addr. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	/* The family checks are mutually exclusive, so the order does not
	 * matter. */
	if (IS_IPADDR_V6(addr))
		return "IPv6";
	if (IS_IPADDR_V4(addr))
		return "IPv4";
	return "undefined";
}
1421
1422 bool filter_path(struct pcc_state *pcc_state, struct path *path)
1423 {
1424 return (IS_IPADDR_V4(&path->nbkey.endpoint)
1425 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4))
1426 || (IS_IPADDR_V6(&path->nbkey.endpoint)
1427 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1428 }
1429
1430 void select_pcc_addresses(struct pcc_state *pcc_state)
1431 {
1432 /* If no IPv4 address was specified, try to get one from zebra */
1433 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1434 if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) {
1435 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
1436 }
1437 }
1438
1439 /* If no IPv6 address was specified, try to get one from zebra */
1440 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1441 if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) {
1442 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
1443 }
1444 }
1445 }
1446
1447 void select_transport_address(struct pcc_state *pcc_state)
1448 {
1449 struct ipaddr *taddr = &pcc_state->pcc_addr_tr;
1450
1451 select_pcc_addresses(pcc_state);
1452
1453 taddr->ipa_type = IPADDR_NONE;
1454
1455 /* Select a transport source address in function of the configured PCE
1456 * address */
1457 if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) {
1458 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1459 taddr->ipaddr_v4 = pcc_state->pcc_addr_v4;
1460 taddr->ipa_type = IPADDR_V4;
1461 }
1462 } else {
1463 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1464 taddr->ipaddr_v6 = pcc_state->pcc_addr_v6;
1465 taddr->ipa_type = IPADDR_V6;
1466 }
1467 }
1468 }
1469
/* Rebuild the logging tag of this PCC ("addr:port (id)", or just
 * "(id)" when no PCE is configured yet). */
void update_tag(struct pcc_state *pcc_state)
{
	if (pcc_state->pce_opts != NULL) {
		assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
		if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   "%pI6:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v6,
				   pcc_state->pce_opts->port, pcc_state->id);
		} else {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   "%pI4:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v4,
				   pcc_state->pce_opts->port, pcc_state->id);
		}
	} else {
		snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)",
			   pcc_state->id);
	}
}
1490
1491 void update_originator(struct pcc_state *pcc_state)
1492 {
1493 char *originator;
1494 if (pcc_state->originator != NULL) {
1495 XFREE(MTYPE_PCEP, pcc_state->originator);
1496 pcc_state->originator = NULL;
1497 }
1498 if (pcc_state->pce_opts == NULL)
1499 return;
1500 originator = XCALLOC(MTYPE_PCEP, 52);
1501 assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
1502 if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
1503 snprintfrr(originator, 52, "%pI6:%i",
1504 &pcc_state->pce_opts->addr.ipaddr_v6,
1505 pcc_state->pce_opts->port);
1506 } else {
1507 snprintfrr(originator, 52, "%pI4:%i",
1508 &pcc_state->pce_opts->addr.ipaddr_v4,
1509 pcc_state->pce_opts->port);
1510 }
1511 pcc_state->originator = originator;
1512 }
1513
/* Schedule a reconnection attempt to the PCE with a retry counter-based
 * delay.  On the first retry only, also schedule the best-PCE
 * re-election after the configured delegation timeout. */
void schedule_reconnect(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	pcc_state->retry_count++;
	pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id,
				       pcc_state->retry_count,
				       &pcc_state->t_reconnect);
	if (pcc_state->retry_count == 1) {
		pcep_thread_schedule_sync_best_pce(
			ctrl_state, pcc_state->id,
			pcc_state->pce_opts->config_opts
				.delegation_timeout_seconds,
			&pcc_state->t_update_best);
	}
}
1529
/* Arm the session timeout timer for this PCC, unless at least one other
 * PCE is still connected (multi-PCE mode keeps state without it). */
void schedule_session_timeout(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state)
{
	/* No need to schedule timeout if multiple PCEs are connected */
	if (get_pce_count_connected(ctrl_state->pcc)) {
		PCEP_DEBUG_PCEP(
			"schedule_session_timeout not setting timer for multi-pce mode");

		return;
	}

	/* NOTE: "inteval" is a typo in the config_opts field name; it
	 * cannot be fixed here without touching the struct definition. */
	pcep_thread_schedule_session_timeout(
		ctrl_state, pcep_pcc_get_pcc_id(pcc_state),
		pcc_state->pce_opts->config_opts
			.session_timeout_inteval_seconds,
		&pcc_state->t_session_timeout);
}
1547
1548 void cancel_session_timeout(struct ctrl_state *ctrl_state,
1549 struct pcc_state *pcc_state)
1550 {
1551 /* No need to schedule timeout if multiple PCEs are connected */
1552 if (pcc_state->t_session_timeout == NULL) {
1553 PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
1554 return;
1555 }
1556
1557 PCEP_DEBUG_PCEP("Cancel session_timeout timer");
1558 pcep_thread_cancel_timer(&pcc_state->t_session_timeout);
1559 pcc_state->t_session_timeout = NULL;
1560 }
1561
1562 void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg)
1563 {
1564 if (pcc_state->sess != NULL) {
1565 PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag,
1566 format_pcep_message(msg));
1567 send_message(pcc_state->sess, msg, true);
1568 }
1569 }
1570
1571 void send_pcep_error(struct pcc_state *pcc_state,
1572 enum pcep_error_type error_type,
1573 enum pcep_error_value error_value,
1574 struct path *trigger_path)
1575 {
1576 struct pcep_message *msg;
1577 PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
1578 pcc_state->tag, pcep_error_type_name(error_type), error_type,
1579 pcep_error_value_name(error_type, error_value), error_value);
1580 msg = pcep_lib_format_error(error_type, error_value, trigger_path);
1581 send_pcep_message(pcc_state, msg);
1582 }
1583
1584 void send_report(struct pcc_state *pcc_state, struct path *path)
1585 {
1586 struct pcep_message *report;
1587
1588 path->req_id = 0;
1589 specialize_outgoing_path(pcc_state, path);
1590 PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name,
1591 format_path(path));
1592 report = pcep_lib_format_report(&pcc_state->caps, path);
1593 send_pcep_message(pcc_state, report);
1594 }
1595
1596 /* Updates the path for the PCE, updating the delegation and creation flags */
1597 void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path)
1598 {
1599 bool is_delegated = false;
1600 bool was_created = false;
1601
1602 lookup_plspid(pcc_state, path);
1603
1604 set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
1605 path->sender = pcc_state->pcc_addr_tr;
1606
1607 /* TODO: When the pathd API have a way to mark a path as
1608 * delegated, use it instead of considering all dynamic path
1609 * delegated. We need to disable the originator check for now,
1610 * because path could be delegated without having any originator yet */
1611 // if ((path->originator == NULL)
1612 // || (strcmp(path->originator, pcc_state->originator) == 0)) {
1613 // is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
1614 // && (path->first_hop != NULL);
1615 // /* it seems the PCE consider updating an LSP a creation ?!?
1616 // at least Cisco does... */
1617 // was_created = path->update_origin == SRTE_ORIGIN_PCEP;
1618 // }
1619 is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC);
1620 was_created = path->update_origin == SRTE_ORIGIN_PCEP;
1621
1622 path->pcc_id = pcc_state->id;
1623 path->go_active = is_delegated && pcc_state->is_best;
1624 path->is_delegated = is_delegated && pcc_state->is_best;
1625 path->was_created = was_created;
1626 }
1627
1628 /* Updates the path for the PCC */
1629 void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path)
1630 {
1631 if (IS_IPADDR_NONE(&path->pcc_addr))
1632 set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
1633 path->sender = pcc_state->pce_opts->addr;
1634 path->pcc_id = pcc_state->id;
1635 path->update_origin = SRTE_ORIGIN_PCEP;
1636 path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator);
1637 }
1638
1639 /* Ensure the path can be handled by the PCC and if not, sends an error */
1640 bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path,
1641 char *errbuff, size_t buffsize)
1642 {
1643 struct path_hop *hop;
1644 enum pcep_error_type err_type = 0;
1645 enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED;
1646
1647 for (hop = path->first_hop; hop != NULL; hop = hop->next) {
1648 /* Hops without SID are not supported */
1649 if (!hop->has_sid) {
1650 snprintfrr(errbuff, buffsize, "SR segment without SID");
1651 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1652 err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING;
1653 break;
1654 }
1655 /* Hops with non-MPLS SID are not supported */
1656 if (!hop->is_mpls) {
1657 snprintfrr(errbuff, buffsize,
1658 "SR segment with non-MPLS SID");
1659 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1660 err_value = PCEP_ERRV_UNSUPPORTED_NAI;
1661 break;
1662 }
1663 }
1664
1665 if (err_type != 0) {
1666 send_pcep_error(pcc_state, err_type, err_value, NULL);
1667 return false;
1668 }
1669
1670 return true;
1671 }
1672
/* Send a PCReq (path computation request) to the PCE for the given
 * pending request entry, and arm its retry timeout.  A request whose
 * retry timer is already running, or handled by a PCC that is not the
 * elected best PCE, is skipped. */
void send_comp_request(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state, struct req_entry *req)
{
	assert(req != NULL);

	/* Retry timer still pending: the request is already in flight. */
	if (req->t_retry)
		return;

	/* Sanity: the entry must be registered in both the request tree
	 * and the reqid map before it can be sent. */
	assert(req->path != NULL);
	assert(req->path->req_id > 0);
	assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req);
	assert(lookup_reqid(pcc_state, req->path) == req->path->req_id);

	int timeout;
	struct pcep_message *msg;

	/* Only the elected best PCE receives computation requests. */
	if (!pcc_state->is_best) {
		return;
	}

	specialize_outgoing_path(pcc_state, req->path);

	PCEP_DEBUG(
		"%s Sending computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag,
			req->path->name, format_path(req->path));

	msg = pcep_lib_format_request(&pcc_state->caps, req->path);
	send_pcep_message(pcc_state, msg);
	req->was_sent = true;

	/* Arm the per-request timeout; expiry triggers a retry via the
	 * controller thread. */
	timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
	pcep_thread_schedule_timeout(ctrl_state, pcc_state->id,
				     TO_COMPUTATION_REQUEST, timeout,
				     (void *)req, &req->t_retry);
}
1711
1712 void cancel_comp_requests(struct ctrl_state *ctrl_state,
1713 struct pcc_state *pcc_state)
1714 {
1715 struct req_entry *req, *safe_req;
1716
1717 RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) {
1718 cancel_comp_request(ctrl_state, pcc_state, req);
1719 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1720 remove_reqid_mapping(pcc_state, req->path);
1721 free_req_entry(req);
1722 }
1723 }
1724
/* Cancel a single pending computation request: stop its retry timer (if
 * it was already sent) and notify the PCE the request is cancelled. */
void cancel_comp_request(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct req_entry *req)
{
	struct pcep_message *msg;

	if (req->was_sent) {
		/* TODO: Send a computation request cancelation
		 * notification to the PCE */
		pcep_thread_cancel_timer(&req->t_retry);
	}

	PCEP_DEBUG(
		"%s Canceling computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
			pcc_state->tag, req->path->name,
			format_path(req->path));

	msg = pcep_lib_format_request_cancelled(req->path->req_id);
	send_pcep_message(pcc_state, msg);
}
1747
1748 void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey,
1749 struct ipaddr *addr)
1750 {
1751 select_pcc_addresses(pcc_state);
1752 if (IS_IPADDR_V6(&nbkey->endpoint)) {
1753 assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1754 addr->ipa_type = IPADDR_V6;
1755 addr->ipaddr_v6 = pcc_state->pcc_addr_v6;
1756 } else if (IS_IPADDR_V4(&nbkey->endpoint)) {
1757 assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4));
1758 addr->ipa_type = IPADDR_V4;
1759 addr->ipaddr_v4 = pcc_state->pcc_addr_v4;
1760 } else {
1761 addr->ipa_type = IPADDR_NONE;
1762 }
1763 }
1764
1765 /* ------------ Data Structure Helper Functions ------------ */
1766
/* Resolve path->plsp_id from the path's NB key, allocating a fresh
 * PLSP-ID (and registering it in both direction maps) on first sight of
 * the key.  Paths with color 0 are left untouched. */
void lookup_plspid(struct pcc_state *pcc_state, struct path *path)
{
	struct plspid_map_data key, *plspid_mapping;
	struct nbkey_map_data *nbkey_mapping;

	if (path->nbkey.color != 0) {
		key.nbkey = path->nbkey;
		plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key);
		if (plspid_mapping == NULL) {
			/* First time we see this NB key: allocate the next
			 * PLSP-ID and record the mapping in both directions
			 * (nbkey -> plspid and plspid -> nbkey). */
			plspid_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping));
			plspid_mapping->nbkey = key.nbkey;
			plspid_mapping->plspid = pcc_state->next_plspid;
			plspid_map_add(&pcc_state->plspid_map, plspid_mapping);
			nbkey_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping));
			nbkey_mapping->nbkey = key.nbkey;
			nbkey_mapping->plspid = pcc_state->next_plspid;
			nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping);
			pcc_state->next_plspid++;
			// FIXME: Send some error to the PCE instead of crashing
			/* 1048576 == 2^20 — presumably the PLSP-ID field is
			 * 20 bits wide; confirm against RFC 8231. */
			assert(pcc_state->next_plspid <= 1048576);
		}
		path->plsp_id = plspid_mapping->plspid;
	}
}
1793
1794 void lookup_nbkey(struct pcc_state *pcc_state, struct path *path)
1795 {
1796 struct nbkey_map_data key, *mapping;
1797 // TODO: Should give an error to the PCE instead of crashing
1798 assert(path->plsp_id != 0);
1799 key.plspid = path->plsp_id;
1800 mapping = nbkey_map_find(&pcc_state->nbkey_map, &key);
1801 assert(mapping != NULL);
1802 path->nbkey = mapping->nbkey;
1803 }
1804
/* Release a request entry together with the path it owns. */
void free_req_entry(struct req_entry *req)
{
	pcep_free_path(req->path);
	XFREE(MTYPE_PCEP, req);
}
1810
1811 struct req_entry *push_new_req(struct pcc_state *pcc_state, struct path *path)
1812 {
1813 struct req_entry *req;
1814
1815 req = XCALLOC(MTYPE_PCEP, sizeof(*req));
1816 req->retry_count = 0;
1817 req->path = pcep_copy_path(path);
1818 repush_req(pcc_state, req);
1819
1820 return req;
1821 }
1822
1823 void repush_req(struct pcc_state *pcc_state, struct req_entry *req)
1824 {
1825 uint32_t reqid = pcc_state->next_reqid;
1826 void *res;
1827
1828 req->was_sent = false;
1829 req->path->req_id = reqid;
1830 res = RB_INSERT(req_entry_head, &pcc_state->requests, req);
1831 assert(res == NULL);
1832 assert(add_reqid_mapping(pcc_state, req->path) == true);
1833
1834 pcc_state->next_reqid += 1;
1835 /* Wrapping is allowed, but 0 is not a valid id */
1836 if (pcc_state->next_reqid == 0)
1837 pcc_state->next_reqid = 1;
1838 }
1839
1840 struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid)
1841 {
1842 struct path path = {.req_id = reqid};
1843 struct req_entry key = {.path = &path};
1844 struct req_entry *req;
1845
1846 req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
1847 if (req == NULL)
1848 return NULL;
1849 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1850 remove_reqid_mapping(pcc_state, req->path);
1851
1852 return req;
1853 }
1854
1855 struct req_entry *pop_req_no_reqid(struct pcc_state *pcc_state, uint32_t reqid)
1856 {
1857 struct path path = {.req_id = reqid};
1858 struct req_entry key = {.path = &path};
1859 struct req_entry *req;
1860
1861 req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
1862 if (req == NULL)
1863 return NULL;
1864 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1865
1866 return req;
1867 }
1868
1869 bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1870 {
1871 struct req_map_data *mapping;
1872 mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping));
1873 mapping->nbkey = path->nbkey;
1874 mapping->reqid = path->req_id;
1875 if (req_map_add(&pcc_state->req_map, mapping) != NULL) {
1876 XFREE(MTYPE_PCEP, mapping);
1877 return false;
1878 }
1879 return true;
1880 }
1881
1882 void remove_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1883 {
1884 struct req_map_data key, *mapping;
1885 key.nbkey = path->nbkey;
1886 mapping = req_map_find(&pcc_state->req_map, &key);
1887 if (mapping != NULL) {
1888 req_map_del(&pcc_state->req_map, mapping);
1889 XFREE(MTYPE_PCEP, mapping);
1890 }
1891 }
1892
1893 uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path)
1894 {
1895 struct req_map_data key, *mapping;
1896 key.nbkey = path->nbkey;
1897 mapping = req_map_find(&pcc_state->req_map, &key);
1898 if (mapping != NULL)
1899 return mapping->reqid;
1900 return 0;
1901 }
1902
/* Tell whether a computation request is still pending for this path's
 * NB key.  As a side effect, when the path carries no result (no_path
 * or no first hop) and its request entry is already gone, any stale
 * reqid mapping left behind is purged. */
bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path)
{
	struct req_entry key = {.path = path};
	struct req_entry *req;


	PCEP_DEBUG_PATH("(%s) %s", format_path(path), __func__);
	/* Looking for request without result */
	if (path->no_path || !path->first_hop) {
		PCEP_DEBUG_PATH("%s Path : no_path|!first_hop", __func__);
		/* ...and already was handle */
		req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
		if (!req) {
			/* we must purge remaining reqid */
			PCEP_DEBUG_PATH("%s Purge pending reqid: no_path(%s)",
					__func__,
					path->no_path ? "TRUE" : "FALSE");
			if (lookup_reqid(pcc_state, path) != 0) {
				PCEP_DEBUG_PATH("%s Purge pending reqid: DONE ",
						__func__);
				remove_reqid_mapping(pcc_state, path);
				/* Report "pending" once so the caller treats
				 * this reply as matching the purged request. */
				return true;
			} else {
				return false;
			}
		}
	}


	return lookup_reqid(pcc_state, path) != 0;
}
1934
1935
1936 /* ------------ Data Structure Callbacks ------------ */
1937
/*
 * Return -1/1 from the enclosing comparison function when A and B
 * differ; fall through otherwise.  Arguments are parenthesized and the
 * body is wrapped in do/while(0) so the macro behaves like a single
 * statement in any context (e.g. an unbraced if/else).  Both arguments
 * are evaluated more than once, so they must be side-effect free.
 */
#define CMP_RETURN(A, B)                                                       \
	do {                                                                   \
		if ((A) != (B))                                                \
			return ((A) < (B)) ? -1 : 1;                           \
	} while (0)
1941
/* Hash an LSP northbound key (color, preference, endpoint) for the
 * typesafe hash maps below.  The endpoint contribution depends on its
 * address family; the switch is exhaustive over ipa_type so -Wswitch
 * catches new families. */
static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey)
{
	uint32_t hash;
	hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a);
	switch (nbkey->endpoint.ipa_type) {
	case IPADDR_V4:
		return jhash(&nbkey->endpoint.ipaddr_v4,
			     sizeof(nbkey->endpoint.ipaddr_v4), hash);
	case IPADDR_V6:
		return jhash(&nbkey->endpoint.ipaddr_v6,
			     sizeof(nbkey->endpoint.ipaddr_v6), hash);
	case IPADDR_NONE:
		return hash;
	}

	/* All enum values return above; reaching here is a logic error. */
	assert(!"Reached end of function where we were not expecting to");
}
1959
1960 static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b)
1961 {
1962 CMP_RETURN(a->color, b->color);
1963 int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint);
1964 if (cmp != 0)
1965 return cmp;
1966 CMP_RETURN(a->preference, b->preference);
1967 return 0;
1968 }
1969
/* Comparison callback for the nbkey -> PLSP-ID map. */
int plspid_map_cmp(const struct plspid_map_data *a,
		   const struct plspid_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1975
/* Hash callback for the nbkey -> PLSP-ID map. */
uint32_t plspid_map_hash(const struct plspid_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}
1980
/* Comparison callback for the PLSP-ID -> nbkey map (keyed by plspid). */
int nbkey_map_cmp(const struct nbkey_map_data *a,
		  const struct nbkey_map_data *b)
{
	CMP_RETURN(a->plspid, b->plspid);
	return 0;
}
1987
/* Hash callback for the PLSP-ID -> nbkey map: the plspid itself. */
uint32_t nbkey_map_hash(const struct nbkey_map_data *e)
{
	return e->plspid;
}
1992
/* Comparison callback for the nbkey -> request-id map. */
int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1997
/* Hash callback for the nbkey -> request-id map. */
uint32_t req_map_hash(const struct req_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}