/* pathd/path_pcep_pcc.c — PCEP Path Computation Client (PCC) session
 * management for FRR pathd. */
1/*
2 * Copyright (C) 2020 NetDEF, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; see the file COPYING; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19/* TODOS AND KNOWN ISSUES:
20 - Delete mapping from NB keys to PLSPID when an LSP is deleted either
21 by the PCE or by NB.
22 - Revert the hacks to work around ODL requiring a report with
23 operational status DOWN when an LSP is activated.
24 - Enforce only the PCE a policy has been delegated to can update it.
25 - If the router-id is used because the PCC IP is not specified
26 (either IPv4 or IPv6), the connection to the PCE is not reset
27 when the router-id changes.
28*/
29
30#include <zebra.h>
31
32#include "log.h"
33#include "command.h"
34#include "libfrr.h"
35#include "printfrr.h"
09781197 36#include "lib/version.h"
efba0985
SM
37#include "northbound.h"
38#include "frr_pthread.h"
39#include "jhash.h"
40
41#include "pathd/pathd.h"
42#include "pathd/path_zebra.h"
43#include "pathd/path_errors.h"
efba0985
SM
44#include "pathd/path_pcep.h"
45#include "pathd/path_pcep_controller.h"
46#include "pathd/path_pcep_lib.h"
47#include "pathd/path_pcep_config.h"
48#include "pathd/path_pcep_debug.h"
49
50
51/* The number of time we will skip connecting if we are missing the PCC
52 * address for an inet family different from the selected transport one*/
53#define OTHER_FAMILY_MAX_RETRIES 4
54#define MAX_ERROR_MSG_SIZE 256
55#define MAX_COMPREQ_TRIES 3
56
74971473 57pthread_mutex_t g_pcc_info_mtx = PTHREAD_MUTEX_INITIALIZER;
efba0985
SM
58
59/* PCEP Event Handler */
60static void handle_pcep_open(struct ctrl_state *ctrl_state,
61 struct pcc_state *pcc_state,
62 struct pcep_message *msg);
63static void handle_pcep_message(struct ctrl_state *ctrl_state,
64 struct pcc_state *pcc_state,
65 struct pcep_message *msg);
74971473
JG
66static void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
67 struct pcc_state *pcc_state,
68 struct pcep_message *msg);
efba0985
SM
69static void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
70 struct pcc_state *pcc_state,
71 struct pcep_message *msg);
74971473 72static void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
efba0985 73 struct pcc_state *pcc_state,
74971473 74 struct path *path, void *payload);
efba0985
SM
75static void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
76 struct pcc_state *pcc_state,
77 struct pcep_message *msg);
78
79/* Internal Functions */
80static const char *ipaddr_type_name(struct ipaddr *addr);
81static bool filter_path(struct pcc_state *pcc_state, struct path *path);
82static void select_pcc_addresses(struct pcc_state *pcc_state);
83static void select_transport_address(struct pcc_state *pcc_state);
84static void update_tag(struct pcc_state *pcc_state);
85static void update_originator(struct pcc_state *pcc_state);
86static void schedule_reconnect(struct ctrl_state *ctrl_state,
87 struct pcc_state *pcc_state);
88static void schedule_session_timeout(struct ctrl_state *ctrl_state,
89 struct pcc_state *pcc_state);
90static void cancel_session_timeout(struct ctrl_state *ctrl_state,
91 struct pcc_state *pcc_state);
92static void send_pcep_message(struct pcc_state *pcc_state,
93 struct pcep_message *msg);
94static void send_pcep_error(struct pcc_state *pcc_state,
95 enum pcep_error_type error_type,
56634922
JG
96 enum pcep_error_value error_value,
97 struct path *trigger_path);
efba0985
SM
98static void send_report(struct pcc_state *pcc_state, struct path *path);
99static void send_comp_request(struct ctrl_state *ctrl_state,
100 struct pcc_state *pcc_state,
101 struct req_entry *req);
102static void cancel_comp_requests(struct ctrl_state *ctrl_state,
103 struct pcc_state *pcc_state);
104static void cancel_comp_request(struct ctrl_state *ctrl_state,
105 struct pcc_state *pcc_state,
106 struct req_entry *req);
107static void specialize_outgoing_path(struct pcc_state *pcc_state,
108 struct path *path);
109static void specialize_incoming_path(struct pcc_state *pcc_state,
110 struct path *path);
111static bool validate_incoming_path(struct pcc_state *pcc_state,
112 struct path *path, char *errbuff,
113 size_t buffsize);
114static void set_pcc_address(struct pcc_state *pcc_state,
115 struct lsp_nb_key *nbkey, struct ipaddr *addr);
116static int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs);
117static int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs);
118static int get_previous_best_pce(struct pcc_state **pcc);
119static int get_best_pce(struct pcc_state **pcc);
120static int get_pce_count_connected(struct pcc_state **pcc);
121static bool update_best_pce(struct pcc_state **pcc, int best);
122
123/* Data Structure Helper Functions */
124static void lookup_plspid(struct pcc_state *pcc_state, struct path *path);
125static void lookup_nbkey(struct pcc_state *pcc_state, struct path *path);
126static void free_req_entry(struct req_entry *req);
127static struct req_entry *push_new_req(struct pcc_state *pcc_state,
128 struct path *path);
129static void repush_req(struct pcc_state *pcc_state, struct req_entry *req);
130static struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid);
0a1bf4be
JG
131static struct req_entry *pop_req_no_reqid(struct pcc_state *pcc_state,
132 uint32_t reqid);
efba0985
SM
133static bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path);
134static void remove_reqid_mapping(struct pcc_state *pcc_state,
135 struct path *path);
136static uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path);
137static bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path);
138
139/* Data Structure Callbacks */
140static int plspid_map_cmp(const struct plspid_map_data *a,
141 const struct plspid_map_data *b);
142static uint32_t plspid_map_hash(const struct plspid_map_data *e);
143static int nbkey_map_cmp(const struct nbkey_map_data *a,
144 const struct nbkey_map_data *b);
145static uint32_t nbkey_map_hash(const struct nbkey_map_data *e);
146static int req_map_cmp(const struct req_map_data *a,
147 const struct req_map_data *b);
148static uint32_t req_map_hash(const struct req_map_data *e);
149
150/* Data Structure Declarations */
151DECLARE_HASH(plspid_map, struct plspid_map_data, mi, plspid_map_cmp,
960b9a53 152 plspid_map_hash);
efba0985 153DECLARE_HASH(nbkey_map, struct nbkey_map_data, mi, nbkey_map_cmp,
960b9a53
DL
154 nbkey_map_hash);
155DECLARE_HASH(req_map, struct req_map_data, mi, req_map_cmp, req_map_hash);
efba0985
SM
156
157static inline int req_entry_compare(const struct req_entry *a,
158 const struct req_entry *b)
159{
160 return a->path->req_id - b->path->req_id;
161}
162RB_GENERATE(req_entry_head, req_entry, entry, req_entry_compare)
163
164
165/* ------------ API Functions ------------ */
166
167struct pcc_state *pcep_pcc_initialize(struct ctrl_state *ctrl_state, int index)
168{
169 struct pcc_state *pcc_state = XCALLOC(MTYPE_PCEP, sizeof(*pcc_state));
170
171 pcc_state->id = index;
172 pcc_state->status = PCEP_PCC_DISCONNECTED;
173 pcc_state->next_reqid = 1;
174 pcc_state->next_plspid = 1;
175
176 RB_INIT(req_entry_head, &pcc_state->requests);
177
178 update_tag(pcc_state);
179 update_originator(pcc_state);
180
181 PCEP_DEBUG("%s PCC initialized", pcc_state->tag);
182
183 return pcc_state;
184}
185
/* Tear down and free a PCC state: disconnect the session, release the
 * option structures and originator string, cancel any pending timers, and
 * finally free the state itself.  Safe to call on a disconnected PCC. */
void pcep_pcc_finalize(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state)
{
	PCEP_DEBUG("%s PCC finalizing...", pcc_state->tag);

	/* Close the PCEP session (no-op when already disconnected). */
	pcep_pcc_disable(ctrl_state, pcc_state);

	if (pcc_state->pcc_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
		pcc_state->pcc_opts = NULL;
	}
	if (pcc_state->pce_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pce_opts);
		pcc_state->pce_opts = NULL;
	}
	if (pcc_state->originator != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->originator);
		pcc_state->originator = NULL;
	}

	/* Cancel timers so no callback fires on freed state. */
	if (pcc_state->t_reconnect != NULL) {
		thread_cancel(&pcc_state->t_reconnect);
		pcc_state->t_reconnect = NULL;
	}

	if (pcc_state->t_update_best != NULL) {
		thread_cancel(&pcc_state->t_update_best);
		pcc_state->t_update_best = NULL;
	}

	if (pcc_state->t_session_timeout != NULL) {
		thread_cancel(&pcc_state->t_session_timeout);
		pcc_state->t_session_timeout = NULL;
	}

	XFREE(MTYPE_PCEP, pcc_state);
}
223
224int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs)
225{
226 int retval;
227
228 if (lhs == NULL) {
229 return 1;
230 }
231
232 if (rhs == NULL) {
233 return -1;
234 }
235
236 retval = lhs->port - rhs->port;
237 if (retval != 0) {
238 return retval;
239 }
240
241 retval = lhs->msd - rhs->msd;
242 if (retval != 0) {
243 return retval;
244 }
245
246 if (IS_IPADDR_V4(&lhs->addr)) {
247 retval = memcmp(&lhs->addr.ipaddr_v4, &rhs->addr.ipaddr_v4,
248 sizeof(lhs->addr.ipaddr_v4));
249 if (retval != 0) {
250 return retval;
251 }
252 } else if (IS_IPADDR_V6(&lhs->addr)) {
253 retval = memcmp(&lhs->addr.ipaddr_v6, &rhs->addr.ipaddr_v6,
254 sizeof(lhs->addr.ipaddr_v6));
255 if (retval != 0) {
256 return retval;
257 }
258 }
259
260 return 0;
261}
262
263int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs)
264{
265 if (lhs == NULL) {
266 return 1;
267 }
268
269 if (rhs == NULL) {
270 return -1;
271 }
272
273 int retval = lhs->port - rhs->port;
274 if (retval != 0) {
275 return retval;
276 }
277
278 retval = strcmp(lhs->pce_name, rhs->pce_name);
279 if (retval != 0) {
280 return retval;
281 }
282
283 retval = lhs->precedence - rhs->precedence;
284 if (retval != 0) {
285 return retval;
286 }
287
288 retval = memcmp(&lhs->addr, &rhs->addr, sizeof(lhs->addr));
289 if (retval != 0) {
290 return retval;
291 }
292
293 return 0;
294}
295
/* Apply new PCC/PCE options to an existing PCC state, restarting the
 * session when anything changed.  Takes ownership of pcc_opts and pce_opts:
 * they are either stored in pcc_state or freed before returning.
 * Returns 0 on success or the non-zero result of disable/enable. */
int pcep_pcc_update(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state,
		    struct pcc_opts *pcc_opts, struct pce_opts *pce_opts)
{
	int ret = 0;

	// If the options did not change, then there is nothing to do
	if ((compare_pce_opts(pce_opts, pcc_state->pce_opts) == 0)
	    && (compare_pcc_opts(pcc_opts, pcc_state->pcc_opts) == 0)) {
		return ret;
	}

	/* Disconnect first; on failure the new options are dropped and the
	 * old ones remain in effect. */
	if ((ret = pcep_pcc_disable(ctrl_state, pcc_state))) {
		XFREE(MTYPE_PCEP, pcc_opts);
		XFREE(MTYPE_PCEP, pce_opts);
		return ret;
	}

	if (pcc_state->pcc_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
	}
	if (pcc_state->pce_opts != NULL) {
		XFREE(MTYPE_PCEP, pcc_state->pce_opts);
	}

	pcc_state->pcc_opts = pcc_opts;
	pcc_state->pce_opts = pce_opts;

	/* Mirror the configured address into the per-family fields used to
	 * source the session and filter candidate paths. */
	if (IS_IPADDR_V4(&pcc_opts->addr)) {
		pcc_state->pcc_addr_v4 = pcc_opts->addr.ipaddr_v4;
		SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
	} else {
		UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
	}

	if (IS_IPADDR_V6(&pcc_opts->addr)) {
		memcpy(&pcc_state->pcc_addr_v6, &pcc_opts->addr.ipaddr_v6,
		       sizeof(struct in6_addr));
		SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
	} else {
		UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
	}

	update_tag(pcc_state);
	update_originator(pcc_state);

	return pcep_pcc_enable(ctrl_state, pcc_state);
}
343
344void pcep_pcc_reconnect(struct ctrl_state *ctrl_state,
345 struct pcc_state *pcc_state)
346{
347 if (pcc_state->status == PCEP_PCC_DISCONNECTED)
348 pcep_pcc_enable(ctrl_state, pcc_state);
349}
350
/* Start connecting this PCC to its configured PCE.  May defer (via
 * schedule_reconnect()) while waiting for source addresses of both
 * families to become available, up to OTHER_FAMILY_MAX_RETRIES attempts.
 * Always returns 0; failures are handled by rescheduling. */
int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
{
	assert(pcc_state->status == PCEP_PCC_DISCONNECTED);
	assert(pcc_state->sess == NULL);

	if (pcc_state->t_reconnect != NULL) {
		thread_cancel(&pcc_state->t_reconnect);
		pcc_state->t_reconnect = NULL;
	}

	/* Pick the source address used for the TCP session. */
	select_transport_address(pcc_state);

	/* Even though we are connecting using IPv6. we want to have an IPv4
	 * address so we can handle candidate path with IPv4 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %pIA:%d due to missing PCC IPv4 address",
				  &pcc_state->pce_opts->addr,
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv4 PCC address, IPv4 candidate paths will be ignored");
		}
	}

	/* Even though we are connecting using IPv4. we want to have an IPv6
	 * address so we can handle candidate path with IPv6 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %pIA:%d due to missing PCC IPv6 address",
				  &pcc_state->pce_opts->addr,
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv6 PCC address, IPv6 candidate paths will be ignored");
		}
	}

	/* Even if the maximum retries to try to have all the familly addresses
	 * have been spent, we still need the one for the transport familly */
	if (pcc_state->pcc_addr_tr.ipa_type == IPADDR_NONE) {
		flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
			  "skipping connection to PCE %pIA:%d due to missing PCC address",
			  &pcc_state->pce_opts->addr,
			  pcc_state->pce_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	PCEP_DEBUG("%s PCC connecting", pcc_state->tag);
	pcc_state->sess = pcep_lib_connect(
		&pcc_state->pcc_addr_tr, pcc_state->pcc_opts->port,
		&pcc_state->pce_opts->addr, pcc_state->pce_opts->port,
		pcc_state->pcc_opts->msd, &pcc_state->pce_opts->config_opts);

	if (pcc_state->sess == NULL) {
		flog_warn(EC_PATH_PCEP_LIB_CONNECT,
			  "failed to connect to PCE %pIA:%d from %pIA:%d",
			  &pcc_state->pce_opts->addr,
			  pcc_state->pce_opts->port,
			  &pcc_state->pcc_addr_tr,
			  pcc_state->pcc_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	// In case some best pce alternative were waiting to activate
	if (pcc_state->t_update_best != NULL) {
		thread_cancel(&pcc_state->t_update_best);
		pcc_state->t_update_best = NULL;
	}

	/* The session is confirmed later by the PCC_CONNECTED_TO_PCE event. */
	pcc_state->status = PCEP_PCC_CONNECTING;

	return 0;
}
433
434int pcep_pcc_disable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
435{
436 switch (pcc_state->status) {
437 case PCEP_PCC_DISCONNECTED:
438 return 0;
439 case PCEP_PCC_CONNECTING:
440 case PCEP_PCC_SYNCHRONIZING:
441 case PCEP_PCC_OPERATING:
442 PCEP_DEBUG("%s Disconnecting PCC...", pcc_state->tag);
443 cancel_comp_requests(ctrl_state, pcc_state);
444 pcep_lib_disconnect(pcc_state->sess);
445 /* No need to remove if any PCEs is connected */
446 if (get_pce_count_connected(ctrl_state->pcc) == 0) {
447 pcep_thread_remove_candidate_path_segments(ctrl_state,
448 pcc_state);
449 }
450 pcc_state->sess = NULL;
451 pcc_state->status = PCEP_PCC_DISCONNECTED;
452 return 0;
453 default:
454 return 1;
455 }
456}
457
/* Synchronize one candidate path with the PCE.  During synchronization the
 * path is reported as synching; once operating it is reported normally.
 * Dynamic paths without an LSP are queued so computation requests can be
 * sent after synchronization completes (see pcep_pcc_sync_done()). */
void pcep_pcc_sync_path(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state, struct path *path)
{
	if (pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		path->is_synching = true;
	} else if (pcc_state->status == PCEP_PCC_OPERATING)
		path->is_synching = false;
	else
		return;

	path->go_active = true;

	/* Accumulate the dynamic paths without any LSP so computation
	 * requests can be performed after synchronization */
	if ((path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	    && (path->first_hop == NULL)
	    && !has_pending_req_for(pcc_state, path)) {
		PCEP_DEBUG("%s Scheduling computation request for path %s",
			   pcc_state->tag, path->name);
		push_new_req(pcc_state, path);
		return;
	}

	/* Synchronize the path if the PCE supports LSP updates and the
	 * endpoint address familly is supported */
	if (pcc_state->caps.is_stateful) {
		if (filter_path(pcc_state, path)) {
			PCEP_DEBUG("%s Synchronizing path %s", pcc_state->tag,
				   path->name);
			send_report(pcc_state, path);
		} else {
			PCEP_DEBUG(
				"%s Skipping %s candidate path %s synchronization",
				pcc_state->tag,
				ipaddr_type_name(&path->nbkey.endpoint),
				path->name);
		}
	}
}
497
/* Complete the LSP state synchronization phase: send the end-of-sync
 * marker (an empty report with plsp_id 0 — presumably per RFC 8231 state
 * sync rules, confirm against pcep lib usage), switch to OPERATING, and
 * fire the computation requests accumulated while synchronizing. */
void pcep_pcc_sync_done(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_SYNCHRONIZING
	    && pcc_state->status != PCEP_PCC_OPERATING)
		return;

	if (pcc_state->caps.is_stateful
	    && pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		/* plsp_id 0 with is_synching false marks the end of sync. */
		struct path *path = pcep_new_path();
		*path = (struct path){.name = NULL,
				      .srp_id = 0,
				      .plsp_id = 0,
				      .status = PCEP_LSP_OPERATIONAL_DOWN,
				      .do_remove = false,
				      .go_active = false,
				      .was_created = false,
				      .was_removed = false,
				      .is_synching = false,
				      .is_delegated = false,
				      .first_hop = NULL,
				      .first_metric = NULL};
		send_report(pcc_state, path);
		pcep_free_path(path);
	}

	pcc_state->synchronized = true;
	pcc_state->status = PCEP_PCC_OPERATING;

	PCEP_DEBUG("%s Synchronization done", pcc_state->tag);

	/* Start the computation request accumulated during synchronization */
	RB_FOREACH (req, req_entry_head, &pcc_state->requests) {
		send_comp_request(ctrl_state, pcc_state, req);
	}
}
536
/* Report a candidate path's state to the PCE.  Takes ownership of `path`
 * and frees it before returning.  `is_stable` indicates no further update
 * is expected for this path, enabling the second (real-status) report. */
void pcep_pcc_send_report(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state, struct path *path,
			  bool is_stable)
{
	/* Reports only make sense on an operating, stateful session. */
	if ((pcc_state->status != PCEP_PCC_OPERATING)
	    || (!pcc_state->caps.is_stateful)) {
		pcep_free_path(path);
		return;
	}

	PCEP_DEBUG("(%s)%s Send report for candidate path %s", __func__,
		   pcc_state->tag, path->name);

	/* ODL and Cisco requires the first reported
	 * LSP to have a DOWN status, the later status changes
	 * will be comunicated through hook calls.
	 */
	enum pcep_lsp_operational_status real_status = path->status;
	path->status = PCEP_LSP_OPERATIONAL_DOWN;
	send_report(pcc_state, path);

	/* If no update is expected and the real status wasn't down, we need to
	 * send a second report with the real status */
	if (is_stable && (real_status != PCEP_LSP_OPERATIONAL_DOWN)) {
		PCEP_DEBUG("(%s)%s Send report for candidate path (!DOWN) %s",
			   __func__, pcc_state->tag, path->name);
		path->status = real_status;
		send_report(pcc_state, path);
	}

	pcep_free_path(path);
}
569
74971473 570
56634922
JG
571void pcep_pcc_send_error(struct ctrl_state *ctrl_state,
572 struct pcc_state *pcc_state, struct pcep_error *error,
573 bool sub_type)
574{
575
576 PCEP_DEBUG("(%s) Send error after PcInitiated ", __func__);
577
578
579 send_pcep_error(pcc_state, error->error_type, error->error_value,
580 error->path);
581 pcep_free_path(error->path);
582 XFREE(MTYPE_PCEP, error);
583}
efba0985
SM
584/* ------------ Timeout handler ------------ */
585
586void pcep_pcc_timeout_handler(struct ctrl_state *ctrl_state,
587 struct pcc_state *pcc_state,
74971473 588 enum pcep_ctrl_timeout_type type, void *param)
efba0985
SM
589{
590 struct req_entry *req;
591
592 switch (type) {
593 case TO_COMPUTATION_REQUEST:
594 assert(param != NULL);
595 req = (struct req_entry *)param;
596 pop_req(pcc_state, req->path->req_id);
597 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT,
598 "Computation request %d timeout", req->path->req_id);
599 cancel_comp_request(ctrl_state, pcc_state, req);
600 if (req->retry_count++ < MAX_COMPREQ_TRIES) {
601 repush_req(pcc_state, req);
602 send_comp_request(ctrl_state, pcc_state, req);
603 return;
604 }
605 if (pcc_state->caps.is_stateful) {
606 struct path *path;
607 PCEP_DEBUG(
608 "%s Delegating undefined dynamic path %s to PCE %s",
609 pcc_state->tag, req->path->name,
610 pcc_state->originator);
611 path = pcep_copy_path(req->path);
612 path->is_delegated = true;
613 send_report(pcc_state, path);
614 free_req_entry(req);
615 }
616 break;
617 default:
618 break;
619 }
620}
621
622
623/* ------------ Pathd event handler ------------ */
624
/* React to pathd candidate-path events (create/update/remove) by either
 * requesting a computation (dynamic path without an LSP) or reporting the
 * change to a stateful PCE.  Events are ignored unless the PCC is
 * operating and the path's endpoint family matches the PCC's. */
void pcep_pcc_pathd_event_handler(struct ctrl_state *ctrl_state,
				  struct pcc_state *pcc_state,
				  enum pcep_pathd_event_type type,
				  struct path *path)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_OPERATING)
		return;

	/* Skipping candidate path with endpoint that do not match the
	 * configured or deduced PCC IP version */
	if (!filter_path(pcc_state, path)) {
		PCEP_DEBUG("%s Skipping %s candidate path %s event",
			   pcc_state->tag,
			   ipaddr_type_name(&path->nbkey.endpoint), path->name);
		return;
	}

	switch (type) {
	case PCEP_PATH_CREATED:
		if (has_pending_req_for(pcc_state, path)) {
			PCEP_DEBUG(
				"%s Candidate path %s created, computation request already sent",
				pcc_state->tag, path->name);
			return;
		}
		PCEP_DEBUG("%s Candidate path %s created", pcc_state->tag,
			   path->name);
		/* Dynamic path with no LSP yet: ask the PCE to compute it;
		 * otherwise just report it when the PCE is stateful. */
		if ((path->first_hop == NULL)
		    && (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)) {
			req = push_new_req(pcc_state, path);
			send_comp_request(ctrl_state, pcc_state, req);
		} else if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_UPDATED:
		PCEP_DEBUG("%s Candidate path %s updated", pcc_state->tag,
			   path->name);
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_REMOVED:
		PCEP_DEBUG("%s Candidate path %s removed", pcc_state->tag,
			   path->name);
		path->was_removed = true;
		/* Removed as response to a PcInitiated 'R'emove*/
		/* RFC 8281 #5.4 LSP Deletion*/
		path->do_remove = path->was_removed;
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	default:
		flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR,
			  "Unexpected pathd event received by pcc %s: %u",
			  pcc_state->tag, type);
		return;
	}
}
684
685
686/* ------------ PCEP event handler ------------ */
687
/* Main pceplib event dispatcher for a single PCC session: handles the
 * connection life cycle (connected, invalid OPEN negotiation, session
 * loss) and routes incoming PCEP messages to the message handlers. */
void pcep_pcc_pcep_event_handler(struct ctrl_state *ctrl_state,
				 struct pcc_state *pcc_state, pcep_event *event)
{
	PCEP_DEBUG("%s Received PCEP event: %s", pcc_state->tag,
		   pcep_event_type_name(event->event_type));
	switch (event->event_type) {
	case PCC_CONNECTED_TO_PCE:
		assert(PCEP_PCC_CONNECTING == pcc_state->status);
		PCEP_DEBUG("%s Connection established", pcc_state->tag);
		pcc_state->status = PCEP_PCC_SYNCHRONIZING;
		pcc_state->retry_count = 0;
		pcc_state->synchronized = false;
		PCEP_DEBUG("%s Starting PCE synchronization", pcc_state->tag);
		cancel_session_timeout(ctrl_state, pcc_state);
		pcep_pcc_calculate_best_pce(ctrl_state->pcc);
		pcep_thread_start_sync(ctrl_state, pcc_state->id);
		break;
	case PCC_SENT_INVALID_OPEN:
		/* Adopt the timer values the PCE negotiated so the next
		 * OPEN is acceptable. */
		PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG(
			"%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
			pcc_state->tag,
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds,
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds);
		pcc_state->pce_opts->config_opts.keep_alive_seconds =
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds;
		pcc_state->pce_opts->config_opts.dead_timer_seconds =
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds;
		break;

	case PCC_RCVD_INVALID_OPEN:
		PCEP_DEBUG("%s Received invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		break;
	case PCE_DEAD_TIMER_EXPIRED:
	case PCE_CLOSED_SOCKET:
	case PCE_SENT_PCEP_CLOSE:
	case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED:
	case PCC_PCEP_SESSION_CLOSED:
	case PCC_RCVD_MAX_INVALID_MSGS:
	case PCC_RCVD_MAX_UNKOWN_MSGS:
		/* Session lost: tear down and schedule both a reconnect and
		 * the session timeout. */
		pcep_pcc_disable(ctrl_state, pcc_state);
		schedule_reconnect(ctrl_state, pcc_state);
		schedule_session_timeout(ctrl_state, pcc_state);
		break;
	case MESSAGE_RECEIVED:
		PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		/* While connecting, only the OPEN message is processed. */
		if (pcc_state->status == PCEP_PCC_CONNECTING) {
			if (event->message->msg_header->type == PCEP_TYPE_OPEN)
				handle_pcep_open(ctrl_state, pcc_state,
						 event->message);
			break;
		}
		assert(pcc_state->status == PCEP_PCC_SYNCHRONIZING
		       || pcc_state->status == PCEP_PCC_OPERATING);
		handle_pcep_message(ctrl_state, pcc_state, event->message);
		break;
	default:
		flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT,
			  "Unexpected event from pceplib: %s",
			  format_pcep_event(event));
		break;
	}
}
758
759
760/*------------------ Multi-PCE --------------------- */
761
762/* Internal util function, returns true if sync is necessary, false otherwise */
763bool update_best_pce(struct pcc_state **pcc, int best)
764{
765 PCEP_DEBUG(" recalculating pce precedence ");
766 if (best) {
767 struct pcc_state *best_pcc_state =
768 pcep_pcc_get_pcc_by_id(pcc, best);
769 if (best_pcc_state->previous_best != best_pcc_state->is_best) {
770 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
771 best_pcc_state->tag, best_pcc_state->id,
772 best_pcc_state->previous_best);
773 return true;
774 } else {
775 PCEP_DEBUG(
776 " %s No Resynch best (%i) previous best (%i)",
777 best_pcc_state->tag, best_pcc_state->id,
778 best_pcc_state->previous_best);
779 }
780 } else {
781 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
782 }
783
784 return false;
785}
786
787int get_best_pce(struct pcc_state **pcc)
788{
789 for (int i = 0; i < MAX_PCC; i++) {
790 if (pcc[i] && pcc[i]->pce_opts) {
791 if (pcc[i]->is_best == true) {
792 return pcc[i]->id;
793 }
794 }
795 }
796 return 0;
797}
798
799int get_pce_count_connected(struct pcc_state **pcc)
800{
801 int count = 0;
802 for (int i = 0; i < MAX_PCC; i++) {
803 if (pcc[i] && pcc[i]->pce_opts
804 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
805 count++;
806 }
807 }
808 return count;
809}
810
811int get_previous_best_pce(struct pcc_state **pcc)
812{
813 int previous_best_pce = -1;
814
815 for (int i = 0; i < MAX_PCC; i++) {
816 if (pcc[i] && pcc[i]->pce_opts && pcc[i]->previous_best == true
817 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
818 previous_best_pce = i;
819 break;
820 }
821 }
822 return previous_best_pce != -1 ? pcc[previous_best_pce]->id : 0;
823}
824
825/* Called by path_pcep_controller EV_REMOVE_PCC
826 * Event handler when a PCC is removed. */
827int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state *ctrl_state,
828 struct pcc_state **pcc)
829{
830 int new_best_pcc_id = -1;
831 new_best_pcc_id = pcep_pcc_calculate_best_pce(pcc);
832 if (new_best_pcc_id) {
833 if (update_best_pce(ctrl_state->pcc, new_best_pcc_id) == true) {
834 pcep_thread_start_sync(ctrl_state, new_best_pcc_id);
835 }
836 }
837
838 return 0;
839}
840
841/* Called by path_pcep_controller EV_SYNC_PATH
842 * Event handler when a path is sync'd. */
843int pcep_pcc_multi_pce_sync_path(struct ctrl_state *ctrl_state, int pcc_id,
844 struct pcc_state **pcc)
845{
846 int previous_best_pcc_id = -1;
847
848 if (pcc_id == get_best_pce(pcc)) {
849 previous_best_pcc_id = get_previous_best_pce(pcc);
850 if (previous_best_pcc_id != 0) {
851 /* while adding new pce, path has to resync to the
852 * previous best. pcep_thread_start_sync() will be
853 * called by the calling function */
854 if (update_best_pce(ctrl_state->pcc,
855 previous_best_pcc_id)
856 == true) {
857 cancel_comp_requests(
858 ctrl_state,
859 pcep_pcc_get_pcc_by_id(
860 pcc, previous_best_pcc_id));
861 pcep_thread_start_sync(ctrl_state,
862 previous_best_pcc_id);
863 }
864 }
865 }
866
867 return 0;
868}
869
870/* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
871 * timer expires */
872int pcep_pcc_timer_update_best_pce(struct ctrl_state *ctrl_state, int pcc_id)
873{
874 int ret = 0;
875 /* resync whatever was the new best */
876 int prev_best = get_best_pce(ctrl_state->pcc);
877 int best_id = pcep_pcc_calculate_best_pce(ctrl_state->pcc);
878 if (best_id && prev_best != best_id) { // Avoid Multiple call
879 struct pcc_state *pcc_state =
880 pcep_pcc_get_pcc_by_id(ctrl_state->pcc, best_id);
881 if (update_best_pce(ctrl_state->pcc, pcc_state->id) == true) {
882 pcep_thread_start_sync(ctrl_state, pcc_state->id);
883 }
884 }
885
886 return ret;
887}
888
/* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
 * Returns the best PCE id (0 when no PCE is configured).
 *
 * Election rules, in order:
 *   1. lowest precedence among connected PCEs wins;
 *   2. on a precedence tie, the higher address wins (ipaddr_cmp > 0);
 *   3. a PCE flagged previous_best is skipped when taking the precedence;
 *   4. with no connected winner, fall back to any connected PCE, then to
 *      the first configured (disconnected) one.
 * The is_best/previous_best flags are updated under g_pcc_info_mtx. */
int pcep_pcc_calculate_best_pce(struct pcc_state **pcc)
{
	int best_precedence = 255; // DEFAULT_PCE_PRECEDENCE;
	int best_pce = -1;
	int one_connected_pce = -1;
	int previous_best_pce = -1;
	int step_0_best = -1;
	int step_0_previous = -1;
	int pcc_count = 0;

	// Get state
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts) {
			zlog_debug(
				"multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
				i, pcc[i]->is_best, pcc[i]->previous_best);
			pcc_count++;

			if (pcc[i]->is_best == true) {
				step_0_best = i;
			}
			if (pcc[i]->previous_best == true) {
				step_0_previous = i;
			}
		}
	}

	/* No PCE configured at all. */
	if (!pcc_count) {
		return 0;
	}

	// Calculate best
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts
		    && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
			one_connected_pce = i; // In case none better
			if (pcc[i]->pce_opts->precedence <= best_precedence) {
				if (best_pce != -1
				    && pcc[best_pce]->pce_opts->precedence
					       == pcc[i]->pce_opts
							  ->precedence) {
					if (ipaddr_cmp(
						    &pcc[i]->pce_opts->addr,
						    &pcc[best_pce]
							     ->pce_opts->addr)
					    > 0)
						// collide of precedences so
						// compare ip
						best_pce = i;
				} else {
					if (!pcc[i]->previous_best) {
						best_precedence =
							pcc[i]->pce_opts
								->precedence;
						best_pce = i;
					}
				}
			}
		}
	}

	zlog_debug(
		"multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
		step_0_best, step_0_previous, one_connected_pce, best_pce);

	// Changed of state so ...
	if (step_0_best != best_pce) {
		/* Flag updates are shared with readers on other threads. */
		pthread_mutex_lock(&g_pcc_info_mtx);
		// Calculate previous
		previous_best_pce = step_0_best;
		// Clean state
		if (step_0_best != -1) {
			pcc[step_0_best]->is_best = false;
		}
		if (step_0_previous != -1) {
			pcc[step_0_previous]->previous_best = false;
		}

		// Set previous
		if (previous_best_pce != -1
		    && pcc[previous_best_pce]->status
			       == PCEP_PCC_DISCONNECTED) {
			pcc[previous_best_pce]->previous_best = true;
			zlog_debug("multi-pce: previous best pce (%i) ",
				   previous_best_pce + 1);
		}


		// Set best
		if (best_pce != -1) {
			pcc[best_pce]->is_best = true;
			zlog_debug("multi-pce: best pce (%i) ", best_pce + 1);
		} else {
			if (one_connected_pce != -1) {
				best_pce = one_connected_pce;
				pcc[one_connected_pce]->is_best = true;
				zlog_debug(
					"multi-pce: one connected best pce (default) (%i) ",
					one_connected_pce + 1);
			} else {
				for (int i = 0; i < MAX_PCC; i++) {
					if (pcc[i] && pcc[i]->pce_opts) {
						best_pce = i;
						pcc[i]->is_best = true;
						zlog_debug(
							"(disconnected) best pce (default) (%i) ",
							i + 1);
						break;
					}
				}
			}
		}
		pthread_mutex_unlock(&g_pcc_info_mtx);
	}

	return ((best_pce == -1) ? 0 : pcc[best_pce]->id);
}
1008
1009int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state **pcc,
1010 struct pce_opts *pce_opts)
1011{
1012 if (pcc == NULL) {
1013 return 0;
1014 }
1015
1016 for (int idx = 0; idx < MAX_PCC; idx++) {
1017 if (pcc[idx]) {
1018 if ((ipaddr_cmp((const struct ipaddr *)&pcc[idx]
1019 ->pce_opts->addr,
1020 (const struct ipaddr *)&pce_opts->addr)
1021 == 0)
1022 && pcc[idx]->pce_opts->port == pce_opts->port) {
1023 zlog_debug("found pcc_id (%d) idx (%d)",
1024 pcc[idx]->id, idx);
1025 return pcc[idx]->id;
1026 }
1027 }
1028 }
1029 return 0;
1030}
1031
1032int pcep_pcc_get_pcc_id_by_idx(struct pcc_state **pcc, int idx)
1033{
1034 if (pcc == NULL || idx < 0) {
1035 return 0;
1036 }
1037
1038 return pcc[idx] ? pcc[idx]->id : 0;
1039}
1040
1041struct pcc_state *pcep_pcc_get_pcc_by_id(struct pcc_state **pcc, int id)
1042{
1043 if (pcc == NULL || id < 0) {
1044 return NULL;
1045 }
1046
1047 for (int i = 0; i < MAX_PCC; i++) {
1048 if (pcc[i]) {
1049 if (pcc[i]->id == id) {
1050 zlog_debug("found id (%d) pcc_idx (%d)",
1051 pcc[i]->id, i);
1052 return pcc[i];
1053 }
1054 }
1055 }
1056
1057 return NULL;
1058}
1059
1060struct pcc_state *pcep_pcc_get_pcc_by_name(struct pcc_state **pcc,
1061 const char *pce_name)
1062{
1063 if (pcc == NULL || pce_name == NULL) {
1064 return NULL;
1065 }
1066
1067 for (int i = 0; i < MAX_PCC; i++) {
1068 if (pcc[i] == NULL) {
1069 continue;
1070 }
1071
1072 if (strcmp(pcc[i]->pce_opts->pce_name, pce_name) == 0) {
1073 return pcc[i];
1074 }
1075 }
1076
1077 return NULL;
1078}
1079
1080int pcep_pcc_get_pcc_idx_by_id(struct pcc_state **pcc, int id)
1081{
1082 if (pcc == NULL) {
1083 return -1;
1084 }
1085
1086 for (int idx = 0; idx < MAX_PCC; idx++) {
1087 if (pcc[idx]) {
1088 if (pcc[idx]->id == id) {
1089 zlog_debug("found pcc_id (%d) array_idx (%d)",
1090 pcc[idx]->id, idx);
1091 return idx;
1092 }
1093 }
1094 }
1095
1096 return -1;
1097}
1098
1099int pcep_pcc_get_free_pcc_idx(struct pcc_state **pcc)
1100{
1101 assert(pcc != NULL);
1102
1103 for (int idx = 0; idx < MAX_PCC; idx++) {
1104 if (pcc[idx] == NULL) {
1105 zlog_debug("new pcc_idx (%d)", idx);
1106 return idx;
1107 }
1108 }
1109
1110 return -1;
1111}
1112
1113int pcep_pcc_get_pcc_id(struct pcc_state *pcc)
1114{
1115 return ((pcc == NULL) ? 0 : pcc->id);
1116}
1117
/* Copy a snapshot of the PCC state identified by pcc_info->pce_name into
 * pcc_info (for CLI/show output). Does nothing when the name is unknown.
 * The is_best/previous_best pair is read under g_pcc_info_mtx since it is
 * updated from another context. */
void pcep_pcc_copy_pcc_info(struct pcc_state **pcc,
			    struct pcep_pcc_info *pcc_info)
{
	struct pcc_state *pcc_state =
		pcep_pcc_get_pcc_by_name(pcc, pcc_info->pce_name);
	if (!pcc_state) {
		return;
	}

	pcc_info->ctrl_state = NULL;
	/* pcc_opts may not be set yet; only copy MSD/port when present. */
	if(pcc_state->pcc_opts){
		pcc_info->msd = pcc_state->pcc_opts->msd;
		pcc_info->pcc_port = pcc_state->pcc_opts->port;
	}
	pcc_info->next_plspid = pcc_state->next_plspid;
	pcc_info->next_reqid = pcc_state->next_reqid;
	pcc_info->status = pcc_state->status;
	pcc_info->pcc_id = pcc_state->id;
	pthread_mutex_lock(&g_pcc_info_mtx);
	pcc_info->is_best_multi_pce = pcc_state->is_best;
	pcc_info->previous_best = pcc_state->previous_best;
	pthread_mutex_unlock(&g_pcc_info_mtx);
	pcc_info->precedence =
		pcc_state->pce_opts ? pcc_state->pce_opts->precedence : 0;
	/* Only copy the transport address once one has been selected. */
	if(pcc_state->pcc_addr_tr.ipa_type != IPADDR_NONE){
		memcpy(&pcc_info->pcc_addr, &pcc_state->pcc_addr_tr,
		       sizeof(struct ipaddr));
	}
}
1147
1148
1149/*------------------ PCEP Message handlers --------------------- */
1150
1151void handle_pcep_open(struct ctrl_state *ctrl_state,
1152 struct pcc_state *pcc_state, struct pcep_message *msg)
1153{
1154 assert(msg->msg_header->type == PCEP_TYPE_OPEN);
1155 pcep_lib_parse_capabilities(msg, &pcc_state->caps);
1156 PCEP_DEBUG("PCE capabilities: %s, %s%s",
1157 pcc_state->caps.is_stateful ? "stateful" : "stateless",
1158 pcc_state->caps.supported_ofs_are_known
1159 ? (pcc_state->caps.supported_ofs == 0
1160 ? "no objective functions supported"
1161 : "supported objective functions are ")
1162 : "supported objective functions are unknown",
1163 format_objfun_set(pcc_state->caps.supported_ofs));
1164}
1165
1166void handle_pcep_message(struct ctrl_state *ctrl_state,
1167 struct pcc_state *pcc_state, struct pcep_message *msg)
1168{
1169 if (pcc_state->status != PCEP_PCC_OPERATING)
1170 return;
1171
1172 switch (msg->msg_header->type) {
1173 case PCEP_TYPE_INITIATE:
1174 handle_pcep_lsp_initiate(ctrl_state, pcc_state, msg);
1175 break;
1176 case PCEP_TYPE_UPDATE:
1177 handle_pcep_lsp_update(ctrl_state, pcc_state, msg);
1178 break;
1179 case PCEP_TYPE_PCREP:
1180 handle_pcep_comp_reply(ctrl_state, pcc_state, msg);
1181 break;
1182 default:
1183 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE,
1184 "Unexpected pcep message from pceplib: %s",
1185 format_pcep_message(msg));
1186 break;
1187 }
1188}
1189
1190void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
1191 struct pcc_state *pcc_state,
1192 struct pcep_message *msg)
1193{
efba0985
SM
1194 struct path *path;
1195 path = pcep_lib_parse_path(msg);
1196 lookup_nbkey(pcc_state, path);
74971473
JG
1197 pcep_thread_refine_path(ctrl_state, pcc_state->id,
1198 &continue_pcep_lsp_update, path, NULL);
1199}
1200
1201void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
1202 struct pcc_state *pcc_state, struct path *path,
1203 void *payload)
1204{
1205 char err[MAX_ERROR_MSG_SIZE] = {0};
1206
efba0985
SM
1207 specialize_incoming_path(pcc_state, path);
1208 PCEP_DEBUG("%s Received LSP update", pcc_state->tag);
1209 PCEP_DEBUG_PATH("%s", format_path(path));
1210
1211 if (validate_incoming_path(pcc_state, path, err, sizeof(err)))
1212 pcep_thread_update_path(ctrl_state, pcc_state->id, path);
1213 else {
1214 /* FIXME: Monitor the amount of errors from the PCE and
1215 * possibly disconnect and blacklist */
1216 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
1217 "Unsupported PCEP protocol feature: %s", err);
1218 pcep_free_path(path);
1219 }
1220}
1221
/* Handle a PCInitiate message: either remove an existing PCE-initiated
 * path (R flag set in the SRP object) or validate and hand a new path
 * to the controller thread. Invalid requests are answered with the
 * PCErr codes mandated by the PCE-initiated-LSP spec. */
void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state,
			      struct pcep_message *msg)
{
	char err[MAX_ERROR_MSG_SIZE] = "";
	struct path *path;

	path = pcep_lib_parse_path(msg);

	if (!pcc_state->pce_opts->config_opts.pce_initiated) {
		/* PCE Initiated is not enabled */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Not allowed PCE initiated path received: %s",
			  format_pcep_message(msg));
		send_pcep_error(pcc_state, PCEP_ERRT_LSP_INSTANTIATE_ERROR,
				PCEP_ERRV_UNACCEPTABLE_INSTANTIATE_ERROR, path);
		/* NOTE(review): path does not appear to be freed on this
		 * early return (the validation-failure branch at the end
		 * does free it) — confirm whether send_pcep_error takes
		 * ownership; otherwise this looks like a leak. */
		return;
	}

	if (path->do_remove) {
		// lookup in nbkey sequential as no endpoint
		struct nbkey_map_data *key;
		char endpoint[46];

		/* Removal requests carry no endpoint, so scan the whole
		 * nbkey map for the matching PLSP ID. */
		frr_each (nbkey_map, &pcc_state->nbkey_map, key) {
			ipaddr2str(&key->nbkey.endpoint, endpoint,
				   sizeof(endpoint));
			flog_warn(
				EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
				"FOR_EACH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
				key->nbkey.color, endpoint, path->plsp_id);
			if (path->plsp_id == key->plspid) {
				flog_warn(
					EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
					"FOR_EACH MATCH nbkey [color (%d) endpoint (%s)] path [plsp_id (%d)] ",
					key->nbkey.color, endpoint,
					path->plsp_id);
				path->nbkey = key->nbkey;
				break;
			}
		}
	} else {
		if (path->first_hop == NULL /*ero sets first_hop*/) {
			/* If the PCC receives a PCInitiate message without an
			 * ERO and the R flag in the SRP object != zero, then it
			 * MUST send a PCErr message with Error-type=6
			 * (Mandatory Object missing) and Error-value=9 (ERO
			 * object missing). */
			flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
				  "ERO object missing or incomplete : %s",
				  format_pcep_message(msg));
			send_pcep_error(pcc_state,
					PCEP_ERRT_LSP_INSTANTIATE_ERROR,
					PCEP_ERRV_INTERNAL_ERROR, path);
			/* NOTE(review): possible path leak on this return —
			 * see the note above. */
			return;
		}

		if (path->plsp_id != 0) {
			/* If the PCC receives a PCInitiate message with a
			 * non-zero PLSP-ID and the R flag in the SRP object set
			 * to zero, then it MUST send a PCErr message with
			 * Error-type=19 (Invalid Operation) and Error-value=8
			 * (Non-zero PLSP-ID in the LSP Initiate Request) */
			flog_warn(
				EC_PATH_PCEP_PROTOCOL_ERROR,
				"PCE initiated path with non-zero PLSP ID: %s",
				format_pcep_message(msg));
			send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
					PCEP_ERRV_LSP_INIT_NON_ZERO_PLSP_ID,
					path);
			return;
		}

		if (path->name == NULL) {
			/* If the PCC receives a PCInitiate message without a
			 * SYMBOLIC-PATH-NAME TLV, then it MUST send a PCErr
			 * message with Error-type=10 (Reception of an invalid
			 * object) and Error-value=8 (SYMBOLIC-PATH-NAME TLV
			 * missing) */
			flog_warn(
				EC_PATH_PCEP_PROTOCOL_ERROR,
				"PCE initiated path without symbolic name: %s",
				format_pcep_message(msg));
			send_pcep_error(
				pcc_state, PCEP_ERRT_RECEPTION_OF_INV_OBJECT,
				PCEP_ERRV_SYMBOLIC_PATH_NAME_TLV_MISSING, path);
			return;
		}
	}

	/* TODO: If there is a conflict with the symbolic path name of an
	 * existing LSP, the PCC MUST send a PCErr message with Error-type=23
	 * (Bad Parameter value) and Error-value=1 (SYMBOLIC-PATH-NAME in
	 * use) */

	specialize_incoming_path(pcc_state, path);
	/* TODO: Validate the PCC address received from the PCE is valid */
	PCEP_DEBUG("%s Received LSP initiate", pcc_state->tag);
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
		pcep_thread_initiate_path(ctrl_state, pcc_state->id, path);
	} else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
		send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
				PCEP_ERRV_LSP_NOT_PCE_INITIATED, path);
		pcep_free_path(path);
	}
}
1334
/* Handle a PCRep (computation reply): match it to the pending request,
 * apply the computed path, and when the session is stateful delegate the
 * path to the PCE even if the reply could not be used. */
void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state,
			    struct pcep_message *msg)
{
	char err[MAX_ERROR_MSG_SIZE] = "";
	struct req_entry *req;
	struct path *path;

	path = pcep_lib_parse_path(msg);
	/* For a no-path reply keep the reqid mapping in place; it is purged
	 * separately (see has_pending_req_for()). */
	if (path->no_path) {
		req = pop_req_no_reqid(pcc_state, path->req_id);
	} else {
		req = pop_req(pcc_state, path->req_id);
	}
	if (req == NULL) {
		/* TODO: check the rate of bad computation reply and close
		 * the connection if more that a given rate.
		 */
		PCEP_DEBUG(
			"%s Received computation reply for unknown request %d",
			pcc_state->tag, path->req_id);
		PCEP_DEBUG_PATH("%s", format_path(path));
		send_pcep_error(pcc_state, PCEP_ERRT_UNKNOWN_REQ_REF,
				PCEP_ERRV_UNASSIGNED, NULL);
		/* NOTE(review): path is not freed on this return — confirm
		 * ownership; this looks like a leak. */
		return;
	}

	/* Cancel the computation request timeout */
	pcep_thread_cancel_timer(&req->t_retry);

	/* Transfer relevent metadata from the request to the response */
	path->nbkey = req->path->nbkey;
	path->plsp_id = req->path->plsp_id;
	path->type = req->path->type;
	path->name = XSTRDUP(MTYPE_PCEP, req->path->name);
	specialize_incoming_path(pcc_state, path);

	PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
		   pcc_state->tag, path->req_id,
		   path->no_path ? "true" : "false");
	PCEP_DEBUG_PATH("%s", format_path(path));

	if (path->no_path) {
		/* PCE could not compute a path: nothing to apply. */
		PCEP_DEBUG("%s Computation for path %s did not find any result",
			   pcc_state->tag, path->name);
		free_req_entry(req);
		pcep_free_path(path);
		return;
	} else if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
		/* Updating a dynamic path will automatically delegate it */
		pcep_thread_update_path(ctrl_state, pcc_state->id, path);
		free_req_entry(req);
		return;
	} else {
		/* FIXME: Monitor the amount of errors from the PCE and
		 * possibly disconnect and blacklist */
		flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
			  "Unsupported PCEP protocol feature: %s", err);
	}

	pcep_free_path(path);

	/* Delegate the path regardless of the outcome */
	/* TODO: For now we are using the path from the request, when
	 * pathd API is thread safe, we could get a new path */
	if (pcc_state->caps.is_stateful) {
		PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
			   pcc_state->tag, req->path->name,
			   pcc_state->originator);
		path = pcep_copy_path(req->path);
		path->is_delegated = true;
		send_report(pcc_state, path);
		pcep_free_path(path);
	}

	free_req_entry(req);
}
1412
1413
1414/* ------------ Internal Functions ------------ */
1415
/* Human-readable name of the address family stored in addr. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	if (IS_IPADDR_V4(addr))
		return "IPv4";
	return IS_IPADDR_V6(addr) ? "IPv6" : "undefined";
}
1424
1425bool filter_path(struct pcc_state *pcc_state, struct path *path)
1426{
1427 return (IS_IPADDR_V4(&path->nbkey.endpoint)
1428 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4))
1429 || (IS_IPADDR_V6(&path->nbkey.endpoint)
1430 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1431}
1432
1433void select_pcc_addresses(struct pcc_state *pcc_state)
1434{
1435 /* If no IPv4 address was specified, try to get one from zebra */
1436 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1437 if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) {
1438 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
1439 }
1440 }
1441
1442 /* If no IPv6 address was specified, try to get one from zebra */
1443 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1444 if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) {
1445 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
1446 }
1447 }
1448}
1449
1450void select_transport_address(struct pcc_state *pcc_state)
1451{
1452 struct ipaddr *taddr = &pcc_state->pcc_addr_tr;
1453
1454 select_pcc_addresses(pcc_state);
1455
1456 taddr->ipa_type = IPADDR_NONE;
1457
1458 /* Select a transport source address in function of the configured PCE
1459 * address */
1460 if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) {
1461 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
efba0985 1462 taddr->ipaddr_v4 = pcc_state->pcc_addr_v4;
74971473 1463 taddr->ipa_type = IPADDR_V4;
efba0985
SM
1464 }
1465 } else {
1466 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
efba0985 1467 taddr->ipaddr_v6 = pcc_state->pcc_addr_v6;
74971473 1468 taddr->ipa_type = IPADDR_V6;
efba0985
SM
1469 }
1470 }
1471}
1472
1473void update_tag(struct pcc_state *pcc_state)
1474{
1475 if (pcc_state->pce_opts != NULL) {
1476 assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
1477 if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
1478 snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
1479 "%pI6:%i (%u)",
1480 &pcc_state->pce_opts->addr.ipaddr_v6,
1481 pcc_state->pce_opts->port, pcc_state->id);
1482 } else {
1483 snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
1484 "%pI4:%i (%u)",
1485 &pcc_state->pce_opts->addr.ipaddr_v4,
1486 pcc_state->pce_opts->port, pcc_state->id);
1487 }
1488 } else {
1489 snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)",
1490 pcc_state->id);
1491 }
1492}
1493
1494void update_originator(struct pcc_state *pcc_state)
1495{
1496 char *originator;
1497 if (pcc_state->originator != NULL) {
1498 XFREE(MTYPE_PCEP, pcc_state->originator);
1499 pcc_state->originator = NULL;
1500 }
1501 if (pcc_state->pce_opts == NULL)
1502 return;
1503 originator = XCALLOC(MTYPE_PCEP, 52);
1504 assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
1505 if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
1506 snprintfrr(originator, 52, "%pI6:%i",
1507 &pcc_state->pce_opts->addr.ipaddr_v6,
1508 pcc_state->pce_opts->port);
1509 } else {
1510 snprintfrr(originator, 52, "%pI4:%i",
1511 &pcc_state->pce_opts->addr.ipaddr_v4,
1512 pcc_state->pce_opts->port);
1513 }
1514 pcc_state->originator = originator;
1515}
1516
/* Schedule the next reconnection attempt for this session; on the first
 * retry also arm the timer that re-evaluates the best PCE after the
 * delegation timeout. */
void schedule_reconnect(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	pcc_state->retry_count++;
	pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id,
				       pcc_state->retry_count,
				       &pcc_state->t_reconnect);
	/* Only the first failure triggers best-PCE resynchronization. */
	if (pcc_state->retry_count == 1) {
		pcep_thread_schedule_sync_best_pce(
			ctrl_state, pcc_state->id,
			pcc_state->pce_opts->config_opts
				.delegation_timeout_seconds,
			&pcc_state->t_update_best);
	}
}
1532
/* Arm the session-timeout timer for this PCC, unless at least one PCE is
 * still connected (the timer is not used in multi-PCE mode). */
void schedule_session_timeout(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state)
{
	/* No need to schedule timeout if multiple PCEs are connected */
	if (get_pce_count_connected(ctrl_state->pcc)) {
		PCEP_DEBUG_PCEP(
			"schedule_session_timeout not setting timer for multi-pce mode");

		return;
	}

	pcep_thread_schedule_session_timeout(
		ctrl_state, pcep_pcc_get_pcc_id(pcc_state),
		pcc_state->pce_opts->config_opts
			.session_timeout_inteval_seconds,
		&pcc_state->t_session_timeout);
}
1550
/* Cancel a previously armed session-timeout timer; a no-op when none is
 * pending. */
void cancel_session_timeout(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state)
{
	/* Nothing to cancel when the timer was never armed. */
	if (pcc_state->t_session_timeout == NULL) {
		PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
		return;
	}

	PCEP_DEBUG_PCEP("Cancel session_timeout timer");
	pcep_thread_cancel_timer(&pcc_state->t_session_timeout);
	pcc_state->t_session_timeout = NULL;
}
1564
/* Send a PCEP message on this session; silently dropped when no pceplib
 * session is currently established. */
void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg)
{
	if (pcc_state->sess != NULL) {
		PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag,
				format_pcep_message(msg));
		send_message(pcc_state->sess, msg, true);
	}
}
1573
/* Format and send a PCErr message to the PCE. trigger_path (may be NULL)
 * is forwarded to the message formatter. */
void send_pcep_error(struct pcc_state *pcc_state,
		     enum pcep_error_type error_type,
		     enum pcep_error_value error_value,
		     struct path *trigger_path)
{
	struct pcep_message *msg;
	PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
		   pcc_state->tag, pcep_error_type_name(error_type), error_type,
		   pcep_error_value_name(error_type, error_value), error_value);
	msg = pcep_lib_format_error(error_type, error_value, trigger_path);
	send_pcep_message(pcc_state, msg);
}
1586
/* Build and send a PCRpt (path report) for the given path to this PCE.
 * The request id is cleared since a report is unsolicited. */
void send_report(struct pcc_state *pcc_state, struct path *path)
{
	struct pcep_message *report;

	path->req_id = 0;
	specialize_outgoing_path(pcc_state, path);
	PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name,
			format_path(path));
	report = pcep_lib_format_report(&pcc_state->caps, path);
	send_pcep_message(pcc_state, report);
}
1598
/* Updates the path for the PCE, updating the delegation and creation flags */
void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path)
{
	bool is_delegated = false;
	bool was_created = false;

	/* Resolve (or allocate) the PLSP ID for this path's NB key. */
	lookup_plspid(pcc_state, path);

	set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pcc_addr_tr;

	/* TODO: When the pathd API have a way to mark a path as
	 * delegated, use it instead of considering all dynamic path
	 * delegated. We need to disable the originator check for now,
	 * because path could be delegated without having any originator yet */
	// if ((path->originator == NULL)
	//     || (strcmp(path->originator, pcc_state->originator) == 0)) {
	// 	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	// 			&& (path->first_hop != NULL);
	// 	/* it seems the PCE consider updating an LSP a creation ?!?
	// 	at least Cisco does... */
	// 	was_created = path->update_origin == SRTE_ORIGIN_PCEP;
	// }
	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC);
	was_created = path->update_origin == SRTE_ORIGIN_PCEP;

	path->pcc_id = pcc_state->id;
	/* Only delegate to the currently-best PCE. */
	path->go_active = is_delegated && pcc_state->is_best;
	path->is_delegated = is_delegated && pcc_state->is_best;
	path->was_created = was_created;
}
1630
/* Updates the path for the PCC */
void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path)
{
	/* Fill in the local address only if the PCE did not provide one. */
	if (IS_IPADDR_NONE(&path->pcc_addr))
		set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pce_opts->addr;
	path->pcc_id = pcc_state->id;
	/* Mark the path as last updated via PCEP by this PCE. */
	path->update_origin = SRTE_ORIGIN_PCEP;
	path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator);
}
1641
1642/* Ensure the path can be handled by the PCC and if not, sends an error */
1643bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path,
1644 char *errbuff, size_t buffsize)
1645{
1646 struct path_hop *hop;
1647 enum pcep_error_type err_type = 0;
1648 enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED;
1649
1650 for (hop = path->first_hop; hop != NULL; hop = hop->next) {
1651 /* Hops without SID are not supported */
1652 if (!hop->has_sid) {
1653 snprintfrr(errbuff, buffsize, "SR segment without SID");
1654 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1655 err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING;
1656 break;
1657 }
1658 /* Hops with non-MPLS SID are not supported */
1659 if (!hop->is_mpls) {
1660 snprintfrr(errbuff, buffsize,
1661 "SR segment with non-MPLS SID");
1662 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1663 err_value = PCEP_ERRV_UNSUPPORTED_NAI;
1664 break;
1665 }
1666 }
1667
1668 if (err_type != 0) {
56634922 1669 send_pcep_error(pcc_state, err_type, err_value, NULL);
efba0985
SM
1670 return false;
1671 }
1672
1673 return true;
1674}
1675
/* Send (or re-send) a pending computation request to this PCE and arm
 * the request timeout. Requests already waiting on their retry timer are
 * left alone, and only the currently-best PCE is asked. */
void send_comp_request(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state, struct req_entry *req)
{
	assert(req != NULL);

	/* Retry timer armed: the request is already in flight. */
	if (req->t_retry)
		return;

	assert(req->path != NULL);
	assert(req->path->req_id > 0);
	assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req);
	assert(lookup_reqid(pcc_state, req->path) == req->path->req_id);

	int timeout;
	struct pcep_message *msg;

	/* Only the best PCE receives computation requests. */
	if (!pcc_state->is_best) {
		return;
	}

	specialize_outgoing_path(pcc_state, req->path);

	PCEP_DEBUG(
		"%s Sending computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag,
			req->path->name, format_path(req->path));

	msg = pcep_lib_format_request(&pcc_state->caps, req->path);
	send_pcep_message(pcc_state, msg);
	req->was_sent = true;

	/* Arm the computation-request timeout. */
	timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
	pcep_thread_schedule_timeout(ctrl_state, pcc_state->id,
				     TO_COMPUTATION_REQUEST, timeout,
				     (void *)req, &req->t_retry);
}
1714
/* Cancel and release every pending computation request of this PCC,
 * including its reqid mapping. */
void cancel_comp_requests(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state)
{
	struct req_entry *req, *safe_req;

	/* SAFE variant: entries are removed while iterating. */
	RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) {
		cancel_comp_request(ctrl_state, pcc_state, req);
		RB_REMOVE(req_entry_head, &pcc_state->requests, req);
		remove_reqid_mapping(pcc_state, req->path);
		free_req_entry(req);
	}
}
1727
/* Cancel one computation request: stop its retry timer (if it was ever
 * sent) and tell the PCE the request is cancelled. The entry itself is
 * freed by the caller. */
void cancel_comp_request(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct req_entry *req)
{
	struct pcep_message *msg;

	if (req->was_sent) {
		/* TODO: Send a computation request cancelation
		 * notification to the PCE */
		pcep_thread_cancel_timer(&req->t_retry);
	}

	PCEP_DEBUG(
		"%s Canceling computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
			pcc_state->tag, req->path->name,
			format_path(req->path));

	msg = pcep_lib_format_request_cancelled(req->path->req_id);
	send_pcep_message(pcc_state, msg);
}
1750
1751void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey,
1752 struct ipaddr *addr)
1753{
1754 select_pcc_addresses(pcc_state);
1755 if (IS_IPADDR_V6(&nbkey->endpoint)) {
1756 assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1757 addr->ipa_type = IPADDR_V6;
1758 addr->ipaddr_v6 = pcc_state->pcc_addr_v6;
1759 } else if (IS_IPADDR_V4(&nbkey->endpoint)) {
1760 assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4));
1761 addr->ipa_type = IPADDR_V4;
1762 addr->ipaddr_v4 = pcc_state->pcc_addr_v4;
1763 } else {
1764 addr->ipa_type = IPADDR_NONE;
1765 }
1766}
1767
efba0985
SM
1768/* ------------ Data Structure Helper Functions ------------ */
1769
/* Find (or allocate) the PLSP ID associated with the path's NB key and
 * store it in path->plsp_id. A fresh ID is recorded in both directions:
 * nbkey -> plspid and plspid -> nbkey. Paths with color 0 are skipped. */
void lookup_plspid(struct pcc_state *pcc_state, struct path *path)
{
	struct plspid_map_data key, *plspid_mapping;
	struct nbkey_map_data *nbkey_mapping;

	if (path->nbkey.color != 0) {
		key.nbkey = path->nbkey;
		plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key);
		if (plspid_mapping == NULL) {
			plspid_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping));
			plspid_mapping->nbkey = key.nbkey;
			plspid_mapping->plspid = pcc_state->next_plspid;
			plspid_map_add(&pcc_state->plspid_map, plspid_mapping);
			nbkey_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping));
			nbkey_mapping->nbkey = key.nbkey;
			nbkey_mapping->plspid = pcc_state->next_plspid;
			nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping);
			pcc_state->next_plspid++;
			// FIXME: Send some error to the PCE isntead of crashing
			/* 1048576 == 2^20: PLSP-IDs are a 20-bit field per
			 * RFC 8231, so exceeding it is a protocol limit. */
			assert(pcc_state->next_plspid <= 1048576);
		}
		path->plsp_id = plspid_mapping->plspid;
	}
}
1796
/* Resolve the northbound key for path from its PLSP ID and store it in
 * path->nbkey. The mapping must exist (created by lookup_plspid()). */
void lookup_nbkey(struct pcc_state *pcc_state, struct path *path)
{
	struct nbkey_map_data key, *mapping;
	// TODO: Should give an error to the PCE instead of crashing
	assert(path->plsp_id != 0);
	key.plspid = path->plsp_id;
	mapping = nbkey_map_find(&pcc_state->nbkey_map, &key);
	assert(mapping != NULL);
	path->nbkey = mapping->nbkey;
}
1807
/* Release a request entry and the path copy it owns. */
void free_req_entry(struct req_entry *req)
{
	pcep_free_path(req->path);
	XFREE(MTYPE_PCEP, req);
}
1813
1814struct req_entry *push_new_req(struct pcc_state *pcc_state, struct path *path)
1815{
1816 struct req_entry *req;
1817
1818 req = XCALLOC(MTYPE_PCEP, sizeof(*req));
1819 req->retry_count = 0;
1820 req->path = pcep_copy_path(path);
1821 repush_req(pcc_state, req);
1822
1823 return req;
1824}
1825
1826void repush_req(struct pcc_state *pcc_state, struct req_entry *req)
1827{
1828 uint32_t reqid = pcc_state->next_reqid;
1829 void *res;
1830
1831 req->was_sent = false;
1832 req->path->req_id = reqid;
1833 res = RB_INSERT(req_entry_head, &pcc_state->requests, req);
1834 assert(res == NULL);
1835 assert(add_reqid_mapping(pcc_state, req->path) == true);
1836
1837 pcc_state->next_reqid += 1;
1838 /* Wrapping is allowed, but 0 is not a valid id */
1839 if (pcc_state->next_reqid == 0)
1840 pcc_state->next_reqid = 1;
1841}
1842
1843struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid)
1844{
1845 struct path path = {.req_id = reqid};
1846 struct req_entry key = {.path = &path};
1847 struct req_entry *req;
1848
1849 req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
1850 if (req == NULL)
1851 return NULL;
1852 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1853 remove_reqid_mapping(pcc_state, req->path);
1854
1855 return req;
1856}
1857
0a1bf4be
JG
1858struct req_entry *pop_req_no_reqid(struct pcc_state *pcc_state, uint32_t reqid)
1859{
1860 struct path path = {.req_id = reqid};
1861 struct req_entry key = {.path = &path};
1862 struct req_entry *req;
1863
1864 req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
1865 if (req == NULL)
1866 return NULL;
1867 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1868
1869 return req;
1870}
1871
efba0985
SM
1872bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1873{
1874 struct req_map_data *mapping;
1875 mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping));
1876 mapping->nbkey = path->nbkey;
1877 mapping->reqid = path->req_id;
1878 if (req_map_add(&pcc_state->req_map, mapping) != NULL) {
1879 XFREE(MTYPE_PCEP, mapping);
1880 return false;
1881 }
1882 return true;
1883}
1884
1885void remove_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1886{
1887 struct req_map_data key, *mapping;
1888 key.nbkey = path->nbkey;
1889 mapping = req_map_find(&pcc_state->req_map, &key);
1890 if (mapping != NULL) {
1891 req_map_del(&pcc_state->req_map, mapping);
1892 XFREE(MTYPE_PCEP, mapping);
1893 }
1894}
1895
1896uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path)
1897{
1898 struct req_map_data key, *mapping;
1899 key.nbkey = path->nbkey;
1900 mapping = req_map_find(&pcc_state->req_map, &key);
1901 if (mapping != NULL)
1902 return mapping->reqid;
1903 return 0;
1904}
1905
/* Tell whether a computation request is pending for this path's NB key.
 * As a side effect, purges a stale reqid mapping left behind by a
 * no-path reply (see pop_req_no_reqid()). */
bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path)
{
	struct req_entry key = {.path = path};
	struct req_entry *req;


	PCEP_DEBUG_PATH("(%s) %s", format_path(path), __func__);
	/* Looking for request without result */
	if (path->no_path || !path->first_hop) {
		PCEP_DEBUG_PATH("%s Path : no_path|!first_hop", __func__);
		/* ...and already was handle */
		req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
		if (!req) {
			/* we must purge remaining reqid */
			PCEP_DEBUG_PATH("%s Purge pending reqid: no_path(%s)",
					__func__,
					path->no_path ? "TRUE" : "FALSE");
			if (lookup_reqid(pcc_state, path) != 0) {
				PCEP_DEBUG_PATH("%s Purge pending reqid: DONE ",
						__func__);
				remove_reqid_mapping(pcc_state, path);
				return true;
			} else {
				return false;
			}
		}
	}


	return lookup_reqid(pcc_state, path) != 0;
}
1937
1938
1939/* ------------ Data Structure Callbacks ------------ */
1940
/* Three-way compare helper: returns -1/1 from the ENCLOSING function when
 * A and B differ. Arguments are parenthesized so expression arguments
 * bind correctly; note A and B are evaluated more than once, so only pass
 * side-effect-free expressions. */
#define CMP_RETURN(A, B)                                                       \
	if ((A) != (B))                                                        \
	return ((A) < (B)) ? -1 : 1
1944
1945static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey)
1946{
1947 uint32_t hash;
1948 hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a);
1949 switch (nbkey->endpoint.ipa_type) {
1950 case IPADDR_V4:
1951 return jhash(&nbkey->endpoint.ipaddr_v4,
1952 sizeof(nbkey->endpoint.ipaddr_v4), hash);
1953 case IPADDR_V6:
1954 return jhash(&nbkey->endpoint.ipaddr_v6,
1955 sizeof(nbkey->endpoint.ipaddr_v6), hash);
1956 default:
1957 return hash;
1958 }
1959}
1960
1961static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b)
1962{
1963 CMP_RETURN(a->color, b->color);
1964 int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint);
1965 if (cmp != 0)
1966 return cmp;
1967 CMP_RETURN(a->preference, b->preference);
1968 return 0;
1969}
1970
/* Ordering for the plspid map: entries are keyed by their NB key. */
int plspid_map_cmp(const struct plspid_map_data *a,
		   const struct plspid_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1976
/* Hash for the plspid map: derived from the entry's NB key. */
uint32_t plspid_map_hash(const struct plspid_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}
1981
1982int nbkey_map_cmp(const struct nbkey_map_data *a,
1983 const struct nbkey_map_data *b)
1984{
1985 CMP_RETURN(a->plspid, b->plspid);
1986 return 0;
1987}
1988
/* Hash for the nbkey map: the PLSP ID itself is the hash value. */
uint32_t nbkey_map_hash(const struct nbkey_map_data *e)
{
	return e->plspid;
}
1993
/* Ordering for the request map: entries are keyed by their NB key. */
int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1998
/* Hash for the request map: derived from the entry's NB key. */
uint32_t req_map_hash(const struct req_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}