]> git.proxmox.com Git - mirror_frr.git/blob - pathd/path_pcep_pcc.c
Merge pull request #8662 from idryzhov/fix-check-linux-vrf
[mirror_frr.git] / pathd / path_pcep_pcc.c
1 /*
2 * Copyright (C) 2020 NetDEF, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; see the file COPYING; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 /* TODOS AND KNOWN ISSUES:
20 - Delete mapping from NB keys to PLSPID when an LSP is deleted either
21 by the PCE or by NB.
22 - Revert the hacks to work around ODL requiring a report with
23 operational status DOWN when an LSP is activated.
24 - Enforce only the PCE a policy has been delegated to can update it.
25 - If the router-id is used because the PCC IP is not specified
26 (either IPv4 or IPv6), the connection to the PCE is not reset
27 when the router-id changes.
28 */
29
30 #include <zebra.h>
31
32 #include "log.h"
33 #include "command.h"
34 #include "libfrr.h"
35 #include "printfrr.h"
36 #include "lib/version.h"
37 #include "northbound.h"
38 #include "frr_pthread.h"
39 #include "jhash.h"
40
41 #include "pathd/pathd.h"
42 #include "pathd/path_zebra.h"
43 #include "pathd/path_errors.h"
44 #include "pathd/path_pcep.h"
45 #include "pathd/path_pcep_controller.h"
46 #include "pathd/path_pcep_lib.h"
47 #include "pathd/path_pcep_config.h"
48 #include "pathd/path_pcep_debug.h"
49
50
51 /* The number of time we will skip connecting if we are missing the PCC
52 * address for an inet family different from the selected transport one*/
53 #define OTHER_FAMILY_MAX_RETRIES 4
54 #define MAX_ERROR_MSG_SIZE 256
55 #define MAX_COMPREQ_TRIES 3
56
57 pthread_mutex_t g_pcc_info_mtx = PTHREAD_MUTEX_INITIALIZER;
58
59 /* PCEP Event Handler */
60 static void handle_pcep_open(struct ctrl_state *ctrl_state,
61 struct pcc_state *pcc_state,
62 struct pcep_message *msg);
63 static void handle_pcep_message(struct ctrl_state *ctrl_state,
64 struct pcc_state *pcc_state,
65 struct pcep_message *msg);
66 static void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
67 struct pcc_state *pcc_state,
68 struct pcep_message *msg);
69 static void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
70 struct pcc_state *pcc_state,
71 struct pcep_message *msg);
72 static void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
73 struct pcc_state *pcc_state,
74 struct path *path, void *payload);
75 static void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
76 struct pcc_state *pcc_state,
77 struct pcep_message *msg);
78
79 /* Internal Functions */
80 static const char *ipaddr_type_name(struct ipaddr *addr);
81 static bool filter_path(struct pcc_state *pcc_state, struct path *path);
82 static void select_pcc_addresses(struct pcc_state *pcc_state);
83 static void select_transport_address(struct pcc_state *pcc_state);
84 static void update_tag(struct pcc_state *pcc_state);
85 static void update_originator(struct pcc_state *pcc_state);
86 static void schedule_reconnect(struct ctrl_state *ctrl_state,
87 struct pcc_state *pcc_state);
88 static void schedule_session_timeout(struct ctrl_state *ctrl_state,
89 struct pcc_state *pcc_state);
90 static void cancel_session_timeout(struct ctrl_state *ctrl_state,
91 struct pcc_state *pcc_state);
92 static void send_pcep_message(struct pcc_state *pcc_state,
93 struct pcep_message *msg);
94 static void send_pcep_error(struct pcc_state *pcc_state,
95 enum pcep_error_type error_type,
96 enum pcep_error_value error_value);
97 static void send_report(struct pcc_state *pcc_state, struct path *path);
98 static void send_comp_request(struct ctrl_state *ctrl_state,
99 struct pcc_state *pcc_state,
100 struct req_entry *req);
101 static void cancel_comp_requests(struct ctrl_state *ctrl_state,
102 struct pcc_state *pcc_state);
103 static void cancel_comp_request(struct ctrl_state *ctrl_state,
104 struct pcc_state *pcc_state,
105 struct req_entry *req);
106 static void specialize_outgoing_path(struct pcc_state *pcc_state,
107 struct path *path);
108 static void specialize_incoming_path(struct pcc_state *pcc_state,
109 struct path *path);
110 static bool validate_incoming_path(struct pcc_state *pcc_state,
111 struct path *path, char *errbuff,
112 size_t buffsize);
113 static void set_pcc_address(struct pcc_state *pcc_state,
114 struct lsp_nb_key *nbkey, struct ipaddr *addr);
115 static int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs);
116 static int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs);
117 static int get_previous_best_pce(struct pcc_state **pcc);
118 static int get_best_pce(struct pcc_state **pcc);
119 static int get_pce_count_connected(struct pcc_state **pcc);
120 static bool update_best_pce(struct pcc_state **pcc, int best);
121
122 /* Data Structure Helper Functions */
123 static void lookup_plspid(struct pcc_state *pcc_state, struct path *path);
124 static void lookup_nbkey(struct pcc_state *pcc_state, struct path *path);
125 static void free_req_entry(struct req_entry *req);
126 static struct req_entry *push_new_req(struct pcc_state *pcc_state,
127 struct path *path);
128 static void repush_req(struct pcc_state *pcc_state, struct req_entry *req);
129 static struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid);
130 static bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path);
131 static void remove_reqid_mapping(struct pcc_state *pcc_state,
132 struct path *path);
133 static uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path);
134 static bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path);
135
136 /* Data Structure Callbacks */
137 static int plspid_map_cmp(const struct plspid_map_data *a,
138 const struct plspid_map_data *b);
139 static uint32_t plspid_map_hash(const struct plspid_map_data *e);
140 static int nbkey_map_cmp(const struct nbkey_map_data *a,
141 const struct nbkey_map_data *b);
142 static uint32_t nbkey_map_hash(const struct nbkey_map_data *e);
143 static int req_map_cmp(const struct req_map_data *a,
144 const struct req_map_data *b);
145 static uint32_t req_map_hash(const struct req_map_data *e);
146
147 /* Data Structure Declarations */
148 DECLARE_HASH(plspid_map, struct plspid_map_data, mi, plspid_map_cmp,
149 plspid_map_hash);
150 DECLARE_HASH(nbkey_map, struct nbkey_map_data, mi, nbkey_map_cmp,
151 nbkey_map_hash);
152 DECLARE_HASH(req_map, struct req_map_data, mi, req_map_cmp, req_map_hash);
153
154 static inline int req_entry_compare(const struct req_entry *a,
155 const struct req_entry *b)
156 {
157 return a->path->req_id - b->path->req_id;
158 }
159 RB_GENERATE(req_entry_head, req_entry, entry, req_entry_compare)
160
161
162 /* ------------ API Functions ------------ */
163
164 struct pcc_state *pcep_pcc_initialize(struct ctrl_state *ctrl_state, int index)
165 {
166 struct pcc_state *pcc_state = XCALLOC(MTYPE_PCEP, sizeof(*pcc_state));
167
168 pcc_state->id = index;
169 pcc_state->status = PCEP_PCC_DISCONNECTED;
170 pcc_state->next_reqid = 1;
171 pcc_state->next_plspid = 1;
172
173 RB_INIT(req_entry_head, &pcc_state->requests);
174
175 update_tag(pcc_state);
176 update_originator(pcc_state);
177
178 PCEP_DEBUG("%s PCC initialized", pcc_state->tag);
179
180 return pcc_state;
181 }
182
183 void pcep_pcc_finalize(struct ctrl_state *ctrl_state,
184 struct pcc_state *pcc_state)
185 {
186 PCEP_DEBUG("%s PCC finalizing...", pcc_state->tag);
187
188 pcep_pcc_disable(ctrl_state, pcc_state);
189
190 if (pcc_state->pcc_opts != NULL) {
191 XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
192 pcc_state->pcc_opts = NULL;
193 }
194 if (pcc_state->pce_opts != NULL) {
195 XFREE(MTYPE_PCEP, pcc_state->pce_opts);
196 pcc_state->pce_opts = NULL;
197 }
198 if (pcc_state->originator != NULL) {
199 XFREE(MTYPE_PCEP, pcc_state->originator);
200 pcc_state->originator = NULL;
201 }
202
203 if (pcc_state->t_reconnect != NULL) {
204 thread_cancel(&pcc_state->t_reconnect);
205 pcc_state->t_reconnect = NULL;
206 }
207
208 if (pcc_state->t_update_best != NULL) {
209 thread_cancel(&pcc_state->t_update_best);
210 pcc_state->t_update_best = NULL;
211 }
212
213 if (pcc_state->t_session_timeout != NULL) {
214 thread_cancel(&pcc_state->t_session_timeout);
215 pcc_state->t_session_timeout = NULL;
216 }
217
218 XFREE(MTYPE_PCEP, pcc_state);
219 }
220
221 int compare_pcc_opts(struct pcc_opts *lhs, struct pcc_opts *rhs)
222 {
223 int retval;
224
225 if (lhs == NULL) {
226 return 1;
227 }
228
229 if (rhs == NULL) {
230 return -1;
231 }
232
233 retval = lhs->port - rhs->port;
234 if (retval != 0) {
235 return retval;
236 }
237
238 retval = lhs->msd - rhs->msd;
239 if (retval != 0) {
240 return retval;
241 }
242
243 if (IS_IPADDR_V4(&lhs->addr)) {
244 retval = memcmp(&lhs->addr.ipaddr_v4, &rhs->addr.ipaddr_v4,
245 sizeof(lhs->addr.ipaddr_v4));
246 if (retval != 0) {
247 return retval;
248 }
249 } else if (IS_IPADDR_V6(&lhs->addr)) {
250 retval = memcmp(&lhs->addr.ipaddr_v6, &rhs->addr.ipaddr_v6,
251 sizeof(lhs->addr.ipaddr_v6));
252 if (retval != 0) {
253 return retval;
254 }
255 }
256
257 return 0;
258 }
259
260 int compare_pce_opts(struct pce_opts *lhs, struct pce_opts *rhs)
261 {
262 if (lhs == NULL) {
263 return 1;
264 }
265
266 if (rhs == NULL) {
267 return -1;
268 }
269
270 int retval = lhs->port - rhs->port;
271 if (retval != 0) {
272 return retval;
273 }
274
275 retval = strcmp(lhs->pce_name, rhs->pce_name);
276 if (retval != 0) {
277 return retval;
278 }
279
280 retval = lhs->precedence - rhs->precedence;
281 if (retval != 0) {
282 return retval;
283 }
284
285 retval = memcmp(&lhs->addr, &rhs->addr, sizeof(lhs->addr));
286 if (retval != 0) {
287 return retval;
288 }
289
290 return 0;
291 }
292
293 int pcep_pcc_update(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state,
294 struct pcc_opts *pcc_opts, struct pce_opts *pce_opts)
295 {
296 int ret = 0;
297
298 // If the options did not change, then there is nothing to do
299 if ((compare_pce_opts(pce_opts, pcc_state->pce_opts) == 0)
300 && (compare_pcc_opts(pcc_opts, pcc_state->pcc_opts) == 0)) {
301 return ret;
302 }
303
304 if ((ret = pcep_pcc_disable(ctrl_state, pcc_state))) {
305 XFREE(MTYPE_PCEP, pcc_opts);
306 XFREE(MTYPE_PCEP, pce_opts);
307 return ret;
308 }
309
310 if (pcc_state->pcc_opts != NULL) {
311 XFREE(MTYPE_PCEP, pcc_state->pcc_opts);
312 }
313 if (pcc_state->pce_opts != NULL) {
314 XFREE(MTYPE_PCEP, pcc_state->pce_opts);
315 }
316
317 pcc_state->pcc_opts = pcc_opts;
318 pcc_state->pce_opts = pce_opts;
319
320 if (IS_IPADDR_V4(&pcc_opts->addr)) {
321 pcc_state->pcc_addr_v4 = pcc_opts->addr.ipaddr_v4;
322 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
323 } else {
324 UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
325 }
326
327 if (IS_IPADDR_V6(&pcc_opts->addr)) {
328 memcpy(&pcc_state->pcc_addr_v6, &pcc_opts->addr.ipaddr_v6,
329 sizeof(struct in6_addr));
330 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
331 } else {
332 UNSET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
333 }
334
335 update_tag(pcc_state);
336 update_originator(pcc_state);
337
338 return pcep_pcc_enable(ctrl_state, pcc_state);
339 }
340
341 void pcep_pcc_reconnect(struct ctrl_state *ctrl_state,
342 struct pcc_state *pcc_state)
343 {
344 if (pcc_state->status == PCEP_PCC_DISCONNECTED)
345 pcep_pcc_enable(ctrl_state, pcc_state);
346 }
347
/*
 * Open a PCEP session towards this PCC's configured PCE.
 *
 * Preconditions: the PCC must be fully disconnected (asserted).
 * Always returns 0; failures (missing addresses, connect errors) are
 * handled by scheduling a reconnection attempt rather than reported to
 * the caller.
 */
int pcep_pcc_enable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
{
	assert(pcc_state->status == PCEP_PCC_DISCONNECTED);
	assert(pcc_state->sess == NULL);

	/* A reconnect may already be scheduled from a previous failure;
	 * cancel it since we are attempting to connect right now. */
	if (pcc_state->t_reconnect != NULL) {
		thread_cancel(&pcc_state->t_reconnect);
		pcc_state->t_reconnect = NULL;
	}

	select_transport_address(pcc_state);

	/* Even though we are connecting using IPv6. we want to have an IPv4
	 * address so we can handle candidate path with IPv4 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			/* Delay connecting for up to OTHER_FAMILY_MAX_RETRIES
			 * attempts hoping the missing family address shows
			 * up, then proceed without it. */
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %pIA:%d due to missing PCC IPv4 address",
				  &pcc_state->pce_opts->addr,
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv4 PCC address, IPv4 candidate paths will be ignored");
		}
	}

	/* Even though we are connecting using IPv4. we want to have an IPv6
	 * address so we can handle candidate path with IPv6 endpoints */
	if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
		if (pcc_state->retry_count < OTHER_FAMILY_MAX_RETRIES) {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "skipping connection to PCE %pIA:%d due to missing PCC IPv6 address",
				  &pcc_state->pce_opts->addr,
				  pcc_state->pce_opts->port);
			schedule_reconnect(ctrl_state, pcc_state);
			return 0;
		} else {
			flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
				  "missing IPv6 PCC address, IPv6 candidate paths will be ignored");
		}
	}

	/* Even if the maximum retries to try to have all the family addresses
	 * have been spent, we still need the one for the transport family */
	if (pcc_state->pcc_addr_tr.ipa_type == IPADDR_NONE) {
		flog_warn(EC_PATH_PCEP_MISSING_SOURCE_ADDRESS,
			  "skipping connection to PCE %pIA:%d due to missing PCC address",
			  &pcc_state->pce_opts->addr,
			  pcc_state->pce_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	PCEP_DEBUG("%s PCC connecting", pcc_state->tag);
	pcc_state->sess = pcep_lib_connect(
		&pcc_state->pcc_addr_tr, pcc_state->pcc_opts->port,
		&pcc_state->pce_opts->addr, pcc_state->pce_opts->port,
		pcc_state->pcc_opts->msd, &pcc_state->pce_opts->config_opts);

	if (pcc_state->sess == NULL) {
		flog_warn(EC_PATH_PCEP_LIB_CONNECT,
			  "failed to connect to PCE %pIA:%d from %pIA:%d",
			  &pcc_state->pce_opts->addr,
			  pcc_state->pce_opts->port,
			  &pcc_state->pcc_addr_tr,
			  pcc_state->pcc_opts->port);
		schedule_reconnect(ctrl_state, pcc_state);
		return 0;
	}

	// In case some best pce alternative were waiting to activate
	if (pcc_state->t_update_best != NULL) {
		thread_cancel(&pcc_state->t_update_best);
		pcc_state->t_update_best = NULL;
	}

	pcc_state->status = PCEP_PCC_CONNECTING;

	return 0;
}
430
/*
 * Tear down this PCC's PCEP session, if any.
 *
 * Returns 0 on success (including when already disconnected) and 1 if
 * the PCC is in an unexpected state.
 */
int pcep_pcc_disable(struct ctrl_state *ctrl_state, struct pcc_state *pcc_state)
{
	switch (pcc_state->status) {
	case PCEP_PCC_DISCONNECTED:
		return 0;
	case PCEP_PCC_CONNECTING:
	case PCEP_PCC_SYNCHRONIZING:
	case PCEP_PCC_OPERATING:
		PCEP_DEBUG("%s Disconnecting PCC...", pcc_state->tag);
		cancel_comp_requests(ctrl_state, pcc_state);
		pcep_lib_disconnect(pcc_state->sess);
		/* No need to remove if any PCEs is connected */
		if (get_pce_count_connected(ctrl_state->pcc) == 0) {
			pcep_thread_remove_candidate_path_segments(ctrl_state,
								   pcc_state);
		}
		pcc_state->sess = NULL;
		pcc_state->status = PCEP_PCC_DISCONNECTED;
		return 0;
	default:
		return 1;
	}
}
454
/*
 * Report one candidate path to the PCE as part of LSP state
 * synchronization (or right after it).  Only meaningful while the PCC
 * is synchronizing or operating; in any other state the path is
 * ignored.
 */
void pcep_pcc_sync_path(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state, struct path *path)
{
	if (pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		path->is_synching = true;
	} else if (pcc_state->status == PCEP_PCC_OPERATING)
		path->is_synching = false;
	else
		return;

	path->go_active = true;

	/* Accumulate the dynamic paths without any LSP so computation
	 * requests can be performed after synchronization */
	if ((path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	    && (path->first_hop == NULL)
	    && !has_pending_req_for(pcc_state, path)) {
		PCEP_DEBUG("%s Scheduling computation request for path %s",
			   pcc_state->tag, path->name);
		push_new_req(pcc_state, path);
		return;
	}

	/* Synchronize the path if the PCE supports LSP updates and the
	 * endpoint address family is supported */
	if (pcc_state->caps.is_stateful) {
		if (filter_path(pcc_state, path)) {
			PCEP_DEBUG("%s Synchronizing path %s", pcc_state->tag,
				   path->name);
			send_report(pcc_state, path);
		} else {
			PCEP_DEBUG(
				"%s Skipping %s candidate path %s synchronization",
				pcc_state->tag,
				ipaddr_type_name(&path->nbkey.endpoint),
				path->name);
		}
	}
}
494
/*
 * Complete LSP state synchronization: notify a stateful PCE, move the
 * PCC to the operating state, and fire the computation requests that
 * were accumulated while synchronizing.
 */
void pcep_pcc_sync_done(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_SYNCHRONIZING
	    && pcc_state->status != PCEP_PCC_OPERATING)
		return;

	if (pcc_state->caps.is_stateful
	    && pcc_state->status == PCEP_PCC_SYNCHRONIZING) {
		/* Report an empty path (PLSP-ID 0, sync flag cleared) —
		 * NOTE(review): presumably this is the end-of-sync marker
		 * expected by stateful PCEs; confirm against RFC 8231. */
		struct path *path = pcep_new_path();
		*path = (struct path){.name = NULL,
				      .srp_id = 0,
				      .plsp_id = 0,
				      .status = PCEP_LSP_OPERATIONAL_DOWN,
				      .do_remove = false,
				      .go_active = false,
				      .was_created = false,
				      .was_removed = false,
				      .is_synching = false,
				      .is_delegated = false,
				      .first_hop = NULL,
				      .first_metric = NULL};
		send_report(pcc_state, path);
		pcep_free_path(path);
	}

	pcc_state->synchronized = true;
	pcc_state->status = PCEP_PCC_OPERATING;

	PCEP_DEBUG("%s Synchronization done", pcc_state->tag);

	/* Start the computation request accumulated during synchronization */
	RB_FOREACH (req, req_entry_head, &pcc_state->requests) {
		send_comp_request(ctrl_state, pcc_state, req);
	}
}
533
/*
 * Send a status report for a candidate path to the PCE.
 *
 * Takes ownership of path and frees it on every code path.  Reports are
 * only sent when the session is operating and the PCE is stateful.
 * is_stable indicates no further update for this path is expected, so
 * the real status must be reported now rather than via a later hook.
 */
void pcep_pcc_send_report(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state, struct path *path,
			  bool is_stable)
{
	if ((pcc_state->status != PCEP_PCC_OPERATING)
	    || (!pcc_state->caps.is_stateful)) {
		pcep_free_path(path);
		return;
	}

	PCEP_DEBUG("%s Send report for candidate path %s", pcc_state->tag,
		   path->name);

	/* ODL and Cisco requires the first reported
	 * LSP to have a DOWN status, the later status changes
	 * will be comunicated through hook calls.
	 */
	enum pcep_lsp_operational_status real_status = path->status;
	path->status = PCEP_LSP_OPERATIONAL_DOWN;
	send_report(pcc_state, path);

	/* If no update is expected and the real status wasn't down, we need to
	 * send a second report with the real status */
	if (is_stable && (real_status != PCEP_LSP_OPERATIONAL_DOWN)) {
		/* srp_id 0: the second report is unsolicited, not tied to
		 * the SRP of the first one. */
		path->srp_id = 0;
		path->status = real_status;
		send_report(pcc_state, path);
	}

	pcep_free_path(path);
}
565
566
567 /* ------------ Timeout handler ------------ */
568
569 void pcep_pcc_timeout_handler(struct ctrl_state *ctrl_state,
570 struct pcc_state *pcc_state,
571 enum pcep_ctrl_timeout_type type, void *param)
572 {
573 struct req_entry *req;
574
575 switch (type) {
576 case TO_COMPUTATION_REQUEST:
577 assert(param != NULL);
578 req = (struct req_entry *)param;
579 pop_req(pcc_state, req->path->req_id);
580 flog_warn(EC_PATH_PCEP_COMPUTATION_REQUEST_TIMEOUT,
581 "Computation request %d timeout", req->path->req_id);
582 cancel_comp_request(ctrl_state, pcc_state, req);
583 if (req->retry_count++ < MAX_COMPREQ_TRIES) {
584 repush_req(pcc_state, req);
585 send_comp_request(ctrl_state, pcc_state, req);
586 return;
587 }
588 if (pcc_state->caps.is_stateful) {
589 struct path *path;
590 PCEP_DEBUG(
591 "%s Delegating undefined dynamic path %s to PCE %s",
592 pcc_state->tag, req->path->name,
593 pcc_state->originator);
594 path = pcep_copy_path(req->path);
595 path->is_delegated = true;
596 send_report(pcc_state, path);
597 free_req_entry(req);
598 }
599 break;
600 default:
601 break;
602 }
603 }
604
605
606 /* ------------ Pathd event handler ------------ */
607
/*
 * Handle a candidate-path event (created/updated/removed) coming from
 * pathd.  Events are ignored unless the session is operating, and paths
 * whose endpoint address family does not match the available PCC
 * addresses are filtered out.
 */
void pcep_pcc_pathd_event_handler(struct ctrl_state *ctrl_state,
				  struct pcc_state *pcc_state,
				  enum pcep_pathd_event_type type,
				  struct path *path)
{
	struct req_entry *req;

	if (pcc_state->status != PCEP_PCC_OPERATING)
		return;

	/* Skipping candidate path with endpoint that do not match the
	 * configured or deduced PCC IP version */
	if (!filter_path(pcc_state, path)) {
		PCEP_DEBUG("%s Skipping %s candidate path %s event",
			   pcc_state->tag,
			   ipaddr_type_name(&path->nbkey.endpoint), path->name);
		return;
	}

	switch (type) {
	case PCEP_PATH_CREATED:
		if (has_pending_req_for(pcc_state, path)) {
			PCEP_DEBUG(
				"%s Candidate path %s created, computation request already sent",
				pcc_state->tag, path->name);
			return;
		}
		PCEP_DEBUG("%s Candidate path %s created", pcc_state->tag,
			   path->name);
		/* A dynamic path with no hops yet needs the PCE to compute
		 * it; anything else is simply reported if stateful. */
		if ((path->first_hop == NULL)
		    && (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)) {
			req = push_new_req(pcc_state, path);
			send_comp_request(ctrl_state, pcc_state, req);
		} else if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_UPDATED:
		PCEP_DEBUG("%s Candidate path %s updated", pcc_state->tag,
			   path->name);
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	case PCEP_PATH_REMOVED:
		PCEP_DEBUG("%s Candidate path %s removed", pcc_state->tag,
			   path->name);
		path->was_removed = true;
		if (pcc_state->caps.is_stateful)
			send_report(pcc_state, path);
		return;
	default:
		flog_warn(EC_PATH_PCEP_RECOVERABLE_INTERNAL_ERROR,
			  "Unexpected pathd event received by pcc %s: %u",
			  pcc_state->tag, type);
		return;
	}
}
664
665
666 /* ------------ PCEP event handler ------------ */
667
/*
 * Dispatch a pceplib session event for this PCC: connection
 * establishment, OPEN negotiation problems, session teardown and
 * incoming PCEP messages.
 */
void pcep_pcc_pcep_event_handler(struct ctrl_state *ctrl_state,
				 struct pcc_state *pcc_state, pcep_event *event)
{
	PCEP_DEBUG("%s Received PCEP event: %s", pcc_state->tag,
		   pcep_event_type_name(event->event_type));
	switch (event->event_type) {
	case PCC_CONNECTED_TO_PCE:
		assert(PCEP_PCC_CONNECTING == pcc_state->status);
		PCEP_DEBUG("%s Connection established", pcc_state->tag);
		pcc_state->status = PCEP_PCC_SYNCHRONIZING;
		pcc_state->retry_count = 0;
		pcc_state->synchronized = false;
		PCEP_DEBUG("%s Starting PCE synchronization", pcc_state->tag);
		cancel_session_timeout(ctrl_state, pcc_state);
		pcep_pcc_calculate_best_pce(ctrl_state->pcc);
		pcep_thread_start_sync(ctrl_state, pcc_state->id);
		break;
	case PCC_SENT_INVALID_OPEN:
		/* Our OPEN was rejected: adopt the timer values the PCE
		 * negotiated so the next attempt is acceptable. */
		PCEP_DEBUG("%s Sent invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG(
			"%s Reconciling values: keep alive (%d) dead timer (%d) seconds ",
			pcc_state->tag,
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds,
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds);
		pcc_state->pce_opts->config_opts.keep_alive_seconds =
			pcc_state->sess->pcc_config
				.keep_alive_pce_negotiated_timer_seconds;
		pcc_state->pce_opts->config_opts.dead_timer_seconds =
			pcc_state->sess->pcc_config
				.dead_timer_pce_negotiated_seconds;
		break;

	case PCC_RCVD_INVALID_OPEN:
		PCEP_DEBUG("%s Received invalid OPEN message", pcc_state->tag);
		PCEP_DEBUG_PCEP("%s PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		break;
	/* All the following events terminate the session: disconnect,
	 * then schedule both a reconnect and a session timeout. */
	case PCE_DEAD_TIMER_EXPIRED:
	case PCE_CLOSED_SOCKET:
	case PCE_SENT_PCEP_CLOSE:
	case PCE_OPEN_KEEP_WAIT_TIMER_EXPIRED:
	case PCC_PCEP_SESSION_CLOSED:
	case PCC_RCVD_MAX_INVALID_MSGS:
	case PCC_RCVD_MAX_UNKOWN_MSGS:
		pcep_pcc_disable(ctrl_state, pcc_state);
		schedule_reconnect(ctrl_state, pcc_state);
		schedule_session_timeout(ctrl_state, pcc_state);
		break;
	case MESSAGE_RECEIVED:
		PCEP_DEBUG_PCEP("%s Received PCEP message: %s", pcc_state->tag,
				format_pcep_message(event->message));
		/* While connecting, only the OPEN message is meaningful. */
		if (pcc_state->status == PCEP_PCC_CONNECTING) {
			if (event->message->msg_header->type == PCEP_TYPE_OPEN)
				handle_pcep_open(ctrl_state, pcc_state,
						 event->message);
			break;
		}
		assert(pcc_state->status == PCEP_PCC_SYNCHRONIZING
		       || pcc_state->status == PCEP_PCC_OPERATING);
		handle_pcep_message(ctrl_state, pcc_state, event->message);
		break;
	default:
		flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEPLIB_EVENT,
			  "Unexpected event from pceplib: %s",
			  format_pcep_event(event));
		break;
	}
}
738
739
740 /*------------------ Multi-PCE --------------------- */
741
742 /* Internal util function, returns true if sync is necessary, false otherwise */
743 bool update_best_pce(struct pcc_state **pcc, int best)
744 {
745 PCEP_DEBUG(" recalculating pce precedence ");
746 if (best) {
747 struct pcc_state *best_pcc_state =
748 pcep_pcc_get_pcc_by_id(pcc, best);
749 if (best_pcc_state->previous_best != best_pcc_state->is_best) {
750 PCEP_DEBUG(" %s Resynch best (%i) previous best (%i)",
751 best_pcc_state->tag, best_pcc_state->id,
752 best_pcc_state->previous_best);
753 return true;
754 } else {
755 PCEP_DEBUG(
756 " %s No Resynch best (%i) previous best (%i)",
757 best_pcc_state->tag, best_pcc_state->id,
758 best_pcc_state->previous_best);
759 }
760 } else {
761 PCEP_DEBUG(" No best pce available, all pce seem disconnected");
762 }
763
764 return false;
765 }
766
767 int get_best_pce(struct pcc_state **pcc)
768 {
769 for (int i = 0; i < MAX_PCC; i++) {
770 if (pcc[i] && pcc[i]->pce_opts) {
771 if (pcc[i]->is_best == true) {
772 return pcc[i]->id;
773 }
774 }
775 }
776 return 0;
777 }
778
779 int get_pce_count_connected(struct pcc_state **pcc)
780 {
781 int count = 0;
782 for (int i = 0; i < MAX_PCC; i++) {
783 if (pcc[i] && pcc[i]->pce_opts
784 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
785 count++;
786 }
787 }
788 return count;
789 }
790
791 int get_previous_best_pce(struct pcc_state **pcc)
792 {
793 int previous_best_pce = -1;
794
795 for (int i = 0; i < MAX_PCC; i++) {
796 if (pcc[i] && pcc[i]->pce_opts && pcc[i]->previous_best == true
797 && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
798 previous_best_pce = i;
799 break;
800 }
801 }
802 return previous_best_pce != -1 ? pcc[previous_best_pce]->id : 0;
803 }
804
805 /* Called by path_pcep_controller EV_REMOVE_PCC
806 * Event handler when a PCC is removed. */
807 int pcep_pcc_multi_pce_remove_pcc(struct ctrl_state *ctrl_state,
808 struct pcc_state **pcc)
809 {
810 int new_best_pcc_id = -1;
811 new_best_pcc_id = pcep_pcc_calculate_best_pce(pcc);
812 if (new_best_pcc_id) {
813 if (update_best_pce(ctrl_state->pcc, new_best_pcc_id) == true) {
814 pcep_thread_start_sync(ctrl_state, new_best_pcc_id);
815 }
816 }
817
818 return 0;
819 }
820
821 /* Called by path_pcep_controller EV_SYNC_PATH
822 * Event handler when a path is sync'd. */
int pcep_pcc_multi_pce_sync_path(struct ctrl_state *ctrl_state, int pcc_id,
				 struct pcc_state **pcc)
{
	int previous_best_pcc_id = -1;

	/* Only act when the syncing PCC is the current best PCE. */
	if (pcc_id == get_best_pce(pcc)) {
		previous_best_pcc_id = get_previous_best_pce(pcc);
		if (previous_best_pcc_id != 0) {
			/* while adding new pce, path has to resync to the
			 * previous best. pcep_thread_start_sync() will be
			 * called by the calling function */
			if (update_best_pce(ctrl_state->pcc,
					    previous_best_pcc_id)
			    == true) {
				/* Drop any in-flight computation requests on
				 * the previous best before resyncing. */
				cancel_comp_requests(
					ctrl_state,
					pcep_pcc_get_pcc_by_id(
						pcc, previous_best_pcc_id));
				pcep_thread_start_sync(ctrl_state,
						       previous_best_pcc_id);
			}
		}
	}

	return 0;
}
849
850 /* Called by path_pcep_controller when the TM_CALCULATE_BEST_PCE
851 * timer expires */
852 int pcep_pcc_timer_update_best_pce(struct ctrl_state *ctrl_state, int pcc_id)
853 {
854 int ret = 0;
855 /* resync whatever was the new best */
856 int prev_best = get_best_pce(ctrl_state->pcc);
857 int best_id = pcep_pcc_calculate_best_pce(ctrl_state->pcc);
858 if (best_id && prev_best != best_id) { // Avoid Multiple call
859 struct pcc_state *pcc_state =
860 pcep_pcc_get_pcc_by_id(ctrl_state->pcc, best_id);
861 if (update_best_pce(ctrl_state->pcc, pcc_state->id) == true) {
862 pcep_thread_start_sync(ctrl_state, pcc_state->id);
863 }
864 }
865
866 return ret;
867 }
868
869 /* Called by path_pcep_controller::pcep_thread_event_update_pce_options()
870 * Returns the best PCE id */
/*
 * Recompute which connected PCE has the best (lowest) precedence and
 * update the is_best/previous_best flags on every slot accordingly.
 * Ties on precedence are broken by comparing PCE addresses.  Returns
 * the id of the best PCE, or 0 when no PCE is configured.
 */
int pcep_pcc_calculate_best_pce(struct pcc_state **pcc)
{
	int best_precedence = 255; // DEFAULT_PCE_PRECEDENCE;
	int best_pce = -1;
	int one_connected_pce = -1;
	int previous_best_pce = -1;
	int step_0_best = -1;
	int step_0_previous = -1;
	int pcc_count = 0;

	// Get state
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts) {
			zlog_debug(
				"multi-pce: calculate all : i (%i) is_best (%i) previous_best (%i) ",
				i, pcc[i]->is_best, pcc[i]->previous_best);
			pcc_count++;

			if (pcc[i]->is_best == true) {
				step_0_best = i;
			}
			if (pcc[i]->previous_best == true) {
				step_0_previous = i;
			}
		}
	}

	if (!pcc_count) {
		return 0;
	}

	// Calculate best
	for (int i = 0; i < MAX_PCC; i++) {
		if (pcc[i] && pcc[i]->pce_opts
		    && pcc[i]->status != PCEP_PCC_DISCONNECTED) {
			one_connected_pce = i; // In case none better
			if (pcc[i]->pce_opts->precedence <= best_precedence) {
				if (best_pce != -1
				    && pcc[best_pce]->pce_opts->precedence
					       == pcc[i]->pce_opts
							  ->precedence) {
					if (ipaddr_cmp(
						    &pcc[i]->pce_opts->addr,
						    &pcc[best_pce]
							     ->pce_opts->addr)
					    > 0)
						// collide of precedences so
						// compare ip
						best_pce = i;
				} else {
					/* A PCE flagged previous_best is
					 * skipped so it does not become best
					 * again immediately. */
					if (!pcc[i]->previous_best) {
						best_precedence =
							pcc[i]->pce_opts
								->precedence;
						best_pce = i;
					}
				}
			}
		}
	}

	zlog_debug(
		"multi-pce: calculate data : sb (%i) sp (%i) oc (%i) b (%i) ",
		step_0_best, step_0_previous, one_connected_pce, best_pce);

	// Changed of state so ...
	if (step_0_best != best_pce) {
		/* Flag updates are done under the mutex since the flags are
		 * also read when copying PCC info. */
		pthread_mutex_lock(&g_pcc_info_mtx);
		// Calculate previous
		previous_best_pce = step_0_best;
		// Clean state
		if (step_0_best != -1) {
			pcc[step_0_best]->is_best = false;
		}
		if (step_0_previous != -1) {
			pcc[step_0_previous]->previous_best = false;
		}

		// Set previous
		if (previous_best_pce != -1
		    && pcc[previous_best_pce]->status
			       == PCEP_PCC_DISCONNECTED) {
			pcc[previous_best_pce]->previous_best = true;
			zlog_debug("multi-pce: previous best pce (%i) ",
				   previous_best_pce + 1);
		}


		// Set best
		if (best_pce != -1) {
			pcc[best_pce]->is_best = true;
			zlog_debug("multi-pce: best pce (%i) ", best_pce + 1);
		} else {
			/* No best by precedence: fall back to any connected
			 * PCE, then to the first configured one. */
			if (one_connected_pce != -1) {
				best_pce = one_connected_pce;
				pcc[one_connected_pce]->is_best = true;
				zlog_debug(
					"multi-pce: one connected best pce (default) (%i) ",
					one_connected_pce + 1);
			} else {
				for (int i = 0; i < MAX_PCC; i++) {
					if (pcc[i] && pcc[i]->pce_opts) {
						best_pce = i;
						pcc[i]->is_best = true;
						zlog_debug(
							"(disconnected) best pce (default) (%i) ",
							i + 1);
						break;
					}
				}
			}
		}
		pthread_mutex_unlock(&g_pcc_info_mtx);
	}

	return ((best_pce == -1) ? 0 : pcc[best_pce]->id);
}
988
989 int pcep_pcc_get_pcc_id_by_ip_port(struct pcc_state **pcc,
990 struct pce_opts *pce_opts)
991 {
992 if (pcc == NULL) {
993 return 0;
994 }
995
996 for (int idx = 0; idx < MAX_PCC; idx++) {
997 if (pcc[idx]) {
998 if ((ipaddr_cmp((const struct ipaddr *)&pcc[idx]
999 ->pce_opts->addr,
1000 (const struct ipaddr *)&pce_opts->addr)
1001 == 0)
1002 && pcc[idx]->pce_opts->port == pce_opts->port) {
1003 zlog_debug("found pcc_id (%d) idx (%d)",
1004 pcc[idx]->id, idx);
1005 return pcc[idx]->id;
1006 }
1007 }
1008 }
1009 return 0;
1010 }
1011
1012 int pcep_pcc_get_pcc_id_by_idx(struct pcc_state **pcc, int idx)
1013 {
1014 if (pcc == NULL || idx < 0) {
1015 return 0;
1016 }
1017
1018 return pcc[idx] ? pcc[idx]->id : 0;
1019 }
1020
1021 struct pcc_state *pcep_pcc_get_pcc_by_id(struct pcc_state **pcc, int id)
1022 {
1023 if (pcc == NULL || id < 0) {
1024 return NULL;
1025 }
1026
1027 for (int i = 0; i < MAX_PCC; i++) {
1028 if (pcc[i]) {
1029 if (pcc[i]->id == id) {
1030 zlog_debug("found id (%d) pcc_idx (%d)",
1031 pcc[i]->id, i);
1032 return pcc[i];
1033 }
1034 }
1035 }
1036
1037 return NULL;
1038 }
1039
1040 struct pcc_state *pcep_pcc_get_pcc_by_name(struct pcc_state **pcc,
1041 const char *pce_name)
1042 {
1043 if (pcc == NULL || pce_name == NULL) {
1044 return NULL;
1045 }
1046
1047 for (int i = 0; i < MAX_PCC; i++) {
1048 if (pcc[i] == NULL) {
1049 continue;
1050 }
1051
1052 if (strcmp(pcc[i]->pce_opts->pce_name, pce_name) == 0) {
1053 return pcc[i];
1054 }
1055 }
1056
1057 return NULL;
1058 }
1059
1060 int pcep_pcc_get_pcc_idx_by_id(struct pcc_state **pcc, int id)
1061 {
1062 if (pcc == NULL) {
1063 return -1;
1064 }
1065
1066 for (int idx = 0; idx < MAX_PCC; idx++) {
1067 if (pcc[idx]) {
1068 if (pcc[idx]->id == id) {
1069 zlog_debug("found pcc_id (%d) array_idx (%d)",
1070 pcc[idx]->id, idx);
1071 return idx;
1072 }
1073 }
1074 }
1075
1076 return -1;
1077 }
1078
1079 int pcep_pcc_get_free_pcc_idx(struct pcc_state **pcc)
1080 {
1081 assert(pcc != NULL);
1082
1083 for (int idx = 0; idx < MAX_PCC; idx++) {
1084 if (pcc[idx] == NULL) {
1085 zlog_debug("new pcc_idx (%d)", idx);
1086 return idx;
1087 }
1088 }
1089
1090 return -1;
1091 }
1092
1093 int pcep_pcc_get_pcc_id(struct pcc_state *pcc)
1094 {
1095 return ((pcc == NULL) ? 0 : pcc->id);
1096 }
1097
/* Fill pcc_info with a snapshot of the session state of the PCC
 * identified by pcc_info->pce_name; silently does nothing when that
 * PCE name is unknown. */
void pcep_pcc_copy_pcc_info(struct pcc_state **pcc,
			    struct pcep_pcc_info *pcc_info)
{
	struct pcc_state *pcc_state =
		pcep_pcc_get_pcc_by_name(pcc, pcc_info->pce_name);
	if (!pcc_state) {
		return;
	}

	pcc_info->ctrl_state = NULL;
	/* PCC options are optional; copy them only when configured */
	if(pcc_state->pcc_opts){
		pcc_info->msd = pcc_state->pcc_opts->msd;
		pcc_info->pcc_port = pcc_state->pcc_opts->port;
	}
	pcc_info->next_plspid = pcc_state->next_plspid;
	pcc_info->next_reqid = pcc_state->next_reqid;
	pcc_info->status = pcc_state->status;
	pcc_info->pcc_id = pcc_state->id;
	/* is_best/previous_best are written by the best-PCE election under
	 * the same mutex, so hold it while reading them */
	pthread_mutex_lock(&g_pcc_info_mtx);
	pcc_info->is_best_multi_pce = pcc_state->is_best;
	pcc_info->previous_best = pcc_state->previous_best;
	pthread_mutex_unlock(&g_pcc_info_mtx);
	pcc_info->precedence =
		pcc_state->pce_opts ? pcc_state->pce_opts->precedence : 0;
	/* Copy the transport address only if one has been selected */
	if(pcc_state->pcc_addr_tr.ipa_type != IPADDR_NONE){
		memcpy(&pcc_info->pcc_addr, &pcc_state->pcc_addr_tr,
		       sizeof(struct ipaddr));
	}
}
1127
1128
1129 /*------------------ PCEP Message handlers --------------------- */
1130
/* Handle a PCEP OPEN message: parse and record the capabilities the PCE
 * advertised (statefulness, supported objective functions) into the
 * PCC session state. */
void handle_pcep_open(struct ctrl_state *ctrl_state,
		      struct pcc_state *pcc_state, struct pcep_message *msg)
{
	assert(msg->msg_header->type == PCEP_TYPE_OPEN);
	pcep_lib_parse_capabilities(msg, &pcc_state->caps);
	/* Third %s is the formatted OF set; it is only meaningful when the
	 * second argument selected "supported objective functions are " */
	PCEP_DEBUG("PCE capabilities: %s, %s%s",
		   pcc_state->caps.is_stateful ? "stateful" : "stateless",
		   pcc_state->caps.supported_ofs_are_known
			   ? (pcc_state->caps.supported_ofs == 0
				      ? "no objective functions supported"
				      : "supported objective functions are ")
			   : "supported objective functions are unknown",
		   format_objfun_set(pcc_state->caps.supported_ofs));
}
1145
1146 void handle_pcep_message(struct ctrl_state *ctrl_state,
1147 struct pcc_state *pcc_state, struct pcep_message *msg)
1148 {
1149 if (pcc_state->status != PCEP_PCC_OPERATING)
1150 return;
1151
1152 switch (msg->msg_header->type) {
1153 case PCEP_TYPE_INITIATE:
1154 handle_pcep_lsp_initiate(ctrl_state, pcc_state, msg);
1155 break;
1156 case PCEP_TYPE_UPDATE:
1157 handle_pcep_lsp_update(ctrl_state, pcc_state, msg);
1158 break;
1159 case PCEP_TYPE_PCREP:
1160 handle_pcep_comp_reply(ctrl_state, pcc_state, msg);
1161 break;
1162 default:
1163 flog_warn(EC_PATH_PCEP_UNEXPECTED_PCEP_MESSAGE,
1164 "Unexpected pcep message from pceplib: %s",
1165 format_pcep_message(msg));
1166 break;
1167 }
1168 }
1169
1170 void handle_pcep_lsp_update(struct ctrl_state *ctrl_state,
1171 struct pcc_state *pcc_state,
1172 struct pcep_message *msg)
1173 {
1174 struct path *path;
1175 path = pcep_lib_parse_path(msg);
1176 lookup_nbkey(pcc_state, path);
1177 pcep_thread_refine_path(ctrl_state, pcc_state->id,
1178 &continue_pcep_lsp_update, path, NULL);
1179 }
1180
1181 void continue_pcep_lsp_update(struct ctrl_state *ctrl_state,
1182 struct pcc_state *pcc_state, struct path *path,
1183 void *payload)
1184 {
1185 char err[MAX_ERROR_MSG_SIZE] = {0};
1186
1187 specialize_incoming_path(pcc_state, path);
1188 PCEP_DEBUG("%s Received LSP update", pcc_state->tag);
1189 PCEP_DEBUG_PATH("%s", format_path(path));
1190
1191 if (validate_incoming_path(pcc_state, path, err, sizeof(err)))
1192 pcep_thread_update_path(ctrl_state, pcc_state->id, path);
1193 else {
1194 /* FIXME: Monitor the amount of errors from the PCE and
1195 * possibly disconnect and blacklist */
1196 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
1197 "Unsupported PCEP protocol feature: %s", err);
1198 pcep_free_path(path);
1199 }
1200 }
1201
/* Handle a PCInitiate message: PCE-initiated LSPs are not supported,
 * so always answer with an "invalid operation" error. */
void handle_pcep_lsp_initiate(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state,
			      struct pcep_message *msg)
{
	PCEP_DEBUG("%s Received LSP initiate, not supported yet",
		   pcc_state->tag);

	/* TODO when we support both PCC and PCE initiated sessions,
	 * we should first check the session type before
	 * rejecting this message. */
	send_pcep_error(pcc_state, PCEP_ERRT_INVALID_OPERATION,
			PCEP_ERRV_LSP_NOT_PCE_INITIATED);
}
1215
1216 void handle_pcep_comp_reply(struct ctrl_state *ctrl_state,
1217 struct pcc_state *pcc_state,
1218 struct pcep_message *msg)
1219 {
1220 char err[MAX_ERROR_MSG_SIZE] = "";
1221 struct req_entry *req;
1222 struct path *path;
1223
1224 path = pcep_lib_parse_path(msg);
1225 req = pop_req(pcc_state, path->req_id);
1226 if (req == NULL) {
1227 /* TODO: check the rate of bad computation reply and close
1228 * the connection if more that a given rate.
1229 */
1230 PCEP_DEBUG(
1231 "%s Received computation reply for unknown request %d",
1232 pcc_state->tag, path->req_id);
1233 PCEP_DEBUG_PATH("%s", format_path(path));
1234 send_pcep_error(pcc_state, PCEP_ERRT_UNKNOWN_REQ_REF,
1235 PCEP_ERRV_UNASSIGNED);
1236 return;
1237 }
1238
1239 /* Cancel the computation request timeout */
1240 pcep_thread_cancel_timer(&req->t_retry);
1241
1242 /* Transfer relevent metadata from the request to the response */
1243 path->nbkey = req->path->nbkey;
1244 path->plsp_id = req->path->plsp_id;
1245 path->type = req->path->type;
1246 path->name = XSTRDUP(MTYPE_PCEP, req->path->name);
1247 specialize_incoming_path(pcc_state, path);
1248
1249 PCEP_DEBUG("%s Received computation reply %d (no-path: %s)",
1250 pcc_state->tag, path->req_id,
1251 path->no_path ? "true" : "false");
1252 PCEP_DEBUG_PATH("%s", format_path(path));
1253
1254 if (path->no_path) {
1255 PCEP_DEBUG("%s Computation for path %s did not find any result",
1256 pcc_state->tag, path->name);
1257 } else if (validate_incoming_path(pcc_state, path, err, sizeof(err))) {
1258 /* Updating a dynamic path will automatically delegate it */
1259 pcep_thread_update_path(ctrl_state, pcc_state->id, path);
1260 free_req_entry(req);
1261 return;
1262 } else {
1263 /* FIXME: Monitor the amount of errors from the PCE and
1264 * possibly disconnect and blacklist */
1265 flog_warn(EC_PATH_PCEP_UNSUPPORTED_PCEP_FEATURE,
1266 "Unsupported PCEP protocol feature: %s", err);
1267 }
1268
1269 pcep_free_path(path);
1270
1271 /* Delegate the path regardless of the outcome */
1272 /* TODO: For now we are using the path from the request, when
1273 * pathd API is thread safe, we could get a new path */
1274 if (pcc_state->caps.is_stateful) {
1275 PCEP_DEBUG("%s Delegating undefined dynamic path %s to PCE %s",
1276 pcc_state->tag, req->path->name,
1277 pcc_state->originator);
1278 path = pcep_copy_path(req->path);
1279 path->is_delegated = true;
1280 send_report(pcc_state, path);
1281 pcep_free_path(path);
1282 }
1283
1284 free_req_entry(req);
1285 }
1286
1287
1288 /* ------------ Internal Functions ------------ */
1289
/* Human-readable name of an ipaddr family, for debug output. */
const char *ipaddr_type_name(struct ipaddr *addr)
{
	if (IS_IPADDR_V4(addr))
		return "IPv4";

	return IS_IPADDR_V6(addr) ? "IPv6" : "undefined";
}
1298
1299 bool filter_path(struct pcc_state *pcc_state, struct path *path)
1300 {
1301 return (IS_IPADDR_V4(&path->nbkey.endpoint)
1302 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4))
1303 || (IS_IPADDR_V6(&path->nbkey.endpoint)
1304 && CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
1305 }
1306
1307 void select_pcc_addresses(struct pcc_state *pcc_state)
1308 {
1309 /* If no IPv4 address was specified, try to get one from zebra */
1310 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1311 if (get_ipv4_router_id(&pcc_state->pcc_addr_v4)) {
1312 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4);
1313 }
1314 }
1315
1316 /* If no IPv6 address was specified, try to get one from zebra */
1317 if (!CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1318 if (get_ipv6_router_id(&pcc_state->pcc_addr_v6)) {
1319 SET_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6);
1320 }
1321 }
1322 }
1323
1324 void select_transport_address(struct pcc_state *pcc_state)
1325 {
1326 struct ipaddr *taddr = &pcc_state->pcc_addr_tr;
1327
1328 select_pcc_addresses(pcc_state);
1329
1330 taddr->ipa_type = IPADDR_NONE;
1331
1332 /* Select a transport source address in function of the configured PCE
1333 * address */
1334 if (IS_IPADDR_V4(&pcc_state->pce_opts->addr)) {
1335 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4)) {
1336 taddr->ipaddr_v4 = pcc_state->pcc_addr_v4;
1337 taddr->ipa_type = IPADDR_V4;
1338 }
1339 } else {
1340 if (CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6)) {
1341 taddr->ipaddr_v6 = pcc_state->pcc_addr_v6;
1342 taddr->ipa_type = IPADDR_V6;
1343 }
1344 }
1345 }
1346
/* Rebuild the session's log tag: "PCE-ADDR:PORT (ID)" when PCE options
 * are known, just "(ID)" otherwise. */
void update_tag(struct pcc_state *pcc_state)
{
	if (pcc_state->pce_opts != NULL) {
		assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
		if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   "%pI6:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v6,
				   pcc_state->pce_opts->port, pcc_state->id);
		} else {
			snprintfrr(pcc_state->tag, sizeof(pcc_state->tag),
				   "%pI4:%i (%u)",
				   &pcc_state->pce_opts->addr.ipaddr_v4,
				   pcc_state->pce_opts->port, pcc_state->id);
		}
	} else {
		snprintfrr(pcc_state->tag, sizeof(pcc_state->tag), "(%u)",
			   pcc_state->id);
	}
}
1367
1368 void update_originator(struct pcc_state *pcc_state)
1369 {
1370 char *originator;
1371 if (pcc_state->originator != NULL) {
1372 XFREE(MTYPE_PCEP, pcc_state->originator);
1373 pcc_state->originator = NULL;
1374 }
1375 if (pcc_state->pce_opts == NULL)
1376 return;
1377 originator = XCALLOC(MTYPE_PCEP, 52);
1378 assert(!IS_IPADDR_NONE(&pcc_state->pce_opts->addr));
1379 if (IS_IPADDR_V6(&pcc_state->pce_opts->addr)) {
1380 snprintfrr(originator, 52, "%pI6:%i",
1381 &pcc_state->pce_opts->addr.ipaddr_v6,
1382 pcc_state->pce_opts->port);
1383 } else {
1384 snprintfrr(originator, 52, "%pI4:%i",
1385 &pcc_state->pce_opts->addr.ipaddr_v4,
1386 pcc_state->pce_opts->port);
1387 }
1388 pcc_state->originator = originator;
1389 }
1390
/* Schedule a reconnection attempt for this session; the controller
 * applies its own backoff based on retry_count.  On the very first
 * retry, also schedule a best-PCE re-election after the configured
 * delegation timeout. */
void schedule_reconnect(struct ctrl_state *ctrl_state,
			struct pcc_state *pcc_state)
{
	pcc_state->retry_count++;
	pcep_thread_schedule_reconnect(ctrl_state, pcc_state->id,
				       pcc_state->retry_count,
				       &pcc_state->t_reconnect);
	/* Only arm the best-PCE sync timer once per disconnection */
	if (pcc_state->retry_count == 1) {
		pcep_thread_schedule_sync_best_pce(
			ctrl_state, pcc_state->id,
			pcc_state->pce_opts->config_opts
				.delegation_timeout_seconds,
			&pcc_state->t_update_best);
	}
}
1406
/* Arm the session timeout timer with the configured interval, unless
 * another PCE is still connected. */
void schedule_session_timeout(struct ctrl_state *ctrl_state,
			      struct pcc_state *pcc_state)
{
	/* No need to schedule timeout if multiple PCEs are connected */
	/* NOTE(review): any non-zero connected count skips the timer —
	 * confirm get_pce_count_connected() excludes this session, otherwise
	 * a single still-connected PCE also suppresses the timeout. */
	if (get_pce_count_connected(ctrl_state->pcc)) {
		PCEP_DEBUG_PCEP(
			"schedule_session_timeout not setting timer for multi-pce mode");

		return;
	}

	pcep_thread_schedule_session_timeout(
		ctrl_state, pcep_pcc_get_pcc_id(pcc_state),
		pcc_state->pce_opts->config_opts
			.session_timeout_inteval_seconds,
		&pcc_state->t_session_timeout);
}
1424
/* Cancel the session timeout timer if one is currently armed. */
void cancel_session_timeout(struct ctrl_state *ctrl_state,
			    struct pcc_state *pcc_state)
{
	/* Nothing to cancel when no session timeout timer is armed
	 * (the previous comment here was copy-pasted from
	 * schedule_session_timeout and did not match the check) */
	if (pcc_state->t_session_timeout == NULL) {
		PCEP_DEBUG_PCEP("cancel_session_timeout timer thread NULL");
		return;
	}

	PCEP_DEBUG_PCEP("Cancel session_timeout timer");
	pcep_thread_cancel_timer(&pcc_state->t_session_timeout);
	pcc_state->t_session_timeout = NULL;
}
1438
1439 void send_pcep_message(struct pcc_state *pcc_state, struct pcep_message *msg)
1440 {
1441 if (pcc_state->sess != NULL) {
1442 PCEP_DEBUG_PCEP("%s Sending PCEP message: %s", pcc_state->tag,
1443 format_pcep_message(msg));
1444 send_message(pcc_state->sess, msg, true);
1445 }
1446 }
1447
1448 void send_pcep_error(struct pcc_state *pcc_state,
1449 enum pcep_error_type error_type,
1450 enum pcep_error_value error_value)
1451 {
1452 struct pcep_message *msg;
1453 PCEP_DEBUG("%s Sending PCEP error type %s (%d) value %s (%d)",
1454 pcc_state->tag, pcep_error_type_name(error_type), error_type,
1455 pcep_error_value_name(error_type, error_value), error_value);
1456 msg = pcep_lib_format_error(error_type, error_value);
1457 send_pcep_message(pcc_state, msg);
1458 }
1459
1460 void send_report(struct pcc_state *pcc_state, struct path *path)
1461 {
1462 struct pcep_message *report;
1463
1464 path->req_id = 0;
1465 specialize_outgoing_path(pcc_state, path);
1466 PCEP_DEBUG_PATH("%s Sending path %s: %s", pcc_state->tag, path->name,
1467 format_path(path));
1468 report = pcep_lib_format_report(&pcc_state->caps, path);
1469 send_pcep_message(pcc_state, report);
1470 }
1471
/* Updates the path for the PCE, updating the delegation and creation flags */
void specialize_outgoing_path(struct pcc_state *pcc_state, struct path *path)
{
	bool is_delegated = false;
	bool was_created = false;

	/* Resolve (or allocate) the PLSP-ID from the northbound key */
	lookup_plspid(pcc_state, path);

	set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
	path->sender = pcc_state->pcc_addr_tr;

	/* TODO: When the pathd API have a way to mark a path as
	 * delegated, use it instead of considering all dynamic path
	 * delegated. We need to disable the originator check for now,
	 * because path could be delegated without having any originator yet */
	// if ((path->originator == NULL)
	//     || (strcmp(path->originator, pcc_state->originator) == 0)) {
	// 	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC)
	// 			&& (path->first_hop != NULL);
	// 	/* it seems the PCE consider updating an LSP a creation ?!?
	// 	at least Cisco does... */
	// 	was_created = path->update_origin == SRTE_ORIGIN_PCEP;
	// }
	/* Interim rule: every dynamic path counts as delegated, and any
	 * path last updated through PCEP counts as PCE-created */
	is_delegated = (path->type == SRTE_CANDIDATE_TYPE_DYNAMIC);
	was_created = path->update_origin == SRTE_ORIGIN_PCEP;

	path->pcc_id = pcc_state->id;
	/* Only delegate/activate through the currently-best PCE */
	path->go_active = is_delegated && pcc_state->is_best;
	path->is_delegated = is_delegated && pcc_state->is_best;
	path->was_created = was_created;
}
1503
1504 /* Updates the path for the PCC */
1505 void specialize_incoming_path(struct pcc_state *pcc_state, struct path *path)
1506 {
1507 set_pcc_address(pcc_state, &path->nbkey, &path->pcc_addr);
1508 path->sender = pcc_state->pce_opts->addr;
1509 path->pcc_id = pcc_state->id;
1510 path->update_origin = SRTE_ORIGIN_PCEP;
1511 path->originator = XSTRDUP(MTYPE_PCEP, pcc_state->originator);
1512 }
1513
1514 /* Ensure the path can be handled by the PCC and if not, sends an error */
1515 bool validate_incoming_path(struct pcc_state *pcc_state, struct path *path,
1516 char *errbuff, size_t buffsize)
1517 {
1518 struct path_hop *hop;
1519 enum pcep_error_type err_type = 0;
1520 enum pcep_error_value err_value = PCEP_ERRV_UNASSIGNED;
1521
1522 for (hop = path->first_hop; hop != NULL; hop = hop->next) {
1523 /* Hops without SID are not supported */
1524 if (!hop->has_sid) {
1525 snprintfrr(errbuff, buffsize, "SR segment without SID");
1526 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1527 err_value = PCEP_ERRV_DISJOINTED_CONF_TLV_MISSING;
1528 break;
1529 }
1530 /* Hops with non-MPLS SID are not supported */
1531 if (!hop->is_mpls) {
1532 snprintfrr(errbuff, buffsize,
1533 "SR segment with non-MPLS SID");
1534 err_type = PCEP_ERRT_RECEPTION_OF_INV_OBJECT;
1535 err_value = PCEP_ERRV_UNSUPPORTED_NAI;
1536 break;
1537 }
1538 }
1539
1540 if (err_type != 0) {
1541 send_pcep_error(pcc_state, err_type, err_value);
1542 return false;
1543 }
1544
1545 return true;
1546 }
1547
/* Send a PCReq (computation request) to the PCE for the given pending
 * request entry, and arm its retry/timeout timer.  Requests are only
 * sent through the currently-best PCE; a request whose retry timer is
 * already armed is skipped. */
void send_comp_request(struct ctrl_state *ctrl_state,
		       struct pcc_state *pcc_state, struct req_entry *req)
{
	assert(req != NULL);

	/* Already in flight: its timeout timer is armed */
	if (req->t_retry)
		return;

	/* Sanity: the request must be registered in both the request tree
	 * and the reqid map before being sent */
	assert(req->path != NULL);
	assert(req->path->req_id > 0);
	assert(RB_FIND(req_entry_head, &pcc_state->requests, req) == req);
	assert(lookup_reqid(pcc_state, req->path) == req->path->req_id);

	int timeout;
	struct pcep_message *msg;

	if (!pcc_state->is_best) {
		return;
	}
	/* TODO: Add a timer to retry the computation request ? */

	specialize_outgoing_path(pcc_state, req->path);

	PCEP_DEBUG(
		"%s Sending computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Computation request path %s: %s", pcc_state->tag,
			req->path->name, format_path(req->path));

	msg = pcep_lib_format_request(&pcc_state->caps, req->path);
	send_pcep_message(pcc_state, msg);
	req->was_sent = true;

	/* TODO: Enable this back when the pcep config changes are merged back
	 */
	// timeout = pcc_state->pce_opts->config_opts.pcep_request_time_seconds;
	/* Hard-coded 30s until the config option above is re-enabled */
	timeout = 30;
	pcep_thread_schedule_timeout(ctrl_state, pcc_state->id,
				     TO_COMPUTATION_REQUEST, timeout,
				     (void *)req, &req->t_retry);
}
1590
/* Cancel and free every pending computation request of this session. */
void cancel_comp_requests(struct ctrl_state *ctrl_state,
			  struct pcc_state *pcc_state)
{
	struct req_entry *req, *safe_req;

	/* _SAFE variant: entries are removed while iterating */
	RB_FOREACH_SAFE (req, req_entry_head, &pcc_state->requests, safe_req) {
		cancel_comp_request(ctrl_state, pcc_state, req);
		RB_REMOVE(req_entry_head, &pcc_state->requests, req);
		remove_reqid_mapping(pcc_state, req->path);
		free_req_entry(req);
	}
}
1603
/* Cancel a single pending computation request: stop its retry timer and
 * send a request-cancellation message to the PCE.  Does not remove or
 * free the entry; the caller owns that. */
void cancel_comp_request(struct ctrl_state *ctrl_state,
			 struct pcc_state *pcc_state, struct req_entry *req)
{
	struct pcep_message *msg;

	if (req->was_sent) {
		/* TODO: Send a computation request cancelation
		 * notification to the PCE */
		pcep_thread_cancel_timer(&req->t_retry);
	}

	PCEP_DEBUG(
		"%s Canceling computation request %d for path %s to %pIA (retry %d)",
		pcc_state->tag, req->path->req_id, req->path->name,
		&req->path->nbkey.endpoint, req->retry_count);
	PCEP_DEBUG_PATH("%s Canceled computation request path %s: %s",
			pcc_state->tag, req->path->name,
			format_path(req->path));

	/* NOTE(review): the cancellation message is sent even when the
	 * request was never sent (was_sent == false) — confirm this is
	 * intentional */
	msg = pcep_lib_format_request_cancelled(req->path->req_id);
	send_pcep_message(pcc_state, msg);
}
1626
/* Set addr to the PCC source address matching the address family of the
 * path endpoint in nbkey, refreshing the addresses from zebra first.
 * Asserts that an address of the required family is available. */
void set_pcc_address(struct pcc_state *pcc_state, struct lsp_nb_key *nbkey,
		     struct ipaddr *addr)
{
	select_pcc_addresses(pcc_state);
	if (IS_IPADDR_V6(&nbkey->endpoint)) {
		assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV6));
		addr->ipa_type = IPADDR_V6;
		addr->ipaddr_v6 = pcc_state->pcc_addr_v6;
	} else if (IS_IPADDR_V4(&nbkey->endpoint)) {
		assert(CHECK_FLAG(pcc_state->flags, F_PCC_STATE_HAS_IPV4));
		addr->ipa_type = IPADDR_V4;
		addr->ipaddr_v4 = pcc_state->pcc_addr_v4;
	} else {
		addr->ipa_type = IPADDR_NONE;
	}
}
1643
1644
1645 /* ------------ Data Structure Helper Functions ------------ */
1646
/* Resolve path->plsp_id from the path's northbound key, allocating a new
 * PLSP-ID (and recording the mapping in both directions) on first use.
 * Paths without a color (== 0) are left untouched. */
void lookup_plspid(struct pcc_state *pcc_state, struct path *path)
{
	struct plspid_map_data key, *plspid_mapping;
	struct nbkey_map_data *nbkey_mapping;

	if (path->nbkey.color != 0) {
		key.nbkey = path->nbkey;
		plspid_mapping = plspid_map_find(&pcc_state->plspid_map, &key);
		if (plspid_mapping == NULL) {
			plspid_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*plspid_mapping));
			plspid_mapping->nbkey = key.nbkey;
			plspid_mapping->plspid = pcc_state->next_plspid;
			plspid_map_add(&pcc_state->plspid_map, plspid_mapping);
			/* Keep the reverse (plspid -> nbkey) mapping in sync */
			nbkey_mapping =
				XCALLOC(MTYPE_PCEP, sizeof(*nbkey_mapping));
			nbkey_mapping->nbkey = key.nbkey;
			nbkey_mapping->plspid = pcc_state->next_plspid;
			nbkey_map_add(&pcc_state->nbkey_map, nbkey_mapping);
			pcc_state->next_plspid++;
			// FIXME: Send some error to the PCE instead of crashing
			/* 1048576 == 2^20: PLSP-ID is a 20-bit field */
			assert(pcc_state->next_plspid <= 1048576);
		}
		path->plsp_id = plspid_mapping->plspid;
	}
}
1673
/* Resolve path->nbkey from the path's PLSP-ID using the reverse mapping
 * populated by lookup_plspid().  Asserts the PLSP-ID is known. */
void lookup_nbkey(struct pcc_state *pcc_state, struct path *path)
{
	struct nbkey_map_data key, *mapping;
	// TODO: Should give an error to the PCE instead of crashing
	assert(path->plsp_id != 0);
	key.plspid = path->plsp_id;
	mapping = nbkey_map_find(&pcc_state->nbkey_map, &key);
	assert(mapping != NULL);
	path->nbkey = mapping->nbkey;
}
1684
/* Free a request entry and the path it owns. */
void free_req_entry(struct req_entry *req)
{
	pcep_free_path(req->path);
	XFREE(MTYPE_PCEP, req);
}
1690
1691 struct req_entry *push_new_req(struct pcc_state *pcc_state, struct path *path)
1692 {
1693 struct req_entry *req;
1694
1695 req = XCALLOC(MTYPE_PCEP, sizeof(*req));
1696 req->retry_count = 0;
1697 req->path = pcep_copy_path(path);
1698 repush_req(pcc_state, req);
1699
1700 return req;
1701 }
1702
1703 void repush_req(struct pcc_state *pcc_state, struct req_entry *req)
1704 {
1705 uint32_t reqid = pcc_state->next_reqid;
1706 void *res;
1707
1708 req->was_sent = false;
1709 req->path->req_id = reqid;
1710 res = RB_INSERT(req_entry_head, &pcc_state->requests, req);
1711 assert(res == NULL);
1712 assert(add_reqid_mapping(pcc_state, req->path) == true);
1713
1714 pcc_state->next_reqid += 1;
1715 /* Wrapping is allowed, but 0 is not a valid id */
1716 if (pcc_state->next_reqid == 0)
1717 pcc_state->next_reqid = 1;
1718 }
1719
1720 struct req_entry *pop_req(struct pcc_state *pcc_state, uint32_t reqid)
1721 {
1722 struct path path = {.req_id = reqid};
1723 struct req_entry key = {.path = &path};
1724 struct req_entry *req;
1725
1726 req = RB_FIND(req_entry_head, &pcc_state->requests, &key);
1727 if (req == NULL)
1728 return NULL;
1729 RB_REMOVE(req_entry_head, &pcc_state->requests, req);
1730 remove_reqid_mapping(pcc_state, req->path);
1731
1732 return req;
1733 }
1734
1735 bool add_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1736 {
1737 struct req_map_data *mapping;
1738 mapping = XCALLOC(MTYPE_PCEP, sizeof(*mapping));
1739 mapping->nbkey = path->nbkey;
1740 mapping->reqid = path->req_id;
1741 if (req_map_add(&pcc_state->req_map, mapping) != NULL) {
1742 XFREE(MTYPE_PCEP, mapping);
1743 return false;
1744 }
1745 return true;
1746 }
1747
1748 void remove_reqid_mapping(struct pcc_state *pcc_state, struct path *path)
1749 {
1750 struct req_map_data key, *mapping;
1751 key.nbkey = path->nbkey;
1752 mapping = req_map_find(&pcc_state->req_map, &key);
1753 if (mapping != NULL) {
1754 req_map_del(&pcc_state->req_map, mapping);
1755 XFREE(MTYPE_PCEP, mapping);
1756 }
1757 }
1758
1759 uint32_t lookup_reqid(struct pcc_state *pcc_state, struct path *path)
1760 {
1761 struct req_map_data key, *mapping;
1762 key.nbkey = path->nbkey;
1763 mapping = req_map_find(&pcc_state->req_map, &key);
1764 if (mapping != NULL)
1765 return mapping->reqid;
1766 return 0;
1767 }
1768
/* True when a computation request is pending for the path's nbkey
 * (i.e. a reqid mapping exists). */
bool has_pending_req_for(struct pcc_state *pcc_state, struct path *path)
{
	return lookup_reqid(pcc_state, path) != 0;
}
1773
1774
1775 /* ------------ Data Structure Callbacks ------------ */
1776
/* Three-way compare helper: return -1/1 from the enclosing function when
 * A and B differ, fall through when equal.  Arguments are parenthesized
 * and the body is wrapped in do/while(0) so the macro behaves as a
 * single statement (safe in unbraced if/else); note A and B are still
 * evaluated more than once, so only pass side-effect-free expressions. */
#define CMP_RETURN(A, B)                                                       \
	do {                                                                   \
		if ((A) != (B))                                                \
			return ((A) < (B)) ? -1 : 1;                           \
	} while (0)
1780
1781 static uint32_t hash_nbkey(const struct lsp_nb_key *nbkey)
1782 {
1783 uint32_t hash;
1784 hash = jhash_2words(nbkey->color, nbkey->preference, 0x55aa5a5a);
1785 switch (nbkey->endpoint.ipa_type) {
1786 case IPADDR_V4:
1787 return jhash(&nbkey->endpoint.ipaddr_v4,
1788 sizeof(nbkey->endpoint.ipaddr_v4), hash);
1789 case IPADDR_V6:
1790 return jhash(&nbkey->endpoint.ipaddr_v6,
1791 sizeof(nbkey->endpoint.ipaddr_v6), hash);
1792 default:
1793 return hash;
1794 }
1795 }
1796
1797 static int cmp_nbkey(const struct lsp_nb_key *a, const struct lsp_nb_key *b)
1798 {
1799 CMP_RETURN(a->color, b->color);
1800 int cmp = ipaddr_cmp(&a->endpoint, &b->endpoint);
1801 if (cmp != 0)
1802 return cmp;
1803 CMP_RETURN(a->preference, b->preference);
1804 return 0;
1805 }
1806
/* Comparator for the nbkey -> plspid map: keyed on the nbkey only. */
int plspid_map_cmp(const struct plspid_map_data *a,
		   const struct plspid_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1812
/* Hash function for the nbkey -> plspid map. */
uint32_t plspid_map_hash(const struct plspid_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}
1817
1818 int nbkey_map_cmp(const struct nbkey_map_data *a,
1819 const struct nbkey_map_data *b)
1820 {
1821 CMP_RETURN(a->plspid, b->plspid);
1822 return 0;
1823 }
1824
/* Hash function for the plspid -> nbkey map: the PLSP-ID itself. */
uint32_t nbkey_map_hash(const struct nbkey_map_data *e)
{
	return e->plspid;
}
1829
/* Comparator for the nbkey -> reqid map: keyed on the nbkey only. */
int req_map_cmp(const struct req_map_data *a, const struct req_map_data *b)
{
	return cmp_nbkey(&a->nbkey, &b->nbkey);
}
1834
/* Hash function for the nbkey -> reqid map. */
uint32_t req_map_hash(const struct req_map_data *e)
{
	return hash_nbkey(&e->nbkey);
}