]> git.proxmox.com Git - mirror_frr.git/blob - lib/northbound.c
*: require semicolon after DEFINE_MTYPE & co
[mirror_frr.git] / lib / northbound.c
1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
33 #include "frrstr.h"
34
35 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node");
36 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration");
37 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry");
38
/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;

/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;

/* Management lock for the running configuration. */
static struct {
	/* Mutex protecting this structure. */
	pthread_mutex_t mtx;

	/* Actual lock. */
	bool locked;

	/* Northbound client who owns this lock. */
	enum nb_client owner_client;

	/* Northbound user who owns this lock. */
	const void *owner_user;
} running_config_mgmt_lock;

/* Knob controlling whether configuration transactions are recorded in the
 * transactions database (see nb_db_transaction_save()). */
static bool nb_db_enabled;
/*
 * Global lock used to prevent multiple configuration transactions from
 * happening concurrently.
 */
static bool transaction_in_progress;
67
68 static int nb_callback_pre_validate(struct nb_context *context,
69 const struct nb_node *nb_node,
70 const struct lyd_node *dnode, char *errmsg,
71 size_t errmsg_len);
72 static int nb_callback_configuration(struct nb_context *context,
73 const enum nb_event event,
74 struct nb_config_change *change,
75 char *errmsg, size_t errmsg_len);
76 static struct nb_transaction *
77 nb_transaction_new(struct nb_context *context, struct nb_config *config,
78 struct nb_config_cbs *changes, const char *comment,
79 char *errmsg, size_t errmsg_len);
80 static void nb_transaction_free(struct nb_transaction *transaction);
81 static int nb_transaction_process(enum nb_event event,
82 struct nb_transaction *transaction,
83 char *errmsg, size_t errmsg_len);
84 static void nb_transaction_apply_finish(struct nb_transaction *transaction,
85 char *errmsg, size_t errmsg_len);
86 static int nb_oper_data_iter_node(const struct lys_node *snode,
87 const char *xpath, const void *list_entry,
88 const struct yang_list_keys *list_keys,
89 struct yang_translator *translator,
90 bool first, uint32_t flags,
91 nb_oper_data_cb cb, void *arg);
92
93 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
94 {
95 bool *config_only = arg;
96
97 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
98 *config_only = false;
99 return YANG_ITER_STOP;
100 }
101
102 return YANG_ITER_CONTINUE;
103 }
104
/*
 * yang_snodes_iterate() callback: allocate a northbound node for the given
 * schema node, initialize it (xpath, default priority, parent links, flags)
 * and attach it to the schema node through its private pointer.
 */
static int nb_node_new_cb(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node;
	struct lys_node *sparent, *sparent_list;

	nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
	yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
			    sizeof(nb_node->xpath));
	nb_node->priority = NB_DFLT_PRIORITY;
	sparent = yang_snode_real_parent(snode);
	if (sparent)
		nb_node->parent = sparent->priv;
	sparent_list = yang_snode_parent_list(snode);
	if (sparent_list)
		nb_node->parent_list = sparent_list->priv;

	/* Set flags. */
	if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		bool config_only = true;

		/* Flag subtrees that contain configuration data only. */
		(void)yang_snodes_iterate_subtree(snode, NULL,
						  nb_node_check_config_only, 0,
						  &config_only);
		if (config_only)
			SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
	}
	if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
		struct lys_node_list *slist;

		slist = (struct lys_node_list *)snode;
		if (slist->keys_size == 0)
			SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
	}

	/*
	 * Link the northbound node and the libyang schema node with one
	 * another.
	 */
	nb_node->snode = snode;
	assert(snode->priv == NULL);
	lys_set_private(snode, nb_node);

	return YANG_ITER_CONTINUE;
}
149
150 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
151 {
152 struct nb_node *nb_node;
153
154 nb_node = snode->priv;
155 if (nb_node) {
156 lys_set_private(snode, NULL);
157 XFREE(MTYPE_NB_NODE, nb_node);
158 }
159
160 return YANG_ITER_CONTINUE;
161 }
162
/* Create a northbound node for every schema node of every loaded module. */
void nb_nodes_create(void)
{
	yang_snodes_iterate(NULL, nb_node_new_cb, 0, NULL);
}

/* Free all northbound nodes and detach them from their schema nodes. */
void nb_nodes_delete(void)
{
	yang_snodes_iterate(NULL, nb_node_del_cb, 0, NULL);
}
172
173 struct nb_node *nb_node_find(const char *xpath)
174 {
175 const struct lys_node *snode;
176
177 /*
178 * Use libyang to find the schema node associated to the xpath and get
179 * the northbound node from there (snode private pointer).
180 */
181 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
182 if (!snode)
183 return NULL;
184
185 return snode->priv;
186 }
187
188 void nb_node_set_dependency_cbs(const char *dependency_xpath,
189 const char *dependant_xpath,
190 struct nb_dependency_callbacks *cbs)
191 {
192 struct nb_node *dependency = nb_node_find(dependency_xpath);
193 struct nb_node *dependant = nb_node_find(dependant_xpath);
194
195 if (!dependency || !dependant)
196 return;
197
198 dependency->dep_cbs.get_dependant_xpath = cbs->get_dependant_xpath;
199 dependant->dep_cbs.get_dependency_xpath = cbs->get_dependency_xpath;
200 }
201
202 bool nb_node_has_dependency(struct nb_node *node)
203 {
204 return node->dep_cbs.get_dependency_xpath != NULL;
205 }
206
/*
 * Validate a single callback of a northbound node: warn when an unneeded
 * callback is implemented, and return 1 (error) when a mandatory callback is
 * missing. 'callback_implemented' is non-zero when the callback pointer is
 * set; 'optional' marks callbacks that are never mandatory.
 */
static int nb_node_validate_cb(const struct nb_node *nb_node,
			       enum nb_operation operation,
			       int callback_implemented, bool optional)
{
	bool valid;

	valid = nb_operation_is_valid(operation, nb_node->snode);

	/*
	 * Add an exception for operational data callbacks. A rw list usually
	 * doesn't need any associated operational data callbacks. But if this
	 * rw list is augmented by another module which adds state nodes under
	 * it, then this list will need to have the 'get_next()', 'get_keys()'
	 * and 'lookup_entry()' callbacks. As such, never log a warning when
	 * these callbacks are implemented when they are not needed, since this
	 * depends on context (e.g. some daemons might augment "frr-interface"
	 * while others don't).
	 */
	if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
	    && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
		flog_warn(EC_LIB_NB_CB_UNNEEDED,
			  "unneeded '%s' callback for '%s'",
			  nb_operation_name(operation), nb_node->xpath);

	if (!optional && valid && !callback_implemented) {
		flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
			 nb_operation_name(operation), nb_node->xpath);
		return 1;
	}

	return 0;
}
239
240 /*
241 * Check if the required callbacks were implemented for the given northbound
242 * node.
243 */
244 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
245
246 {
247 unsigned int error = 0;
248
249 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
250 !!nb_node->cbs.create, false);
251 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
252 !!nb_node->cbs.modify, false);
253 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
254 !!nb_node->cbs.destroy, false);
255 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
256 false);
257 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
258 !!nb_node->cbs.pre_validate, true);
259 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
260 !!nb_node->cbs.apply_finish, true);
261 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
262 !!nb_node->cbs.get_elem, false);
263 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
264 !!nb_node->cbs.get_next, false);
265 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
266 !!nb_node->cbs.get_keys, false);
267 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
268 !!nb_node->cbs.lookup_entry, false);
269 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
270 false);
271
272 return error;
273 }
274
275 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
276 {
277 /* Top-level nodes can have any priority. */
278 if (!nb_node->parent)
279 return 0;
280
281 if (nb_node->priority < nb_node->parent->priority) {
282 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
283 "node has higher priority than its parent [xpath %s]",
284 nb_node->xpath);
285 return 1;
286 }
287
288 return 0;
289 }
290
291 static int nb_node_validate(const struct lys_node *snode, void *arg)
292 {
293 struct nb_node *nb_node = snode->priv;
294 unsigned int *errors = arg;
295
296 /* Validate callbacks and priority. */
297 if (nb_node) {
298 *errors += nb_node_validate_cbs(nb_node);
299 *errors += nb_node_validate_priority(nb_node);
300 }
301
302 return YANG_ITER_CONTINUE;
303 }
304
305 struct nb_config *nb_config_new(struct lyd_node *dnode)
306 {
307 struct nb_config *config;
308
309 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
310 if (dnode)
311 config->dnode = dnode;
312 else
313 config->dnode = yang_dnode_new(ly_native_ctx, true);
314 config->version = 0;
315
316 return config;
317 }
318
319 void nb_config_free(struct nb_config *config)
320 {
321 if (config->dnode)
322 yang_dnode_free(config->dnode);
323 XFREE(MTYPE_NB_CONFIG, config);
324 }
325
326 struct nb_config *nb_config_dup(const struct nb_config *config)
327 {
328 struct nb_config *dup;
329
330 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
331 dup->dnode = yang_dnode_dup(config->dnode);
332 dup->version = config->version;
333
334 return dup;
335 }
336
337 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
338 bool preserve_source)
339 {
340 int ret;
341
342 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
343 if (ret != 0)
344 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
345
346 if (!preserve_source)
347 nb_config_free(config_src);
348
349 return (ret == 0) ? NB_OK : NB_ERR;
350 }
351
/*
 * Replace config_dst's data tree (and version) with config_src's. When
 * 'preserve_source' is true the source tree is deep-copied; otherwise the
 * tree is moved into config_dst and config_src is freed.
 */
void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		config_dst->dnode = config_src->dnode;
		/* Detach the tree so nb_config_free() doesn't free it too. */
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}
370
/*
 * Ordering function for the nb_config_cbs red-black tree: sort callbacks by
 * northbound priority first, then by the sequence in which libyang reported
 * the changes, with the dnode pointer as a last-resort tie breaker.
 */
static inline int nb_config_cb_compare(const struct nb_config_cb *a,
				       const struct nb_config_cb *b)
{
	/* Sort by priority first. */
	if (a->nb_node->priority < b->nb_node->priority)
		return -1;
	if (a->nb_node->priority > b->nb_node->priority)
		return 1;

	/*
	 * Preserve the order of the configuration changes as told by libyang.
	 */
	if (a->seq < b->seq)
		return -1;
	if (a->seq > b->seq)
		return 1;

	/*
	 * All 'apply_finish' callbacks have their sequence number set to zero.
	 * In this case, compare them using their dnode pointers (the order
	 * doesn't matter for callbacks that have the same priority).
	 */
	if (a->dnode < b->dnode)
		return -1;
	if (a->dnode > b->dnode)
		return 1;

	return 0;
}
/* Generate the nb_config_cbs tree. */
RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
402
403 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
404 enum nb_operation operation,
405 uint32_t *seq,
406 const struct lyd_node *dnode)
407 {
408 struct nb_config_change *change;
409
410 /* Ignore unimplemented nodes. */
411 if (!dnode->schema->priv)
412 return;
413
414 change = XCALLOC(MTYPE_TMP, sizeof(*change));
415 change->cb.operation = operation;
416 change->cb.seq = *seq;
417 *seq = *seq + 1;
418 change->cb.nb_node = dnode->schema->priv;
419 change->cb.dnode = dnode;
420
421 RB_INSERT(nb_config_cbs, changes, &change->cb);
422 }
423
424 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
425 {
426 while (!RB_EMPTY(nb_config_cbs, changes)) {
427 struct nb_config_change *change;
428
429 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
430 changes);
431 RB_REMOVE(nb_config_cbs, changes, &change->cb);
432 XFREE(MTYPE_TMP, change);
433 }
434 }
435
/*
 * Helper function used when calculating the delta between two different
 * configurations. Given a new subtree, calculate all new YANG data nodes,
 * excluding default leafs and leaf-lists. This is a recursive function.
 */
static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	enum nb_operation operation;
	struct lyd_node *child;

	/* Ignore unimplemented nodes. */
	if (!dnode->schema->priv)
		return;

	switch (dnode->schema->nodetype) {
	case LYS_LEAF:
	case LYS_LEAFLIST:
		/* Skip leafs that only carry their default value. */
		if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
			break;

		/* Prefer "create" when implemented, else fall back to
		 * "modify"; nodes supporting neither are skipped. */
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			operation = NB_OP_CREATE;
		else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
			operation = NB_OP_MODIFY;
		else
			return;

		nb_config_diff_add_change(changes, operation, seq, dnode);
		break;
	case LYS_CONTAINER:
	case LYS_LIST:
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
						  dnode);

		/* Process child nodes recursively. */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_created(child, seq, changes);
		}
		break;
	default:
		break;
	}
}

/*
 * Counterpart of nb_config_diff_created() for deleted subtrees: queue
 * "destroy" changes for every deleted node that implements that operation.
 */
static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	/* Ignore unimplemented nodes. */
	if (!dnode->schema->priv)
		return;

	if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
		nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
	else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
		struct lyd_node *child;

		/*
		 * Non-presence containers need special handling since they
		 * don't have "destroy" callbacks. In this case, what we need to
		 * do is to call the "destroy" callbacks of their child nodes
		 * when applicable (i.e. optional nodes).
		 */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_deleted(child, seq, changes);
		}
	}
}
505
/*
 * Calculate the delta between two different configurations. The result is a
 * tree of northbound changes ('changes') ordered by priority and sequence,
 * built from the libyang lyd_diff() output.
 */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_difflist *diff;
	uint32_t seq = 0;

	diff = lyd_diff(config1->dnode, config2->dnode,
			LYD_DIFFOPT_WITHDEFAULTS);
	assert(diff);

	for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
		LYD_DIFFTYPE type;
		struct lyd_node *dnode;

		type = diff->type[i];

		switch (type) {
		case LYD_DIFF_CREATED:
			/* Created nodes live in the second tree. */
			dnode = diff->second[i];
			nb_config_diff_created(dnode, &seq, changes);
			break;
		case LYD_DIFF_DELETED:
			/* Deleted nodes live in the first tree. */
			dnode = diff->first[i];
			nb_config_diff_deleted(dnode, &seq, changes);
			break;
		case LYD_DIFF_CHANGED:
			dnode = diff->second[i];
			nb_config_diff_add_change(changes, NB_OP_MODIFY, &seq,
						  dnode);
			break;
		case LYD_DIFF_MOVEDAFTER1:
		case LYD_DIFF_MOVEDAFTER2:
		default:
			/* List moves are not translated into callbacks. */
			continue;
		}
	}

	lyd_free_diff(diff);
}
547
/*
 * Edit a candidate configuration: create/modify/destroy the data node at
 * 'xpath'. Also creates/destroys the dependency/dependant node registered
 * through nb_node_set_dependency_cbs(), if any. Returns NB_OK on success,
 * NB_ERR_NOT_FOUND when destroying a non-existent node, NB_ERR otherwise.
 */
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode, *dep_dnode;
	char xpath_edit[XPATH_MAXLEN];
	char dep_xpath[XPATH_MAXLEN];

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		ly_errno = 0;
		/* LYD_PATH_OPT_UPDATE: update the value if the node exists. */
		dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
				     xpath_edit, (void *)data->value, 0,
				     LYD_PATH_OPT_UPDATE);
		if (dnode) {
			/*
			 * create dependency
			 *
			 * dnode returned by the lyd_new_path may be from a
			 * different schema, so we need to update the nb_node
			 */
			nb_node = dnode->schema->priv;
			if (nb_node->dep_cbs.get_dependency_xpath) {
				nb_node->dep_cbs.get_dependency_xpath(
					dnode, dep_xpath);

				ly_errno = 0;
				dep_dnode = lyd_new_path(candidate->dnode,
							 ly_native_ctx,
							 dep_xpath, NULL, 0,
							 LYD_PATH_OPT_UPDATE);
				if (!dep_dnode && ly_errno) {
					flog_warn(EC_LIB_LIBYANG,
						  "%s: lyd_new_path(%s) failed",
						  __func__, dep_xpath);
					return NB_ERR;
				}
			}
		} else if (ly_errno) {
			/* NULL dnode without ly_errno means "no change". */
			flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path(%s) failed",
				  __func__, xpath_edit);
			return NB_ERR;
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		/* destroy dependant */
		if (nb_node->dep_cbs.get_dependant_xpath) {
			nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);

			dep_dnode = yang_dnode_get(candidate->dnode, dep_xpath);
			if (dep_dnode)
				lyd_free(dep_dnode);
		}
		lyd_free(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	default:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}
632
633 bool nb_candidate_needs_update(const struct nb_config *candidate)
634 {
635 if (candidate->version < running_config->version)
636 return true;
637
638 return false;
639 }
640
641 int nb_candidate_update(struct nb_config *candidate)
642 {
643 struct nb_config *updated_config;
644
645 updated_config = nb_config_dup(running_config);
646 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
647 return NB_ERR;
648
649 nb_config_replace(candidate, updated_config, false);
650
651 return NB_OK;
652 }
653
/*
 * Perform YANG syntactic and semantic validation.
 *
 * WARNING: lyd_validate() can change the configuration as part of the
 * validation process.
 */
static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
				      size_t errmsg_len)
{
	/* LYD_OPT_WHENAUTODEL: auto-delete nodes with false "when". */
	if (lyd_validate(&candidate->dnode,
			 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
			 ly_native_ctx)
	    != 0) {
		/* Copy libyang's error messages into the caller's buffer. */
		yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
		return NB_ERR_VALIDATION;
	}

	return NB_OK;
}
673
/*
 * Perform code-level validation using the northbound callbacks: first run
 * the 'pre_validate' callbacks over the whole candidate, then the
 * NB_EV_VALIDATE phase over the computed changes.
 */
static int nb_candidate_validate_code(struct nb_context *context,
				      struct nb_config *candidate,
				      struct nb_config_cbs *changes,
				      char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;
	struct lyd_node *root, *next, *child;
	int ret;

	/* First validate the candidate as a whole. */
	LY_TREE_FOR (candidate->dnode, root) {
		LY_TREE_DFS_BEGIN (root, next, child) {
			struct nb_node *nb_node;

			nb_node = child->schema->priv;
			if (!nb_node || !nb_node->cbs.pre_validate)
				goto next;

			ret = nb_callback_pre_validate(context, nb_node, child,
						       errmsg, errmsg_len);
			if (ret != NB_OK)
				return NB_ERR_VALIDATION;

		next:
			LY_TREE_DFS_END(root, next, child);
		}
	}

	/* Now validate the configuration changes. */
	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;

		ret = nb_callback_configuration(context, NB_EV_VALIDATE, change,
						errmsg, errmsg_len);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}
715
716 int nb_candidate_validate(struct nb_context *context,
717 struct nb_config *candidate, char *errmsg,
718 size_t errmsg_len)
719 {
720 struct nb_config_cbs changes;
721 int ret;
722
723 if (nb_candidate_validate_yang(candidate, errmsg, sizeof(errmsg_len))
724 != NB_OK)
725 return NB_ERR_VALIDATION;
726
727 RB_INIT(nb_config_cbs, &changes);
728 nb_config_diff(running_config, candidate, &changes);
729 ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
730 errmsg_len);
731 nb_config_diff_del_changes(&changes);
732
733 return ret;
734 }
735
/*
 * First phase of a two-phase commit: validate the candidate, compute the
 * changes against the running configuration, create a transaction and run
 * the NB_EV_PREPARE callbacks. On success '*transaction' is set and must
 * later be passed to nb_candidate_commit_apply() or _abort().
 */
int nb_candidate_commit_prepare(struct nb_context *context,
				struct nb_config *candidate,
				const char *comment,
				struct nb_transaction **transaction,
				char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	if (RB_EMPTY(nb_config_cbs, &changes)) {
		snprintf(
			errmsg, errmsg_len,
			"No changes to apply were found during preparation phase");
		return NB_ERR_NO_CHANGES;
	}

	if (nb_candidate_validate_code(context, candidate, &changes, errmsg,
				       errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_VALIDATION;
	}

	/* The transaction takes ownership of 'changes' on success. */
	*transaction = nb_transaction_new(context, candidate, &changes, comment,
					  errmsg, errmsg_len);
	if (*transaction == NULL) {
		/* NOTE(review): transaction creation appears to fail only when
		 * another transaction is in progress, hence NB_ERR_LOCKED —
		 * confirm against nb_transaction_new(). */
		flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			  "%s: failed to create transaction: %s", __func__,
			  errmsg);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_LOCKED;
	}

	return nb_transaction_process(NB_EV_PREPARE, *transaction, errmsg,
				      errmsg_len);
}
784
/*
 * Abort a prepared transaction: run the NB_EV_ABORT phase on all queued
 * callbacks and free the transaction.
 */
void nb_candidate_commit_abort(struct nb_transaction *transaction, char *errmsg,
			       size_t errmsg_len)
{
	(void)nb_transaction_process(NB_EV_ABORT, transaction, errmsg,
				     errmsg_len);
	nb_transaction_free(transaction);
}
792
/*
 * Second phase of a two-phase commit: run the NB_EV_APPLY phase and the
 * 'apply_finish' callbacks, promote the candidate to running, optionally
 * record the transaction in the database, and free the transaction.
 */
void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id,
			       char *errmsg, size_t errmsg_len)
{
	(void)nb_transaction_process(NB_EV_APPLY, transaction, errmsg,
				     errmsg_len);
	nb_transaction_apply_finish(transaction, errmsg, errmsg_len);

	/* Replace running by candidate. */
	transaction->config->version++;
	nb_config_replace(running_config, transaction->config, true);

	/* Record transaction. */
	if (save_transaction && nb_db_enabled
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}
813
/*
 * Convenience wrapper performing a full commit: prepare, then apply on
 * success or abort on failure. Returns the result of the prepare phase.
 */
int nb_candidate_commit(struct nb_context *context, struct nb_config *candidate,
			bool save_transaction, const char *comment,
			uint32_t *transaction_id, char *errmsg,
			size_t errmsg_len)
{
	struct nb_transaction *transaction = NULL;
	int ret;

	ret = nb_candidate_commit_prepare(context, candidate, comment,
					  &transaction, errmsg, errmsg_len);
	/*
	 * Apply the changes if the preparation phase succeeded. Otherwise abort
	 * the transaction.
	 */
	if (ret == NB_OK)
		nb_candidate_commit_apply(transaction, save_transaction,
					  transaction_id, errmsg, errmsg_len);
	else if (transaction != NULL)
		nb_candidate_commit_abort(transaction, errmsg, errmsg_len);

	return ret;
}
836
/*
 * Try to acquire the management lock on the running configuration for the
 * given client/user pair. Returns 0 on success, -1 if already locked.
 */
int nb_running_lock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked) {
			running_config_mgmt_lock.locked = true;
			running_config_mgmt_lock.owner_client = client;
			running_config_mgmt_lock.owner_user = user;
			ret = 0;
		}
	}

	return ret;
}

/*
 * Release the management lock. Returns 0 on success, -1 when the lock isn't
 * held by this client/user pair.
 */
int nb_running_unlock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (running_config_mgmt_lock.locked
		    && running_config_mgmt_lock.owner_client == client
		    && running_config_mgmt_lock.owner_user == user) {
			running_config_mgmt_lock.locked = false;
			running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
			running_config_mgmt_lock.owner_user = NULL;
			ret = 0;
		}
	}

	return ret;
}

/*
 * Check whether the given client/user pair may modify the running
 * configuration: 0 when the lock is free or owned by them, -1 otherwise.
 */
int nb_running_lock_check(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked
		    || (running_config_mgmt_lock.owner_client == client
			&& running_config_mgmt_lock.owner_user == user))
			ret = 0;
	}

	return ret;
}
884
885 static void nb_log_config_callback(const enum nb_event event,
886 enum nb_operation operation,
887 const struct lyd_node *dnode)
888 {
889 const char *value;
890 char xpath[XPATH_MAXLEN];
891
892 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
893 return;
894
895 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
896 if (yang_snode_is_typeless_data(dnode->schema))
897 value = "(none)";
898 else
899 value = yang_dnode_get_string(dnode, NULL);
900
901 zlog_debug(
902 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
903 nb_event_name(event), nb_operation_name(operation), xpath,
904 value);
905 }
906
/*
 * Invoke the 'create' callback of a node and sanity-check its return value:
 * certain error codes are only expected during specific commit phases
 * (NB_ERR_VALIDATION during validate, NB_ERR_RESOURCE during prepare).
 */
static int nb_callback_create(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_create_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_CREATE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.create(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		/* Only meaningful during the validation phase. */
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		/* Resource allocation failures belong to the prepare phase. */
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

/*
 * Invoke the 'modify' callback of a node; same return-value sanity checks
 * as nb_callback_create().
 */
static int nb_callback_modify(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_modify_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MODIFY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.modify(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

/*
 * Invoke the 'destroy' callback of a node. Unlike create/modify, destroy
 * takes no resource argument and NB_ERR_RESOURCE is never expected.
 */
static int nb_callback_destroy(struct nb_context *context,
			       const struct nb_node *nb_node,
			       enum nb_event event,
			       const struct lyd_node *dnode, char *errmsg,
			       size_t errmsg_len)
{
	struct nb_cb_destroy_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_DESTROY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.destroy(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

/*
 * Invoke the 'move' callback of a node; same return-value sanity checks as
 * nb_callback_destroy().
 */
static int nb_callback_move(struct nb_context *context,
			    const struct nb_node *nb_node, enum nb_event event,
			    const struct lyd_node *dnode, char *errmsg,
			    size_t errmsg_len)
{
	struct nb_cb_move_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MOVE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.move(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}
1091
/*
 * Invoke the 'pre_validate' callback of a node. Only NB_OK and
 * NB_ERR_VALIDATION are expected return values.
 *
 * NOTE(review): 'context' is accepted for signature symmetry with the other
 * callback wrappers but is not copied into the args struct — apparently
 * nb_cb_pre_validate_args has no context field; confirm against
 * northbound.h.
 */
static int nb_callback_pre_validate(struct nb_context *context,
				    const struct nb_node *nb_node,
				    const struct lyd_node *dnode, char *errmsg,
				    size_t errmsg_len)
{
	struct nb_cb_pre_validate_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);

	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.pre_validate(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR_VALIDATION:
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}
1124
1125 static void nb_callback_apply_finish(struct nb_context *context,
1126 const struct nb_node *nb_node,
1127 const struct lyd_node *dnode, char *errmsg,
1128 size_t errmsg_len)
1129 {
1130 struct nb_cb_apply_finish_args args = {};
1131
1132 nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);
1133
1134 args.context = context;
1135 args.dnode = dnode;
1136 args.errmsg = errmsg;
1137 args.errmsg_len = errmsg_len;
1138 nb_node->cbs.apply_finish(&args);
1139 }
1140
1141 struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
1142 const char *xpath,
1143 const void *list_entry)
1144 {
1145 struct nb_cb_get_elem_args args = {};
1146
1147 DEBUGD(&nb_dbg_cbs_state,
1148 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1149 xpath, list_entry);
1150
1151 args.xpath = xpath;
1152 args.list_entry = list_entry;
1153 return nb_node->cbs.get_elem(&args);
1154 }
1155
1156 const void *nb_callback_get_next(const struct nb_node *nb_node,
1157 const void *parent_list_entry,
1158 const void *list_entry)
1159 {
1160 struct nb_cb_get_next_args args = {};
1161
1162 DEBUGD(&nb_dbg_cbs_state,
1163 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1164 nb_node->xpath, parent_list_entry, list_entry);
1165
1166 args.parent_list_entry = parent_list_entry;
1167 args.list_entry = list_entry;
1168 return nb_node->cbs.get_next(&args);
1169 }
1170
1171 int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
1172 struct yang_list_keys *keys)
1173 {
1174 struct nb_cb_get_keys_args args = {};
1175
1176 DEBUGD(&nb_dbg_cbs_state,
1177 "northbound callback (get_keys): node [%s] list_entry [%p]",
1178 nb_node->xpath, list_entry);
1179
1180 args.list_entry = list_entry;
1181 args.keys = keys;
1182 return nb_node->cbs.get_keys(&args);
1183 }
1184
1185 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
1186 const void *parent_list_entry,
1187 const struct yang_list_keys *keys)
1188 {
1189 struct nb_cb_lookup_entry_args args = {};
1190
1191 DEBUGD(&nb_dbg_cbs_state,
1192 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1193 nb_node->xpath, parent_list_entry);
1194
1195 args.parent_list_entry = parent_list_entry;
1196 args.keys = keys;
1197 return nb_node->cbs.lookup_entry(&args);
1198 }
1199
1200 int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
1201 const struct list *input, struct list *output, char *errmsg,
1202 size_t errmsg_len)
1203 {
1204 struct nb_cb_rpc_args args = {};
1205
1206 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
1207
1208 args.xpath = xpath;
1209 args.input = input;
1210 args.output = output;
1211 args.errmsg = errmsg;
1212 args.errmsg_len = errmsg_len;
1213 return nb_node->cbs.rpc(&args);
1214 }
1215
1216 /*
1217 * Call the northbound configuration callback associated to a given
1218 * configuration change.
1219 */
1220 static int nb_callback_configuration(struct nb_context *context,
1221 const enum nb_event event,
1222 struct nb_config_change *change,
1223 char *errmsg, size_t errmsg_len)
1224 {
1225 enum nb_operation operation = change->cb.operation;
1226 char xpath[XPATH_MAXLEN];
1227 const struct nb_node *nb_node = change->cb.nb_node;
1228 const struct lyd_node *dnode = change->cb.dnode;
1229 union nb_resource *resource;
1230 int ret = NB_ERR;
1231
1232 if (event == NB_EV_VALIDATE)
1233 resource = NULL;
1234 else
1235 resource = &change->resource;
1236
1237 switch (operation) {
1238 case NB_OP_CREATE:
1239 ret = nb_callback_create(context, nb_node, event, dnode,
1240 resource, errmsg, errmsg_len);
1241 break;
1242 case NB_OP_MODIFY:
1243 ret = nb_callback_modify(context, nb_node, event, dnode,
1244 resource, errmsg, errmsg_len);
1245 break;
1246 case NB_OP_DESTROY:
1247 ret = nb_callback_destroy(context, nb_node, event, dnode,
1248 errmsg, errmsg_len);
1249 break;
1250 case NB_OP_MOVE:
1251 ret = nb_callback_move(context, nb_node, event, dnode, errmsg,
1252 errmsg_len);
1253 break;
1254 default:
1255 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1256 flog_err(EC_LIB_DEVELOPMENT,
1257 "%s: unknown operation (%u) [xpath %s]", __func__,
1258 operation, xpath);
1259 exit(1);
1260 }
1261
1262 if (ret != NB_OK) {
1263 int priority;
1264 enum lib_log_refs ref;
1265
1266 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1267
1268 switch (event) {
1269 case NB_EV_VALIDATE:
1270 priority = LOG_WARNING;
1271 ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
1272 break;
1273 case NB_EV_PREPARE:
1274 priority = LOG_WARNING;
1275 ref = EC_LIB_NB_CB_CONFIG_PREPARE;
1276 break;
1277 case NB_EV_ABORT:
1278 priority = LOG_WARNING;
1279 ref = EC_LIB_NB_CB_CONFIG_ABORT;
1280 break;
1281 case NB_EV_APPLY:
1282 priority = LOG_ERR;
1283 ref = EC_LIB_NB_CB_CONFIG_APPLY;
1284 break;
1285 default:
1286 flog_err(EC_LIB_DEVELOPMENT,
1287 "%s: unknown event (%u) [xpath %s]", __func__,
1288 event, xpath);
1289 exit(1);
1290 }
1291
1292 flog(priority, ref,
1293 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
1294 nb_err_name(ret), nb_event_name(event),
1295 nb_operation_name(operation), xpath);
1296 if (strlen(errmsg) > 0)
1297 flog(priority, ref,
1298 "error processing configuration change: %s",
1299 errmsg);
1300 }
1301
1302 return ret;
1303 }
1304
/*
 * Allocate a new configuration transaction.
 *
 * Fails (returns NULL and fills 'errmsg') when the running configuration
 * is locked by another client or when another transaction is already in
 * progress - only one transaction may exist at any given time.
 *
 * On success the transaction takes over the caller's 'changes' tree
 * (shallow copy of the RB-tree head).
 */
static struct nb_transaction *
nb_transaction_new(struct nb_context *context, struct nb_config *config,
		   struct nb_config_cbs *changes, const char *comment,
		   char *errmsg, size_t errmsg_len)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(context->client, context->user)) {
		strlcpy(errmsg,
			"running configuration is locked by another client",
			errmsg_len);
		return NULL;
	}

	if (transaction_in_progress) {
		strlcpy(errmsg,
			"there's already another transaction in progress",
			errmsg_len);
		return NULL;
	}
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->context = context;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	/* Shallow copy: the transaction now owns the changes tree. */
	transaction->changes = *changes;

	return transaction;
}
1337
/* Release a transaction and all of its configuration changes. */
static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	/* Allow a new transaction to be created. */
	transaction_in_progress = false;
}
1344
/*
 * Process all configuration changes associated to a transaction, invoking
 * the per-change callbacks for the given phase ('event'). A failure is
 * only propagated during the 'prepare' phase; abort/apply failures are
 * treated as bugs (see comment below).
 */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction,
				  char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;

	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		int ret;

		/*
		 * Only try to release resources that were allocated
		 * successfully.
		 */
		if (event == NB_EV_ABORT && !change->prepare_ok)
			break;

		/* Call the appropriate callback. */
		ret = nb_callback_configuration(transaction->context, event,
						change, errmsg, errmsg_len);
		switch (event) {
		case NB_EV_PREPARE:
			/* A failed 'prepare' aborts the whole transaction. */
			if (ret != NB_OK)
				return ret;
			change->prepare_ok = true;
			break;
		case NB_EV_ABORT:
		case NB_EV_APPLY:
			/*
			 * At this point it's not possible to reject the
			 * transaction anymore, so any failure here can lead to
			 * inconsistencies and should be treated as a bug.
			 * Operations prone to errors, like validations and
			 * resource allocations, should be performed during the
			 * 'prepare' phase.
			 */
			break;
		default:
			break;
		}
	}

	return NB_OK;
}
1390
1391 static struct nb_config_cb *
1392 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
1393 const struct lyd_node *dnode)
1394 {
1395 struct nb_config_cb *cb;
1396
1397 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1398 cb->nb_node = nb_node;
1399 cb->dnode = dnode;
1400 RB_INSERT(nb_config_cbs, cbs, cb);
1401
1402 return cb;
1403 }
1404
1405 static struct nb_config_cb *
1406 nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
1407 const struct nb_node *nb_node,
1408 const struct lyd_node *dnode)
1409 {
1410 struct nb_config_cb s;
1411
1412 s.seq = 0;
1413 s.nb_node = nb_node;
1414 s.dnode = dnode;
1415 return RB_FIND(nb_config_cbs, cbs, &s);
1416 }
1417
/*
 * Call the 'apply_finish' callbacks of every node affected by the
 * transaction (including ancestors), at most once per data node.
 */
static void nb_transaction_apply_finish(struct nb_transaction *transaction,
					char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = dnode->parent;
			/*
			 * NOTE(review): when a top-level node is deleted this
			 * breaks out of the whole RB_FOREACH instead of moving
			 * on to the next change - confirm 'continue' isn't the
			 * intended behavior here.
			 */
			if (!dnode)
				break;

			/*
			 * The dnode from 'delete' callbacks point to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node || !nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
				goto next;

			nb_apply_finish_cb_new(&cbs, nb_node, dnode);

		next:
			dnode = dnode->parent;
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs)
		nb_callback_apply_finish(transaction->context, cb->nb_node,
					 cb->dnode, errmsg, errmsg_len);

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}
1489
/*
 * Iterate over the operational data of all immediate children of a schema
 * node, stopping at the first callback error.
 */
static int nb_oper_data_iter_children(const struct lys_node *snode,
				      const char *xpath, const void *list_entry,
				      const struct yang_list_keys *list_keys,
				      struct yang_translator *translator,
				      bool first, uint32_t flags,
				      nb_oper_data_cb cb, void *arg)
{
	struct lys_node *child;

	LY_TREE_FOR (snode->child, child) {
		int ret;

		ret = nb_oper_data_iter_node(child, xpath, list_entry,
					     list_keys, translator, false,
					     flags, cb, arg);
		if (ret != NB_OK)
			return ret;
	}

	return NB_OK;
}
1511
/* Feed the operational value of a single state leaf to 'cb'. */
static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct yang_data *data;

	/* Skip configuration leaves - only state data is iterated here. */
	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	/* Ignore list keys. */
	if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
		return NB_OK;

	data = nb_callback_get_elem(nb_node, xpath, list_entry);
	if (data == NULL)
		/* Leaf of type "empty" is not present. */
		return NB_OK;

	return (*cb)(nb_node->snode, translator, data, arg);
}
1534
/*
 * Iterate over the operational data of a container: report presence
 * containers through 'cb', then descend into the children.
 */
static int nb_oper_data_iter_container(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry,
				       const struct yang_list_keys *list_keys,
				       struct yang_translator *translator,
				       uint32_t flags, nb_oper_data_cb cb,
				       void *arg)
{
	/* Skip containers that hold configuration data only. */
	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
		return NB_OK;

	/* Presence containers. */
	if (nb_node->cbs.get_elem) {
		struct yang_data *data;
		int ret;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			/* Presence container is not present. */
			return NB_OK;

		ret = (*cb)(nb_node->snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	}

	/* Iterate over the child nodes. */
	return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
					  list_keys, translator, false, flags,
					  cb, arg);
}
1566
/*
 * Iterate over all entries of an operational leaf-list, passing each
 * value to 'cb'. Entries whose 'get_elem' returns NULL are skipped.
 */
static int
nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
			   const void *parent_list_entry,
			   const struct yang_list_keys *parent_list_keys,
			   struct yang_translator *translator, uint32_t flags,
			   nb_oper_data_cb cb, void *arg)
{
	const void *list_entry = NULL;

	/* Skip configuration leaf-lists - only state data is iterated. */
	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	do {
		struct yang_data *data;
		int ret;

		list_entry = nb_callback_get_next(nb_node, parent_list_entry,
						  list_entry);
		if (!list_entry)
			/* End of the list. */
			break;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			continue;

		ret = (*cb)(nb_node->snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	} while (list_entry);

	return NB_OK;
}
1600
/*
 * Iterate over all entries of an operational list. For each entry the
 * list keys (or a positional index for keyless lists) are appended to
 * the XPath before descending into the child nodes.
 */
static int nb_oper_data_iter_list(const struct nb_node *nb_node,
				  const char *xpath_list,
				  const void *parent_list_entry,
				  const struct yang_list_keys *parent_list_keys,
				  struct yang_translator *translator,
				  uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
	const void *list_entry = NULL;
	uint32_t position = 1;

	/* Skip lists that hold configuration data only. */
	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
		return NB_OK;

	/* Iterate over all list entries. */
	do {
		struct yang_list_keys list_keys;
		char xpath[XPATH_MAXLEN * 2];
		int ret;

		/* Obtain list entry. */
		list_entry = nb_callback_get_next(nb_node, parent_list_entry,
						  list_entry);
		if (!list_entry)
			/* End of the list. */
			break;

		if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
			/* Obtain the list entry keys. */
			if (nb_callback_get_keys(nb_node, list_entry,
						 &list_keys)
			    != NB_OK) {
				flog_warn(EC_LIB_NB_CB_STATE,
					  "%s: failed to get list keys",
					  __func__);
				return NB_ERR;
			}

			/* Build XPath of the list entry. */
			strlcpy(xpath, xpath_list, sizeof(xpath));
			for (unsigned int i = 0; i < list_keys.num; i++) {
				snprintf(xpath + strlen(xpath),
					 sizeof(xpath) - strlen(xpath),
					 "[%s='%s']", slist->keys[i]->name,
					 list_keys.key[i]);
			}
		} else {
			/*
			 * Keyless list - build XPath using a positional index.
			 *
			 * NOTE(review): 'list_keys' is left uninitialized on
			 * this path but is still handed to the children below;
			 * confirm descendants never read it for keyless lists.
			 */
			snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
				 position);
			position++;
		}

		/* Iterate over the child nodes. */
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, false, flags, cb, arg);
		if (ret != NB_OK)
			return ret;
	} while (list_entry);

	return NB_OK;
}
1666
/*
 * Iterate over the operational data of one schema node, dispatching to
 * the handler that matches its YANG node type. 'first' marks the starting
 * node of the iteration, whose name is already part of 'xpath_parent'.
 */
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath_parent,
				  const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	char xpath[XPATH_MAXLEN];
	int ret = NB_OK;

	/* Honor the no-recurse flag for nested containers and lists. */
	if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
	    && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
		return NB_OK;

	/* Update XPath. */
	strlcpy(xpath, xpath_parent, sizeof(xpath));
	if (!first && snode->nodetype != LYS_USES) {
		struct lys_node *parent;

		/* Get the real parent (skip transparent 'uses' nodes). */
		parent = snode->parent;
		while (parent && parent->nodetype == LYS_USES)
			parent = parent->parent;

		/*
		 * When necessary, include the namespace of the augmenting
		 * module.
		 */
		if (parent && parent->nodetype == LYS_AUGMENT)
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s:%s",
				 snode->module->name, snode->name);
		else
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s",
				 snode->name);
	}

	nb_node = snode->priv;
	switch (snode->nodetype) {
	case LYS_CONTAINER:
		ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
						  list_keys, translator, flags,
						  cb, arg);
		break;
	case LYS_LEAF:
		ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_LEAFLIST:
		ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
						 list_keys, translator, flags,
						 cb, arg);
		break;
	case LYS_LIST:
		ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_USES:
		/* Groupings are transparent: iterate their children in place. */
		ret = nb_oper_data_iter_children(snode, xpath, list_entry,
						 list_keys, translator, false,
						 flags, cb, arg);
		break;
	default:
		break;
	}

	return ret;
}
1740
/*
 * Iterate over the operational data that lives under 'xpath', invoking
 * 'cb' for every data element found. The XPath must refer to a container
 * or a list; when it points to a specific list entry, only that entry is
 * iterated. Returns NB_OK, NB_ERR or NB_ERR_NOT_FOUND.
 */
int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
			 uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	const void *list_entry = NULL;
	struct yang_list_keys list_keys;
	struct list *list_dnodes;
	struct lyd_node *dnode, *dn;
	struct listnode *ln;
	int ret;

	nb_node = nb_node_find(xpath);
	if (!nb_node) {
		flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
			  "%s: unknown data path: %s", __func__, xpath);
		return NB_ERR;
	}

	/* For now this function works only with containers and lists. */
	if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		flog_warn(
			EC_LIB_NB_OPERATIONAL_DATA,
			"%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
			__func__, xpath);
		return NB_ERR;
	}

	/*
	 * Create a data tree from the XPath so that we can parse the keys of
	 * all YANG lists (if any).
	 */
	ly_errno = 0;
	dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
			     LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
	if (!dnode) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
			  __func__);
		return NB_ERR;
	}

	/*
	 * Create a linked list to sort the data nodes starting from the root.
	 */
	list_dnodes = list_new();
	for (dn = dnode; dn; dn = dn->parent) {
		if (dn->schema->nodetype != LYS_LIST || !dn->child)
			continue;
		listnode_add_head(list_dnodes, dn);
	}
	/*
	 * Use the northbound callbacks to find list entry pointer corresponding
	 * to the given XPath.
	 */
	for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
		struct lyd_node *child;
		struct nb_node *nn;
		unsigned int n = 0;

		/* Obtain the list entry keys. */
		memset(&list_keys, 0, sizeof(list_keys));
		LY_TREE_FOR (dn->child, child) {
			if (!lys_is_key((struct lys_node_leaf *)child->schema,
					NULL))
				continue;
			strlcpy(list_keys.key[n],
				yang_dnode_get_string(child, NULL),
				sizeof(list_keys.key[n]));
			n++;
		}
		list_keys.num = n;
		/* The XPath must provide every key of this list. */
		if (list_keys.num
		    != ((struct lys_node_list *)dn->schema)->keys_size) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}

		/* Find the list entry pointer. */
		nn = dn->schema->priv;
		if (!nn->cbs.lookup_entry) {
			flog_warn(
				EC_LIB_NB_OPERATIONAL_DATA,
				"%s: data path doesn't support iteration over operational data: %s",
				__func__, xpath);
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR;
		}

		list_entry =
			nb_callback_lookup_entry(nn, list_entry, &list_keys);
		if (list_entry == NULL) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}
	}

	/* If a list entry was given, iterate over that list entry only. */
	if (dnode->schema->nodetype == LYS_LIST && dnode->child)
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, true, flags, cb, arg);
	else
		ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
					     &list_keys, translator, true,
					     flags, cb, arg);

	list_delete(&list_dnodes);
	yang_dnode_free(dnode);

	return ret;
}
1854
/*
 * Check whether a given northbound operation is applicable to a YANG
 * schema node, based on the node's type and its config/state flags.
 * This encodes the callback-validity matrix used by nb_node_validate().
 */
bool nb_operation_is_valid(enum nb_operation operation,
			   const struct lys_node *snode)
{
	struct nb_node *nb_node = snode->priv;
	struct lys_node_container *scontainer;
	struct lys_node_leaf *sleaf;

	switch (operation) {
	case NB_OP_CREATE:
		/* Only configuration nodes can be created. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Regular leaves are "modified", not "created". */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base != LY_TYPE_EMPTY)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be created. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MODIFY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* "empty" leaves are created/destroyed, not modified. */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base == LY_TYPE_EMPTY)
				return false;

			/* List keys can't be modified. */
			if (lys_is_key(sleaf, NULL))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_DESTROY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lys_node_leaf *)snode;

			/* List keys can't be deleted. */
			if (lys_is_key(sleaf, NULL))
				return false;

			/*
			 * Only optional leafs can be deleted, or leafs whose
			 * parent is a case statement.
			 */
			if (snode->parent->nodetype == LYS_CASE)
				return true;
			if (sleaf->when)
				return true;
			if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
			    || sleaf->dflt)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be destroyed. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MOVE:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LIST:
		case LYS_LEAFLIST:
			/* Only user-ordered lists support 'move'. */
			if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;
		return true;
	case NB_OP_GET_ELEM:
		/* Only state (read-only) nodes have 'get_elem'. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
		case LYS_LEAFLIST:
			break;
		case LYS_CONTAINER:
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_NEXT:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			break;
		case LYS_LEAFLIST:
			if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			/* Keyless lists have no keys to get or look up. */
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_RPC:
		/* RPCs/actions are neither config nor state nodes. */
		if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_RPC:
		case LYS_ACTION:
			break;
		default:
			return false;
		}
		return true;
	default:
		return false;
	}
}
2018
/* Hook invoked by nb_notification_send(); northbound clients register here
 * to be handed YANG notifications. */
DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
	    (xpath, arguments));
2021
2022 int nb_notification_send(const char *xpath, struct list *arguments)
2023 {
2024 int ret;
2025
2026 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
2027
2028 ret = hook_call(nb_notification_send, xpath, arguments);
2029 if (arguments)
2030 list_delete(&arguments);
2031
2032 return ret;
2033 }
2034
/* Running configuration user pointers management. */
struct nb_config_entry {
	/* XPath of the associated data node (also the hash key). */
	char xpath[XPATH_MAXLEN];
	/* Opaque user pointer supplied by the daemon. */
	void *entry;
};
2040
2041 static bool running_config_entry_cmp(const void *value1, const void *value2)
2042 {
2043 const struct nb_config_entry *c1 = value1;
2044 const struct nb_config_entry *c2 = value2;
2045
2046 return strmatch(c1->xpath, c2->xpath);
2047 }
2048
/*
 * Hash key function: hash entries by their XPath string. Passing the
 * struct pointer directly works because 'xpath' is the first member of
 * struct nb_config_entry.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
2053
2054 static void *running_config_entry_alloc(void *p)
2055 {
2056 struct nb_config_entry *new, *key = p;
2057
2058 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
2059 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
2060
2061 return new;
2062 }
2063
/* Hash free function for configuration entries. */
static void running_config_entry_free(void *arg)
{
	XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
}
2068
/* Associate a user pointer with a data node of the running configuration. */
void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
{
	struct nb_config_entry *config, s;

	yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
	/* Create the hash entry on first use, then (re)set the pointer. */
	config = hash_get(running_config_entries, &s,
			  running_config_entry_alloc);
	config->entry = entry;
}
2078
/*
 * Rewrite the XPath of every user-pointer entry under 'xpath_from' so it
 * lives under 'xpath_to', re-hashing each affected entry.
 */
void nb_running_move_tree(const char *xpath_from, const char *xpath_to)
{
	struct nb_config_entry *entry;
	struct list *entries = hash_to_list(running_config_entries);
	struct listnode *ln;

	for (ALL_LIST_ELEMENTS_RO(entries, ln, entry)) {
		if (!frrstr_startswith(entry->xpath, xpath_from))
			continue;

		/* Remove from the hash before mutating the key. */
		hash_release(running_config_entries, entry);

		char *newpath =
			frrstr_replace(entry->xpath, xpath_from, xpath_to);
		strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
		XFREE(MTYPE_TMP, newpath);

		/* Re-insert under the rewritten XPath. */
		hash_get(running_config_entries, entry, hash_alloc_intern);
	}

	list_delete(&entries);
}
2101
/*
 * Remove and return the user pointer of 'dnode', recursively unsetting
 * the pointers of its children as well. Returns NULL if the node itself
 * had no entry.
 */
static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
{
	struct nb_config_entry *config, s;
	struct lyd_node *child;
	void *entry = NULL;

	yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
	config = hash_release(running_config_entries, &s);
	if (config) {
		entry = config->entry;
		running_config_entry_free(config);
	}

	/* Unset user pointers from the child nodes. */
	if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
		LY_TREE_FOR (dnode->child, child) {
			(void)nb_running_unset_entry_helper(child);
		}
	}

	return entry;
}
2124
/*
 * Unset and return the user pointer associated with a data node.
 * Asserts that an entry was actually set for this node.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);
	return entry;
}
2134
/*
 * Look up the user pointer associated with a data node.
 *
 * Either 'dnode' or 'xpath' must be provided. When 'rec_search' is true
 * the lookup walks up the data tree until an entry is found. If nothing
 * is found and 'abort_if_not_found' is set, log an error and abort().
 */
static void *nb_running_get_entry_worker(const struct lyd_node *dnode,
					 const char *xpath,
					 bool abort_if_not_found,
					 bool rec_search)
{
	const struct lyd_node *orig_dnode = dnode;
	char xpath_buf[XPATH_MAXLEN];
	bool rec_flag = true;

	assert(dnode || xpath);

	if (!dnode)
		dnode = yang_dnode_get(running_config->dnode, xpath);

	while (rec_flag && dnode) {
		struct nb_config_entry *config, s;

		yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
		config = hash_lookup(running_config_entries, &s);
		if (config)
			return config->entry;

		/* After the first miss, only keep climbing if recursion is on. */
		rec_flag = rec_search;

		dnode = dnode->parent;
	}

	if (!abort_if_not_found)
		return NULL;

	yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
	flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
		 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
	zlog_backtrace(LOG_ERR);
	abort();
}
2171
/*
 * Find the user pointer for 'dnode' (or 'xpath'), walking up the data
 * tree when the node itself has no entry.
 */
void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
			   bool abort_if_not_found)
{
	return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
					   true);
}
2178
/*
 * Find the user pointer for 'dnode' (or 'xpath') without walking up the
 * data tree - only the node itself is checked.
 */
void *nb_running_get_entry_non_rec(const struct lyd_node *dnode,
				   const char *xpath, bool abort_if_not_found)
{
	return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
					   false);
}
2185
2186 /* Logging functions. */
2187 const char *nb_event_name(enum nb_event event)
2188 {
2189 switch (event) {
2190 case NB_EV_VALIDATE:
2191 return "validate";
2192 case NB_EV_PREPARE:
2193 return "prepare";
2194 case NB_EV_ABORT:
2195 return "abort";
2196 case NB_EV_APPLY:
2197 return "apply";
2198 default:
2199 return "unknown";
2200 }
2201 }
2202
2203 const char *nb_operation_name(enum nb_operation operation)
2204 {
2205 switch (operation) {
2206 case NB_OP_CREATE:
2207 return "create";
2208 case NB_OP_MODIFY:
2209 return "modify";
2210 case NB_OP_DESTROY:
2211 return "destroy";
2212 case NB_OP_MOVE:
2213 return "move";
2214 case NB_OP_PRE_VALIDATE:
2215 return "pre_validate";
2216 case NB_OP_APPLY_FINISH:
2217 return "apply_finish";
2218 case NB_OP_GET_ELEM:
2219 return "get_elem";
2220 case NB_OP_GET_NEXT:
2221 return "get_next";
2222 case NB_OP_GET_KEYS:
2223 return "get_keys";
2224 case NB_OP_LOOKUP_ENTRY:
2225 return "lookup_entry";
2226 case NB_OP_RPC:
2227 return "rpc";
2228 default:
2229 return "unknown";
2230 }
2231 }
2232
2233 const char *nb_err_name(enum nb_error error)
2234 {
2235 switch (error) {
2236 case NB_OK:
2237 return "ok";
2238 case NB_ERR:
2239 return "generic error";
2240 case NB_ERR_NO_CHANGES:
2241 return "no changes";
2242 case NB_ERR_NOT_FOUND:
2243 return "element not found";
2244 case NB_ERR_LOCKED:
2245 return "resource is locked";
2246 case NB_ERR_VALIDATION:
2247 return "validation";
2248 case NB_ERR_RESOURCE:
2249 return "failed to allocate resource";
2250 case NB_ERR_INCONSISTENCY:
2251 return "internal inconsistency";
2252 default:
2253 return "unknown";
2254 }
2255 }
2256
2257 const char *nb_client_name(enum nb_client client)
2258 {
2259 switch (client) {
2260 case NB_CLIENT_CLI:
2261 return "CLI";
2262 case NB_CLIENT_CONFD:
2263 return "ConfD";
2264 case NB_CLIENT_SYSREPO:
2265 return "Sysrepo";
2266 case NB_CLIENT_GRPC:
2267 return "gRPC";
2268 default:
2269 return "unknown";
2270 }
2271 }
2272
2273 static void nb_load_callbacks(const struct frr_yang_module_info *module)
2274 {
2275 for (size_t i = 0; module->nodes[i].xpath; i++) {
2276 struct nb_node *nb_node;
2277 uint32_t priority;
2278
2279 if (i > YANG_MODULE_MAX_NODES) {
2280 zlog_err(
2281 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2282 __func__, module->name, YANG_MODULE_MAX_NODES);
2283 exit(1);
2284 }
2285
2286 nb_node = nb_node_find(module->nodes[i].xpath);
2287 if (!nb_node) {
2288 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
2289 "%s: unknown data path: %s", __func__,
2290 module->nodes[i].xpath);
2291 continue;
2292 }
2293
2294 nb_node->cbs = module->nodes[i].cbs;
2295 priority = module->nodes[i].priority;
2296 if (priority != 0)
2297 nb_node->priority = priority;
2298 }
2299 }
2300
2301 void nb_validate_callbacks(void)
2302 {
2303 unsigned int errors = 0;
2304
2305 yang_snodes_iterate(NULL, nb_node_validate, 0, &errors);
2306 if (errors > 0) {
2307 flog_err(
2308 EC_LIB_NB_CBS_VALIDATION,
2309 "%s: failed to validate northbound callbacks: %u error(s)",
2310 __func__, errors);
2311 exit(1);
2312 }
2313 }
2314
2315 void nb_load_module(const struct frr_yang_module_info *module_info)
2316 {
2317 struct yang_module *module;
2318
2319 DEBUGD(&nb_dbg_events, "northbound: loading %s.yang",
2320 module_info->name);
2321
2322 module = yang_module_load(module_info->name);
2323 yang_snodes_iterate(module->info, nb_node_new_cb, 0, NULL);
2324 nb_load_callbacks(module_info);
2325 }
2326
/*
 * Initialize the northbound layer: load the given YANG modules and their
 * callbacks, validate the callbacks, create the (initially empty) running
 * configuration plus its user-pointer hash and management lock, and start
 * the northbound CLI. Exits the process on callback-validation failure
 * (via nb_validate_callbacks).
 */
void nb_init(struct thread_master *tm,
	     const struct frr_yang_module_info *const modules[],
	     size_t nmodules, bool db_enabled)
{
	nb_db_enabled = db_enabled;

	/* Load YANG modules and their corresponding northbound callbacks. */
	for (size_t i = 0; i < nmodules; i++)
		nb_load_module(modules[i]);

	/* Validate northbound callbacks. */
	nb_validate_callbacks();

	/* Create an empty running configuration. */
	running_config = nb_config_new(NULL);
	running_config_entries = hash_create(running_config_entry_key_make,
					     running_config_entry_cmp,
					     "Running Configuration Entries");
	pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);

	/* Initialize the northbound CLI. */
	nb_cli_init(tm);
}
2350
/*
 * Tear down the northbound layer in the reverse order of nb_init(): stop
 * the CLI, drop all nb_node's, then free the running configuration, its
 * user-pointer hash and the management lock mutex.
 */
void nb_terminate(void)
{
	/* Terminate the northbound CLI. */
	nb_cli_terminate();

	/* Delete all nb_node's from all YANG modules. */
	nb_nodes_delete();

	/* Delete the running configuration. */
	hash_clean(running_config_entries, running_config_entry_free);
	hash_free(running_config_entries);
	nb_config_free(running_config);
	pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}