2 * Copyright (C) 2018 NetDEF, Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "lib_errors.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
35 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node");
36 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration");
37 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry");
39 /* Running configuration - shouldn't be modified directly. */
40 struct nb_config
*running_config
;
42 /* Hash table of user pointers associated with configuration entries. */
43 static struct hash
*running_config_entries
;
45 /* Management lock for the running configuration. */
47 /* Mutex protecting this structure. */
53 /* Northbound client who owns this lock. */
54 enum nb_client owner_client
;
56 /* Northbound user who owns this lock. */
57 const void *owner_user
;
58 } running_config_mgmt_lock
;
60 /* Knob to record config transaction */
61 static bool nb_db_enabled
;
63 * Global lock used to prevent multiple configuration transactions from
64 * happening concurrently.
66 static bool transaction_in_progress
;
68 static int nb_callback_pre_validate(struct nb_context
*context
,
69 const struct nb_node
*nb_node
,
70 const struct lyd_node
*dnode
, char *errmsg
,
72 static int nb_callback_configuration(struct nb_context
*context
,
73 const enum nb_event event
,
74 struct nb_config_change
*change
,
75 char *errmsg
, size_t errmsg_len
);
76 static struct nb_transaction
*
77 nb_transaction_new(struct nb_context
*context
, struct nb_config
*config
,
78 struct nb_config_cbs
*changes
, const char *comment
,
79 char *errmsg
, size_t errmsg_len
);
80 static void nb_transaction_free(struct nb_transaction
*transaction
);
81 static int nb_transaction_process(enum nb_event event
,
82 struct nb_transaction
*transaction
,
83 char *errmsg
, size_t errmsg_len
);
84 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
85 char *errmsg
, size_t errmsg_len
);
86 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
87 const char *xpath
, const void *list_entry
,
88 const struct yang_list_keys
*list_keys
,
89 struct yang_translator
*translator
,
90 bool first
, uint32_t flags
,
91 nb_oper_data_cb cb
, void *arg
);
/*
 * yang_snodes_iterate_subtree() callback: clears *arg (a bool) as soon as a
 * read-only (config false) schema node is found in the subtree being scanned.
 *
 * NOTE(review): extraction dropped an interior line here; the
 * "*config_only = false;" statement is restored from upstream FRR — confirm
 * against the repository.
 */
static int nb_node_check_config_only(const struct lysc_node *snode, void *arg)
{
	bool *config_only = arg;

	/* One operational (LYS_CONFIG_R) node is enough to decide. */
	if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
		*config_only = false;
		/* No point in iterating further once the answer is known. */
		return YANG_ITER_STOP;
	}

	return YANG_ITER_CONTINUE;
}
/*
 * yang_snodes_iterate() callback: allocate and initialize the northbound node
 * associated with a libyang schema node and link the two together through the
 * schema node's "priv" pointer.
 *
 * NOTE(review): several interior lines were lost in extraction (the
 * "if (sparent)" / "if (sparent_list)" guards and the trailing argument of
 * yang_snodes_iterate_subtree()); they are restored from upstream FRR —
 * confirm against the repository.
 */
static int nb_node_new_cb(const struct lysc_node *snode, void *arg)
{
	struct nb_node *nb_node;
	struct lysc_node *sparent, *sparent_list;

	nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
	/* Cache the node's data path so later lookups don't recompute it. */
	yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
			    sizeof(nb_node->xpath));
	nb_node->priority = NB_DFLT_PRIORITY;
	sparent = yang_snode_real_parent(snode);
	if (sparent)
		nb_node->parent = sparent->priv;
	sparent_list = yang_snode_parent_list(snode);
	if (sparent_list)
		nb_node->parent_list = sparent_list->priv;

	/* Containers/lists: flag subtrees that carry configuration only. */
	if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		bool config_only = true;

		(void)yang_snodes_iterate_subtree(snode, NULL,
						  nb_node_check_config_only, 0,
						  &config_only);
		if (config_only)
			SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
	}
	if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
		if (yang_snode_num_keys(snode) == 0)
			SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
	}

	/*
	 * Link the northbound node and the libyang schema node with one
	 * another.
	 */
	nb_node->snode = snode;
	assert(snode->priv == NULL);
	((struct lysc_node *)snode)->priv = nb_node;

	return YANG_ITER_CONTINUE;
}
/*
 * yang_snodes_iterate() callback: undo nb_node_new_cb() — unlink and free the
 * northbound node attached to a schema node, if any.
 *
 * NOTE(review): the "if (nb_node)" guard around the unlink/free was lost in
 * extraction and is restored from upstream FRR — confirm.
 */
static int nb_node_del_cb(const struct lysc_node *snode, void *arg)
{
	struct nb_node *nb_node;

	nb_node = snode->priv;
	if (nb_node) {
		/* Break the back-pointer before freeing to avoid a dangle. */
		((struct lysc_node *)snode)->priv = NULL;
		XFREE(MTYPE_NB_NODE, nb_node);
	}

	return YANG_ITER_CONTINUE;
}
/* Create northbound nodes for every schema node of all loaded YANG modules. */
void nb_nodes_create(void)
{
	yang_snodes_iterate(NULL, nb_node_new_cb, 0, NULL);
}
/* Delete the northbound nodes of all loaded YANG modules. */
void nb_nodes_delete(void)
{
	yang_snodes_iterate(NULL, nb_node_del_cb, 0, NULL);
}
/*
 * Find the northbound node corresponding to a YANG data path. Returns NULL
 * when the schema node doesn't exist (or has no northbound node attached).
 *
 * NOTE(review): the NULL-check on snode and the final return were lost in
 * extraction and are restored from upstream FRR — confirm.
 */
struct nb_node *nb_node_find(const char *path)
{
	const struct lysc_node *snode;

	/*
	 * Use libyang to find the schema node associated to the path and get
	 * the northbound node from there (snode private pointer).
	 */
	snode = lys_find_path(ly_native_ctx, NULL, path, 0);
	if (!snode)
		return NULL;

	return snode->priv;
}
/*
 * Register dependency callbacks between two nodes identified by xpath: the
 * dependency node learns how to derive its dependant's xpath and the
 * dependant learns the reverse. Silently does nothing when either node
 * cannot be found.
 */
void nb_node_set_dependency_cbs(const char *dependency_xpath,
				const char *dependant_xpath,
				struct nb_dependency_callbacks *cbs)
{
	struct nb_node *dependency = nb_node_find(dependency_xpath);
	struct nb_node *dependant = nb_node_find(dependant_xpath);

	if (!dependency || !dependant)
		return;

	dependency->dep_cbs.get_dependant_xpath = cbs->get_dependant_xpath;
	dependant->dep_cbs.get_dependency_xpath = cbs->get_dependency_xpath;
}
199 bool nb_node_has_dependency(struct nb_node
*node
)
201 return node
->dep_cbs
.get_dependency_xpath
!= NULL
;
204 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
205 enum nb_operation operation
,
206 int callback_implemented
, bool optional
)
210 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
213 * Add an exception for operational data callbacks. A rw list usually
214 * doesn't need any associated operational data callbacks. But if this
215 * rw list is augmented by another module which adds state nodes under
216 * it, then this list will need to have the 'get_next()', 'get_keys()'
217 * and 'lookup_entry()' callbacks. As such, never log a warning when
218 * these callbacks are implemented when they are not needed, since this
219 * depends on context (e.g. some daemons might augment "frr-interface"
220 * while others don't).
222 if (!valid
&& callback_implemented
&& operation
!= NB_OP_GET_NEXT
223 && operation
!= NB_OP_GET_KEYS
&& operation
!= NB_OP_LOOKUP_ENTRY
)
224 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
225 "unneeded '%s' callback for '%s'",
226 nb_operation_name(operation
), nb_node
->xpath
);
228 if (!optional
&& valid
&& !callback_implemented
) {
229 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
230 nb_operation_name(operation
), nb_node
->xpath
);
238 * Check if the required callbacks were implemented for the given northbound
241 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
244 unsigned int error
= 0;
246 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
247 !!nb_node
->cbs
.create
, false);
248 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
249 !!nb_node
->cbs
.modify
, false);
250 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
251 !!nb_node
->cbs
.destroy
, false);
252 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
254 error
+= nb_node_validate_cb(nb_node
, NB_OP_PRE_VALIDATE
,
255 !!nb_node
->cbs
.pre_validate
, true);
256 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
257 !!nb_node
->cbs
.apply_finish
, true);
258 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
259 !!nb_node
->cbs
.get_elem
, false);
260 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
261 !!nb_node
->cbs
.get_next
, false);
262 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
263 !!nb_node
->cbs
.get_keys
, false);
264 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
265 !!nb_node
->cbs
.lookup_entry
, false);
266 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
272 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
274 /* Top-level nodes can have any priority. */
275 if (!nb_node
->parent
)
278 if (nb_node
->priority
< nb_node
->parent
->priority
) {
279 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
280 "node has higher priority than its parent [xpath %s]",
288 static int nb_node_validate(const struct lysc_node
*snode
, void *arg
)
290 struct nb_node
*nb_node
= snode
->priv
;
291 unsigned int *errors
= arg
;
293 /* Validate callbacks and priority. */
295 *errors
+= nb_node_validate_cbs(nb_node
);
296 *errors
+= nb_node_validate_priority(nb_node
);
299 return YANG_ITER_CONTINUE
;
/*
 * Allocate a new configuration container. Takes ownership of 'dnode' when
 * one is supplied; otherwise a fresh (default-populated) data tree is
 * created.
 *
 * NOTE(review): the "if (dnode) ... else ..." lines were lost in extraction
 * and are restored from upstream FRR — confirm.
 */
struct nb_config *nb_config_new(struct lyd_node *dnode)
{
	struct nb_config *config;

	config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
	if (dnode)
		config->dnode = dnode;
	else
		config->dnode = yang_dnode_new(ly_native_ctx, true);
	config->version = 0;

	return config;
}
/*
 * Free a configuration container and the data tree it owns (if any).
 *
 * NOTE(review): the "if (config->dnode)" guard was lost in extraction and is
 * restored from upstream FRR — confirm.
 */
void nb_config_free(struct nb_config *config)
{
	if (config->dnode)
		yang_dnode_free(config->dnode);
	XFREE(MTYPE_NB_CONFIG, config);
}
/*
 * Deep-copy a configuration: duplicates the libyang data tree and carries the
 * version number over. The caller owns (and must free) the returned copy.
 */
struct nb_config *nb_config_dup(const struct nb_config *config)
{
	struct nb_config *dup;

	dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
	dup->dnode = yang_dnode_dup(config->dnode);
	dup->version = config->version;

	return dup;
}
/*
 * Merge config_src into config_dst via lyd_merge_siblings(). When
 * preserve_source is false, config_src is consumed (freed) regardless of the
 * merge result. Returns NB_OK on success, NB_ERR otherwise.
 *
 * NOTE(review): the "int ret;" declaration and the "if (ret != 0)" guard
 * before the warning were lost in extraction and are restored from upstream
 * FRR — confirm.
 */
int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
		    bool preserve_source)
{
	int ret;

	ret = lyd_merge_siblings(&config_dst->dnode, config_src->dnode, 0);
	if (ret != 0)
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);

	if (!preserve_source)
		nb_config_free(config_src);

	return (ret == 0) ? NB_OK : NB_ERR;
}
/*
 * Replace config_dst's data tree with config_src's. When preserve_source is
 * true the source tree is duplicated; otherwise ownership of the tree moves
 * to config_dst and config_src itself is freed. A zero source version is
 * treated as "unknown" and does not overwrite the destination's version.
 */
void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		config_dst->dnode = config_src->dnode;
		/* Detach before freeing so the tree isn't freed with it. */
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}
368 /* Generate the nb_config_cbs tree. */
/*
 * Red-black tree comparison function for configuration callbacks: order by
 * node priority, then by the sequence number libyang assigned to the change,
 * then by dnode pointer as a final tie-breaker.
 *
 * NOTE(review): the return statements and the middle seq-number comparison
 * were lost in extraction; restored from upstream FRR — confirm.
 */
static inline int nb_config_cb_compare(const struct nb_config_cb *a,
				       const struct nb_config_cb *b)
{
	/* Sort by priority first. */
	if (a->nb_node->priority < b->nb_node->priority)
		return -1;
	if (a->nb_node->priority > b->nb_node->priority)
		return 1;

	/*
	 * Preserve the order of the configuration changes as told by libyang.
	 */
	if (a->seq < b->seq)
		return -1;
	if (a->seq > b->seq)
		return 1;

	/*
	 * All 'apply_finish' callbacks have their sequence number set to zero.
	 * In this case, compare them using their dnode pointers (the order
	 * doesn't matter for callbacks that have the same priority).
	 */
	if (a->dnode < b->dnode)
		return -1;
	if (a->dnode > b->dnode)
		return 1;

	return 0;
}
398 RB_GENERATE(nb_config_cbs
, nb_config_cb
, entry
, nb_config_cb_compare
);
400 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
401 enum nb_operation operation
,
403 const struct lyd_node
*dnode
)
405 struct nb_config_change
*change
;
407 /* Ignore unimplemented nodes. */
408 if (!dnode
->schema
->priv
)
411 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
412 change
->cb
.operation
= operation
;
413 change
->cb
.seq
= *seq
;
415 change
->cb
.nb_node
= dnode
->schema
->priv
;
416 change
->cb
.dnode
= dnode
;
418 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
421 static void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
423 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
424 struct nb_config_change
*change
;
426 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
428 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
429 XFREE(MTYPE_TMP
, change
);
434 * Helper function used when calculating the delta between two different
435 * configurations. Given a new subtree, calculate all new YANG data nodes,
436 * excluding default leafs and leaf-lists. This is a recursive function.
438 static void nb_config_diff_created(const struct lyd_node
*dnode
, uint32_t *seq
,
439 struct nb_config_cbs
*changes
)
441 enum nb_operation operation
;
442 struct lyd_node
*child
;
444 /* Ignore unimplemented nodes. */
445 if (!dnode
->schema
->priv
)
448 switch (dnode
->schema
->nodetype
) {
451 if (lyd_is_default(dnode
))
454 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
455 operation
= NB_OP_CREATE
;
456 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
457 operation
= NB_OP_MODIFY
;
461 nb_config_diff_add_change(changes
, operation
, seq
, dnode
);
465 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
466 nb_config_diff_add_change(changes
, NB_OP_CREATE
, seq
,
469 /* Process child nodes recursively. */
470 LY_LIST_FOR (lyd_child(dnode
), child
) {
471 nb_config_diff_created(child
, seq
, changes
);
479 static void nb_config_diff_deleted(const struct lyd_node
*dnode
, uint32_t *seq
,
480 struct nb_config_cbs
*changes
)
482 /* Ignore unimplemented nodes. */
483 if (!dnode
->schema
->priv
)
486 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
487 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, seq
, dnode
);
488 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
489 struct lyd_node
*child
;
492 * Non-presence containers need special handling since they
493 * don't have "destroy" callbacks. In this case, what we need to
494 * do is to call the "destroy" callbacks of their child nodes
495 * when applicable (i.e. optional nodes).
497 LY_LIST_FOR (lyd_child(dnode
), child
) {
498 nb_config_diff_deleted(child
, seq
, changes
);
/*
 * Return the libyang diff operation ('c'reate, 'd'elete, 'r'eplace, ...)
 * recorded in the "yang:operation" metadata of a diff node.
 *
 * NOTE(review): the "continue;" inside the loop and the default return value
 * ('n', i.e. none — per upstream FRR) were lost in extraction — confirm.
 */
static int nb_lyd_diff_get_op(const struct lyd_node *dnode)
{
	const struct lyd_meta *meta;

	LY_LIST_FOR (dnode->meta, meta) {
		/* Skip anything that isn't the yang:operation annotation. */
		if (strcmp(meta->name, "operation")
		    || strcmp(meta->annotation->module->name, "yang"))
			continue;
		return lyd_get_meta_value(meta)[0];
	}

	return 'n';
}
515 #if 0 /* Used below in nb_config_diff inside normally disabled code */
516 static inline void nb_config_diff_dnode_log_path(const char *context
,
518 const struct lyd_node
*dnode
)
520 if (dnode
->schema
->nodetype
& LYD_NODE_TERM
)
521 zlog_debug("nb_config_diff: %s: %s: %s", context
, path
,
522 lyd_get_value(dnode
));
524 zlog_debug("nb_config_diff: %s: %s", context
, path
);
527 static inline void nb_config_diff_dnode_log(const char *context
,
528 const struct lyd_node
*dnode
)
531 zlog_debug("nb_config_diff: %s: NULL", context
);
535 char *path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
536 nb_config_diff_dnode_log_path(context
, path
, dnode
);
541 /* Calculate the delta between two different configurations. */
542 static void nb_config_diff(const struct nb_config
*config1
,
543 const struct nb_config
*config2
,
544 struct nb_config_cbs
*changes
)
546 struct lyd_node
*diff
= NULL
;
547 const struct lyd_node
*root
, *dnode
;
548 struct lyd_node
*target
;
553 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
554 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
555 LY_LIST_FOR(config1
->dnode
, root
) {
556 LYD_TREE_DFS_BEGIN(root
, dnode
) {
557 nb_config_diff_dnode_log("from", dnode
);
558 LYD_TREE_DFS_END(root
, dnode
);
561 LY_LIST_FOR(config2
->dnode
, root
) {
562 LYD_TREE_DFS_BEGIN(root
, dnode
) {
563 nb_config_diff_dnode_log("to", dnode
);
564 LYD_TREE_DFS_END(root
, dnode
);
570 err
= lyd_diff_siblings(config1
->dnode
, config2
->dnode
,
571 LYD_DIFF_DEFAULTS
, &diff
);
574 if (diff
&& DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
577 if (!lyd_print_mem(&s
, diff
, LYD_JSON
,
578 LYD_PRINT_WITHSIBLINGS
| LYD_PRINT_WD_ALL
)) {
579 zlog_debug("%s: %s", __func__
, s
);
586 LY_LIST_FOR (diff
, root
) {
587 LYD_TREE_DFS_BEGIN (root
, dnode
) {
588 op
= nb_lyd_diff_get_op(dnode
);
590 path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
592 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
593 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
595 snprintf(context
, sizeof(context
),
596 "iterating diff: oper: %c seq: %u", op
, seq
);
597 nb_config_diff_dnode_log_path(context
, path
, dnode
);
601 case 'c': /* create */
603 * This is rather inefficient, but when we use
604 * dnode from the diff instead of the
605 * candidate config node we get failures when
606 * looking up default values, etc, based on
609 target
= yang_dnode_get(config2
->dnode
, path
);
611 nb_config_diff_created(target
, &seq
, changes
);
613 /* Skip rest of sub-tree, move to next sibling
615 LYD_TREE_DFS_continue
= 1;
617 case 'd': /* delete */
618 target
= yang_dnode_get(config1
->dnode
, path
);
620 nb_config_diff_deleted(target
, &seq
, changes
);
622 /* Skip rest of sub-tree, move to next sibling
624 LYD_TREE_DFS_continue
= 1;
626 case 'r': /* replace */
627 /* either moving an entry or changing a value */
628 target
= yang_dnode_get(config2
->dnode
, path
);
630 nb_config_diff_add_change(changes
, NB_OP_MODIFY
,
638 LYD_TREE_DFS_END(root
, dnode
);
645 int nb_candidate_edit(struct nb_config
*candidate
,
646 const struct nb_node
*nb_node
,
647 enum nb_operation operation
, const char *xpath
,
648 const struct yang_data
*previous
,
649 const struct yang_data
*data
)
651 struct lyd_node
*dnode
, *dep_dnode
;
652 char xpath_edit
[XPATH_MAXLEN
];
653 char dep_xpath
[XPATH_MAXLEN
];
656 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
657 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
658 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
661 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
666 err
= lyd_new_path(candidate
->dnode
, ly_native_ctx
, xpath_edit
,
667 (void *)data
->value
, LYD_NEW_PATH_UPDATE
,
670 flog_warn(EC_LIB_LIBYANG
,
671 "%s: lyd_new_path(%s) failed: %d", __func__
,
675 /* Create default nodes */
676 LY_ERR err
= lyd_new_implicit_tree(
677 dnode
, LYD_IMPLICIT_NO_STATE
, NULL
);
679 flog_warn(EC_LIB_LIBYANG
,
680 "%s: lyd_new_implicit_all failed: %d",
686 * dnode returned by the lyd_new_path may be from a
687 * different schema, so we need to update the nb_node
689 nb_node
= dnode
->schema
->priv
;
690 if (nb_node
->dep_cbs
.get_dependency_xpath
) {
691 nb_node
->dep_cbs
.get_dependency_xpath(
694 err
= lyd_new_path(candidate
->dnode
,
695 ly_native_ctx
, dep_xpath
,
696 NULL
, LYD_NEW_PATH_UPDATE
,
698 /* Create default nodes */
699 if (!err
&& dep_dnode
)
700 err
= lyd_new_implicit_tree(
702 LYD_IMPLICIT_NO_STATE
, NULL
);
706 "%s: dependency: lyd_new_path(%s) failed: %d",
707 __func__
, dep_xpath
, err
);
714 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
717 * Return a special error code so the caller can choose
718 * whether to ignore it or not.
720 return NB_ERR_NOT_FOUND
;
721 /* destroy dependant */
722 if (nb_node
->dep_cbs
.get_dependant_xpath
) {
723 nb_node
->dep_cbs
.get_dependant_xpath(dnode
, dep_xpath
);
725 dep_dnode
= yang_dnode_get(candidate
->dnode
, dep_xpath
);
727 lyd_free_tree(dep_dnode
);
729 lyd_free_tree(dnode
);
732 /* TODO: update configuration. */
735 flog_warn(EC_LIB_DEVELOPMENT
,
736 "%s: unknown operation (%u) [xpath %s]", __func__
,
737 operation
, xpath_edit
);
744 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
746 if (candidate
->version
< running_config
->version
)
/*
 * Rebase a candidate on top of the current running configuration: duplicate
 * running, merge the candidate's changes into the copy, then replace the
 * candidate with the merged result. Returns NB_OK on success.
 *
 * NOTE(review): the "return NB_ERR;" on merge failure and the final
 * "return NB_OK;" were lost in extraction; restored from upstream FRR —
 * confirm.
 */
int nb_candidate_update(struct nb_config *candidate)
{
	struct nb_config *updated_config;

	updated_config = nb_config_dup(running_config);
	if (nb_config_merge(updated_config, candidate, true) != NB_OK)
		return NB_ERR;

	nb_config_replace(candidate, updated_config, false);

	return NB_OK;
}
766 * Perform YANG syntactic and semantic validation.
768 * WARNING: lyd_validate() can change the configuration as part of the
769 * validation process.
771 static int nb_candidate_validate_yang(struct nb_config
*candidate
, char *errmsg
,
774 if (lyd_validate_all(&candidate
->dnode
, ly_native_ctx
,
775 LYD_VALIDATE_NO_STATE
, NULL
)
777 yang_print_errors(ly_native_ctx
, errmsg
, errmsg_len
);
778 return NB_ERR_VALIDATION
;
784 /* Perform code-level validation using the northbound callbacks. */
785 static int nb_candidate_validate_code(struct nb_context
*context
,
786 struct nb_config
*candidate
,
787 struct nb_config_cbs
*changes
,
788 char *errmsg
, size_t errmsg_len
)
790 struct nb_config_cb
*cb
;
791 struct lyd_node
*root
, *child
;
794 /* First validate the candidate as a whole. */
795 LY_LIST_FOR (candidate
->dnode
, root
) {
796 LYD_TREE_DFS_BEGIN (root
, child
) {
797 struct nb_node
*nb_node
;
799 nb_node
= child
->schema
->priv
;
800 if (!nb_node
|| !nb_node
->cbs
.pre_validate
)
803 ret
= nb_callback_pre_validate(context
, nb_node
, child
,
806 return NB_ERR_VALIDATION
;
809 LYD_TREE_DFS_END(root
, child
);
813 /* Now validate the configuration changes. */
814 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
815 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
817 ret
= nb_callback_configuration(context
, NB_EV_VALIDATE
, change
,
820 return NB_ERR_VALIDATION
;
826 int nb_candidate_validate(struct nb_context
*context
,
827 struct nb_config
*candidate
, char *errmsg
,
830 struct nb_config_cbs changes
;
833 if (nb_candidate_validate_yang(candidate
, errmsg
, sizeof(errmsg_len
))
835 return NB_ERR_VALIDATION
;
837 RB_INIT(nb_config_cbs
, &changes
);
838 nb_config_diff(running_config
, candidate
, &changes
);
839 ret
= nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
841 nb_config_diff_del_changes(&changes
);
846 int nb_candidate_commit_prepare(struct nb_context
*context
,
847 struct nb_config
*candidate
,
849 struct nb_transaction
**transaction
,
850 char *errmsg
, size_t errmsg_len
)
852 struct nb_config_cbs changes
;
854 if (nb_candidate_validate_yang(candidate
, errmsg
, errmsg_len
)
856 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
857 "%s: failed to validate candidate configuration",
859 return NB_ERR_VALIDATION
;
862 RB_INIT(nb_config_cbs
, &changes
);
863 nb_config_diff(running_config
, candidate
, &changes
);
864 if (RB_EMPTY(nb_config_cbs
, &changes
)) {
867 "No changes to apply were found during preparation phase");
868 return NB_ERR_NO_CHANGES
;
871 if (nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
874 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
875 "%s: failed to validate candidate configuration",
877 nb_config_diff_del_changes(&changes
);
878 return NB_ERR_VALIDATION
;
881 *transaction
= nb_transaction_new(context
, candidate
, &changes
, comment
,
883 if (*transaction
== NULL
) {
884 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
885 "%s: failed to create transaction: %s", __func__
,
887 nb_config_diff_del_changes(&changes
);
888 return NB_ERR_LOCKED
;
891 return nb_transaction_process(NB_EV_PREPARE
, *transaction
, errmsg
,
895 void nb_candidate_commit_abort(struct nb_transaction
*transaction
, char *errmsg
,
898 (void)nb_transaction_process(NB_EV_ABORT
, transaction
, errmsg
,
900 nb_transaction_free(transaction
);
903 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
904 bool save_transaction
, uint32_t *transaction_id
,
905 char *errmsg
, size_t errmsg_len
)
907 (void)nb_transaction_process(NB_EV_APPLY
, transaction
, errmsg
,
909 nb_transaction_apply_finish(transaction
, errmsg
, errmsg_len
);
911 /* Replace running by candidate. */
912 transaction
->config
->version
++;
913 nb_config_replace(running_config
, transaction
->config
, true);
915 /* Record transaction. */
916 if (save_transaction
&& nb_db_enabled
917 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
918 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
919 "%s: failed to record transaction", __func__
);
921 nb_transaction_free(transaction
);
/*
 * Full commit of a candidate configuration: run the preparation phase, then
 * apply the transaction on success or abort it on failure. Returns the
 * result of the preparation phase.
 *
 * NOTE(review): the "int ret;" declaration and final "return ret;" were lost
 * in extraction; restored from upstream FRR — confirm.
 */
int nb_candidate_commit(struct nb_context *context, struct nb_config *candidate,
			bool save_transaction, const char *comment,
			uint32_t *transaction_id, char *errmsg,
			size_t errmsg_len)
{
	struct nb_transaction *transaction = NULL;
	int ret;

	ret = nb_candidate_commit_prepare(context, candidate, comment,
					  &transaction, errmsg, errmsg_len);
	/*
	 * Apply the changes if the preparation phase succeeded. Otherwise abort
	 * the transaction (prepare may fail before a transaction exists, hence
	 * the NULL check).
	 */
	if (ret == NB_OK)
		nb_candidate_commit_apply(transaction, save_transaction,
					  transaction_id, errmsg, errmsg_len);
	else if (transaction != NULL)
		nb_candidate_commit_abort(transaction, errmsg, errmsg_len);

	return ret;
}
/*
 * Try to acquire the running-configuration management lock on behalf of the
 * given client/user pair. Returns 0 on success, -1 if someone else already
 * holds the lock.
 *
 * NOTE(review): the ret-variable plumbing and closing braces were lost in
 * extraction; restored from upstream FRR — confirm.
 */
int nb_running_lock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked) {
			running_config_mgmt_lock.locked = true;
			running_config_mgmt_lock.owner_client = client;
			running_config_mgmt_lock.owner_user = user;
			ret = 0;
		}
	}

	return ret;
}
/*
 * Release the running-configuration management lock. Only succeeds (returns
 * 0) when the exact client/user pair that acquired the lock releases it;
 * otherwise returns -1 and the lock is left untouched.
 *
 * NOTE(review): the ret-variable plumbing and closing braces were lost in
 * extraction; restored from upstream FRR — confirm.
 */
int nb_running_unlock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (running_config_mgmt_lock.locked
		    && running_config_mgmt_lock.owner_client == client
		    && running_config_mgmt_lock.owner_user == user) {
			running_config_mgmt_lock.locked = false;
			running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
			running_config_mgmt_lock.owner_user = NULL;
			ret = 0;
		}
	}

	return ret;
}
/*
 * Check whether the given client/user pair may modify the running
 * configuration: allowed (returns 0) when the lock is free or when this very
 * pair holds it; -1 otherwise.
 *
 * NOTE(review): the ret-variable plumbing and closing braces were lost in
 * extraction; restored from upstream FRR — confirm.
 */
int nb_running_lock_check(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked
		    || (running_config_mgmt_lock.owner_client == client
			&& running_config_mgmt_lock.owner_user == user))
			ret = 0;
	}

	return ret;
}
995 static void nb_log_config_callback(const enum nb_event event
,
996 enum nb_operation operation
,
997 const struct lyd_node
*dnode
)
1000 char xpath
[XPATH_MAXLEN
];
1002 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
1005 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1006 if (yang_snode_is_typeless_data(dnode
->schema
))
1009 value
= yang_dnode_get_string(dnode
, NULL
);
1012 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
1013 nb_event_name(event
), nb_operation_name(operation
), xpath
,
1017 static int nb_callback_create(struct nb_context
*context
,
1018 const struct nb_node
*nb_node
,
1019 enum nb_event event
, const struct lyd_node
*dnode
,
1020 union nb_resource
*resource
, char *errmsg
,
1023 struct nb_cb_create_args args
= {};
1024 bool unexpected_error
= false;
1027 nb_log_config_callback(event
, NB_OP_CREATE
, dnode
);
1029 args
.context
= context
;
1032 args
.resource
= resource
;
1033 args
.errmsg
= errmsg
;
1034 args
.errmsg_len
= errmsg_len
;
1035 ret
= nb_node
->cbs
.create(&args
);
1037 /* Detect and log unexpected errors. */
1042 case NB_ERR_VALIDATION
:
1043 if (event
!= NB_EV_VALIDATE
)
1044 unexpected_error
= true;
1046 case NB_ERR_RESOURCE
:
1047 if (event
!= NB_EV_PREPARE
)
1048 unexpected_error
= true;
1050 case NB_ERR_INCONSISTENCY
:
1051 if (event
== NB_EV_VALIDATE
)
1052 unexpected_error
= true;
1055 unexpected_error
= true;
1058 if (unexpected_error
)
1059 DEBUGD(&nb_dbg_cbs_config
,
1060 "northbound callback: unexpected return value: %s",
1066 static int nb_callback_modify(struct nb_context
*context
,
1067 const struct nb_node
*nb_node
,
1068 enum nb_event event
, const struct lyd_node
*dnode
,
1069 union nb_resource
*resource
, char *errmsg
,
1072 struct nb_cb_modify_args args
= {};
1073 bool unexpected_error
= false;
1076 nb_log_config_callback(event
, NB_OP_MODIFY
, dnode
);
1078 args
.context
= context
;
1081 args
.resource
= resource
;
1082 args
.errmsg
= errmsg
;
1083 args
.errmsg_len
= errmsg_len
;
1084 ret
= nb_node
->cbs
.modify(&args
);
1086 /* Detect and log unexpected errors. */
1091 case NB_ERR_VALIDATION
:
1092 if (event
!= NB_EV_VALIDATE
)
1093 unexpected_error
= true;
1095 case NB_ERR_RESOURCE
:
1096 if (event
!= NB_EV_PREPARE
)
1097 unexpected_error
= true;
1099 case NB_ERR_INCONSISTENCY
:
1100 if (event
== NB_EV_VALIDATE
)
1101 unexpected_error
= true;
1104 unexpected_error
= true;
1107 if (unexpected_error
)
1108 DEBUGD(&nb_dbg_cbs_config
,
1109 "northbound callback: unexpected return value: %s",
1115 static int nb_callback_destroy(struct nb_context
*context
,
1116 const struct nb_node
*nb_node
,
1117 enum nb_event event
,
1118 const struct lyd_node
*dnode
, char *errmsg
,
1121 struct nb_cb_destroy_args args
= {};
1122 bool unexpected_error
= false;
1125 nb_log_config_callback(event
, NB_OP_DESTROY
, dnode
);
1127 args
.context
= context
;
1130 args
.errmsg
= errmsg
;
1131 args
.errmsg_len
= errmsg_len
;
1132 ret
= nb_node
->cbs
.destroy(&args
);
1134 /* Detect and log unexpected errors. */
1139 case NB_ERR_VALIDATION
:
1140 if (event
!= NB_EV_VALIDATE
)
1141 unexpected_error
= true;
1143 case NB_ERR_INCONSISTENCY
:
1144 if (event
== NB_EV_VALIDATE
)
1145 unexpected_error
= true;
1148 unexpected_error
= true;
1151 if (unexpected_error
)
1152 DEBUGD(&nb_dbg_cbs_config
,
1153 "northbound callback: unexpected return value: %s",
1159 static int nb_callback_move(struct nb_context
*context
,
1160 const struct nb_node
*nb_node
, enum nb_event event
,
1161 const struct lyd_node
*dnode
, char *errmsg
,
1164 struct nb_cb_move_args args
= {};
1165 bool unexpected_error
= false;
1168 nb_log_config_callback(event
, NB_OP_MOVE
, dnode
);
1170 args
.context
= context
;
1173 args
.errmsg
= errmsg
;
1174 args
.errmsg_len
= errmsg_len
;
1175 ret
= nb_node
->cbs
.move(&args
);
1177 /* Detect and log unexpected errors. */
1182 case NB_ERR_VALIDATION
:
1183 if (event
!= NB_EV_VALIDATE
)
1184 unexpected_error
= true;
1186 case NB_ERR_INCONSISTENCY
:
1187 if (event
== NB_EV_VALIDATE
)
1188 unexpected_error
= true;
1191 unexpected_error
= true;
1194 if (unexpected_error
)
1195 DEBUGD(&nb_dbg_cbs_config
,
1196 "northbound callback: unexpected return value: %s",
1202 static int nb_callback_pre_validate(struct nb_context
*context
,
1203 const struct nb_node
*nb_node
,
1204 const struct lyd_node
*dnode
, char *errmsg
,
1207 struct nb_cb_pre_validate_args args
= {};
1208 bool unexpected_error
= false;
1211 nb_log_config_callback(NB_EV_VALIDATE
, NB_OP_PRE_VALIDATE
, dnode
);
1214 args
.errmsg
= errmsg
;
1215 args
.errmsg_len
= errmsg_len
;
1216 ret
= nb_node
->cbs
.pre_validate(&args
);
1218 /* Detect and log unexpected errors. */
1221 case NB_ERR_VALIDATION
:
1224 unexpected_error
= true;
1227 if (unexpected_error
)
1228 DEBUGD(&nb_dbg_cbs_config
,
1229 "northbound callback: unexpected return value: %s",
1235 static void nb_callback_apply_finish(struct nb_context
*context
,
1236 const struct nb_node
*nb_node
,
1237 const struct lyd_node
*dnode
, char *errmsg
,
1240 struct nb_cb_apply_finish_args args
= {};
1242 nb_log_config_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
, dnode
);
1244 args
.context
= context
;
1246 args
.errmsg
= errmsg
;
1247 args
.errmsg_len
= errmsg_len
;
1248 nb_node
->cbs
.apply_finish(&args
);
1251 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
1253 const void *list_entry
)
1255 struct nb_cb_get_elem_args args
= {};
1257 DEBUGD(&nb_dbg_cbs_state
,
1258 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1262 args
.list_entry
= list_entry
;
1263 return nb_node
->cbs
.get_elem(&args
);
1266 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
1267 const void *parent_list_entry
,
1268 const void *list_entry
)
1270 struct nb_cb_get_next_args args
= {};
1272 DEBUGD(&nb_dbg_cbs_state
,
1273 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1274 nb_node
->xpath
, parent_list_entry
, list_entry
);
1276 args
.parent_list_entry
= parent_list_entry
;
1277 args
.list_entry
= list_entry
;
1278 return nb_node
->cbs
.get_next(&args
);
1281 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
1282 struct yang_list_keys
*keys
)
1284 struct nb_cb_get_keys_args args
= {};
1286 DEBUGD(&nb_dbg_cbs_state
,
1287 "northbound callback (get_keys): node [%s] list_entry [%p]",
1288 nb_node
->xpath
, list_entry
);
1290 args
.list_entry
= list_entry
;
1292 return nb_node
->cbs
.get_keys(&args
);
1295 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
1296 const void *parent_list_entry
,
1297 const struct yang_list_keys
*keys
)
1299 struct nb_cb_lookup_entry_args args
= {};
1301 DEBUGD(&nb_dbg_cbs_state
,
1302 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1303 nb_node
->xpath
, parent_list_entry
);
1305 args
.parent_list_entry
= parent_list_entry
;
1307 return nb_node
->cbs
.lookup_entry(&args
);
1310 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
1311 const struct list
*input
, struct list
*output
, char *errmsg
,
1314 struct nb_cb_rpc_args args
= {};
1316 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
1320 args
.output
= output
;
1321 args
.errmsg
= errmsg
;
1322 args
.errmsg_len
= errmsg_len
;
1323 return nb_node
->cbs
.rpc(&args
);
1327 * Call the northbound configuration callback associated to a given
1328 * configuration change.
1330 static int nb_callback_configuration(struct nb_context
*context
,
1331 const enum nb_event event
,
1332 struct nb_config_change
*change
,
1333 char *errmsg
, size_t errmsg_len
)
1335 enum nb_operation operation
= change
->cb
.operation
;
1336 char xpath
[XPATH_MAXLEN
];
1337 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
1338 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1339 union nb_resource
*resource
;
1342 if (event
== NB_EV_VALIDATE
)
1345 resource
= &change
->resource
;
1347 switch (operation
) {
1349 ret
= nb_callback_create(context
, nb_node
, event
, dnode
,
1350 resource
, errmsg
, errmsg_len
);
1353 ret
= nb_callback_modify(context
, nb_node
, event
, dnode
,
1354 resource
, errmsg
, errmsg_len
);
1357 ret
= nb_callback_destroy(context
, nb_node
, event
, dnode
,
1358 errmsg
, errmsg_len
);
1361 ret
= nb_callback_move(context
, nb_node
, event
, dnode
, errmsg
,
1365 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1366 flog_err(EC_LIB_DEVELOPMENT
,
1367 "%s: unknown operation (%u) [xpath %s]", __func__
,
1373 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1376 case NB_EV_VALIDATE
:
1377 flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE
,
1378 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1379 nb_err_name(ret
), nb_event_name(event
),
1380 nb_operation_name(operation
), xpath
,
1381 errmsg
[0] ? " message: " : "", errmsg
);
1384 flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE
,
1385 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1386 nb_err_name(ret
), nb_event_name(event
),
1387 nb_operation_name(operation
), xpath
,
1388 errmsg
[0] ? " message: " : "", errmsg
);
1391 flog_warn(EC_LIB_NB_CB_CONFIG_ABORT
,
1392 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1393 nb_err_name(ret
), nb_event_name(event
),
1394 nb_operation_name(operation
), xpath
,
1395 errmsg
[0] ? " message: " : "", errmsg
);
1398 flog_err(EC_LIB_NB_CB_CONFIG_APPLY
,
1399 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1400 nb_err_name(ret
), nb_event_name(event
),
1401 nb_operation_name(operation
), xpath
,
1402 errmsg
[0] ? " message: " : "", errmsg
);
1405 flog_err(EC_LIB_DEVELOPMENT
,
1406 "%s: unknown event (%u) [xpath %s]", __func__
,
1415 static struct nb_transaction
*
1416 nb_transaction_new(struct nb_context
*context
, struct nb_config
*config
,
1417 struct nb_config_cbs
*changes
, const char *comment
,
1418 char *errmsg
, size_t errmsg_len
)
1420 struct nb_transaction
*transaction
;
1422 if (nb_running_lock_check(context
->client
, context
->user
)) {
1424 "running configuration is locked by another client",
1429 if (transaction_in_progress
) {
1431 "there's already another transaction in progress",
1435 transaction_in_progress
= true;
1437 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
1438 transaction
->context
= context
;
1440 strlcpy(transaction
->comment
, comment
,
1441 sizeof(transaction
->comment
));
1442 transaction
->config
= config
;
1443 transaction
->changes
= *changes
;
1448 static void nb_transaction_free(struct nb_transaction
*transaction
)
1450 nb_config_diff_del_changes(&transaction
->changes
);
1451 XFREE(MTYPE_TMP
, transaction
);
1452 transaction_in_progress
= false;
1455 /* Process all configuration changes associated to a transaction. */
1456 static int nb_transaction_process(enum nb_event event
,
1457 struct nb_transaction
*transaction
,
1458 char *errmsg
, size_t errmsg_len
)
1460 struct nb_config_cb
*cb
;
1462 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1463 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1467 * Only try to release resources that were allocated
1470 if (event
== NB_EV_ABORT
&& !change
->prepare_ok
)
1473 /* Call the appropriate callback. */
1474 ret
= nb_callback_configuration(transaction
->context
, event
,
1475 change
, errmsg
, errmsg_len
);
1480 change
->prepare_ok
= true;
1485 * At this point it's not possible to reject the
1486 * transaction anymore, so any failure here can lead to
1487 * inconsistencies and should be treated as a bug.
1488 * Operations prone to errors, like validations and
1489 * resource allocations, should be performed during the
1501 static struct nb_config_cb
*
1502 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const struct nb_node
*nb_node
,
1503 const struct lyd_node
*dnode
)
1505 struct nb_config_cb
*cb
;
1507 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1508 cb
->nb_node
= nb_node
;
1510 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1515 static struct nb_config_cb
*
1516 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
,
1517 const struct nb_node
*nb_node
,
1518 const struct lyd_node
*dnode
)
1520 struct nb_config_cb s
;
1523 s
.nb_node
= nb_node
;
1525 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1528 /* Call the 'apply_finish' callbacks. */
1529 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
1530 char *errmsg
, size_t errmsg_len
)
1532 struct nb_config_cbs cbs
;
1533 struct nb_config_cb
*cb
;
1535 /* Initialize tree of 'apply_finish' callbacks. */
1536 RB_INIT(nb_config_cbs
, &cbs
);
1538 /* Identify the 'apply_finish' callbacks that need to be called. */
1539 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1540 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1541 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1544 * Iterate up to the root of the data tree. When a node is being
1545 * deleted, skip its 'apply_finish' callback if one is defined
1546 * (the 'apply_finish' callbacks from the node ancestors should
1547 * be called though).
1549 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1550 char xpath
[XPATH_MAXLEN
];
1552 dnode
= lyd_parent(dnode
);
1557 * The dnode from 'delete' callbacks point to elements
1558 * from the running configuration. Use yang_dnode_get()
1559 * to get the corresponding dnode from the candidate
1560 * configuration that is being committed.
1562 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1563 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1567 struct nb_node
*nb_node
;
1569 nb_node
= dnode
->schema
->priv
;
1570 if (!nb_node
|| !nb_node
->cbs
.apply_finish
)
1574 * Don't call the callback more than once for the same
1577 if (nb_apply_finish_cb_find(&cbs
, nb_node
, dnode
))
1580 nb_apply_finish_cb_new(&cbs
, nb_node
, dnode
);
1583 dnode
= lyd_parent(dnode
);
1587 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1588 RB_FOREACH (cb
, nb_config_cbs
, &cbs
)
1589 nb_callback_apply_finish(transaction
->context
, cb
->nb_node
,
1590 cb
->dnode
, errmsg
, errmsg_len
);
1592 /* Release memory. */
1593 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1594 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1595 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1596 XFREE(MTYPE_TMP
, cb
);
1600 static int nb_oper_data_iter_children(const struct lysc_node
*snode
,
1601 const char *xpath
, const void *list_entry
,
1602 const struct yang_list_keys
*list_keys
,
1603 struct yang_translator
*translator
,
1604 bool first
, uint32_t flags
,
1605 nb_oper_data_cb cb
, void *arg
)
1607 const struct lysc_node
*child
;
1609 LY_LIST_FOR (lysc_node_child(snode
), child
) {
1612 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1613 list_keys
, translator
, false,
1622 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1623 const char *xpath
, const void *list_entry
,
1624 const struct yang_list_keys
*list_keys
,
1625 struct yang_translator
*translator
,
1626 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1628 struct yang_data
*data
;
1630 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1633 /* Ignore list keys. */
1634 if (lysc_is_key(nb_node
->snode
))
1637 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1639 /* Leaf of type "empty" is not present. */
1642 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1645 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1647 const void *list_entry
,
1648 const struct yang_list_keys
*list_keys
,
1649 struct yang_translator
*translator
,
1650 uint32_t flags
, nb_oper_data_cb cb
,
1653 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1656 /* Presence containers. */
1657 if (nb_node
->cbs
.get_elem
) {
1658 struct yang_data
*data
;
1661 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1663 /* Presence container is not present. */
1666 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1671 /* Iterate over the child nodes. */
1672 return nb_oper_data_iter_children(nb_node
->snode
, xpath
, list_entry
,
1673 list_keys
, translator
, false, flags
,
1678 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1679 const void *parent_list_entry
,
1680 const struct yang_list_keys
*parent_list_keys
,
1681 struct yang_translator
*translator
, uint32_t flags
,
1682 nb_oper_data_cb cb
, void *arg
)
1684 const void *list_entry
= NULL
;
1686 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1690 struct yang_data
*data
;
1693 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1696 /* End of the list. */
1699 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1703 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1706 } while (list_entry
);
1711 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1712 const char *xpath_list
,
1713 const void *parent_list_entry
,
1714 const struct yang_list_keys
*parent_list_keys
,
1715 struct yang_translator
*translator
,
1716 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1718 const struct lysc_node
*snode
= nb_node
->snode
;
1719 const void *list_entry
= NULL
;
1720 uint32_t position
= 1;
1722 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1725 /* Iterate over all list entries. */
1727 const struct lysc_node_leaf
*skey
;
1728 struct yang_list_keys list_keys
;
1729 char xpath
[XPATH_MAXLEN
* 2];
1732 /* Obtain list entry. */
1733 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1736 /* End of the list. */
1739 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1740 /* Obtain the list entry keys. */
1741 if (nb_callback_get_keys(nb_node
, list_entry
,
1744 flog_warn(EC_LIB_NB_CB_STATE
,
1745 "%s: failed to get list keys",
1750 /* Build XPath of the list entry. */
1751 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1753 LY_FOR_KEYS (snode
, skey
) {
1754 assert(i
< list_keys
.num
);
1755 snprintf(xpath
+ strlen(xpath
),
1756 sizeof(xpath
) - strlen(xpath
),
1757 "[%s='%s']", skey
->name
,
1761 assert(i
== list_keys
.num
);
1764 * Keyless list - build XPath using a positional index.
1766 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
1771 /* Iterate over the child nodes. */
1772 ret
= nb_oper_data_iter_children(
1773 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1774 translator
, false, flags
, cb
, arg
);
1777 } while (list_entry
);
1782 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
1783 const char *xpath_parent
,
1784 const void *list_entry
,
1785 const struct yang_list_keys
*list_keys
,
1786 struct yang_translator
*translator
,
1787 bool first
, uint32_t flags
,
1788 nb_oper_data_cb cb
, void *arg
)
1790 struct nb_node
*nb_node
;
1791 char xpath
[XPATH_MAXLEN
];
1794 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
1795 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
1799 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
1800 if (!first
&& snode
->nodetype
!= LYS_USES
) {
1801 struct lysc_node
*parent
;
1803 /* Get the real parent. */
1804 parent
= snode
->parent
;
1807 * When necessary, include the namespace of the augmenting
1810 if (parent
&& parent
->module
!= snode
->module
)
1811 snprintf(xpath
+ strlen(xpath
),
1812 sizeof(xpath
) - strlen(xpath
), "/%s:%s",
1813 snode
->module
->name
, snode
->name
);
1815 snprintf(xpath
+ strlen(xpath
),
1816 sizeof(xpath
) - strlen(xpath
), "/%s",
1820 nb_node
= snode
->priv
;
1821 switch (snode
->nodetype
) {
1823 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
1824 list_keys
, translator
, flags
,
1828 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
1829 list_keys
, translator
, flags
, cb
,
1833 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
1834 list_keys
, translator
, flags
,
1838 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
1839 list_keys
, translator
, flags
, cb
,
1843 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
1844 list_keys
, translator
, false,
1854 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
1855 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1857 struct nb_node
*nb_node
;
1858 const void *list_entry
= NULL
;
1859 struct yang_list_keys list_keys
;
1860 struct list
*list_dnodes
;
1861 struct lyd_node
*dnode
, *dn
;
1862 struct listnode
*ln
;
1865 nb_node
= nb_node_find(xpath
);
1867 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1868 "%s: unknown data path: %s", __func__
, xpath
);
1872 /* For now this function works only with containers and lists. */
1873 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
1875 EC_LIB_NB_OPERATIONAL_DATA
,
1876 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1882 * Create a data tree from the XPath so that we can parse the keys of
1883 * all YANG lists (if any).
1886 LY_ERR err
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
,
1887 LYD_NEW_PATH_UPDATE
, &dnode
);
1888 if (err
|| !dnode
) {
1889 const char *errmsg
=
1890 err
? ly_errmsg(ly_native_ctx
) : "node not found";
1891 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed %s",
1897 * Create a linked list to sort the data nodes starting from the root.
1899 list_dnodes
= list_new();
1900 for (dn
= dnode
; dn
; dn
= lyd_parent(dn
)) {
1901 if (dn
->schema
->nodetype
!= LYS_LIST
|| !lyd_child(dn
))
1903 listnode_add_head(list_dnodes
, dn
);
1906 * Use the northbound callbacks to find list entry pointer corresponding
1907 * to the given XPath.
1909 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
1910 struct lyd_node
*child
;
1914 /* Obtain the list entry keys. */
1915 memset(&list_keys
, 0, sizeof(list_keys
));
1916 LY_LIST_FOR (lyd_child(dn
), child
) {
1917 if (!lysc_is_key(child
->schema
))
1919 strlcpy(list_keys
.key
[n
],
1920 yang_dnode_get_string(child
, NULL
),
1921 sizeof(list_keys
.key
[n
]));
1925 if (list_keys
.num
!= yang_snode_num_keys(dn
->schema
)) {
1926 list_delete(&list_dnodes
);
1927 yang_dnode_free(dnode
);
1928 return NB_ERR_NOT_FOUND
;
1931 /* Find the list entry pointer. */
1932 nn
= dn
->schema
->priv
;
1933 if (!nn
->cbs
.lookup_entry
) {
1935 EC_LIB_NB_OPERATIONAL_DATA
,
1936 "%s: data path doesn't support iteration over operational data: %s",
1938 list_delete(&list_dnodes
);
1939 yang_dnode_free(dnode
);
1944 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
1945 if (list_entry
== NULL
) {
1946 list_delete(&list_dnodes
);
1947 yang_dnode_free(dnode
);
1948 return NB_ERR_NOT_FOUND
;
1952 /* If a list entry was given, iterate over that list entry only. */
1953 if (dnode
->schema
->nodetype
== LYS_LIST
&& lyd_child(dnode
))
1954 ret
= nb_oper_data_iter_children(
1955 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1956 translator
, true, flags
, cb
, arg
);
1958 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
1959 &list_keys
, translator
, true,
1962 list_delete(&list_dnodes
);
1963 yang_dnode_free(dnode
);
1968 bool nb_operation_is_valid(enum nb_operation operation
,
1969 const struct lysc_node
*snode
)
1971 struct nb_node
*nb_node
= snode
->priv
;
1972 struct lysc_node_container
*scontainer
;
1973 struct lysc_node_leaf
*sleaf
;
1975 switch (operation
) {
1977 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1980 switch (snode
->nodetype
) {
1982 sleaf
= (struct lysc_node_leaf
*)snode
;
1983 if (sleaf
->type
->basetype
!= LY_TYPE_EMPTY
)
1987 scontainer
= (struct lysc_node_container
*)snode
;
1988 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
1999 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2002 switch (snode
->nodetype
) {
2004 sleaf
= (struct lysc_node_leaf
*)snode
;
2005 if (sleaf
->type
->basetype
== LY_TYPE_EMPTY
)
2008 /* List keys can't be modified. */
2009 if (lysc_is_key(sleaf
))
2017 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2020 switch (snode
->nodetype
) {
2022 sleaf
= (struct lysc_node_leaf
*)snode
;
2024 /* List keys can't be deleted. */
2025 if (lysc_is_key(sleaf
))
2029 * Only optional leafs can be deleted, or leafs whose
2030 * parent is a case statement.
2032 if (snode
->parent
->nodetype
== LYS_CASE
)
2036 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
2041 scontainer
= (struct lysc_node_container
*)snode
;
2042 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2053 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2056 switch (snode
->nodetype
) {
2059 if (!CHECK_FLAG(snode
->flags
, LYS_ORDBY_USER
))
2066 case NB_OP_PRE_VALIDATE
:
2067 case NB_OP_APPLY_FINISH
:
2068 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2071 case NB_OP_GET_ELEM
:
2072 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
2075 switch (snode
->nodetype
) {
2080 scontainer
= (struct lysc_node_container
*)snode
;
2081 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2088 case NB_OP_GET_NEXT
:
2089 switch (snode
->nodetype
) {
2091 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2095 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2102 case NB_OP_GET_KEYS
:
2103 case NB_OP_LOOKUP_ENTRY
:
2104 switch (snode
->nodetype
) {
2106 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2108 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
2116 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
2119 switch (snode
->nodetype
) {
2132 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
2133 (xpath
, arguments
));
2135 int nb_notification_send(const char *xpath
, struct list
*arguments
)
2139 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
2141 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
2143 list_delete(&arguments
);
2148 /* Running configuration user pointers management. */
2149 struct nb_config_entry
{
2150 char xpath
[XPATH_MAXLEN
];
2154 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
2156 const struct nb_config_entry
*c1
= value1
;
2157 const struct nb_config_entry
*c2
= value2
;
2159 return strmatch(c1
->xpath
, c2
->xpath
);
/* Hash key function: hash the entry's xpath (the struct's first member). */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
2167 static void *running_config_entry_alloc(void *p
)
2169 struct nb_config_entry
*new, *key
= p
;
2171 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
2172 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
2177 static void running_config_entry_free(void *arg
)
2179 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
2182 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
2184 struct nb_config_entry
*config
, s
;
2186 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2187 config
= hash_get(running_config_entries
, &s
,
2188 running_config_entry_alloc
);
2189 config
->entry
= entry
;
2192 void nb_running_move_tree(const char *xpath_from
, const char *xpath_to
)
2194 struct nb_config_entry
*entry
;
2195 struct list
*entries
= hash_to_list(running_config_entries
);
2196 struct listnode
*ln
;
2198 for (ALL_LIST_ELEMENTS_RO(entries
, ln
, entry
)) {
2199 if (!frrstr_startswith(entry
->xpath
, xpath_from
))
2202 hash_release(running_config_entries
, entry
);
2205 frrstr_replace(entry
->xpath
, xpath_from
, xpath_to
);
2206 strlcpy(entry
->xpath
, newpath
, sizeof(entry
->xpath
));
2207 XFREE(MTYPE_TMP
, newpath
);
2209 hash_get(running_config_entries
, entry
, hash_alloc_intern
);
2212 list_delete(&entries
);
2215 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
2217 struct nb_config_entry
*config
, s
;
2218 struct lyd_node
*child
;
2221 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2222 config
= hash_release(running_config_entries
, &s
);
2224 entry
= config
->entry
;
2225 running_config_entry_free(config
);
2228 /* Unset user pointers from the child nodes. */
2229 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
2230 LY_LIST_FOR (lyd_child(dnode
), child
) {
2231 (void)nb_running_unset_entry_helper(child
);
2238 void *nb_running_unset_entry(const struct lyd_node
*dnode
)
2242 entry
= nb_running_unset_entry_helper(dnode
);
2248 static void *nb_running_get_entry_worker(const struct lyd_node
*dnode
,
2250 bool abort_if_not_found
,
2253 const struct lyd_node
*orig_dnode
= dnode
;
2254 char xpath_buf
[XPATH_MAXLEN
];
2255 bool rec_flag
= true;
2257 assert(dnode
|| xpath
);
2260 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
2262 while (rec_flag
&& dnode
) {
2263 struct nb_config_entry
*config
, s
;
2265 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2266 config
= hash_lookup(running_config_entries
, &s
);
2268 return config
->entry
;
2270 rec_flag
= rec_search
;
2272 dnode
= lyd_parent(dnode
);
2275 if (!abort_if_not_found
)
2278 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
2279 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
2280 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
2281 zlog_backtrace(LOG_ERR
);
2285 void *nb_running_get_entry(const struct lyd_node
*dnode
, const char *xpath
,
2286 bool abort_if_not_found
)
2288 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2292 void *nb_running_get_entry_non_rec(const struct lyd_node
*dnode
,
2293 const char *xpath
, bool abort_if_not_found
)
2295 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2299 /* Logging functions. */
2300 const char *nb_event_name(enum nb_event event
)
2303 case NB_EV_VALIDATE
:
2316 const char *nb_operation_name(enum nb_operation operation
)
2318 switch (operation
) {
2327 case NB_OP_PRE_VALIDATE
:
2328 return "pre_validate";
2329 case NB_OP_APPLY_FINISH
:
2330 return "apply_finish";
2331 case NB_OP_GET_ELEM
:
2333 case NB_OP_GET_NEXT
:
2335 case NB_OP_GET_KEYS
:
2337 case NB_OP_LOOKUP_ENTRY
:
2338 return "lookup_entry";
2346 const char *nb_err_name(enum nb_error error
)
2352 return "generic error";
2353 case NB_ERR_NO_CHANGES
:
2354 return "no changes";
2355 case NB_ERR_NOT_FOUND
:
2356 return "element not found";
2358 return "resource is locked";
2359 case NB_ERR_VALIDATION
:
2360 return "validation";
2361 case NB_ERR_RESOURCE
:
2362 return "failed to allocate resource";
2363 case NB_ERR_INCONSISTENCY
:
2364 return "internal inconsistency";
2370 const char *nb_client_name(enum nb_client client
)
2375 case NB_CLIENT_CONFD
:
2377 case NB_CLIENT_SYSREPO
:
2379 case NB_CLIENT_GRPC
:
2386 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
2388 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
2389 struct nb_node
*nb_node
;
2392 if (i
> YANG_MODULE_MAX_NODES
) {
2394 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2395 __func__
, module
->name
, YANG_MODULE_MAX_NODES
);
2399 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
2401 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
2402 "%s: unknown data path: %s", __func__
,
2403 module
->nodes
[i
].xpath
);
2407 nb_node
->cbs
= module
->nodes
[i
].cbs
;
2408 priority
= module
->nodes
[i
].priority
;
2410 nb_node
->priority
= priority
;
2414 void nb_validate_callbacks(void)
2416 unsigned int errors
= 0;
2418 yang_snodes_iterate(NULL
, nb_node_validate
, 0, &errors
);
2421 EC_LIB_NB_CBS_VALIDATION
,
2422 "%s: failed to validate northbound callbacks: %u error(s)",
2429 void nb_init(struct thread_master
*tm
,
2430 const struct frr_yang_module_info
*const modules
[],
2431 size_t nmodules
, bool db_enabled
)
2433 struct yang_module
*loaded
[nmodules
], **loadedp
= loaded
;
2434 bool explicit_compile
;
2437 * Currently using this explicit compile feature in libyang2 leads to
2438 * incorrect behavior in FRR. The functionality suppresses the compiling
2439 * of modules until they have all been loaded into the context. This
2440 * avoids multiple recompiles of the same modules as they are
2441 * imported/augmented etc.
2443 explicit_compile
= false;
2445 nb_db_enabled
= db_enabled
;
2447 yang_init(true, explicit_compile
);
2449 /* Load YANG modules and their corresponding northbound callbacks. */
2450 for (size_t i
= 0; i
< nmodules
; i
++) {
2451 DEBUGD(&nb_dbg_events
, "northbound: loading %s.yang",
2453 *loadedp
++ = yang_module_load(modules
[i
]->name
);
2456 if (explicit_compile
)
2457 yang_init_loading_complete();
2459 /* Initialize the compiled nodes with northbound data */
2460 for (size_t i
= 0; i
< nmodules
; i
++) {
2461 yang_snodes_iterate(loaded
[i
]->info
, nb_node_new_cb
, 0, NULL
);
2462 nb_load_callbacks(modules
[i
]);
2465 /* Validate northbound callbacks. */
2466 nb_validate_callbacks();
2468 /* Create an empty running configuration. */
2469 running_config
= nb_config_new(NULL
);
2470 running_config_entries
= hash_create(running_config_entry_key_make
,
2471 running_config_entry_cmp
,
2472 "Running Configuration Entries");
2473 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
2475 /* Initialize the northbound CLI. */
2479 void nb_terminate(void)
2481 /* Terminate the northbound CLI. */
2484 /* Delete all nb_node's from all YANG modules. */
2487 /* Delete the running configuration. */
2488 hash_clean(running_config_entries
, running_config_entry_free
);
2489 hash_free(running_config_entries
);
2490 nb_config_free(running_config
);
2491 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);