/*
 * Copyright (C) 2018 NetDEF, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "lib_errors.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
35 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node");
36 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration");
37 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry");
39 /* Running configuration - shouldn't be modified directly. */
40 struct nb_config
*running_config
;
42 /* Hash table of user pointers associated with configuration entries. */
43 static struct hash
*running_config_entries
;
45 /* Management lock for the running configuration. */
47 /* Mutex protecting this structure. */
53 /* Northbound client who owns this lock. */
54 enum nb_client owner_client
;
56 /* Northbound user who owns this lock. */
57 const void *owner_user
;
58 } running_config_mgmt_lock
;
60 /* Knob to record config transaction */
61 static bool nb_db_enabled
;
63 * Global lock used to prevent multiple configuration transactions from
64 * happening concurrently.
66 static bool transaction_in_progress
;
68 static int nb_callback_pre_validate(struct nb_context
*context
,
69 const struct nb_node
*nb_node
,
70 const struct lyd_node
*dnode
, char *errmsg
,
72 static int nb_callback_configuration(struct nb_context
*context
,
73 const enum nb_event event
,
74 struct nb_config_change
*change
,
75 char *errmsg
, size_t errmsg_len
);
76 static struct nb_transaction
*
77 nb_transaction_new(struct nb_context
*context
, struct nb_config
*config
,
78 struct nb_config_cbs
*changes
, const char *comment
,
79 char *errmsg
, size_t errmsg_len
);
80 static void nb_transaction_free(struct nb_transaction
*transaction
);
81 static int nb_transaction_process(enum nb_event event
,
82 struct nb_transaction
*transaction
,
83 char *errmsg
, size_t errmsg_len
);
84 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
85 char *errmsg
, size_t errmsg_len
);
86 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
87 const char *xpath
, const void *list_entry
,
88 const struct yang_list_keys
*list_keys
,
89 struct yang_translator
*translator
,
90 bool first
, uint32_t flags
,
91 nb_oper_data_cb cb
, void *arg
);
93 static int nb_node_check_config_only(const struct lysc_node
*snode
, void *arg
)
95 bool *config_only
= arg
;
97 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
)) {
99 return YANG_ITER_STOP
;
102 return YANG_ITER_CONTINUE
;
105 static int nb_node_new_cb(const struct lysc_node
*snode
, void *arg
)
107 struct nb_node
*nb_node
;
108 struct lysc_node
*sparent
, *sparent_list
;
110 nb_node
= XCALLOC(MTYPE_NB_NODE
, sizeof(*nb_node
));
111 yang_snode_get_path(snode
, YANG_PATH_DATA
, nb_node
->xpath
,
112 sizeof(nb_node
->xpath
));
113 nb_node
->priority
= NB_DFLT_PRIORITY
;
114 sparent
= yang_snode_real_parent(snode
);
116 nb_node
->parent
= sparent
->priv
;
117 sparent_list
= yang_snode_parent_list(snode
);
119 nb_node
->parent_list
= sparent_list
->priv
;
122 if (CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
123 bool config_only
= true;
125 (void)yang_snodes_iterate_subtree(snode
, NULL
,
126 nb_node_check_config_only
, 0,
129 SET_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
);
131 if (CHECK_FLAG(snode
->nodetype
, LYS_LIST
)) {
132 if (yang_snode_num_keys(snode
) == 0)
133 SET_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
);
137 * Link the northbound node and the libyang schema node with one
140 nb_node
->snode
= snode
;
141 assert(snode
->priv
== NULL
);
142 ((struct lysc_node
*)snode
)->priv
= nb_node
;
144 return YANG_ITER_CONTINUE
;
147 static int nb_node_del_cb(const struct lysc_node
*snode
, void *arg
)
149 struct nb_node
*nb_node
;
151 nb_node
= snode
->priv
;
153 ((struct lysc_node
*)snode
)->priv
= NULL
;
154 XFREE(MTYPE_NB_NODE
, nb_node
);
157 return YANG_ITER_CONTINUE
;
160 void nb_nodes_create(void)
162 yang_snodes_iterate(NULL
, nb_node_new_cb
, 0, NULL
);
165 void nb_nodes_delete(void)
167 yang_snodes_iterate(NULL
, nb_node_del_cb
, 0, NULL
);
170 struct nb_node
*nb_node_find(const char *path
)
172 const struct lysc_node
*snode
;
175 * Use libyang to find the schema node associated to the path and get
176 * the northbound node from there (snode private pointer).
178 snode
= lys_find_path(ly_native_ctx
, NULL
, path
, 0);
185 void nb_node_set_dependency_cbs(const char *dependency_xpath
,
186 const char *dependant_xpath
,
187 struct nb_dependency_callbacks
*cbs
)
189 struct nb_node
*dependency
= nb_node_find(dependency_xpath
);
190 struct nb_node
*dependant
= nb_node_find(dependant_xpath
);
192 if (!dependency
|| !dependant
)
195 dependency
->dep_cbs
.get_dependant_xpath
= cbs
->get_dependant_xpath
;
196 dependant
->dep_cbs
.get_dependency_xpath
= cbs
->get_dependency_xpath
;
199 bool nb_node_has_dependency(struct nb_node
*node
)
201 return node
->dep_cbs
.get_dependency_xpath
!= NULL
;
204 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
205 enum nb_operation operation
,
206 int callback_implemented
, bool optional
)
210 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
213 * Add an exception for operational data callbacks. A rw list usually
214 * doesn't need any associated operational data callbacks. But if this
215 * rw list is augmented by another module which adds state nodes under
216 * it, then this list will need to have the 'get_next()', 'get_keys()'
217 * and 'lookup_entry()' callbacks. As such, never log a warning when
218 * these callbacks are implemented when they are not needed, since this
219 * depends on context (e.g. some daemons might augment "frr-interface"
220 * while others don't).
222 if (!valid
&& callback_implemented
&& operation
!= NB_OP_GET_NEXT
223 && operation
!= NB_OP_GET_KEYS
&& operation
!= NB_OP_LOOKUP_ENTRY
)
224 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
225 "unneeded '%s' callback for '%s'",
226 nb_operation_name(operation
), nb_node
->xpath
);
228 if (!optional
&& valid
&& !callback_implemented
) {
229 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
230 nb_operation_name(operation
), nb_node
->xpath
);
238 * Check if the required callbacks were implemented for the given northbound
241 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
244 unsigned int error
= 0;
246 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
247 !!nb_node
->cbs
.create
, false);
248 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
249 !!nb_node
->cbs
.modify
, false);
250 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
251 !!nb_node
->cbs
.destroy
, false);
252 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
254 error
+= nb_node_validate_cb(nb_node
, NB_OP_PRE_VALIDATE
,
255 !!nb_node
->cbs
.pre_validate
, true);
256 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
257 !!nb_node
->cbs
.apply_finish
, true);
258 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
259 !!nb_node
->cbs
.get_elem
, false);
260 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
261 !!nb_node
->cbs
.get_next
, false);
262 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
263 !!nb_node
->cbs
.get_keys
, false);
264 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
265 !!nb_node
->cbs
.lookup_entry
, false);
266 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
272 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
274 /* Top-level nodes can have any priority. */
275 if (!nb_node
->parent
)
278 if (nb_node
->priority
< nb_node
->parent
->priority
) {
279 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
280 "node has higher priority than its parent [xpath %s]",
288 static int nb_node_validate(const struct lysc_node
*snode
, void *arg
)
290 struct nb_node
*nb_node
= snode
->priv
;
291 unsigned int *errors
= arg
;
293 /* Validate callbacks and priority. */
295 *errors
+= nb_node_validate_cbs(nb_node
);
296 *errors
+= nb_node_validate_priority(nb_node
);
299 return YANG_ITER_CONTINUE
;
302 struct nb_config
*nb_config_new(struct lyd_node
*dnode
)
304 struct nb_config
*config
;
306 config
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*config
));
308 config
->dnode
= dnode
;
310 config
->dnode
= yang_dnode_new(ly_native_ctx
, true);
316 void nb_config_free(struct nb_config
*config
)
319 yang_dnode_free(config
->dnode
);
320 XFREE(MTYPE_NB_CONFIG
, config
);
323 struct nb_config
*nb_config_dup(const struct nb_config
*config
)
325 struct nb_config
*dup
;
327 dup
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*dup
));
328 dup
->dnode
= yang_dnode_dup(config
->dnode
);
329 dup
->version
= config
->version
;
334 int nb_config_merge(struct nb_config
*config_dst
, struct nb_config
*config_src
,
335 bool preserve_source
)
339 ret
= lyd_merge_siblings(&config_dst
->dnode
, config_src
->dnode
, 0);
341 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_merge() failed", __func__
);
343 if (!preserve_source
)
344 nb_config_free(config_src
);
346 return (ret
== 0) ? NB_OK
: NB_ERR
;
349 void nb_config_replace(struct nb_config
*config_dst
,
350 struct nb_config
*config_src
, bool preserve_source
)
352 /* Update version. */
353 if (config_src
->version
!= 0)
354 config_dst
->version
= config_src
->version
;
357 if (config_dst
->dnode
)
358 yang_dnode_free(config_dst
->dnode
);
359 if (preserve_source
) {
360 config_dst
->dnode
= yang_dnode_dup(config_src
->dnode
);
362 config_dst
->dnode
= config_src
->dnode
;
363 config_src
->dnode
= NULL
;
364 nb_config_free(config_src
);
368 /* Generate the nb_config_cbs tree. */
369 static inline int nb_config_cb_compare(const struct nb_config_cb
*a
,
370 const struct nb_config_cb
*b
)
372 /* Sort by priority first. */
373 if (a
->nb_node
->priority
< b
->nb_node
->priority
)
375 if (a
->nb_node
->priority
> b
->nb_node
->priority
)
379 * Preserve the order of the configuration changes as told by libyang.
387 * All 'apply_finish' callbacks have their sequence number set to zero.
388 * In this case, compare them using their dnode pointers (the order
389 * doesn't matter for callbacks that have the same priority).
391 if (a
->dnode
< b
->dnode
)
393 if (a
->dnode
> b
->dnode
)
398 RB_GENERATE(nb_config_cbs
, nb_config_cb
, entry
, nb_config_cb_compare
);
400 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
401 enum nb_operation operation
,
403 const struct lyd_node
*dnode
)
405 struct nb_config_change
*change
;
407 /* Ignore unimplemented nodes. */
408 if (!dnode
->schema
->priv
)
411 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
412 change
->cb
.operation
= operation
;
413 change
->cb
.seq
= *seq
;
415 change
->cb
.nb_node
= dnode
->schema
->priv
;
416 change
->cb
.dnode
= dnode
;
418 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
421 static void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
423 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
424 struct nb_config_change
*change
;
426 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
428 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
429 XFREE(MTYPE_TMP
, change
);
434 * Helper function used when calculating the delta between two different
435 * configurations. Given a new subtree, calculate all new YANG data nodes,
436 * excluding default leafs and leaf-lists. This is a recursive function.
438 static void nb_config_diff_created(const struct lyd_node
*dnode
, uint32_t *seq
,
439 struct nb_config_cbs
*changes
)
441 enum nb_operation operation
;
442 struct lyd_node
*child
;
444 /* Ignore unimplemented nodes. */
445 if (!dnode
->schema
->priv
)
448 switch (dnode
->schema
->nodetype
) {
451 if (lyd_is_default(dnode
))
454 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
455 operation
= NB_OP_CREATE
;
456 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
457 operation
= NB_OP_MODIFY
;
461 nb_config_diff_add_change(changes
, operation
, seq
, dnode
);
465 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
466 nb_config_diff_add_change(changes
, NB_OP_CREATE
, seq
,
469 /* Process child nodes recursively. */
470 LY_LIST_FOR (lyd_child(dnode
), child
) {
471 nb_config_diff_created(child
, seq
, changes
);
479 static void nb_config_diff_deleted(const struct lyd_node
*dnode
, uint32_t *seq
,
480 struct nb_config_cbs
*changes
)
482 /* Ignore unimplemented nodes. */
483 if (!dnode
->schema
->priv
)
486 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
487 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, seq
, dnode
);
488 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
489 struct lyd_node
*child
;
492 * Non-presence containers need special handling since they
493 * don't have "destroy" callbacks. In this case, what we need to
494 * do is to call the "destroy" callbacks of their child nodes
495 * when applicable (i.e. optional nodes).
497 LY_LIST_FOR (lyd_child(dnode
), child
) {
498 nb_config_diff_deleted(child
, seq
, changes
);
503 static int nb_lyd_diff_get_op(const struct lyd_node
*dnode
)
505 const struct lyd_meta
*meta
;
506 LY_LIST_FOR (dnode
->meta
, meta
) {
507 if (strcmp(meta
->name
, "operation")
508 || strcmp(meta
->annotation
->module
->name
, "yang"))
510 return lyd_get_meta_value(meta
)[0];
515 #if 0 /* Used below in nb_config_diff inside normally disabled code */
516 static inline void nb_config_diff_dnode_log_path(const char *context
,
518 const struct lyd_node
*dnode
)
520 if (dnode
->schema
->nodetype
& LYD_NODE_TERM
)
521 zlog_debug("nb_config_diff: %s: %s: %s", context
, path
,
522 lyd_get_value(dnode
));
524 zlog_debug("nb_config_diff: %s: %s", context
, path
);
527 static inline void nb_config_diff_dnode_log(const char *context
,
528 const struct lyd_node
*dnode
)
531 zlog_debug("nb_config_diff: %s: NULL", context
);
535 char *path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
536 nb_config_diff_dnode_log_path(context
, path
, dnode
);
541 /* Calculate the delta between two different configurations. */
542 static void nb_config_diff(const struct nb_config
*config1
,
543 const struct nb_config
*config2
,
544 struct nb_config_cbs
*changes
)
546 struct lyd_node
*diff
= NULL
;
547 const struct lyd_node
*root
, *dnode
;
548 struct lyd_node
*target
;
553 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
554 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
555 LY_LIST_FOR(config1
->dnode
, root
) {
556 LYD_TREE_DFS_BEGIN(root
, dnode
) {
557 nb_config_diff_dnode_log("from", dnode
);
558 LYD_TREE_DFS_END(root
, dnode
);
561 LY_LIST_FOR(config2
->dnode
, root
) {
562 LYD_TREE_DFS_BEGIN(root
, dnode
) {
563 nb_config_diff_dnode_log("to", dnode
);
564 LYD_TREE_DFS_END(root
, dnode
);
570 err
= lyd_diff_siblings(config1
->dnode
, config2
->dnode
,
571 LYD_DIFF_DEFAULTS
, &diff
);
574 if (diff
&& DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
577 if (!lyd_print_mem(&s
, diff
, LYD_JSON
,
578 LYD_PRINT_WITHSIBLINGS
| LYD_PRINT_WD_ALL
)) {
579 zlog_debug("%s: %s", __func__
, s
);
586 LY_LIST_FOR (diff
, root
) {
587 LYD_TREE_DFS_BEGIN (root
, dnode
) {
588 op
= nb_lyd_diff_get_op(dnode
);
590 path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
592 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
593 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
595 snprintf(context
, sizeof(context
),
596 "iterating diff: oper: %c seq: %u", op
, seq
);
597 nb_config_diff_dnode_log_path(context
, path
, dnode
);
601 case 'c': /* create */
603 * This is rather inefficient, but when we use
604 * dnode from the diff instead of the
605 * candidate config node we get failures when
606 * looking up default values, etc, based on
609 target
= yang_dnode_get(config2
->dnode
, path
);
611 nb_config_diff_created(target
, &seq
, changes
);
613 /* Skip rest of sub-tree, move to next sibling
615 LYD_TREE_DFS_continue
= 1;
617 case 'd': /* delete */
618 target
= yang_dnode_get(config1
->dnode
, path
);
620 nb_config_diff_deleted(target
, &seq
, changes
);
622 /* Skip rest of sub-tree, move to next sibling
624 LYD_TREE_DFS_continue
= 1;
626 case 'r': /* replace */
627 /* either moving an entry or changing a value */
628 target
= yang_dnode_get(config2
->dnode
, path
);
630 nb_config_diff_add_change(changes
, NB_OP_MODIFY
,
638 LYD_TREE_DFS_END(root
, dnode
);
645 int nb_candidate_edit(struct nb_config
*candidate
,
646 const struct nb_node
*nb_node
,
647 enum nb_operation operation
, const char *xpath
,
648 const struct yang_data
*previous
,
649 const struct yang_data
*data
)
651 struct lyd_node
*dnode
, *dep_dnode
;
652 char xpath_edit
[XPATH_MAXLEN
];
653 char dep_xpath
[XPATH_MAXLEN
];
656 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
657 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
658 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
661 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
666 err
= lyd_new_path(candidate
->dnode
, ly_native_ctx
, xpath_edit
,
667 (void *)data
->value
, LYD_NEW_PATH_UPDATE
,
670 flog_warn(EC_LIB_LIBYANG
,
671 "%s: lyd_new_path(%s) failed: %d", __func__
,
675 /* Create default nodes */
676 LY_ERR err
= lyd_new_implicit_tree(
677 dnode
, LYD_IMPLICIT_NO_STATE
, NULL
);
679 flog_warn(EC_LIB_LIBYANG
,
680 "%s: lyd_new_implicit_all failed: %d",
686 * dnode returned by the lyd_new_path may be from a
687 * different schema, so we need to update the nb_node
689 nb_node
= dnode
->schema
->priv
;
690 if (nb_node
->dep_cbs
.get_dependency_xpath
) {
691 nb_node
->dep_cbs
.get_dependency_xpath(
694 err
= lyd_new_path(candidate
->dnode
,
695 ly_native_ctx
, dep_xpath
,
696 NULL
, LYD_NEW_PATH_UPDATE
,
698 /* Create default nodes */
699 if (!err
&& dep_dnode
)
700 err
= lyd_new_implicit_tree(
702 LYD_IMPLICIT_NO_STATE
, NULL
);
706 "%s: dependency: lyd_new_path(%s) failed: %d",
707 __func__
, dep_xpath
, err
);
714 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
717 * Return a special error code so the caller can choose
718 * whether to ignore it or not.
720 return NB_ERR_NOT_FOUND
;
721 /* destroy dependant */
722 if (nb_node
->dep_cbs
.get_dependant_xpath
) {
723 nb_node
->dep_cbs
.get_dependant_xpath(dnode
, dep_xpath
);
725 dep_dnode
= yang_dnode_get(candidate
->dnode
, dep_xpath
);
727 lyd_free_tree(dep_dnode
);
729 lyd_free_tree(dnode
);
732 /* TODO: update configuration. */
735 flog_warn(EC_LIB_DEVELOPMENT
,
736 "%s: unknown operation (%u) [xpath %s]", __func__
,
737 operation
, xpath_edit
);
744 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
746 if (candidate
->version
< running_config
->version
)
752 int nb_candidate_update(struct nb_config
*candidate
)
754 struct nb_config
*updated_config
;
756 updated_config
= nb_config_dup(running_config
);
757 if (nb_config_merge(updated_config
, candidate
, true) != NB_OK
)
760 nb_config_replace(candidate
, updated_config
, false);
766 * Perform YANG syntactic and semantic validation.
768 * WARNING: lyd_validate() can change the configuration as part of the
769 * validation process.
771 static int nb_candidate_validate_yang(struct nb_config
*candidate
, char *errmsg
,
774 if (lyd_validate_all(&candidate
->dnode
, ly_native_ctx
,
775 LYD_VALIDATE_NO_STATE
, NULL
)
777 yang_print_errors(ly_native_ctx
, errmsg
, errmsg_len
);
778 return NB_ERR_VALIDATION
;
784 /* Perform code-level validation using the northbound callbacks. */
785 static int nb_candidate_validate_code(struct nb_context
*context
,
786 struct nb_config
*candidate
,
787 struct nb_config_cbs
*changes
,
788 char *errmsg
, size_t errmsg_len
)
790 struct nb_config_cb
*cb
;
791 struct lyd_node
*root
, *child
;
794 /* First validate the candidate as a whole. */
795 LY_LIST_FOR (candidate
->dnode
, root
) {
796 LYD_TREE_DFS_BEGIN (root
, child
) {
797 struct nb_node
*nb_node
;
799 nb_node
= child
->schema
->priv
;
800 if (!nb_node
|| !nb_node
->cbs
.pre_validate
)
803 ret
= nb_callback_pre_validate(context
, nb_node
, child
,
806 return NB_ERR_VALIDATION
;
809 LYD_TREE_DFS_END(root
, child
);
813 /* Now validate the configuration changes. */
814 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
815 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
817 ret
= nb_callback_configuration(context
, NB_EV_VALIDATE
, change
,
820 return NB_ERR_VALIDATION
;
826 int nb_candidate_validate(struct nb_context
*context
,
827 struct nb_config
*candidate
, char *errmsg
,
830 struct nb_config_cbs changes
;
833 if (nb_candidate_validate_yang(candidate
, errmsg
, errmsg_len
) != NB_OK
)
834 return NB_ERR_VALIDATION
;
836 RB_INIT(nb_config_cbs
, &changes
);
837 nb_config_diff(running_config
, candidate
, &changes
);
838 ret
= nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
840 nb_config_diff_del_changes(&changes
);
845 int nb_candidate_commit_prepare(struct nb_context
*context
,
846 struct nb_config
*candidate
,
848 struct nb_transaction
**transaction
,
849 char *errmsg
, size_t errmsg_len
)
851 struct nb_config_cbs changes
;
853 if (nb_candidate_validate_yang(candidate
, errmsg
, errmsg_len
)
855 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
856 "%s: failed to validate candidate configuration",
858 return NB_ERR_VALIDATION
;
861 RB_INIT(nb_config_cbs
, &changes
);
862 nb_config_diff(running_config
, candidate
, &changes
);
863 if (RB_EMPTY(nb_config_cbs
, &changes
)) {
866 "No changes to apply were found during preparation phase");
867 return NB_ERR_NO_CHANGES
;
870 if (nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
873 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
874 "%s: failed to validate candidate configuration",
876 nb_config_diff_del_changes(&changes
);
877 return NB_ERR_VALIDATION
;
880 *transaction
= nb_transaction_new(context
, candidate
, &changes
, comment
,
882 if (*transaction
== NULL
) {
883 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
884 "%s: failed to create transaction: %s", __func__
,
886 nb_config_diff_del_changes(&changes
);
887 return NB_ERR_LOCKED
;
890 return nb_transaction_process(NB_EV_PREPARE
, *transaction
, errmsg
,
894 void nb_candidate_commit_abort(struct nb_transaction
*transaction
, char *errmsg
,
897 (void)nb_transaction_process(NB_EV_ABORT
, transaction
, errmsg
,
899 nb_transaction_free(transaction
);
902 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
903 bool save_transaction
, uint32_t *transaction_id
,
904 char *errmsg
, size_t errmsg_len
)
906 (void)nb_transaction_process(NB_EV_APPLY
, transaction
, errmsg
,
908 nb_transaction_apply_finish(transaction
, errmsg
, errmsg_len
);
910 /* Replace running by candidate. */
911 transaction
->config
->version
++;
912 nb_config_replace(running_config
, transaction
->config
, true);
914 /* Record transaction. */
915 if (save_transaction
&& nb_db_enabled
916 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
917 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
918 "%s: failed to record transaction", __func__
);
920 nb_transaction_free(transaction
);
923 int nb_candidate_commit(struct nb_context
*context
, struct nb_config
*candidate
,
924 bool save_transaction
, const char *comment
,
925 uint32_t *transaction_id
, char *errmsg
,
928 struct nb_transaction
*transaction
= NULL
;
931 ret
= nb_candidate_commit_prepare(context
, candidate
, comment
,
932 &transaction
, errmsg
, errmsg_len
);
934 * Apply the changes if the preparation phase succeeded. Otherwise abort
938 nb_candidate_commit_apply(transaction
, save_transaction
,
939 transaction_id
, errmsg
, errmsg_len
);
940 else if (transaction
!= NULL
)
941 nb_candidate_commit_abort(transaction
, errmsg
, errmsg_len
);
946 int nb_running_lock(enum nb_client client
, const void *user
)
950 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
951 if (!running_config_mgmt_lock
.locked
) {
952 running_config_mgmt_lock
.locked
= true;
953 running_config_mgmt_lock
.owner_client
= client
;
954 running_config_mgmt_lock
.owner_user
= user
;
962 int nb_running_unlock(enum nb_client client
, const void *user
)
966 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
967 if (running_config_mgmt_lock
.locked
968 && running_config_mgmt_lock
.owner_client
== client
969 && running_config_mgmt_lock
.owner_user
== user
) {
970 running_config_mgmt_lock
.locked
= false;
971 running_config_mgmt_lock
.owner_client
= NB_CLIENT_NONE
;
972 running_config_mgmt_lock
.owner_user
= NULL
;
980 int nb_running_lock_check(enum nb_client client
, const void *user
)
984 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
985 if (!running_config_mgmt_lock
.locked
986 || (running_config_mgmt_lock
.owner_client
== client
987 && running_config_mgmt_lock
.owner_user
== user
))
994 static void nb_log_config_callback(const enum nb_event event
,
995 enum nb_operation operation
,
996 const struct lyd_node
*dnode
)
999 char xpath
[XPATH_MAXLEN
];
1001 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
1004 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1005 if (yang_snode_is_typeless_data(dnode
->schema
))
1008 value
= yang_dnode_get_string(dnode
, NULL
);
1011 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
1012 nb_event_name(event
), nb_operation_name(operation
), xpath
,
1016 static int nb_callback_create(struct nb_context
*context
,
1017 const struct nb_node
*nb_node
,
1018 enum nb_event event
, const struct lyd_node
*dnode
,
1019 union nb_resource
*resource
, char *errmsg
,
1022 struct nb_cb_create_args args
= {};
1023 bool unexpected_error
= false;
1026 nb_log_config_callback(event
, NB_OP_CREATE
, dnode
);
1028 args
.context
= context
;
1031 args
.resource
= resource
;
1032 args
.errmsg
= errmsg
;
1033 args
.errmsg_len
= errmsg_len
;
1034 ret
= nb_node
->cbs
.create(&args
);
1036 /* Detect and log unexpected errors. */
1041 case NB_ERR_VALIDATION
:
1042 if (event
!= NB_EV_VALIDATE
)
1043 unexpected_error
= true;
1045 case NB_ERR_RESOURCE
:
1046 if (event
!= NB_EV_PREPARE
)
1047 unexpected_error
= true;
1049 case NB_ERR_INCONSISTENCY
:
1050 if (event
== NB_EV_VALIDATE
)
1051 unexpected_error
= true;
1054 unexpected_error
= true;
1057 if (unexpected_error
)
1058 DEBUGD(&nb_dbg_cbs_config
,
1059 "northbound callback: unexpected return value: %s",
1065 static int nb_callback_modify(struct nb_context
*context
,
1066 const struct nb_node
*nb_node
,
1067 enum nb_event event
, const struct lyd_node
*dnode
,
1068 union nb_resource
*resource
, char *errmsg
,
1071 struct nb_cb_modify_args args
= {};
1072 bool unexpected_error
= false;
1075 nb_log_config_callback(event
, NB_OP_MODIFY
, dnode
);
1077 args
.context
= context
;
1080 args
.resource
= resource
;
1081 args
.errmsg
= errmsg
;
1082 args
.errmsg_len
= errmsg_len
;
1083 ret
= nb_node
->cbs
.modify(&args
);
1085 /* Detect and log unexpected errors. */
1090 case NB_ERR_VALIDATION
:
1091 if (event
!= NB_EV_VALIDATE
)
1092 unexpected_error
= true;
1094 case NB_ERR_RESOURCE
:
1095 if (event
!= NB_EV_PREPARE
)
1096 unexpected_error
= true;
1098 case NB_ERR_INCONSISTENCY
:
1099 if (event
== NB_EV_VALIDATE
)
1100 unexpected_error
= true;
1103 unexpected_error
= true;
1106 if (unexpected_error
)
1107 DEBUGD(&nb_dbg_cbs_config
,
1108 "northbound callback: unexpected return value: %s",
1114 static int nb_callback_destroy(struct nb_context
*context
,
1115 const struct nb_node
*nb_node
,
1116 enum nb_event event
,
1117 const struct lyd_node
*dnode
, char *errmsg
,
1120 struct nb_cb_destroy_args args
= {};
1121 bool unexpected_error
= false;
1124 nb_log_config_callback(event
, NB_OP_DESTROY
, dnode
);
1126 args
.context
= context
;
1129 args
.errmsg
= errmsg
;
1130 args
.errmsg_len
= errmsg_len
;
1131 ret
= nb_node
->cbs
.destroy(&args
);
1133 /* Detect and log unexpected errors. */
1138 case NB_ERR_VALIDATION
:
1139 if (event
!= NB_EV_VALIDATE
)
1140 unexpected_error
= true;
1142 case NB_ERR_INCONSISTENCY
:
1143 if (event
== NB_EV_VALIDATE
)
1144 unexpected_error
= true;
1147 unexpected_error
= true;
1150 if (unexpected_error
)
1151 DEBUGD(&nb_dbg_cbs_config
,
1152 "northbound callback: unexpected return value: %s",
1158 static int nb_callback_move(struct nb_context
*context
,
1159 const struct nb_node
*nb_node
, enum nb_event event
,
1160 const struct lyd_node
*dnode
, char *errmsg
,
1163 struct nb_cb_move_args args
= {};
1164 bool unexpected_error
= false;
1167 nb_log_config_callback(event
, NB_OP_MOVE
, dnode
);
1169 args
.context
= context
;
1172 args
.errmsg
= errmsg
;
1173 args
.errmsg_len
= errmsg_len
;
1174 ret
= nb_node
->cbs
.move(&args
);
1176 /* Detect and log unexpected errors. */
1181 case NB_ERR_VALIDATION
:
1182 if (event
!= NB_EV_VALIDATE
)
1183 unexpected_error
= true;
1185 case NB_ERR_INCONSISTENCY
:
1186 if (event
== NB_EV_VALIDATE
)
1187 unexpected_error
= true;
1190 unexpected_error
= true;
1193 if (unexpected_error
)
1194 DEBUGD(&nb_dbg_cbs_config
,
1195 "northbound callback: unexpected return value: %s",
1201 static int nb_callback_pre_validate(struct nb_context
*context
,
1202 const struct nb_node
*nb_node
,
1203 const struct lyd_node
*dnode
, char *errmsg
,
1206 struct nb_cb_pre_validate_args args
= {};
1207 bool unexpected_error
= false;
1210 nb_log_config_callback(NB_EV_VALIDATE
, NB_OP_PRE_VALIDATE
, dnode
);
1213 args
.errmsg
= errmsg
;
1214 args
.errmsg_len
= errmsg_len
;
1215 ret
= nb_node
->cbs
.pre_validate(&args
);
1217 /* Detect and log unexpected errors. */
1220 case NB_ERR_VALIDATION
:
1223 unexpected_error
= true;
1226 if (unexpected_error
)
1227 DEBUGD(&nb_dbg_cbs_config
,
1228 "northbound callback: unexpected return value: %s",
1234 static void nb_callback_apply_finish(struct nb_context
*context
,
1235 const struct nb_node
*nb_node
,
1236 const struct lyd_node
*dnode
, char *errmsg
,
1239 struct nb_cb_apply_finish_args args
= {};
1241 nb_log_config_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
, dnode
);
1243 args
.context
= context
;
1245 args
.errmsg
= errmsg
;
1246 args
.errmsg_len
= errmsg_len
;
1247 nb_node
->cbs
.apply_finish(&args
);
1250 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
1252 const void *list_entry
)
1254 struct nb_cb_get_elem_args args
= {};
1256 DEBUGD(&nb_dbg_cbs_state
,
1257 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1261 args
.list_entry
= list_entry
;
1262 return nb_node
->cbs
.get_elem(&args
);
1265 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
1266 const void *parent_list_entry
,
1267 const void *list_entry
)
1269 struct nb_cb_get_next_args args
= {};
1271 DEBUGD(&nb_dbg_cbs_state
,
1272 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1273 nb_node
->xpath
, parent_list_entry
, list_entry
);
1275 args
.parent_list_entry
= parent_list_entry
;
1276 args
.list_entry
= list_entry
;
1277 return nb_node
->cbs
.get_next(&args
);
1280 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
1281 struct yang_list_keys
*keys
)
1283 struct nb_cb_get_keys_args args
= {};
1285 DEBUGD(&nb_dbg_cbs_state
,
1286 "northbound callback (get_keys): node [%s] list_entry [%p]",
1287 nb_node
->xpath
, list_entry
);
1289 args
.list_entry
= list_entry
;
1291 return nb_node
->cbs
.get_keys(&args
);
1294 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
1295 const void *parent_list_entry
,
1296 const struct yang_list_keys
*keys
)
1298 struct nb_cb_lookup_entry_args args
= {};
1300 DEBUGD(&nb_dbg_cbs_state
,
1301 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1302 nb_node
->xpath
, parent_list_entry
);
1304 args
.parent_list_entry
= parent_list_entry
;
1306 return nb_node
->cbs
.lookup_entry(&args
);
1309 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
1310 const struct list
*input
, struct list
*output
, char *errmsg
,
1313 struct nb_cb_rpc_args args
= {};
1315 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
1319 args
.output
= output
;
1320 args
.errmsg
= errmsg
;
1321 args
.errmsg_len
= errmsg_len
;
1322 return nb_node
->cbs
.rpc(&args
);
1326 * Call the northbound configuration callback associated to a given
1327 * configuration change.
1329 static int nb_callback_configuration(struct nb_context
*context
,
1330 const enum nb_event event
,
1331 struct nb_config_change
*change
,
1332 char *errmsg
, size_t errmsg_len
)
1334 enum nb_operation operation
= change
->cb
.operation
;
1335 char xpath
[XPATH_MAXLEN
];
1336 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
1337 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1338 union nb_resource
*resource
;
1341 if (event
== NB_EV_VALIDATE
)
1344 resource
= &change
->resource
;
1346 switch (operation
) {
1348 ret
= nb_callback_create(context
, nb_node
, event
, dnode
,
1349 resource
, errmsg
, errmsg_len
);
1352 ret
= nb_callback_modify(context
, nb_node
, event
, dnode
,
1353 resource
, errmsg
, errmsg_len
);
1356 ret
= nb_callback_destroy(context
, nb_node
, event
, dnode
,
1357 errmsg
, errmsg_len
);
1360 ret
= nb_callback_move(context
, nb_node
, event
, dnode
, errmsg
,
1364 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1365 flog_err(EC_LIB_DEVELOPMENT
,
1366 "%s: unknown operation (%u) [xpath %s]", __func__
,
1372 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1375 case NB_EV_VALIDATE
:
1376 flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE
,
1377 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1378 nb_err_name(ret
), nb_event_name(event
),
1379 nb_operation_name(operation
), xpath
,
1380 errmsg
[0] ? " message: " : "", errmsg
);
1383 flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE
,
1384 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1385 nb_err_name(ret
), nb_event_name(event
),
1386 nb_operation_name(operation
), xpath
,
1387 errmsg
[0] ? " message: " : "", errmsg
);
1390 flog_warn(EC_LIB_NB_CB_CONFIG_ABORT
,
1391 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1392 nb_err_name(ret
), nb_event_name(event
),
1393 nb_operation_name(operation
), xpath
,
1394 errmsg
[0] ? " message: " : "", errmsg
);
1397 flog_err(EC_LIB_NB_CB_CONFIG_APPLY
,
1398 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1399 nb_err_name(ret
), nb_event_name(event
),
1400 nb_operation_name(operation
), xpath
,
1401 errmsg
[0] ? " message: " : "", errmsg
);
1404 flog_err(EC_LIB_DEVELOPMENT
,
1405 "%s: unknown event (%u) [xpath %s]", __func__
,
1414 static struct nb_transaction
*
1415 nb_transaction_new(struct nb_context
*context
, struct nb_config
*config
,
1416 struct nb_config_cbs
*changes
, const char *comment
,
1417 char *errmsg
, size_t errmsg_len
)
1419 struct nb_transaction
*transaction
;
1421 if (nb_running_lock_check(context
->client
, context
->user
)) {
1423 "running configuration is locked by another client",
1428 if (transaction_in_progress
) {
1430 "there's already another transaction in progress",
1434 transaction_in_progress
= true;
1436 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
1437 transaction
->context
= context
;
1439 strlcpy(transaction
->comment
, comment
,
1440 sizeof(transaction
->comment
));
1441 transaction
->config
= config
;
1442 transaction
->changes
= *changes
;
1447 static void nb_transaction_free(struct nb_transaction
*transaction
)
1449 nb_config_diff_del_changes(&transaction
->changes
);
1450 XFREE(MTYPE_TMP
, transaction
);
1451 transaction_in_progress
= false;
1454 /* Process all configuration changes associated to a transaction. */
1455 static int nb_transaction_process(enum nb_event event
,
1456 struct nb_transaction
*transaction
,
1457 char *errmsg
, size_t errmsg_len
)
1459 struct nb_config_cb
*cb
;
1461 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1462 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1466 * Only try to release resources that were allocated
1469 if (event
== NB_EV_ABORT
&& !change
->prepare_ok
)
1472 /* Call the appropriate callback. */
1473 ret
= nb_callback_configuration(transaction
->context
, event
,
1474 change
, errmsg
, errmsg_len
);
1479 change
->prepare_ok
= true;
1484 * At this point it's not possible to reject the
1485 * transaction anymore, so any failure here can lead to
1486 * inconsistencies and should be treated as a bug.
1487 * Operations prone to errors, like validations and
1488 * resource allocations, should be performed during the
1500 static struct nb_config_cb
*
1501 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const struct nb_node
*nb_node
,
1502 const struct lyd_node
*dnode
)
1504 struct nb_config_cb
*cb
;
1506 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1507 cb
->nb_node
= nb_node
;
1509 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1514 static struct nb_config_cb
*
1515 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
,
1516 const struct nb_node
*nb_node
,
1517 const struct lyd_node
*dnode
)
1519 struct nb_config_cb s
;
1522 s
.nb_node
= nb_node
;
1524 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1527 /* Call the 'apply_finish' callbacks. */
1528 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
1529 char *errmsg
, size_t errmsg_len
)
1531 struct nb_config_cbs cbs
;
1532 struct nb_config_cb
*cb
;
1534 /* Initialize tree of 'apply_finish' callbacks. */
1535 RB_INIT(nb_config_cbs
, &cbs
);
1537 /* Identify the 'apply_finish' callbacks that need to be called. */
1538 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1539 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1540 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1543 * Iterate up to the root of the data tree. When a node is being
1544 * deleted, skip its 'apply_finish' callback if one is defined
1545 * (the 'apply_finish' callbacks from the node ancestors should
1546 * be called though).
1548 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1549 char xpath
[XPATH_MAXLEN
];
1551 dnode
= lyd_parent(dnode
);
1556 * The dnode from 'delete' callbacks point to elements
1557 * from the running configuration. Use yang_dnode_get()
1558 * to get the corresponding dnode from the candidate
1559 * configuration that is being committed.
1561 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1562 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1566 struct nb_node
*nb_node
;
1568 nb_node
= dnode
->schema
->priv
;
1569 if (!nb_node
|| !nb_node
->cbs
.apply_finish
)
1573 * Don't call the callback more than once for the same
1576 if (nb_apply_finish_cb_find(&cbs
, nb_node
, dnode
))
1579 nb_apply_finish_cb_new(&cbs
, nb_node
, dnode
);
1582 dnode
= lyd_parent(dnode
);
1586 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1587 RB_FOREACH (cb
, nb_config_cbs
, &cbs
)
1588 nb_callback_apply_finish(transaction
->context
, cb
->nb_node
,
1589 cb
->dnode
, errmsg
, errmsg_len
);
1591 /* Release memory. */
1592 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1593 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1594 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1595 XFREE(MTYPE_TMP
, cb
);
1599 static int nb_oper_data_iter_children(const struct lysc_node
*snode
,
1600 const char *xpath
, const void *list_entry
,
1601 const struct yang_list_keys
*list_keys
,
1602 struct yang_translator
*translator
,
1603 bool first
, uint32_t flags
,
1604 nb_oper_data_cb cb
, void *arg
)
1606 const struct lysc_node
*child
;
1608 LY_LIST_FOR (lysc_node_child(snode
), child
) {
1611 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1612 list_keys
, translator
, false,
1621 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1622 const char *xpath
, const void *list_entry
,
1623 const struct yang_list_keys
*list_keys
,
1624 struct yang_translator
*translator
,
1625 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1627 struct yang_data
*data
;
1629 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1632 /* Ignore list keys. */
1633 if (lysc_is_key(nb_node
->snode
))
1636 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1638 /* Leaf of type "empty" is not present. */
1641 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1644 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1646 const void *list_entry
,
1647 const struct yang_list_keys
*list_keys
,
1648 struct yang_translator
*translator
,
1649 uint32_t flags
, nb_oper_data_cb cb
,
1652 const struct lysc_node
*snode
= nb_node
->snode
;
1654 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1657 /* Read-only presence containers. */
1658 if (nb_node
->cbs
.get_elem
) {
1659 struct yang_data
*data
;
1662 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1664 /* Presence container is not present. */
1667 ret
= (*cb
)(snode
, translator
, data
, arg
);
1672 /* Read-write presence containers. */
1673 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
)) {
1674 struct lysc_node_container
*scontainer
;
1676 scontainer
= (struct lysc_node_container
*)snode
;
1677 if (CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
)
1678 && !yang_dnode_get(running_config
->dnode
, xpath
))
1682 /* Iterate over the child nodes. */
1683 return nb_oper_data_iter_children(snode
, xpath
, list_entry
, list_keys
,
1684 translator
, false, flags
, cb
, arg
);
1688 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1689 const void *parent_list_entry
,
1690 const struct yang_list_keys
*parent_list_keys
,
1691 struct yang_translator
*translator
, uint32_t flags
,
1692 nb_oper_data_cb cb
, void *arg
)
1694 const void *list_entry
= NULL
;
1696 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1700 struct yang_data
*data
;
1703 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1706 /* End of the list. */
1709 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1713 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1716 } while (list_entry
);
1721 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1722 const char *xpath_list
,
1723 const void *parent_list_entry
,
1724 const struct yang_list_keys
*parent_list_keys
,
1725 struct yang_translator
*translator
,
1726 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1728 const struct lysc_node
*snode
= nb_node
->snode
;
1729 const void *list_entry
= NULL
;
1730 uint32_t position
= 1;
1732 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1735 /* Iterate over all list entries. */
1737 const struct lysc_node_leaf
*skey
;
1738 struct yang_list_keys list_keys
;
1739 char xpath
[XPATH_MAXLEN
* 2];
1742 /* Obtain list entry. */
1743 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1746 /* End of the list. */
1749 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1750 /* Obtain the list entry keys. */
1751 if (nb_callback_get_keys(nb_node
, list_entry
,
1754 flog_warn(EC_LIB_NB_CB_STATE
,
1755 "%s: failed to get list keys",
1760 /* Build XPath of the list entry. */
1761 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1763 LY_FOR_KEYS (snode
, skey
) {
1764 assert(i
< list_keys
.num
);
1765 snprintf(xpath
+ strlen(xpath
),
1766 sizeof(xpath
) - strlen(xpath
),
1767 "[%s='%s']", skey
->name
,
1771 assert(i
== list_keys
.num
);
1774 * Keyless list - build XPath using a positional index.
1776 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
1781 /* Iterate over the child nodes. */
1782 ret
= nb_oper_data_iter_children(
1783 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1784 translator
, false, flags
, cb
, arg
);
1787 } while (list_entry
);
1792 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
1793 const char *xpath_parent
,
1794 const void *list_entry
,
1795 const struct yang_list_keys
*list_keys
,
1796 struct yang_translator
*translator
,
1797 bool first
, uint32_t flags
,
1798 nb_oper_data_cb cb
, void *arg
)
1800 struct nb_node
*nb_node
;
1801 char xpath
[XPATH_MAXLEN
];
1804 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
1805 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
1809 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
1810 if (!first
&& snode
->nodetype
!= LYS_USES
) {
1811 struct lysc_node
*parent
;
1813 /* Get the real parent. */
1814 parent
= snode
->parent
;
1817 * When necessary, include the namespace of the augmenting
1820 if (parent
&& parent
->module
!= snode
->module
)
1821 snprintf(xpath
+ strlen(xpath
),
1822 sizeof(xpath
) - strlen(xpath
), "/%s:%s",
1823 snode
->module
->name
, snode
->name
);
1825 snprintf(xpath
+ strlen(xpath
),
1826 sizeof(xpath
) - strlen(xpath
), "/%s",
1830 nb_node
= snode
->priv
;
1831 switch (snode
->nodetype
) {
1833 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
1834 list_keys
, translator
, flags
,
1838 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
1839 list_keys
, translator
, flags
, cb
,
1843 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
1844 list_keys
, translator
, flags
,
1848 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
1849 list_keys
, translator
, flags
, cb
,
1853 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
1854 list_keys
, translator
, false,
1864 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
1865 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1867 struct nb_node
*nb_node
;
1868 const void *list_entry
= NULL
;
1869 struct yang_list_keys list_keys
;
1870 struct list
*list_dnodes
;
1871 struct lyd_node
*dnode
, *dn
;
1872 struct listnode
*ln
;
1875 nb_node
= nb_node_find(xpath
);
1877 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1878 "%s: unknown data path: %s", __func__
, xpath
);
1882 /* For now this function works only with containers and lists. */
1883 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
1885 EC_LIB_NB_OPERATIONAL_DATA
,
1886 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1892 * Create a data tree from the XPath so that we can parse the keys of
1893 * all YANG lists (if any).
1896 LY_ERR err
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
,
1897 LYD_NEW_PATH_UPDATE
, &dnode
);
1898 if (err
|| !dnode
) {
1899 const char *errmsg
=
1900 err
? ly_errmsg(ly_native_ctx
) : "node not found";
1901 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed %s",
1907 * Create a linked list to sort the data nodes starting from the root.
1909 list_dnodes
= list_new();
1910 for (dn
= dnode
; dn
; dn
= lyd_parent(dn
)) {
1911 if (dn
->schema
->nodetype
!= LYS_LIST
|| !lyd_child(dn
))
1913 listnode_add_head(list_dnodes
, dn
);
1916 * Use the northbound callbacks to find list entry pointer corresponding
1917 * to the given XPath.
1919 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
1920 struct lyd_node
*child
;
1924 /* Obtain the list entry keys. */
1925 memset(&list_keys
, 0, sizeof(list_keys
));
1926 LY_LIST_FOR (lyd_child(dn
), child
) {
1927 if (!lysc_is_key(child
->schema
))
1929 strlcpy(list_keys
.key
[n
],
1930 yang_dnode_get_string(child
, NULL
),
1931 sizeof(list_keys
.key
[n
]));
1935 if (list_keys
.num
!= yang_snode_num_keys(dn
->schema
)) {
1936 list_delete(&list_dnodes
);
1937 yang_dnode_free(dnode
);
1938 return NB_ERR_NOT_FOUND
;
1941 /* Find the list entry pointer. */
1942 nn
= dn
->schema
->priv
;
1943 if (!nn
->cbs
.lookup_entry
) {
1945 EC_LIB_NB_OPERATIONAL_DATA
,
1946 "%s: data path doesn't support iteration over operational data: %s",
1948 list_delete(&list_dnodes
);
1949 yang_dnode_free(dnode
);
1954 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
1955 if (list_entry
== NULL
) {
1956 list_delete(&list_dnodes
);
1957 yang_dnode_free(dnode
);
1958 return NB_ERR_NOT_FOUND
;
1962 /* If a list entry was given, iterate over that list entry only. */
1963 if (dnode
->schema
->nodetype
== LYS_LIST
&& lyd_child(dnode
))
1964 ret
= nb_oper_data_iter_children(
1965 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1966 translator
, true, flags
, cb
, arg
);
1968 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
1969 &list_keys
, translator
, true,
1972 list_delete(&list_dnodes
);
1973 yang_dnode_free(dnode
);
1978 bool nb_operation_is_valid(enum nb_operation operation
,
1979 const struct lysc_node
*snode
)
1981 struct nb_node
*nb_node
= snode
->priv
;
1982 struct lysc_node_container
*scontainer
;
1983 struct lysc_node_leaf
*sleaf
;
1985 switch (operation
) {
1987 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1990 switch (snode
->nodetype
) {
1992 sleaf
= (struct lysc_node_leaf
*)snode
;
1993 if (sleaf
->type
->basetype
!= LY_TYPE_EMPTY
)
1997 scontainer
= (struct lysc_node_container
*)snode
;
1998 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2009 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2012 switch (snode
->nodetype
) {
2014 sleaf
= (struct lysc_node_leaf
*)snode
;
2015 if (sleaf
->type
->basetype
== LY_TYPE_EMPTY
)
2018 /* List keys can't be modified. */
2019 if (lysc_is_key(sleaf
))
2027 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2030 switch (snode
->nodetype
) {
2032 sleaf
= (struct lysc_node_leaf
*)snode
;
2034 /* List keys can't be deleted. */
2035 if (lysc_is_key(sleaf
))
2039 * Only optional leafs can be deleted, or leafs whose
2040 * parent is a case statement.
2042 if (snode
->parent
->nodetype
== LYS_CASE
)
2046 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
2051 scontainer
= (struct lysc_node_container
*)snode
;
2052 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2063 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2066 switch (snode
->nodetype
) {
2069 if (!CHECK_FLAG(snode
->flags
, LYS_ORDBY_USER
))
2076 case NB_OP_PRE_VALIDATE
:
2077 case NB_OP_APPLY_FINISH
:
2078 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2081 case NB_OP_GET_ELEM
:
2082 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
2085 switch (snode
->nodetype
) {
2090 scontainer
= (struct lysc_node_container
*)snode
;
2091 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2098 case NB_OP_GET_NEXT
:
2099 switch (snode
->nodetype
) {
2101 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2105 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2112 case NB_OP_GET_KEYS
:
2113 case NB_OP_LOOKUP_ENTRY
:
2114 switch (snode
->nodetype
) {
2116 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2118 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
2126 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
2129 switch (snode
->nodetype
) {
2142 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
2143 (xpath
, arguments
));
2145 int nb_notification_send(const char *xpath
, struct list
*arguments
)
2149 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
2151 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
2153 list_delete(&arguments
);
2158 /* Running configuration user pointers management. */
2159 struct nb_config_entry
{
2160 char xpath
[XPATH_MAXLEN
];
2164 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
2166 const struct nb_config_entry
*c1
= value1
;
2167 const struct nb_config_entry
*c2
= value2
;
2169 return strmatch(c1
->xpath
, c2
->xpath
);
/* Hash table key function: hash the entry's XPath (first struct member). */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
2177 static void *running_config_entry_alloc(void *p
)
2179 struct nb_config_entry
*new, *key
= p
;
2181 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
2182 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
2187 static void running_config_entry_free(void *arg
)
2189 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
2192 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
2194 struct nb_config_entry
*config
, s
;
2196 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2197 config
= hash_get(running_config_entries
, &s
,
2198 running_config_entry_alloc
);
2199 config
->entry
= entry
;
2202 void nb_running_move_tree(const char *xpath_from
, const char *xpath_to
)
2204 struct nb_config_entry
*entry
;
2205 struct list
*entries
= hash_to_list(running_config_entries
);
2206 struct listnode
*ln
;
2208 for (ALL_LIST_ELEMENTS_RO(entries
, ln
, entry
)) {
2209 if (!frrstr_startswith(entry
->xpath
, xpath_from
))
2212 hash_release(running_config_entries
, entry
);
2215 frrstr_replace(entry
->xpath
, xpath_from
, xpath_to
);
2216 strlcpy(entry
->xpath
, newpath
, sizeof(entry
->xpath
));
2217 XFREE(MTYPE_TMP
, newpath
);
2219 (void)hash_get(running_config_entries
, entry
,
2223 list_delete(&entries
);
2226 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
2228 struct nb_config_entry
*config
, s
;
2229 struct lyd_node
*child
;
2232 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2233 config
= hash_release(running_config_entries
, &s
);
2235 entry
= config
->entry
;
2236 running_config_entry_free(config
);
2239 /* Unset user pointers from the child nodes. */
2240 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
2241 LY_LIST_FOR (lyd_child(dnode
), child
) {
2242 (void)nb_running_unset_entry_helper(child
);
/*
 * Remove and return the user pointer associated with 'dnode'; aborts if no
 * pointer was set (callers rely on the association existing).
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry;

	entry = nb_running_unset_entry_helper(dnode);
	assert(entry);

	return entry;
}
2259 static void *nb_running_get_entry_worker(const struct lyd_node
*dnode
,
2261 bool abort_if_not_found
,
2264 const struct lyd_node
*orig_dnode
= dnode
;
2265 char xpath_buf
[XPATH_MAXLEN
];
2266 bool rec_flag
= true;
2268 assert(dnode
|| xpath
);
2271 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
2273 while (rec_flag
&& dnode
) {
2274 struct nb_config_entry
*config
, s
;
2276 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2277 config
= hash_lookup(running_config_entries
, &s
);
2279 return config
->entry
;
2281 rec_flag
= rec_search
;
2283 dnode
= lyd_parent(dnode
);
2286 if (!abort_if_not_found
)
2289 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
2290 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
2291 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
2292 zlog_backtrace(LOG_ERR
);
/*
 * Public lookup wrapper: search for the user pointer of 'dnode'/'xpath',
 * recursing through ancestors.
 */
void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
			   bool abort_if_not_found)
{
	return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
					   true);
}
/*
 * Public lookup wrapper: search only the given node itself, without
 * climbing to its ancestors.
 */
void *nb_running_get_entry_non_rec(const struct lyd_node *dnode,
				   const char *xpath, bool abort_if_not_found)
{
	return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
					   false);
}
2310 /* Logging functions. */
2311 const char *nb_event_name(enum nb_event event
)
2314 case NB_EV_VALIDATE
:
2327 const char *nb_operation_name(enum nb_operation operation
)
2329 switch (operation
) {
2338 case NB_OP_PRE_VALIDATE
:
2339 return "pre_validate";
2340 case NB_OP_APPLY_FINISH
:
2341 return "apply_finish";
2342 case NB_OP_GET_ELEM
:
2344 case NB_OP_GET_NEXT
:
2346 case NB_OP_GET_KEYS
:
2348 case NB_OP_LOOKUP_ENTRY
:
2349 return "lookup_entry";
2357 const char *nb_err_name(enum nb_error error
)
2363 return "generic error";
2364 case NB_ERR_NO_CHANGES
:
2365 return "no changes";
2366 case NB_ERR_NOT_FOUND
:
2367 return "element not found";
2369 return "resource is locked";
2370 case NB_ERR_VALIDATION
:
2371 return "validation";
2372 case NB_ERR_RESOURCE
:
2373 return "failed to allocate resource";
2374 case NB_ERR_INCONSISTENCY
:
2375 return "internal inconsistency";
2381 const char *nb_client_name(enum nb_client client
)
2386 case NB_CLIENT_CONFD
:
2388 case NB_CLIENT_SYSREPO
:
2390 case NB_CLIENT_GRPC
:
2397 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
2399 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
2400 struct nb_node
*nb_node
;
2403 if (i
> YANG_MODULE_MAX_NODES
) {
2405 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2406 __func__
, module
->name
, YANG_MODULE_MAX_NODES
);
2410 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
2412 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
2413 "%s: unknown data path: %s", __func__
,
2414 module
->nodes
[i
].xpath
);
2418 nb_node
->cbs
= module
->nodes
[i
].cbs
;
2419 priority
= module
->nodes
[i
].priority
;
2421 nb_node
->priority
= priority
;
2425 void nb_validate_callbacks(void)
2427 unsigned int errors
= 0;
2429 yang_snodes_iterate(NULL
, nb_node_validate
, 0, &errors
);
2432 EC_LIB_NB_CBS_VALIDATION
,
2433 "%s: failed to validate northbound callbacks: %u error(s)",
2440 void nb_init(struct thread_master
*tm
,
2441 const struct frr_yang_module_info
*const modules
[],
2442 size_t nmodules
, bool db_enabled
)
2444 struct yang_module
*loaded
[nmodules
], **loadedp
= loaded
;
2445 bool explicit_compile
;
2448 * Currently using this explicit compile feature in libyang2 leads to
2449 * incorrect behavior in FRR. The functionality suppresses the compiling
2450 * of modules until they have all been loaded into the context. This
2451 * avoids multiple recompiles of the same modules as they are
2452 * imported/augmented etc.
2454 explicit_compile
= false;
2456 nb_db_enabled
= db_enabled
;
2458 yang_init(true, explicit_compile
);
2460 /* Load YANG modules and their corresponding northbound callbacks. */
2461 for (size_t i
= 0; i
< nmodules
; i
++) {
2462 DEBUGD(&nb_dbg_events
, "northbound: loading %s.yang",
2464 *loadedp
++ = yang_module_load(modules
[i
]->name
);
2467 if (explicit_compile
)
2468 yang_init_loading_complete();
2470 /* Initialize the compiled nodes with northbound data */
2471 for (size_t i
= 0; i
< nmodules
; i
++) {
2472 yang_snodes_iterate(loaded
[i
]->info
, nb_node_new_cb
, 0, NULL
);
2473 nb_load_callbacks(modules
[i
]);
2476 /* Validate northbound callbacks. */
2477 nb_validate_callbacks();
2479 /* Create an empty running configuration. */
2480 running_config
= nb_config_new(NULL
);
2481 running_config_entries
= hash_create(running_config_entry_key_make
,
2482 running_config_entry_cmp
,
2483 "Running Configuration Entries");
2484 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
2486 /* Initialize the northbound CLI. */
2490 void nb_terminate(void)
2492 /* Terminate the northbound CLI. */
2495 /* Delete all nb_node's from all YANG modules. */
2498 /* Delete the running configuration. */
2499 hash_clean(running_config_entries
, running_config_entry_free
);
2500 hash_free(running_config_entries
);
2501 nb_config_free(running_config
);
2502 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);