1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2018 NetDEF, Inc.
11 #include "lib_errors.h"
16 #include "frr_pthread.h"
17 #include "northbound.h"
18 #include "northbound_cli.h"
19 #include "northbound_db.h"
22 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node");
23 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration");
24 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry");
26 /* Running configuration - shouldn't be modified directly. */
27 struct nb_config
*running_config
;
29 /* Hash table of user pointers associated with configuration entries. */
30 static struct hash
*running_config_entries
;
32 /* Management lock for the running configuration. */
34 /* Mutex protecting this structure. */
40 /* Northbound client who owns this lock. */
41 enum nb_client owner_client
;
43 /* Northbound user who owns this lock. */
44 const void *owner_user
;
45 } running_config_mgmt_lock
;
47 /* Knob to record config transaction */
48 static bool nb_db_enabled
;
50 * Global lock used to prevent multiple configuration transactions from
51 * happening concurrently.
53 static bool transaction_in_progress
;
55 static int nb_callback_pre_validate(struct nb_context
*context
,
56 const struct nb_node
*nb_node
,
57 const struct lyd_node
*dnode
, char *errmsg
,
59 static int nb_callback_configuration(struct nb_context
*context
,
60 const enum nb_event event
,
61 struct nb_config_change
*change
,
62 char *errmsg
, size_t errmsg_len
);
63 static struct nb_transaction
*
64 nb_transaction_new(struct nb_context context
, struct nb_config
*config
,
65 struct nb_config_cbs
*changes
, const char *comment
,
66 char *errmsg
, size_t errmsg_len
);
67 static void nb_transaction_free(struct nb_transaction
*transaction
);
68 static int nb_transaction_process(enum nb_event event
,
69 struct nb_transaction
*transaction
,
70 char *errmsg
, size_t errmsg_len
);
71 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
72 char *errmsg
, size_t errmsg_len
);
73 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
74 const char *xpath
, const void *list_entry
,
75 const struct yang_list_keys
*list_keys
,
76 struct yang_translator
*translator
,
77 bool first
, uint32_t flags
,
78 nb_oper_data_cb cb
, void *arg
);
80 static int nb_node_check_config_only(const struct lysc_node
*snode
, void *arg
)
82 bool *config_only
= arg
;
84 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
)) {
86 return YANG_ITER_STOP
;
89 return YANG_ITER_CONTINUE
;
92 static int nb_node_new_cb(const struct lysc_node
*snode
, void *arg
)
94 struct nb_node
*nb_node
;
95 struct lysc_node
*sparent
, *sparent_list
;
96 struct frr_yang_module_info
*module
;
98 module
= (struct frr_yang_module_info
*)arg
;
99 nb_node
= XCALLOC(MTYPE_NB_NODE
, sizeof(*nb_node
));
100 yang_snode_get_path(snode
, YANG_PATH_DATA
, nb_node
->xpath
,
101 sizeof(nb_node
->xpath
));
102 nb_node
->priority
= NB_DFLT_PRIORITY
;
103 sparent
= yang_snode_real_parent(snode
);
105 nb_node
->parent
= sparent
->priv
;
106 sparent_list
= yang_snode_parent_list(snode
);
108 nb_node
->parent_list
= sparent_list
->priv
;
111 if (CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
112 bool config_only
= true;
114 (void)yang_snodes_iterate_subtree(snode
, NULL
,
115 nb_node_check_config_only
, 0,
118 SET_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
);
120 if (CHECK_FLAG(snode
->nodetype
, LYS_LIST
)) {
121 if (yang_snode_num_keys(snode
) == 0)
122 SET_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
);
126 * Link the northbound node and the libyang schema node with one
129 nb_node
->snode
= snode
;
130 assert(snode
->priv
== NULL
);
131 ((struct lysc_node
*)snode
)->priv
= nb_node
;
133 if (module
&& module
->ignore_cbs
)
134 SET_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
);
136 return YANG_ITER_CONTINUE
;
139 static int nb_node_del_cb(const struct lysc_node
*snode
, void *arg
)
141 struct nb_node
*nb_node
;
143 nb_node
= snode
->priv
;
145 ((struct lysc_node
*)snode
)->priv
= NULL
;
146 XFREE(MTYPE_NB_NODE
, nb_node
);
149 return YANG_ITER_CONTINUE
;
152 void nb_nodes_create(void)
154 yang_snodes_iterate(NULL
, nb_node_new_cb
, 0, NULL
);
157 void nb_nodes_delete(void)
159 yang_snodes_iterate(NULL
, nb_node_del_cb
, 0, NULL
);
162 struct nb_node
*nb_node_find(const char *path
)
164 const struct lysc_node
*snode
;
167 * Use libyang to find the schema node associated to the path and get
168 * the northbound node from there (snode private pointer).
170 snode
= lys_find_path(ly_native_ctx
, NULL
, path
, 0);
177 void nb_node_set_dependency_cbs(const char *dependency_xpath
,
178 const char *dependant_xpath
,
179 struct nb_dependency_callbacks
*cbs
)
181 struct nb_node
*dependency
= nb_node_find(dependency_xpath
);
182 struct nb_node
*dependant
= nb_node_find(dependant_xpath
);
184 if (!dependency
|| !dependant
)
187 dependency
->dep_cbs
.get_dependant_xpath
= cbs
->get_dependant_xpath
;
188 dependant
->dep_cbs
.get_dependency_xpath
= cbs
->get_dependency_xpath
;
191 bool nb_node_has_dependency(struct nb_node
*node
)
193 return node
->dep_cbs
.get_dependency_xpath
!= NULL
;
196 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
197 enum nb_operation operation
,
198 int callback_implemented
, bool optional
)
202 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
205 * Add an exception for operational data callbacks. A rw list usually
206 * doesn't need any associated operational data callbacks. But if this
207 * rw list is augmented by another module which adds state nodes under
208 * it, then this list will need to have the 'get_next()', 'get_keys()'
209 * and 'lookup_entry()' callbacks. As such, never log a warning when
210 * these callbacks are implemented when they are not needed, since this
211 * depends on context (e.g. some daemons might augment "frr-interface"
212 * while others don't).
214 if (!valid
&& callback_implemented
&& operation
!= NB_OP_GET_NEXT
215 && operation
!= NB_OP_GET_KEYS
&& operation
!= NB_OP_LOOKUP_ENTRY
)
216 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
217 "unneeded '%s' callback for '%s'",
218 nb_operation_name(operation
), nb_node
->xpath
);
220 if (!optional
&& valid
&& !callback_implemented
) {
221 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
222 nb_operation_name(operation
), nb_node
->xpath
);
230 * Check if the required callbacks were implemented for the given northbound
233 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
236 unsigned int error
= 0;
238 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
241 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
242 !!nb_node
->cbs
.create
, false);
243 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
244 !!nb_node
->cbs
.modify
, false);
245 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
246 !!nb_node
->cbs
.destroy
, false);
247 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
249 error
+= nb_node_validate_cb(nb_node
, NB_OP_PRE_VALIDATE
,
250 !!nb_node
->cbs
.pre_validate
, true);
251 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
252 !!nb_node
->cbs
.apply_finish
, true);
253 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
254 !!nb_node
->cbs
.get_elem
, false);
255 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
256 !!nb_node
->cbs
.get_next
, false);
257 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
258 !!nb_node
->cbs
.get_keys
, false);
259 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
260 !!nb_node
->cbs
.lookup_entry
, false);
261 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
267 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
269 /* Top-level nodes can have any priority. */
270 if (!nb_node
->parent
)
273 if (nb_node
->priority
< nb_node
->parent
->priority
) {
274 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
275 "node has higher priority than its parent [xpath %s]",
283 static int nb_node_validate(const struct lysc_node
*snode
, void *arg
)
285 struct nb_node
*nb_node
= snode
->priv
;
286 unsigned int *errors
= arg
;
288 /* Validate callbacks and priority. */
290 *errors
+= nb_node_validate_cbs(nb_node
);
291 *errors
+= nb_node_validate_priority(nb_node
);
294 return YANG_ITER_CONTINUE
;
297 struct nb_config
*nb_config_new(struct lyd_node
*dnode
)
299 struct nb_config
*config
;
301 config
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*config
));
303 config
->dnode
= dnode
;
305 config
->dnode
= yang_dnode_new(ly_native_ctx
, true);
308 RB_INIT(nb_config_cbs
, &config
->cfg_chgs
);
313 void nb_config_free(struct nb_config
*config
)
316 yang_dnode_free(config
->dnode
);
317 nb_config_diff_del_changes(&config
->cfg_chgs
);
318 XFREE(MTYPE_NB_CONFIG
, config
);
321 struct nb_config
*nb_config_dup(const struct nb_config
*config
)
323 struct nb_config
*dup
;
325 dup
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*dup
));
326 dup
->dnode
= yang_dnode_dup(config
->dnode
);
327 dup
->version
= config
->version
;
329 RB_INIT(nb_config_cbs
, &dup
->cfg_chgs
);
334 int nb_config_merge(struct nb_config
*config_dst
, struct nb_config
*config_src
,
335 bool preserve_source
)
339 ret
= lyd_merge_siblings(&config_dst
->dnode
, config_src
->dnode
, 0);
341 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_merge() failed", __func__
);
343 if (!preserve_source
)
344 nb_config_free(config_src
);
346 return (ret
== 0) ? NB_OK
: NB_ERR
;
349 void nb_config_replace(struct nb_config
*config_dst
,
350 struct nb_config
*config_src
, bool preserve_source
)
352 /* Update version. */
353 if (config_src
->version
!= 0)
354 config_dst
->version
= config_src
->version
;
357 if (config_dst
->dnode
)
358 yang_dnode_free(config_dst
->dnode
);
359 if (preserve_source
) {
360 config_dst
->dnode
= yang_dnode_dup(config_src
->dnode
);
362 config_dst
->dnode
= config_src
->dnode
;
363 config_src
->dnode
= NULL
;
364 nb_config_free(config_src
);
368 /* Generate the nb_config_cbs tree. */
369 static inline int nb_config_cb_compare(const struct nb_config_cb
*a
,
370 const struct nb_config_cb
*b
)
372 /* Sort by priority first. */
373 if (a
->nb_node
->priority
< b
->nb_node
->priority
)
375 if (a
->nb_node
->priority
> b
->nb_node
->priority
)
379 * Preserve the order of the configuration changes as told by libyang.
387 * All 'apply_finish' callbacks have their sequence number set to zero.
388 * In this case, compare them using their dnode pointers (the order
389 * doesn't matter for callbacks that have the same priority).
391 if (a
->dnode
< b
->dnode
)
393 if (a
->dnode
> b
->dnode
)
398 RB_GENERATE(nb_config_cbs
, nb_config_cb
, entry
, nb_config_cb_compare
);
400 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
401 enum nb_operation operation
,
403 const struct lyd_node
*dnode
)
405 struct nb_config_change
*change
;
407 /* Ignore unimplemented nodes. */
408 if (!dnode
->schema
->priv
)
411 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
412 change
->cb
.operation
= operation
;
413 change
->cb
.seq
= *seq
;
415 change
->cb
.nb_node
= dnode
->schema
->priv
;
416 change
->cb
.dnode
= dnode
;
418 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
421 void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
423 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
424 struct nb_config_change
*change
;
426 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
428 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
429 XFREE(MTYPE_TMP
, change
);
434 * Helper function used when calculating the delta between two different
435 * configurations. Given a new subtree, calculate all new YANG data nodes,
436 * excluding default leafs and leaf-lists. This is a recursive function.
438 void nb_config_diff_created(const struct lyd_node
*dnode
, uint32_t *seq
,
439 struct nb_config_cbs
*changes
)
441 enum nb_operation operation
;
442 struct lyd_node
*child
;
444 /* Ignore unimplemented nodes. */
445 if (!dnode
->schema
->priv
)
448 switch (dnode
->schema
->nodetype
) {
451 if (lyd_is_default(dnode
))
454 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
455 operation
= NB_OP_CREATE
;
456 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
457 operation
= NB_OP_MODIFY
;
461 nb_config_diff_add_change(changes
, operation
, seq
, dnode
);
465 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
466 nb_config_diff_add_change(changes
, NB_OP_CREATE
, seq
,
469 /* Process child nodes recursively. */
470 LY_LIST_FOR (lyd_child(dnode
), child
) {
471 nb_config_diff_created(child
, seq
, changes
);
479 static void nb_config_diff_deleted(const struct lyd_node
*dnode
, uint32_t *seq
,
480 struct nb_config_cbs
*changes
)
482 /* Ignore unimplemented nodes. */
483 if (!dnode
->schema
->priv
)
486 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
487 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, seq
, dnode
);
488 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
489 struct lyd_node
*child
;
492 * Non-presence containers need special handling since they
493 * don't have "destroy" callbacks. In this case, what we need to
494 * do is to call the "destroy" callbacks of their child nodes
495 * when applicable (i.e. optional nodes).
497 LY_LIST_FOR (lyd_child(dnode
), child
) {
498 nb_config_diff_deleted(child
, seq
, changes
);
503 static int nb_lyd_diff_get_op(const struct lyd_node
*dnode
)
505 const struct lyd_meta
*meta
;
506 LY_LIST_FOR (dnode
->meta
, meta
) {
507 if (strcmp(meta
->name
, "operation")
508 || strcmp(meta
->annotation
->module
->name
, "yang"))
510 return lyd_get_meta_value(meta
)[0];
515 #if 0 /* Used below in nb_config_diff inside normally disabled code */
516 static inline void nb_config_diff_dnode_log_path(const char *context
,
518 const struct lyd_node
*dnode
)
520 if (dnode
->schema
->nodetype
& LYD_NODE_TERM
)
521 zlog_debug("nb_config_diff: %s: %s: %s", context
, path
,
522 lyd_get_value(dnode
));
524 zlog_debug("nb_config_diff: %s: %s", context
, path
);
527 static inline void nb_config_diff_dnode_log(const char *context
,
528 const struct lyd_node
*dnode
)
531 zlog_debug("nb_config_diff: %s: NULL", context
);
535 char *path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
536 nb_config_diff_dnode_log_path(context
, path
, dnode
);
542 * Calculate the delta between two different configurations.
544 * NOTE: 'config1' is the reference DB, while 'config2' is
545 * the DB being compared against 'config1'. Typically 'config1'
546 * should be the Running DB and 'config2' is the Candidate DB.
548 void nb_config_diff(const struct nb_config
*config1
,
549 const struct nb_config
*config2
,
550 struct nb_config_cbs
*changes
)
552 struct lyd_node
*diff
= NULL
;
553 const struct lyd_node
*root
, *dnode
;
554 struct lyd_node
*target
;
559 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
560 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
561 LY_LIST_FOR(config1
->dnode
, root
) {
562 LYD_TREE_DFS_BEGIN(root
, dnode
) {
563 nb_config_diff_dnode_log("from", dnode
);
564 LYD_TREE_DFS_END(root
, dnode
);
567 LY_LIST_FOR(config2
->dnode
, root
) {
568 LYD_TREE_DFS_BEGIN(root
, dnode
) {
569 nb_config_diff_dnode_log("to", dnode
);
570 LYD_TREE_DFS_END(root
, dnode
);
576 err
= lyd_diff_siblings(config1
->dnode
, config2
->dnode
,
577 LYD_DIFF_DEFAULTS
, &diff
);
580 if (diff
&& DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
583 if (!lyd_print_mem(&s
, diff
, LYD_JSON
,
584 LYD_PRINT_WITHSIBLINGS
| LYD_PRINT_WD_ALL
)) {
585 zlog_debug("%s: %s", __func__
, s
);
592 LY_LIST_FOR (diff
, root
) {
593 LYD_TREE_DFS_BEGIN (root
, dnode
) {
594 op
= nb_lyd_diff_get_op(dnode
);
596 path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
598 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
599 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
601 snprintf(context
, sizeof(context
),
602 "iterating diff: oper: %c seq: %u", op
, seq
);
603 nb_config_diff_dnode_log_path(context
, path
, dnode
);
607 case 'c': /* create */
609 * This is rather inefficient, but when we use
610 * dnode from the diff instead of the
611 * candidate config node we get failures when
612 * looking up default values, etc, based on
615 target
= yang_dnode_get(config2
->dnode
, path
);
617 nb_config_diff_created(target
, &seq
, changes
);
619 /* Skip rest of sub-tree, move to next sibling
621 LYD_TREE_DFS_continue
= 1;
623 case 'd': /* delete */
624 target
= yang_dnode_get(config1
->dnode
, path
);
626 nb_config_diff_deleted(target
, &seq
, changes
);
628 /* Skip rest of sub-tree, move to next sibling
630 LYD_TREE_DFS_continue
= 1;
632 case 'r': /* replace */
633 /* either moving an entry or changing a value */
634 target
= yang_dnode_get(config2
->dnode
, path
);
636 nb_config_diff_add_change(changes
, NB_OP_MODIFY
,
644 LYD_TREE_DFS_END(root
, dnode
);
651 int nb_candidate_edit(struct nb_config
*candidate
,
652 const struct nb_node
*nb_node
,
653 enum nb_operation operation
, const char *xpath
,
654 const struct yang_data
*previous
,
655 const struct yang_data
*data
)
657 struct lyd_node
*dnode
, *dep_dnode
;
658 char xpath_edit
[XPATH_MAXLEN
];
659 char dep_xpath
[XPATH_MAXLEN
];
662 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
663 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
664 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
667 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
672 err
= lyd_new_path(candidate
->dnode
, ly_native_ctx
, xpath_edit
,
673 (void *)data
->value
, LYD_NEW_PATH_UPDATE
,
676 flog_warn(EC_LIB_LIBYANG
,
677 "%s: lyd_new_path(%s) failed: %d", __func__
,
681 /* Create default nodes */
682 LY_ERR err
= lyd_new_implicit_tree(
683 dnode
, LYD_IMPLICIT_NO_STATE
, NULL
);
685 flog_warn(EC_LIB_LIBYANG
,
686 "%s: lyd_new_implicit_all failed: %d",
692 * dnode returned by the lyd_new_path may be from a
693 * different schema, so we need to update the nb_node
695 nb_node
= dnode
->schema
->priv
;
696 if (nb_node
->dep_cbs
.get_dependency_xpath
) {
697 nb_node
->dep_cbs
.get_dependency_xpath(
700 err
= lyd_new_path(candidate
->dnode
,
701 ly_native_ctx
, dep_xpath
,
702 NULL
, LYD_NEW_PATH_UPDATE
,
704 /* Create default nodes */
705 if (!err
&& dep_dnode
)
706 err
= lyd_new_implicit_tree(
708 LYD_IMPLICIT_NO_STATE
, NULL
);
712 "%s: dependency: lyd_new_path(%s) failed: %d",
713 __func__
, dep_xpath
, err
);
720 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
723 * Return a special error code so the caller can choose
724 * whether to ignore it or not.
726 return NB_ERR_NOT_FOUND
;
727 /* destroy dependant */
728 if (nb_node
->dep_cbs
.get_dependant_xpath
) {
729 nb_node
->dep_cbs
.get_dependant_xpath(dnode
, dep_xpath
);
731 dep_dnode
= yang_dnode_get(candidate
->dnode
, dep_xpath
);
733 lyd_free_tree(dep_dnode
);
735 lyd_free_tree(dnode
);
738 /* TODO: update configuration. */
740 case NB_OP_PRE_VALIDATE
:
741 case NB_OP_APPLY_FINISH
:
745 case NB_OP_LOOKUP_ENTRY
:
747 flog_warn(EC_LIB_DEVELOPMENT
,
748 "%s: unknown operation (%u) [xpath %s]", __func__
,
749 operation
, xpath_edit
);
756 static void nb_update_candidate_changes(struct nb_config
*candidate
,
757 struct nb_cfg_change
*change
,
760 enum nb_operation oper
= change
->operation
;
761 char *xpath
= change
->xpath
;
762 struct lyd_node
*root
= NULL
;
763 struct lyd_node
*dnode
;
764 struct nb_config_cbs
*cfg_chgs
= &candidate
->cfg_chgs
;
770 root
= yang_dnode_get(candidate
->dnode
, xpath
);
773 root
= yang_dnode_get(running_config
->dnode
, xpath
);
777 case NB_OP_PRE_VALIDATE
:
778 case NB_OP_APPLY_FINISH
:
782 case NB_OP_LOOKUP_ENTRY
:
786 assert(!"non-enum value, invalid");
792 LYD_TREE_DFS_BEGIN (root
, dnode
) {
793 op
= nb_lyd_diff_get_op(dnode
);
796 nb_config_diff_created(dnode
, seq
, cfg_chgs
);
797 LYD_TREE_DFS_continue
= 1;
800 nb_config_diff_deleted(dnode
, seq
, cfg_chgs
);
801 LYD_TREE_DFS_continue
= 1;
804 nb_config_diff_add_change(cfg_chgs
, NB_OP_MODIFY
, seq
,
810 LYD_TREE_DFS_END(root
, dnode
);
814 static bool nb_is_operation_allowed(struct nb_node
*nb_node
,
815 struct nb_cfg_change
*change
)
817 enum nb_operation oper
= change
->operation
;
819 if (lysc_is_key(nb_node
->snode
)) {
820 if (oper
== NB_OP_MODIFY
|| oper
== NB_OP_DESTROY
)
826 void nb_candidate_edit_config_changes(
827 struct nb_config
*candidate_config
, struct nb_cfg_change cfg_changes
[],
828 size_t num_cfg_changes
, const char *xpath_base
, const char *curr_xpath
,
829 int xpath_index
, char *err_buf
, int err_bufsize
, bool *error
)
836 if (xpath_base
== NULL
)
839 /* Edit candidate configuration. */
840 for (size_t i
= 0; i
< num_cfg_changes
; i
++) {
841 struct nb_cfg_change
*change
= &cfg_changes
[i
];
842 struct nb_node
*nb_node
;
843 char xpath
[XPATH_MAXLEN
];
844 struct yang_data
*data
;
847 /* Handle relative XPaths. */
848 memset(xpath
, 0, sizeof(xpath
));
849 if (xpath_index
> 0 &&
850 (xpath_base
[0] == '.' || change
->xpath
[0] == '.'))
851 strlcpy(xpath
, curr_xpath
, sizeof(xpath
));
853 if (xpath_base
[0] == '.')
854 strlcat(xpath
, xpath_base
+ 1, sizeof(xpath
));
856 strlcat(xpath
, xpath_base
, sizeof(xpath
));
858 if (change
->xpath
[0] == '.')
859 strlcat(xpath
, change
->xpath
+ 1, sizeof(xpath
));
861 strlcpy(xpath
, change
->xpath
, sizeof(xpath
));
863 /* Find the northbound node associated to the data path. */
864 nb_node
= nb_node_find(xpath
);
866 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
867 "%s: unknown data path: %s", __func__
, xpath
);
872 /* Find if the node to be edited is not a key node */
873 if (!nb_is_operation_allowed(nb_node
, change
)) {
874 zlog_err(" Xpath %s points to key node", xpath
);
880 /* If the value is not set, get the default if it exists. */
881 if (change
->value
== NULL
)
882 change
->value
= yang_snode_get_default(nb_node
->snode
);
883 data
= yang_data_new(xpath
, change
->value
);
886 * Ignore "not found" errors when editing the candidate
889 ret
= nb_candidate_edit(candidate_config
, nb_node
,
890 change
->operation
, xpath
, NULL
, data
);
891 yang_data_free(data
);
892 if (ret
!= NB_OK
&& ret
!= NB_ERR_NOT_FOUND
) {
894 EC_LIB_NB_CANDIDATE_EDIT_ERROR
,
895 "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
896 __func__
, nb_operation_name(change
->operation
),
902 nb_update_candidate_changes(candidate_config
, change
, &seq
);
905 if (error
&& *error
) {
909 * Failure to edit the candidate configuration should never
910 * happen in practice, unless there's a bug in the code. When
911 * that happens, log the error but otherwise ignore it.
913 snprintf(err_buf
, err_bufsize
,
914 "%% Failed to edit configuration.\n\n%s",
915 yang_print_errors(ly_native_ctx
, buf
, sizeof(buf
)));
919 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
921 if (candidate
->version
< running_config
->version
)
927 int nb_candidate_update(struct nb_config
*candidate
)
929 struct nb_config
*updated_config
;
931 updated_config
= nb_config_dup(running_config
);
932 if (nb_config_merge(updated_config
, candidate
, true) != NB_OK
)
935 nb_config_replace(candidate
, updated_config
, false);
941 * Perform YANG syntactic and semantic validation.
943 * WARNING: lyd_validate() can change the configuration as part of the
944 * validation process.
946 int nb_candidate_validate_yang(struct nb_config
*candidate
, bool no_state
,
947 char *errmsg
, size_t errmsg_len
)
949 if (lyd_validate_all(&candidate
->dnode
, ly_native_ctx
,
950 no_state
? LYD_VALIDATE_NO_STATE
951 : LYD_VALIDATE_PRESENT
,
953 yang_print_errors(ly_native_ctx
, errmsg
, errmsg_len
);
954 return NB_ERR_VALIDATION
;
960 /* Perform code-level validation using the northbound callbacks. */
961 int nb_candidate_validate_code(struct nb_context
*context
,
962 struct nb_config
*candidate
,
963 struct nb_config_cbs
*changes
, char *errmsg
,
966 struct nb_config_cb
*cb
;
967 struct lyd_node
*root
, *child
;
970 /* First validate the candidate as a whole. */
971 LY_LIST_FOR (candidate
->dnode
, root
) {
972 LYD_TREE_DFS_BEGIN (root
, child
) {
973 struct nb_node
*nb_node
;
975 nb_node
= child
->schema
->priv
;
976 if (!nb_node
|| !nb_node
->cbs
.pre_validate
)
979 ret
= nb_callback_pre_validate(context
, nb_node
, child
,
982 return NB_ERR_VALIDATION
;
985 LYD_TREE_DFS_END(root
, child
);
989 /* Now validate the configuration changes. */
990 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
991 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
993 ret
= nb_callback_configuration(context
, NB_EV_VALIDATE
, change
,
996 return NB_ERR_VALIDATION
;
1002 int nb_candidate_diff_and_validate_yang(struct nb_context
*context
,
1003 struct nb_config
*candidate
,
1004 struct nb_config_cbs
*changes
,
1005 char *errmsg
, size_t errmsg_len
)
1007 if (nb_candidate_validate_yang(candidate
, true, errmsg
,
1008 sizeof(errmsg_len
)) != NB_OK
)
1009 return NB_ERR_VALIDATION
;
1011 RB_INIT(nb_config_cbs
, changes
);
1012 nb_config_diff(running_config
, candidate
, changes
);
1017 int nb_candidate_validate(struct nb_context
*context
,
1018 struct nb_config
*candidate
, char *errmsg
,
1021 struct nb_config_cbs changes
;
1024 ret
= nb_candidate_diff_and_validate_yang(context
, candidate
, &changes
,
1025 errmsg
, errmsg_len
);
1029 ret
= nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
1031 nb_config_diff_del_changes(&changes
);
1036 int nb_candidate_commit_prepare(struct nb_context context
,
1037 struct nb_config
*candidate
,
1038 const char *comment
,
1039 struct nb_transaction
**transaction
,
1040 bool skip_validate
, bool ignore_zero_change
,
1041 char *errmsg
, size_t errmsg_len
)
1043 struct nb_config_cbs changes
;
1045 if (!skip_validate
&&
1046 nb_candidate_validate_yang(candidate
, true, errmsg
, errmsg_len
) !=
1048 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
1049 "%s: failed to validate candidate configuration",
1051 return NB_ERR_VALIDATION
;
1054 RB_INIT(nb_config_cbs
, &changes
);
1055 nb_config_diff(running_config
, candidate
, &changes
);
1056 if (!ignore_zero_change
&& RB_EMPTY(nb_config_cbs
, &changes
)) {
1059 "No changes to apply were found during preparation phase");
1060 return NB_ERR_NO_CHANGES
;
1063 if (!skip_validate
&&
1064 nb_candidate_validate_code(&context
, candidate
, &changes
, errmsg
,
1065 errmsg_len
) != NB_OK
) {
1066 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
1067 "%s: failed to validate candidate configuration",
1069 nb_config_diff_del_changes(&changes
);
1070 return NB_ERR_VALIDATION
;
1074 * Re-use an existing transaction if provided. Else allocate a new one.
1077 *transaction
= nb_transaction_new(context
, candidate
, &changes
,
1078 comment
, errmsg
, errmsg_len
);
1079 if (*transaction
== NULL
) {
1080 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
1081 "%s: failed to create transaction: %s", __func__
,
1083 nb_config_diff_del_changes(&changes
);
1084 return NB_ERR_LOCKED
;
1087 return nb_transaction_process(NB_EV_PREPARE
, *transaction
, errmsg
,
1091 void nb_candidate_commit_abort(struct nb_transaction
*transaction
, char *errmsg
,
1094 (void)nb_transaction_process(NB_EV_ABORT
, transaction
, errmsg
,
1096 nb_transaction_free(transaction
);
1099 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
1100 bool save_transaction
, uint32_t *transaction_id
,
1101 char *errmsg
, size_t errmsg_len
)
1103 (void)nb_transaction_process(NB_EV_APPLY
, transaction
, errmsg
,
1105 nb_transaction_apply_finish(transaction
, errmsg
, errmsg_len
);
1107 /* Replace running by candidate. */
1108 transaction
->config
->version
++;
1109 nb_config_replace(running_config
, transaction
->config
, true);
1111 /* Record transaction. */
1112 if (save_transaction
&& nb_db_enabled
1113 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
1114 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
1115 "%s: failed to record transaction", __func__
);
1117 nb_transaction_free(transaction
);
1120 int nb_candidate_commit(struct nb_context context
, struct nb_config
*candidate
,
1121 bool save_transaction
, const char *comment
,
1122 uint32_t *transaction_id
, char *errmsg
,
1125 struct nb_transaction
*transaction
= NULL
;
1128 ret
= nb_candidate_commit_prepare(context
, candidate
, comment
,
1129 &transaction
, false, false, errmsg
,
1132 * Apply the changes if the preparation phase succeeded. Otherwise abort
1136 nb_candidate_commit_apply(transaction
, save_transaction
,
1137 transaction_id
, errmsg
, errmsg_len
);
1138 else if (transaction
!= NULL
)
1139 nb_candidate_commit_abort(transaction
, errmsg
, errmsg_len
);
1144 int nb_running_lock(enum nb_client client
, const void *user
)
1148 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
1149 if (!running_config_mgmt_lock
.locked
) {
1150 running_config_mgmt_lock
.locked
= true;
1151 running_config_mgmt_lock
.owner_client
= client
;
1152 running_config_mgmt_lock
.owner_user
= user
;
1160 int nb_running_unlock(enum nb_client client
, const void *user
)
1164 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
1165 if (running_config_mgmt_lock
.locked
1166 && running_config_mgmt_lock
.owner_client
== client
1167 && running_config_mgmt_lock
.owner_user
== user
) {
1168 running_config_mgmt_lock
.locked
= false;
1169 running_config_mgmt_lock
.owner_client
= NB_CLIENT_NONE
;
1170 running_config_mgmt_lock
.owner_user
= NULL
;
1178 int nb_running_lock_check(enum nb_client client
, const void *user
)
1182 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
1183 if (!running_config_mgmt_lock
.locked
1184 || (running_config_mgmt_lock
.owner_client
== client
1185 && running_config_mgmt_lock
.owner_user
== user
))
1192 static void nb_log_config_callback(const enum nb_event event
,
1193 enum nb_operation operation
,
1194 const struct lyd_node
*dnode
)
1197 char xpath
[XPATH_MAXLEN
];
1199 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
1202 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1203 if (yang_snode_is_typeless_data(dnode
->schema
))
1206 value
= yang_dnode_get_string(dnode
, NULL
);
1209 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
1210 nb_event_name(event
), nb_operation_name(operation
), xpath
,
1214 static int nb_callback_create(struct nb_context
*context
,
1215 const struct nb_node
*nb_node
,
1216 enum nb_event event
, const struct lyd_node
*dnode
,
1217 union nb_resource
*resource
, char *errmsg
,
1220 struct nb_cb_create_args args
= {};
1221 bool unexpected_error
= false;
1224 assert(!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
));
1226 nb_log_config_callback(event
, NB_OP_CREATE
, dnode
);
1228 args
.context
= context
;
1231 args
.resource
= resource
;
1232 args
.errmsg
= errmsg
;
1233 args
.errmsg_len
= errmsg_len
;
1234 ret
= nb_node
->cbs
.create(&args
);
1236 /* Detect and log unexpected errors. */
1241 case NB_ERR_VALIDATION
:
1242 if (event
!= NB_EV_VALIDATE
)
1243 unexpected_error
= true;
1245 case NB_ERR_RESOURCE
:
1246 if (event
!= NB_EV_PREPARE
)
1247 unexpected_error
= true;
1249 case NB_ERR_INCONSISTENCY
:
1250 if (event
== NB_EV_VALIDATE
)
1251 unexpected_error
= true;
1254 unexpected_error
= true;
1257 if (unexpected_error
)
1258 DEBUGD(&nb_dbg_cbs_config
,
1259 "northbound callback: unexpected return value: %s",
1265 static int nb_callback_modify(struct nb_context
*context
,
1266 const struct nb_node
*nb_node
,
1267 enum nb_event event
, const struct lyd_node
*dnode
,
1268 union nb_resource
*resource
, char *errmsg
,
1271 struct nb_cb_modify_args args
= {};
1272 bool unexpected_error
= false;
1275 assert(!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
));
1277 nb_log_config_callback(event
, NB_OP_MODIFY
, dnode
);
1279 args
.context
= context
;
1282 args
.resource
= resource
;
1283 args
.errmsg
= errmsg
;
1284 args
.errmsg_len
= errmsg_len
;
1285 ret
= nb_node
->cbs
.modify(&args
);
1287 /* Detect and log unexpected errors. */
1292 case NB_ERR_VALIDATION
:
1293 if (event
!= NB_EV_VALIDATE
)
1294 unexpected_error
= true;
1296 case NB_ERR_RESOURCE
:
1297 if (event
!= NB_EV_PREPARE
)
1298 unexpected_error
= true;
1300 case NB_ERR_INCONSISTENCY
:
1301 if (event
== NB_EV_VALIDATE
)
1302 unexpected_error
= true;
1305 unexpected_error
= true;
1308 if (unexpected_error
)
1309 DEBUGD(&nb_dbg_cbs_config
,
1310 "northbound callback: unexpected return value: %s",
1316 static int nb_callback_destroy(struct nb_context
*context
,
1317 const struct nb_node
*nb_node
,
1318 enum nb_event event
,
1319 const struct lyd_node
*dnode
, char *errmsg
,
1322 struct nb_cb_destroy_args args
= {};
1323 bool unexpected_error
= false;
1326 assert(!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
));
1328 nb_log_config_callback(event
, NB_OP_DESTROY
, dnode
);
1330 args
.context
= context
;
1333 args
.errmsg
= errmsg
;
1334 args
.errmsg_len
= errmsg_len
;
1335 ret
= nb_node
->cbs
.destroy(&args
);
1337 /* Detect and log unexpected errors. */
1342 case NB_ERR_VALIDATION
:
1343 if (event
!= NB_EV_VALIDATE
)
1344 unexpected_error
= true;
1346 case NB_ERR_INCONSISTENCY
:
1347 if (event
== NB_EV_VALIDATE
)
1348 unexpected_error
= true;
1351 unexpected_error
= true;
1354 if (unexpected_error
)
1355 DEBUGD(&nb_dbg_cbs_config
,
1356 "northbound callback: unexpected return value: %s",
1362 static int nb_callback_move(struct nb_context
*context
,
1363 const struct nb_node
*nb_node
, enum nb_event event
,
1364 const struct lyd_node
*dnode
, char *errmsg
,
1367 struct nb_cb_move_args args
= {};
1368 bool unexpected_error
= false;
1371 assert(!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
));
1373 nb_log_config_callback(event
, NB_OP_MOVE
, dnode
);
1375 args
.context
= context
;
1378 args
.errmsg
= errmsg
;
1379 args
.errmsg_len
= errmsg_len
;
1380 ret
= nb_node
->cbs
.move(&args
);
1382 /* Detect and log unexpected errors. */
1387 case NB_ERR_VALIDATION
:
1388 if (event
!= NB_EV_VALIDATE
)
1389 unexpected_error
= true;
1391 case NB_ERR_INCONSISTENCY
:
1392 if (event
== NB_EV_VALIDATE
)
1393 unexpected_error
= true;
1396 unexpected_error
= true;
1399 if (unexpected_error
)
1400 DEBUGD(&nb_dbg_cbs_config
,
1401 "northbound callback: unexpected return value: %s",
1407 static int nb_callback_pre_validate(struct nb_context
*context
,
1408 const struct nb_node
*nb_node
,
1409 const struct lyd_node
*dnode
, char *errmsg
,
1412 struct nb_cb_pre_validate_args args
= {};
1413 bool unexpected_error
= false;
1416 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1419 nb_log_config_callback(NB_EV_VALIDATE
, NB_OP_PRE_VALIDATE
, dnode
);
1422 args
.errmsg
= errmsg
;
1423 args
.errmsg_len
= errmsg_len
;
1424 ret
= nb_node
->cbs
.pre_validate(&args
);
1426 /* Detect and log unexpected errors. */
1429 case NB_ERR_VALIDATION
:
1432 unexpected_error
= true;
1435 if (unexpected_error
)
1436 DEBUGD(&nb_dbg_cbs_config
,
1437 "northbound callback: unexpected return value: %s",
1443 static void nb_callback_apply_finish(struct nb_context
*context
,
1444 const struct nb_node
*nb_node
,
1445 const struct lyd_node
*dnode
, char *errmsg
,
1448 struct nb_cb_apply_finish_args args
= {};
1450 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1453 nb_log_config_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
, dnode
);
1455 args
.context
= context
;
1457 args
.errmsg
= errmsg
;
1458 args
.errmsg_len
= errmsg_len
;
1459 nb_node
->cbs
.apply_finish(&args
);
1462 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
1464 const void *list_entry
)
1466 struct nb_cb_get_elem_args args
= {};
1468 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1471 DEBUGD(&nb_dbg_cbs_state
,
1472 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1476 args
.list_entry
= list_entry
;
1477 return nb_node
->cbs
.get_elem(&args
);
1480 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
1481 const void *parent_list_entry
,
1482 const void *list_entry
)
1484 struct nb_cb_get_next_args args
= {};
1486 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1489 DEBUGD(&nb_dbg_cbs_state
,
1490 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1491 nb_node
->xpath
, parent_list_entry
, list_entry
);
1493 args
.parent_list_entry
= parent_list_entry
;
1494 args
.list_entry
= list_entry
;
1495 return nb_node
->cbs
.get_next(&args
);
1498 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
1499 struct yang_list_keys
*keys
)
1501 struct nb_cb_get_keys_args args
= {};
1503 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1506 DEBUGD(&nb_dbg_cbs_state
,
1507 "northbound callback (get_keys): node [%s] list_entry [%p]",
1508 nb_node
->xpath
, list_entry
);
1510 args
.list_entry
= list_entry
;
1512 return nb_node
->cbs
.get_keys(&args
);
1515 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
1516 const void *parent_list_entry
,
1517 const struct yang_list_keys
*keys
)
1519 struct nb_cb_lookup_entry_args args
= {};
1521 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1524 DEBUGD(&nb_dbg_cbs_state
,
1525 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1526 nb_node
->xpath
, parent_list_entry
);
1528 args
.parent_list_entry
= parent_list_entry
;
1530 return nb_node
->cbs
.lookup_entry(&args
);
1533 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
1534 const struct list
*input
, struct list
*output
, char *errmsg
,
1537 struct nb_cb_rpc_args args
= {};
1539 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1542 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
1546 args
.output
= output
;
1547 args
.errmsg
= errmsg
;
1548 args
.errmsg_len
= errmsg_len
;
1549 return nb_node
->cbs
.rpc(&args
);
1553 * Call the northbound configuration callback associated to a given
1554 * configuration change.
1556 static int nb_callback_configuration(struct nb_context
*context
,
1557 const enum nb_event event
,
1558 struct nb_config_change
*change
,
1559 char *errmsg
, size_t errmsg_len
)
1561 enum nb_operation operation
= change
->cb
.operation
;
1562 char xpath
[XPATH_MAXLEN
];
1563 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
1564 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1565 union nb_resource
*resource
;
1568 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_IGNORE_CBS
))
1571 if (event
== NB_EV_VALIDATE
)
1574 resource
= &change
->resource
;
1576 switch (operation
) {
1578 ret
= nb_callback_create(context
, nb_node
, event
, dnode
,
1579 resource
, errmsg
, errmsg_len
);
1582 ret
= nb_callback_modify(context
, nb_node
, event
, dnode
,
1583 resource
, errmsg
, errmsg_len
);
1586 ret
= nb_callback_destroy(context
, nb_node
, event
, dnode
,
1587 errmsg
, errmsg_len
);
1590 ret
= nb_callback_move(context
, nb_node
, event
, dnode
, errmsg
,
1593 case NB_OP_PRE_VALIDATE
:
1594 case NB_OP_APPLY_FINISH
:
1595 case NB_OP_GET_ELEM
:
1596 case NB_OP_GET_NEXT
:
1597 case NB_OP_GET_KEYS
:
1598 case NB_OP_LOOKUP_ENTRY
:
1600 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1601 flog_err(EC_LIB_DEVELOPMENT
,
1602 "%s: unknown operation (%u) [xpath %s]", __func__
,
1608 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1611 case NB_EV_VALIDATE
:
1612 flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE
,
1613 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1614 nb_err_name(ret
), nb_event_name(event
),
1615 nb_operation_name(operation
), xpath
,
1616 errmsg
[0] ? " message: " : "", errmsg
);
1619 flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE
,
1620 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1621 nb_err_name(ret
), nb_event_name(event
),
1622 nb_operation_name(operation
), xpath
,
1623 errmsg
[0] ? " message: " : "", errmsg
);
1626 flog_warn(EC_LIB_NB_CB_CONFIG_ABORT
,
1627 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1628 nb_err_name(ret
), nb_event_name(event
),
1629 nb_operation_name(operation
), xpath
,
1630 errmsg
[0] ? " message: " : "", errmsg
);
1633 flog_err(EC_LIB_NB_CB_CONFIG_APPLY
,
1634 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1635 nb_err_name(ret
), nb_event_name(event
),
1636 nb_operation_name(operation
), xpath
,
1637 errmsg
[0] ? " message: " : "", errmsg
);
1640 flog_err(EC_LIB_DEVELOPMENT
,
1641 "%s: unknown event (%u) [xpath %s]", __func__
,
1650 static struct nb_transaction
*
1651 nb_transaction_new(struct nb_context context
, struct nb_config
*config
,
1652 struct nb_config_cbs
*changes
, const char *comment
,
1653 char *errmsg
, size_t errmsg_len
)
1655 struct nb_transaction
*transaction
;
1657 if (nb_running_lock_check(context
.client
, context
.user
)) {
1659 "running configuration is locked by another client",
1664 if (transaction_in_progress
) {
1666 "there's already another transaction in progress",
1670 transaction_in_progress
= true;
1672 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
1673 transaction
->context
= context
;
1675 strlcpy(transaction
->comment
, comment
,
1676 sizeof(transaction
->comment
));
1677 transaction
->config
= config
;
1678 transaction
->changes
= *changes
;
1683 static void nb_transaction_free(struct nb_transaction
*transaction
)
1685 nb_config_diff_del_changes(&transaction
->changes
);
1686 XFREE(MTYPE_TMP
, transaction
);
1687 transaction_in_progress
= false;
1690 /* Process all configuration changes associated to a transaction. */
1691 static int nb_transaction_process(enum nb_event event
,
1692 struct nb_transaction
*transaction
,
1693 char *errmsg
, size_t errmsg_len
)
1695 struct nb_config_cb
*cb
;
1697 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1698 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1702 * Only try to release resources that were allocated
1705 if (event
== NB_EV_ABORT
&& !change
->prepare_ok
)
1708 /* Call the appropriate callback. */
1709 ret
= nb_callback_configuration(&transaction
->context
, event
,
1710 change
, errmsg
, errmsg_len
);
1715 change
->prepare_ok
= true;
1720 * At this point it's not possible to reject the
1721 * transaction anymore, so any failure here can lead to
1722 * inconsistencies and should be treated as a bug.
1723 * Operations prone to errors, like validations and
1724 * resource allocations, should be performed during the
1728 case NB_EV_VALIDATE
:
1736 static struct nb_config_cb
*
1737 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const struct nb_node
*nb_node
,
1738 const struct lyd_node
*dnode
)
1740 struct nb_config_cb
*cb
;
1742 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1743 cb
->nb_node
= nb_node
;
1745 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1750 static struct nb_config_cb
*
1751 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
,
1752 const struct nb_node
*nb_node
,
1753 const struct lyd_node
*dnode
)
1755 struct nb_config_cb s
;
1758 s
.nb_node
= nb_node
;
1760 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1763 /* Call the 'apply_finish' callbacks. */
1764 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
1765 char *errmsg
, size_t errmsg_len
)
1767 struct nb_config_cbs cbs
;
1768 struct nb_config_cb
*cb
;
1770 /* Initialize tree of 'apply_finish' callbacks. */
1771 RB_INIT(nb_config_cbs
, &cbs
);
1773 /* Identify the 'apply_finish' callbacks that need to be called. */
1774 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1775 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1776 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1779 * Iterate up to the root of the data tree. When a node is being
1780 * deleted, skip its 'apply_finish' callback if one is defined
1781 * (the 'apply_finish' callbacks from the node ancestors should
1782 * be called though).
1784 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1785 char xpath
[XPATH_MAXLEN
];
1787 dnode
= lyd_parent(dnode
);
1792 * The dnode from 'delete' callbacks point to elements
1793 * from the running configuration. Use yang_dnode_get()
1794 * to get the corresponding dnode from the candidate
1795 * configuration that is being committed.
1797 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1798 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1802 struct nb_node
*nb_node
;
1804 nb_node
= dnode
->schema
->priv
;
1805 if (!nb_node
|| !nb_node
->cbs
.apply_finish
)
1809 * Don't call the callback more than once for the same
1812 if (nb_apply_finish_cb_find(&cbs
, nb_node
, dnode
))
1815 nb_apply_finish_cb_new(&cbs
, nb_node
, dnode
);
1818 dnode
= lyd_parent(dnode
);
1822 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1823 RB_FOREACH (cb
, nb_config_cbs
, &cbs
)
1824 nb_callback_apply_finish(&transaction
->context
, cb
->nb_node
,
1825 cb
->dnode
, errmsg
, errmsg_len
);
1827 /* Release memory. */
1828 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1829 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1830 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1831 XFREE(MTYPE_TMP
, cb
);
1835 static int nb_oper_data_iter_children(const struct lysc_node
*snode
,
1836 const char *xpath
, const void *list_entry
,
1837 const struct yang_list_keys
*list_keys
,
1838 struct yang_translator
*translator
,
1839 bool first
, uint32_t flags
,
1840 nb_oper_data_cb cb
, void *arg
)
1842 const struct lysc_node
*child
;
1844 LY_LIST_FOR (lysc_node_child(snode
), child
) {
1847 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1848 list_keys
, translator
, false,
1857 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1858 const char *xpath
, const void *list_entry
,
1859 const struct yang_list_keys
*list_keys
,
1860 struct yang_translator
*translator
,
1861 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1863 struct yang_data
*data
;
1865 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1868 /* Ignore list keys. */
1869 if (lysc_is_key(nb_node
->snode
))
1872 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1874 /* Leaf of type "empty" is not present. */
1877 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1880 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1882 const void *list_entry
,
1883 const struct yang_list_keys
*list_keys
,
1884 struct yang_translator
*translator
,
1885 uint32_t flags
, nb_oper_data_cb cb
,
1888 const struct lysc_node
*snode
= nb_node
->snode
;
1890 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1893 /* Read-only presence containers. */
1894 if (nb_node
->cbs
.get_elem
) {
1895 struct yang_data
*data
;
1898 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1900 /* Presence container is not present. */
1903 ret
= (*cb
)(snode
, translator
, data
, arg
);
1908 /* Read-write presence containers. */
1909 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
)) {
1910 struct lysc_node_container
*scontainer
;
1912 scontainer
= (struct lysc_node_container
*)snode
;
1913 if (CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
)
1914 && !yang_dnode_get(running_config
->dnode
, xpath
))
1918 /* Iterate over the child nodes. */
1919 return nb_oper_data_iter_children(snode
, xpath
, list_entry
, list_keys
,
1920 translator
, false, flags
, cb
, arg
);
1924 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1925 const void *parent_list_entry
,
1926 const struct yang_list_keys
*parent_list_keys
,
1927 struct yang_translator
*translator
, uint32_t flags
,
1928 nb_oper_data_cb cb
, void *arg
)
1930 const void *list_entry
= NULL
;
1932 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1936 struct yang_data
*data
;
1939 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1942 /* End of the list. */
1945 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1949 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1952 } while (list_entry
);
1957 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1958 const char *xpath_list
,
1959 const void *parent_list_entry
,
1960 const struct yang_list_keys
*parent_list_keys
,
1961 struct yang_translator
*translator
,
1962 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1964 const struct lysc_node
*snode
= nb_node
->snode
;
1965 const void *list_entry
= NULL
;
1966 uint32_t position
= 1;
1968 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1971 /* Iterate over all list entries. */
1973 const struct lysc_node_leaf
*skey
;
1974 struct yang_list_keys list_keys
= {};
1975 char xpath
[XPATH_MAXLEN
* 2];
1978 /* Obtain list entry. */
1979 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1982 /* End of the list. */
1985 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1986 /* Obtain the list entry keys. */
1987 if (nb_callback_get_keys(nb_node
, list_entry
,
1990 flog_warn(EC_LIB_NB_CB_STATE
,
1991 "%s: failed to get list keys",
1996 /* Build XPath of the list entry. */
1997 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1999 LY_FOR_KEYS (snode
, skey
) {
2000 assert(i
< list_keys
.num
);
2001 snprintf(xpath
+ strlen(xpath
),
2002 sizeof(xpath
) - strlen(xpath
),
2003 "[%s='%s']", skey
->name
,
2007 assert(i
== list_keys
.num
);
2010 * Keyless list - build XPath using a positional index.
2012 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
2017 /* Iterate over the child nodes. */
2018 ret
= nb_oper_data_iter_children(
2019 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
2020 translator
, false, flags
, cb
, arg
);
2023 } while (list_entry
);
2028 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
2029 const char *xpath_parent
,
2030 const void *list_entry
,
2031 const struct yang_list_keys
*list_keys
,
2032 struct yang_translator
*translator
,
2033 bool first
, uint32_t flags
,
2034 nb_oper_data_cb cb
, void *arg
)
2036 struct nb_node
*nb_node
;
2037 char xpath
[XPATH_MAXLEN
];
2040 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
2041 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
2045 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
2046 if (!first
&& snode
->nodetype
!= LYS_USES
) {
2047 struct lysc_node
*parent
;
2049 /* Get the real parent. */
2050 parent
= snode
->parent
;
2053 * When necessary, include the namespace of the augmenting
2056 if (parent
&& parent
->module
!= snode
->module
)
2057 snprintf(xpath
+ strlen(xpath
),
2058 sizeof(xpath
) - strlen(xpath
), "/%s:%s",
2059 snode
->module
->name
, snode
->name
);
2061 snprintf(xpath
+ strlen(xpath
),
2062 sizeof(xpath
) - strlen(xpath
), "/%s",
2066 nb_node
= snode
->priv
;
2067 switch (snode
->nodetype
) {
2069 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
2070 list_keys
, translator
, flags
,
2074 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
2075 list_keys
, translator
, flags
, cb
,
2079 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
2080 list_keys
, translator
, flags
,
2084 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
2085 list_keys
, translator
, flags
, cb
,
2089 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
2090 list_keys
, translator
, false,
2100 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
2101 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
2103 struct nb_node
*nb_node
;
2104 const void *list_entry
= NULL
;
2105 struct yang_list_keys list_keys
;
2106 struct list
*list_dnodes
;
2107 struct lyd_node
*dnode
, *dn
;
2108 struct listnode
*ln
;
2111 nb_node
= nb_node_find(xpath
);
2113 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
2114 "%s: unknown data path: %s", __func__
, xpath
);
2118 /* For now this function works only with containers and lists. */
2119 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
2121 EC_LIB_NB_OPERATIONAL_DATA
,
2122 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
2128 * Create a data tree from the XPath so that we can parse the keys of
2129 * all YANG lists (if any).
2132 LY_ERR err
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
,
2133 LYD_NEW_PATH_UPDATE
, &dnode
);
2134 if (err
|| !dnode
) {
2135 const char *errmsg
=
2136 err
? ly_errmsg(ly_native_ctx
) : "node not found";
2137 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed %s",
2143 * Create a linked list to sort the data nodes starting from the root.
2145 list_dnodes
= list_new();
2146 for (dn
= dnode
; dn
; dn
= lyd_parent(dn
)) {
2147 if (dn
->schema
->nodetype
!= LYS_LIST
|| !lyd_child(dn
))
2149 listnode_add_head(list_dnodes
, dn
);
2152 * Use the northbound callbacks to find list entry pointer corresponding
2153 * to the given XPath.
2155 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
2156 struct lyd_node
*child
;
2160 /* Obtain the list entry keys. */
2161 memset(&list_keys
, 0, sizeof(list_keys
));
2162 LY_LIST_FOR (lyd_child(dn
), child
) {
2163 if (!lysc_is_key(child
->schema
))
2165 strlcpy(list_keys
.key
[n
],
2166 yang_dnode_get_string(child
, NULL
),
2167 sizeof(list_keys
.key
[n
]));
2171 if (list_keys
.num
!= yang_snode_num_keys(dn
->schema
)) {
2172 list_delete(&list_dnodes
);
2173 yang_dnode_free(dnode
);
2174 return NB_ERR_NOT_FOUND
;
2177 /* Find the list entry pointer. */
2178 nn
= dn
->schema
->priv
;
2179 if (!nn
->cbs
.lookup_entry
) {
2181 EC_LIB_NB_OPERATIONAL_DATA
,
2182 "%s: data path doesn't support iteration over operational data: %s",
2184 list_delete(&list_dnodes
);
2185 yang_dnode_free(dnode
);
2190 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
2191 if (list_entry
== NULL
) {
2192 list_delete(&list_dnodes
);
2193 yang_dnode_free(dnode
);
2194 return NB_ERR_NOT_FOUND
;
2198 /* If a list entry was given, iterate over that list entry only. */
2199 if (dnode
->schema
->nodetype
== LYS_LIST
&& lyd_child(dnode
))
2200 ret
= nb_oper_data_iter_children(
2201 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
2202 translator
, true, flags
, cb
, arg
);
2204 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
2205 &list_keys
, translator
, true,
2208 list_delete(&list_dnodes
);
2209 yang_dnode_free(dnode
);
2214 bool nb_operation_is_valid(enum nb_operation operation
,
2215 const struct lysc_node
*snode
)
2217 struct nb_node
*nb_node
= snode
->priv
;
2218 struct lysc_node_container
*scontainer
;
2219 struct lysc_node_leaf
*sleaf
;
2221 switch (operation
) {
2223 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2226 switch (snode
->nodetype
) {
2228 sleaf
= (struct lysc_node_leaf
*)snode
;
2229 if (sleaf
->type
->basetype
!= LY_TYPE_EMPTY
)
2233 scontainer
= (struct lysc_node_container
*)snode
;
2234 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2245 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2248 switch (snode
->nodetype
) {
2250 sleaf
= (struct lysc_node_leaf
*)snode
;
2251 if (sleaf
->type
->basetype
== LY_TYPE_EMPTY
)
2254 /* List keys can't be modified. */
2255 if (lysc_is_key(sleaf
))
2263 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2266 switch (snode
->nodetype
) {
2268 sleaf
= (struct lysc_node_leaf
*)snode
;
2270 /* List keys can't be deleted. */
2271 if (lysc_is_key(sleaf
))
2275 * Only optional leafs can be deleted, or leafs whose
2276 * parent is a case statement.
2278 if (snode
->parent
->nodetype
== LYS_CASE
)
2282 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
2287 scontainer
= (struct lysc_node_container
*)snode
;
2288 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2299 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2302 switch (snode
->nodetype
) {
2305 if (!CHECK_FLAG(snode
->flags
, LYS_ORDBY_USER
))
2312 case NB_OP_PRE_VALIDATE
:
2313 case NB_OP_APPLY_FINISH
:
2314 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2317 case NB_OP_GET_ELEM
:
2318 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
2321 switch (snode
->nodetype
) {
2326 scontainer
= (struct lysc_node_container
*)snode
;
2327 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2334 case NB_OP_GET_NEXT
:
2335 switch (snode
->nodetype
) {
2337 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2341 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2348 case NB_OP_GET_KEYS
:
2349 case NB_OP_LOOKUP_ENTRY
:
2350 switch (snode
->nodetype
) {
2352 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2354 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
2362 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
2365 switch (snode
->nodetype
) {
2378 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
2379 (xpath
, arguments
));
2381 int nb_notification_send(const char *xpath
, struct list
*arguments
)
2385 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
2387 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
2389 list_delete(&arguments
);
2394 /* Running configuration user pointers management. */
2395 struct nb_config_entry
{
2396 char xpath
[XPATH_MAXLEN
];
2400 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
2402 const struct nb_config_entry
*c1
= value1
;
2403 const struct nb_config_entry
*c2
= value2
;
2405 return strmatch(c1
->xpath
, c2
->xpath
);
2408 static unsigned int running_config_entry_key_make(const void *value
)
2410 return string_hash_make(value
);
2413 static void *running_config_entry_alloc(void *p
)
2415 struct nb_config_entry
*new, *key
= p
;
2417 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
2418 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
2423 static void running_config_entry_free(void *arg
)
2425 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
2428 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
2430 struct nb_config_entry
*config
, s
;
2432 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2433 config
= hash_get(running_config_entries
, &s
,
2434 running_config_entry_alloc
);
2435 config
->entry
= entry
;
2438 void nb_running_move_tree(const char *xpath_from
, const char *xpath_to
)
2440 struct nb_config_entry
*entry
;
2441 struct list
*entries
= hash_to_list(running_config_entries
);
2442 struct listnode
*ln
;
2444 for (ALL_LIST_ELEMENTS_RO(entries
, ln
, entry
)) {
2445 if (!frrstr_startswith(entry
->xpath
, xpath_from
))
2448 hash_release(running_config_entries
, entry
);
2451 frrstr_replace(entry
->xpath
, xpath_from
, xpath_to
);
2452 strlcpy(entry
->xpath
, newpath
, sizeof(entry
->xpath
));
2453 XFREE(MTYPE_TMP
, newpath
);
2455 (void)hash_get(running_config_entries
, entry
,
2459 list_delete(&entries
);
2462 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
2464 struct nb_config_entry
*config
, s
;
2465 struct lyd_node
*child
;
2468 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2469 config
= hash_release(running_config_entries
, &s
);
2471 entry
= config
->entry
;
2472 running_config_entry_free(config
);
2475 /* Unset user pointers from the child nodes. */
2476 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
2477 LY_LIST_FOR (lyd_child(dnode
), child
) {
2478 (void)nb_running_unset_entry_helper(child
);
/*
 * Remove and return the user pointer of 'dnode', clearing descendants too.
 * Aborts if no pointer was registered — callers rely on its existence.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry;

	entry = nb_running_unset_entry_helper(dnode);
	assert(entry);

	return entry;
}
2495 static void *nb_running_get_entry_worker(const struct lyd_node
*dnode
,
2497 bool abort_if_not_found
,
2500 const struct lyd_node
*orig_dnode
= dnode
;
2501 char xpath_buf
[XPATH_MAXLEN
];
2502 bool rec_flag
= true;
2504 assert(dnode
|| xpath
);
2507 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
2509 while (rec_flag
&& dnode
) {
2510 struct nb_config_entry
*config
, s
;
2512 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2513 config
= hash_lookup(running_config_entries
, &s
);
2515 return config
->entry
;
2517 rec_flag
= rec_search
;
2519 dnode
= lyd_parent(dnode
);
2522 if (!abort_if_not_found
)
2525 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
2526 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
2527 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
2528 zlog_backtrace(LOG_ERR
);
2532 void *nb_running_get_entry(const struct lyd_node
*dnode
, const char *xpath
,
2533 bool abort_if_not_found
)
2535 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2539 void *nb_running_get_entry_non_rec(const struct lyd_node
*dnode
,
2540 const char *xpath
, bool abort_if_not_found
)
2542 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2546 /* Logging functions. */
2547 const char *nb_event_name(enum nb_event event
)
2550 case NB_EV_VALIDATE
:
2560 assert(!"Reached end of function we should never hit");
2563 const char *nb_operation_name(enum nb_operation operation
)
2565 switch (operation
) {
2574 case NB_OP_PRE_VALIDATE
:
2575 return "pre_validate";
2576 case NB_OP_APPLY_FINISH
:
2577 return "apply_finish";
2578 case NB_OP_GET_ELEM
:
2580 case NB_OP_GET_NEXT
:
2582 case NB_OP_GET_KEYS
:
2584 case NB_OP_LOOKUP_ENTRY
:
2585 return "lookup_entry";
2590 assert(!"Reached end of function we should never hit");
2593 const char *nb_err_name(enum nb_error error
)
2599 return "generic error";
2600 case NB_ERR_NO_CHANGES
:
2601 return "no changes";
2602 case NB_ERR_NOT_FOUND
:
2603 return "element not found";
2605 return "resource is locked";
2606 case NB_ERR_VALIDATION
:
2607 return "validation";
2608 case NB_ERR_RESOURCE
:
2609 return "failed to allocate resource";
2610 case NB_ERR_INCONSISTENCY
:
2611 return "internal inconsistency";
2614 assert(!"Reached end of function we should never hit");
2617 const char *nb_client_name(enum nb_client client
)
2622 case NB_CLIENT_CONFD
:
2624 case NB_CLIENT_SYSREPO
:
2626 case NB_CLIENT_GRPC
:
2628 case NB_CLIENT_PCEP
:
2630 case NB_CLIENT_MGMTD_SERVER
:
2631 return "MGMTD Server";
2632 case NB_CLIENT_MGMTD_BE
:
2633 return "MGMT Backend";
2634 case NB_CLIENT_NONE
:
2638 assert(!"Reached end of function we should never hit");
2641 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
2644 if (module
->ignore_cbs
)
2647 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
2648 struct nb_node
*nb_node
;
2651 if (i
> YANG_MODULE_MAX_NODES
) {
2653 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2654 __func__
, module
->name
, YANG_MODULE_MAX_NODES
);
2658 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
2660 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
2661 "%s: unknown data path: %s", __func__
,
2662 module
->nodes
[i
].xpath
);
2666 nb_node
->cbs
= module
->nodes
[i
].cbs
;
2667 priority
= module
->nodes
[i
].priority
;
2669 nb_node
->priority
= priority
;
2673 void nb_validate_callbacks(void)
2675 unsigned int errors
= 0;
2677 yang_snodes_iterate(NULL
, nb_node_validate
, 0, &errors
);
2680 EC_LIB_NB_CBS_VALIDATION
,
2681 "%s: failed to validate northbound callbacks: %u error(s)",
2688 void nb_init(struct thread_master
*tm
,
2689 const struct frr_yang_module_info
*const modules
[],
2690 size_t nmodules
, bool db_enabled
)
2692 struct yang_module
*loaded
[nmodules
], **loadedp
= loaded
;
2693 bool explicit_compile
;
2696 * Currently using this explicit compile feature in libyang2 leads to
2697 * incorrect behavior in FRR. The functionality suppresses the compiling
2698 * of modules until they have all been loaded into the context. This
2699 * avoids multiple recompiles of the same modules as they are
2700 * imported/augmented etc.
2702 explicit_compile
= false;
2704 nb_db_enabled
= db_enabled
;
2706 yang_init(true, explicit_compile
);
2708 /* Load YANG modules and their corresponding northbound callbacks. */
2709 for (size_t i
= 0; i
< nmodules
; i
++) {
2710 DEBUGD(&nb_dbg_events
, "northbound: loading %s.yang",
2712 *loadedp
++ = yang_module_load(modules
[i
]->name
);
2715 if (explicit_compile
)
2716 yang_init_loading_complete();
2718 /* Initialize the compiled nodes with northbound data */
2719 for (size_t i
= 0; i
< nmodules
; i
++) {
2720 yang_snodes_iterate(loaded
[i
]->info
, nb_node_new_cb
, 0,
2721 (void *)modules
[i
]);
2722 nb_load_callbacks(modules
[i
]);
2725 /* Validate northbound callbacks. */
2726 nb_validate_callbacks();
2728 /* Create an empty running configuration. */
2729 running_config
= nb_config_new(NULL
);
2730 running_config_entries
= hash_create(running_config_entry_key_make
,
2731 running_config_entry_cmp
,
2732 "Running Configuration Entries");
2733 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
2735 /* Initialize the northbound CLI. */
2739 void nb_terminate(void)
2741 /* Terminate the northbound CLI. */
2744 /* Delete all nb_node's from all YANG modules. */
2747 /* Delete the running configuration. */
2748 hash_clean_and_free(&running_config_entries
, running_config_entry_free
);
2749 nb_config_free(running_config
);
2750 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);