/*
 * Copyright (C) 2018 NetDEF, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "lib_errors.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
35 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node")
36 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration")
37 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry")
39 /* Running configuration - shouldn't be modified directly. */
40 struct nb_config
*running_config
;
42 /* Hash table of user pointers associated with configuration entries. */
43 static struct hash
*running_config_entries
;
45 /* Management lock for the running configuration. */
47 /* Mutex protecting this structure. */
53 /* Northbound client who owns this lock. */
54 enum nb_client owner_client
;
56 /* Northbound user who owns this lock. */
57 const void *owner_user
;
58 } running_config_mgmt_lock
;
60 /* Knob to record config transaction */
61 static bool nb_db_enabled
;
63 * Global lock used to prevent multiple configuration transactions from
64 * happening concurrently.
66 static bool transaction_in_progress
;
68 static int nb_callback_pre_validate(struct nb_context
*context
,
69 const struct nb_node
*nb_node
,
70 const struct lyd_node
*dnode
, char *errmsg
,
72 static int nb_callback_configuration(struct nb_context
*context
,
73 const enum nb_event event
,
74 struct nb_config_change
*change
,
75 char *errmsg
, size_t errmsg_len
);
76 static struct nb_transaction
*
77 nb_transaction_new(struct nb_context
*context
, struct nb_config
*config
,
78 struct nb_config_cbs
*changes
, const char *comment
,
79 char *errmsg
, size_t errmsg_len
);
80 static void nb_transaction_free(struct nb_transaction
*transaction
);
81 static int nb_transaction_process(enum nb_event event
,
82 struct nb_transaction
*transaction
,
83 char *errmsg
, size_t errmsg_len
);
84 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
85 char *errmsg
, size_t errmsg_len
);
86 static int nb_oper_data_iter_node(const struct lys_node
*snode
,
87 const char *xpath
, const void *list_entry
,
88 const struct yang_list_keys
*list_keys
,
89 struct yang_translator
*translator
,
90 bool first
, uint32_t flags
,
91 nb_oper_data_cb cb
, void *arg
);
93 static int nb_node_check_config_only(const struct lys_node
*snode
, void *arg
)
95 bool *config_only
= arg
;
97 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
)) {
99 return YANG_ITER_STOP
;
102 return YANG_ITER_CONTINUE
;
105 static int nb_node_new_cb(const struct lys_node
*snode
, void *arg
)
107 struct nb_node
*nb_node
;
108 struct lys_node
*sparent
, *sparent_list
;
110 nb_node
= XCALLOC(MTYPE_NB_NODE
, sizeof(*nb_node
));
111 yang_snode_get_path(snode
, YANG_PATH_DATA
, nb_node
->xpath
,
112 sizeof(nb_node
->xpath
));
113 nb_node
->priority
= NB_DFLT_PRIORITY
;
114 sparent
= yang_snode_real_parent(snode
);
116 nb_node
->parent
= sparent
->priv
;
117 sparent_list
= yang_snode_parent_list(snode
);
119 nb_node
->parent_list
= sparent_list
->priv
;
122 if (CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
123 bool config_only
= true;
125 (void)yang_snodes_iterate_subtree(snode
, NULL
,
126 nb_node_check_config_only
, 0,
129 SET_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
);
131 if (CHECK_FLAG(snode
->nodetype
, LYS_LIST
)) {
132 struct lys_node_list
*slist
;
134 slist
= (struct lys_node_list
*)snode
;
135 if (slist
->keys_size
== 0)
136 SET_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
);
140 * Link the northbound node and the libyang schema node with one
143 nb_node
->snode
= snode
;
144 assert(snode
->priv
== NULL
);
145 lys_set_private(snode
, nb_node
);
147 return YANG_ITER_CONTINUE
;
150 static int nb_node_del_cb(const struct lys_node
*snode
, void *arg
)
152 struct nb_node
*nb_node
;
154 nb_node
= snode
->priv
;
156 lys_set_private(snode
, NULL
);
157 XFREE(MTYPE_NB_NODE
, nb_node
);
160 return YANG_ITER_CONTINUE
;
163 void nb_nodes_create(void)
165 yang_snodes_iterate(NULL
, nb_node_new_cb
, 0, NULL
);
168 void nb_nodes_delete(void)
170 yang_snodes_iterate(NULL
, nb_node_del_cb
, 0, NULL
);
173 struct nb_node
*nb_node_find(const char *xpath
)
175 const struct lys_node
*snode
;
178 * Use libyang to find the schema node associated to the xpath and get
179 * the northbound node from there (snode private pointer).
181 snode
= ly_ctx_get_node(ly_native_ctx
, NULL
, xpath
, 0);
188 void nb_node_set_dependency_cbs(const char *dependency_xpath
,
189 const char *dependant_xpath
,
190 struct nb_dependency_callbacks
*cbs
)
192 struct nb_node
*dependency
= nb_node_find(dependency_xpath
);
193 struct nb_node
*dependant
= nb_node_find(dependant_xpath
);
195 if (!dependency
|| !dependant
)
198 dependency
->dep_cbs
.get_dependant_xpath
= cbs
->get_dependant_xpath
;
199 dependant
->dep_cbs
.get_dependency_xpath
= cbs
->get_dependency_xpath
;
202 bool nb_node_has_dependency(struct nb_node
*node
)
204 return node
->dep_cbs
.get_dependency_xpath
!= NULL
;
207 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
208 enum nb_operation operation
,
209 int callback_implemented
, bool optional
)
213 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
216 * Add an exception for operational data callbacks. A rw list usually
217 * doesn't need any associated operational data callbacks. But if this
218 * rw list is augmented by another module which adds state nodes under
219 * it, then this list will need to have the 'get_next()', 'get_keys()'
220 * and 'lookup_entry()' callbacks. As such, never log a warning when
221 * these callbacks are implemented when they are not needed, since this
222 * depends on context (e.g. some daemons might augment "frr-interface"
223 * while others don't).
225 if (!valid
&& callback_implemented
&& operation
!= NB_OP_GET_NEXT
226 && operation
!= NB_OP_GET_KEYS
&& operation
!= NB_OP_LOOKUP_ENTRY
)
227 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
228 "unneeded '%s' callback for '%s'",
229 nb_operation_name(operation
), nb_node
->xpath
);
231 if (!optional
&& valid
&& !callback_implemented
) {
232 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
233 nb_operation_name(operation
), nb_node
->xpath
);
241 * Check if the required callbacks were implemented for the given northbound
244 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
247 unsigned int error
= 0;
249 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
250 !!nb_node
->cbs
.create
, false);
251 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
252 !!nb_node
->cbs
.modify
, false);
253 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
254 !!nb_node
->cbs
.destroy
, false);
255 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
257 error
+= nb_node_validate_cb(nb_node
, NB_OP_PRE_VALIDATE
,
258 !!nb_node
->cbs
.pre_validate
, true);
259 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
260 !!nb_node
->cbs
.apply_finish
, true);
261 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
262 !!nb_node
->cbs
.get_elem
, false);
263 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
264 !!nb_node
->cbs
.get_next
, false);
265 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
266 !!nb_node
->cbs
.get_keys
, false);
267 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
268 !!nb_node
->cbs
.lookup_entry
, false);
269 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
275 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
277 /* Top-level nodes can have any priority. */
278 if (!nb_node
->parent
)
281 if (nb_node
->priority
< nb_node
->parent
->priority
) {
282 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
283 "node has higher priority than its parent [xpath %s]",
291 static int nb_node_validate(const struct lys_node
*snode
, void *arg
)
293 struct nb_node
*nb_node
= snode
->priv
;
294 unsigned int *errors
= arg
;
296 /* Validate callbacks and priority. */
298 *errors
+= nb_node_validate_cbs(nb_node
);
299 *errors
+= nb_node_validate_priority(nb_node
);
302 return YANG_ITER_CONTINUE
;
305 struct nb_config
*nb_config_new(struct lyd_node
*dnode
)
307 struct nb_config
*config
;
309 config
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*config
));
311 config
->dnode
= dnode
;
313 config
->dnode
= yang_dnode_new(ly_native_ctx
, true);
319 void nb_config_free(struct nb_config
*config
)
322 yang_dnode_free(config
->dnode
);
323 XFREE(MTYPE_NB_CONFIG
, config
);
326 struct nb_config
*nb_config_dup(const struct nb_config
*config
)
328 struct nb_config
*dup
;
330 dup
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*dup
));
331 dup
->dnode
= yang_dnode_dup(config
->dnode
);
332 dup
->version
= config
->version
;
337 int nb_config_merge(struct nb_config
*config_dst
, struct nb_config
*config_src
,
338 bool preserve_source
)
342 ret
= lyd_merge(config_dst
->dnode
, config_src
->dnode
, LYD_OPT_EXPLICIT
);
344 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_merge() failed", __func__
);
346 if (!preserve_source
)
347 nb_config_free(config_src
);
349 return (ret
== 0) ? NB_OK
: NB_ERR
;
352 void nb_config_replace(struct nb_config
*config_dst
,
353 struct nb_config
*config_src
, bool preserve_source
)
355 /* Update version. */
356 if (config_src
->version
!= 0)
357 config_dst
->version
= config_src
->version
;
360 if (config_dst
->dnode
)
361 yang_dnode_free(config_dst
->dnode
);
362 if (preserve_source
) {
363 config_dst
->dnode
= yang_dnode_dup(config_src
->dnode
);
365 config_dst
->dnode
= config_src
->dnode
;
366 config_src
->dnode
= NULL
;
367 nb_config_free(config_src
);
371 /* Generate the nb_config_cbs tree. */
372 static inline int nb_config_cb_compare(const struct nb_config_cb
*a
,
373 const struct nb_config_cb
*b
)
375 /* Sort by priority first. */
376 if (a
->nb_node
->priority
< b
->nb_node
->priority
)
378 if (a
->nb_node
->priority
> b
->nb_node
->priority
)
382 * Preserve the order of the configuration changes as told by libyang.
390 * All 'apply_finish' callbacks have their sequence number set to zero.
391 * In this case, compare them using their dnode pointers (the order
392 * doesn't matter for callbacks that have the same priority).
394 if (a
->dnode
< b
->dnode
)
396 if (a
->dnode
> b
->dnode
)
401 RB_GENERATE(nb_config_cbs
, nb_config_cb
, entry
, nb_config_cb_compare
);
403 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
404 enum nb_operation operation
,
406 const struct lyd_node
*dnode
)
408 struct nb_config_change
*change
;
410 /* Ignore unimplemented nodes. */
411 if (!dnode
->schema
->priv
)
414 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
415 change
->cb
.operation
= operation
;
416 change
->cb
.seq
= *seq
;
418 change
->cb
.nb_node
= dnode
->schema
->priv
;
419 change
->cb
.dnode
= dnode
;
421 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
424 static void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
426 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
427 struct nb_config_change
*change
;
429 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
431 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
432 XFREE(MTYPE_TMP
, change
);
437 * Helper function used when calculating the delta between two different
438 * configurations. Given a new subtree, calculate all new YANG data nodes,
439 * excluding default leafs and leaf-lists. This is a recursive function.
441 static void nb_config_diff_created(const struct lyd_node
*dnode
, uint32_t *seq
,
442 struct nb_config_cbs
*changes
)
444 enum nb_operation operation
;
445 struct lyd_node
*child
;
447 /* Ignore unimplemented nodes. */
448 if (!dnode
->schema
->priv
)
451 switch (dnode
->schema
->nodetype
) {
454 if (lyd_wd_default((struct lyd_node_leaf_list
*)dnode
))
457 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
458 operation
= NB_OP_CREATE
;
459 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
460 operation
= NB_OP_MODIFY
;
464 nb_config_diff_add_change(changes
, operation
, seq
, dnode
);
468 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
469 nb_config_diff_add_change(changes
, NB_OP_CREATE
, seq
,
472 /* Process child nodes recursively. */
473 LY_TREE_FOR (dnode
->child
, child
) {
474 nb_config_diff_created(child
, seq
, changes
);
482 static void nb_config_diff_deleted(const struct lyd_node
*dnode
, uint32_t *seq
,
483 struct nb_config_cbs
*changes
)
485 /* Ignore unimplemented nodes. */
486 if (!dnode
->schema
->priv
)
489 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
490 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, seq
, dnode
);
491 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
492 struct lyd_node
*child
;
495 * Non-presence containers need special handling since they
496 * don't have "destroy" callbacks. In this case, what we need to
497 * do is to call the "destroy" callbacks of their child nodes
498 * when applicable (i.e. optional nodes).
500 LY_TREE_FOR (dnode
->child
, child
) {
501 nb_config_diff_deleted(child
, seq
, changes
);
506 /* Calculate the delta between two different configurations. */
507 static void nb_config_diff(const struct nb_config
*config1
,
508 const struct nb_config
*config2
,
509 struct nb_config_cbs
*changes
)
511 struct lyd_difflist
*diff
;
514 diff
= lyd_diff(config1
->dnode
, config2
->dnode
,
515 LYD_DIFFOPT_WITHDEFAULTS
);
518 for (int i
= 0; diff
->type
[i
] != LYD_DIFF_END
; i
++) {
520 struct lyd_node
*dnode
;
522 type
= diff
->type
[i
];
525 case LYD_DIFF_CREATED
:
526 dnode
= diff
->second
[i
];
527 nb_config_diff_created(dnode
, &seq
, changes
);
529 case LYD_DIFF_DELETED
:
530 dnode
= diff
->first
[i
];
531 nb_config_diff_deleted(dnode
, &seq
, changes
);
533 case LYD_DIFF_CHANGED
:
534 dnode
= diff
->second
[i
];
535 nb_config_diff_add_change(changes
, NB_OP_MODIFY
, &seq
,
538 case LYD_DIFF_MOVEDAFTER1
:
539 case LYD_DIFF_MOVEDAFTER2
:
548 int nb_candidate_edit(struct nb_config
*candidate
,
549 const struct nb_node
*nb_node
,
550 enum nb_operation operation
, const char *xpath
,
551 const struct yang_data
*previous
,
552 const struct yang_data
*data
)
554 struct lyd_node
*dnode
, *dep_dnode
;
555 char xpath_edit
[XPATH_MAXLEN
];
556 char dep_xpath
[XPATH_MAXLEN
];
558 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
559 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
560 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
563 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
569 dnode
= lyd_new_path(candidate
->dnode
, ly_native_ctx
,
570 xpath_edit
, (void *)data
->value
, 0,
571 LYD_PATH_OPT_UPDATE
);
576 * dnode returned by the lyd_new_path may be from a
577 * different schema, so we need to update the nb_node
579 nb_node
= dnode
->schema
->priv
;
580 if (nb_node
->dep_cbs
.get_dependency_xpath
) {
581 nb_node
->dep_cbs
.get_dependency_xpath(
585 dep_dnode
= lyd_new_path(candidate
->dnode
,
588 LYD_PATH_OPT_UPDATE
);
589 if (!dep_dnode
&& ly_errno
) {
590 flog_warn(EC_LIB_LIBYANG
,
591 "%s: lyd_new_path(%s) failed",
592 __func__
, dep_xpath
);
596 } else if (ly_errno
) {
597 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path(%s) failed",
598 __func__
, xpath_edit
);
603 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
606 * Return a special error code so the caller can choose
607 * whether to ignore it or not.
609 return NB_ERR_NOT_FOUND
;
610 /* destroy dependant */
611 if (nb_node
->dep_cbs
.get_dependant_xpath
) {
612 nb_node
->dep_cbs
.get_dependant_xpath(dnode
, dep_xpath
);
614 dep_dnode
= yang_dnode_get(candidate
->dnode
, dep_xpath
);
621 /* TODO: update configuration. */
624 flog_warn(EC_LIB_DEVELOPMENT
,
625 "%s: unknown operation (%u) [xpath %s]", __func__
,
626 operation
, xpath_edit
);
633 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
635 if (candidate
->version
< running_config
->version
)
641 int nb_candidate_update(struct nb_config
*candidate
)
643 struct nb_config
*updated_config
;
645 updated_config
= nb_config_dup(running_config
);
646 if (nb_config_merge(updated_config
, candidate
, true) != NB_OK
)
649 nb_config_replace(candidate
, updated_config
, false);
655 * Perform YANG syntactic and semantic validation.
657 * WARNING: lyd_validate() can change the configuration as part of the
658 * validation process.
660 static int nb_candidate_validate_yang(struct nb_config
*candidate
, char *errmsg
,
663 if (lyd_validate(&candidate
->dnode
,
664 LYD_OPT_STRICT
| LYD_OPT_CONFIG
| LYD_OPT_WHENAUTODEL
,
667 yang_print_errors(ly_native_ctx
, errmsg
, errmsg_len
);
668 return NB_ERR_VALIDATION
;
674 /* Perform code-level validation using the northbound callbacks. */
675 static int nb_candidate_validate_code(struct nb_context
*context
,
676 struct nb_config
*candidate
,
677 struct nb_config_cbs
*changes
,
678 char *errmsg
, size_t errmsg_len
)
680 struct nb_config_cb
*cb
;
681 struct lyd_node
*root
, *next
, *child
;
684 /* First validate the candidate as a whole. */
685 LY_TREE_FOR (candidate
->dnode
, root
) {
686 LY_TREE_DFS_BEGIN (root
, next
, child
) {
687 struct nb_node
*nb_node
;
689 nb_node
= child
->schema
->priv
;
690 if (!nb_node
|| !nb_node
->cbs
.pre_validate
)
693 ret
= nb_callback_pre_validate(context
, nb_node
, child
,
696 return NB_ERR_VALIDATION
;
699 LY_TREE_DFS_END(root
, next
, child
);
703 /* Now validate the configuration changes. */
704 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
705 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
707 ret
= nb_callback_configuration(context
, NB_EV_VALIDATE
, change
,
710 return NB_ERR_VALIDATION
;
716 int nb_candidate_validate(struct nb_context
*context
,
717 struct nb_config
*candidate
, char *errmsg
,
720 struct nb_config_cbs changes
;
723 if (nb_candidate_validate_yang(candidate
, errmsg
, sizeof(errmsg_len
))
725 return NB_ERR_VALIDATION
;
727 RB_INIT(nb_config_cbs
, &changes
);
728 nb_config_diff(running_config
, candidate
, &changes
);
729 ret
= nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
731 nb_config_diff_del_changes(&changes
);
736 int nb_candidate_commit_prepare(struct nb_context
*context
,
737 struct nb_config
*candidate
,
739 struct nb_transaction
**transaction
,
740 char *errmsg
, size_t errmsg_len
)
742 struct nb_config_cbs changes
;
744 if (nb_candidate_validate_yang(candidate
, errmsg
, errmsg_len
)
746 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
747 "%s: failed to validate candidate configuration",
749 return NB_ERR_VALIDATION
;
752 RB_INIT(nb_config_cbs
, &changes
);
753 nb_config_diff(running_config
, candidate
, &changes
);
754 if (RB_EMPTY(nb_config_cbs
, &changes
)) {
757 "No changes to apply were found during preparation phase");
758 return NB_ERR_NO_CHANGES
;
761 if (nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
764 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
765 "%s: failed to validate candidate configuration",
767 nb_config_diff_del_changes(&changes
);
768 return NB_ERR_VALIDATION
;
771 *transaction
= nb_transaction_new(context
, candidate
, &changes
, comment
,
773 if (*transaction
== NULL
) {
774 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
775 "%s: failed to create transaction: %s", __func__
,
777 nb_config_diff_del_changes(&changes
);
778 return NB_ERR_LOCKED
;
781 return nb_transaction_process(NB_EV_PREPARE
, *transaction
, errmsg
,
785 void nb_candidate_commit_abort(struct nb_transaction
*transaction
, char *errmsg
,
788 (void)nb_transaction_process(NB_EV_ABORT
, transaction
, errmsg
,
790 nb_transaction_free(transaction
);
793 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
794 bool save_transaction
, uint32_t *transaction_id
,
795 char *errmsg
, size_t errmsg_len
)
797 (void)nb_transaction_process(NB_EV_APPLY
, transaction
, errmsg
,
799 nb_transaction_apply_finish(transaction
, errmsg
, errmsg_len
);
801 /* Replace running by candidate. */
802 transaction
->config
->version
++;
803 nb_config_replace(running_config
, transaction
->config
, true);
805 /* Record transaction. */
806 if (save_transaction
&& nb_db_enabled
807 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
808 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
809 "%s: failed to record transaction", __func__
);
811 nb_transaction_free(transaction
);
814 int nb_candidate_commit(struct nb_context
*context
, struct nb_config
*candidate
,
815 bool save_transaction
, const char *comment
,
816 uint32_t *transaction_id
, char *errmsg
,
819 struct nb_transaction
*transaction
= NULL
;
822 ret
= nb_candidate_commit_prepare(context
, candidate
, comment
,
823 &transaction
, errmsg
, errmsg_len
);
825 * Apply the changes if the preparation phase succeeded. Otherwise abort
829 nb_candidate_commit_apply(transaction
, save_transaction
,
830 transaction_id
, errmsg
, errmsg_len
);
831 else if (transaction
!= NULL
)
832 nb_candidate_commit_abort(transaction
, errmsg
, errmsg_len
);
837 int nb_running_lock(enum nb_client client
, const void *user
)
841 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
842 if (!running_config_mgmt_lock
.locked
) {
843 running_config_mgmt_lock
.locked
= true;
844 running_config_mgmt_lock
.owner_client
= client
;
845 running_config_mgmt_lock
.owner_user
= user
;
853 int nb_running_unlock(enum nb_client client
, const void *user
)
857 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
858 if (running_config_mgmt_lock
.locked
859 && running_config_mgmt_lock
.owner_client
== client
860 && running_config_mgmt_lock
.owner_user
== user
) {
861 running_config_mgmt_lock
.locked
= false;
862 running_config_mgmt_lock
.owner_client
= NB_CLIENT_NONE
;
863 running_config_mgmt_lock
.owner_user
= NULL
;
871 int nb_running_lock_check(enum nb_client client
, const void *user
)
875 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
876 if (!running_config_mgmt_lock
.locked
877 || (running_config_mgmt_lock
.owner_client
== client
878 && running_config_mgmt_lock
.owner_user
== user
))
885 static void nb_log_config_callback(const enum nb_event event
,
886 enum nb_operation operation
,
887 const struct lyd_node
*dnode
)
890 char xpath
[XPATH_MAXLEN
];
892 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
895 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
896 if (yang_snode_is_typeless_data(dnode
->schema
))
899 value
= yang_dnode_get_string(dnode
, NULL
);
902 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
903 nb_event_name(event
), nb_operation_name(operation
), xpath
,
907 static int nb_callback_create(struct nb_context
*context
,
908 const struct nb_node
*nb_node
,
909 enum nb_event event
, const struct lyd_node
*dnode
,
910 union nb_resource
*resource
, char *errmsg
,
913 struct nb_cb_create_args args
= {};
914 bool unexpected_error
= false;
917 nb_log_config_callback(event
, NB_OP_CREATE
, dnode
);
919 args
.context
= context
;
922 args
.resource
= resource
;
923 args
.errmsg
= errmsg
;
924 args
.errmsg_len
= errmsg_len
;
925 ret
= nb_node
->cbs
.create(&args
);
927 /* Detect and log unexpected errors. */
932 case NB_ERR_VALIDATION
:
933 if (event
!= NB_EV_VALIDATE
)
934 unexpected_error
= true;
936 case NB_ERR_RESOURCE
:
937 if (event
!= NB_EV_PREPARE
)
938 unexpected_error
= true;
940 case NB_ERR_INCONSISTENCY
:
941 if (event
== NB_EV_VALIDATE
)
942 unexpected_error
= true;
945 unexpected_error
= true;
948 if (unexpected_error
)
949 DEBUGD(&nb_dbg_cbs_config
,
950 "northbound callback: unexpected return value: %s",
956 static int nb_callback_modify(struct nb_context
*context
,
957 const struct nb_node
*nb_node
,
958 enum nb_event event
, const struct lyd_node
*dnode
,
959 union nb_resource
*resource
, char *errmsg
,
962 struct nb_cb_modify_args args
= {};
963 bool unexpected_error
= false;
966 nb_log_config_callback(event
, NB_OP_MODIFY
, dnode
);
968 args
.context
= context
;
971 args
.resource
= resource
;
972 args
.errmsg
= errmsg
;
973 args
.errmsg_len
= errmsg_len
;
974 ret
= nb_node
->cbs
.modify(&args
);
976 /* Detect and log unexpected errors. */
981 case NB_ERR_VALIDATION
:
982 if (event
!= NB_EV_VALIDATE
)
983 unexpected_error
= true;
985 case NB_ERR_RESOURCE
:
986 if (event
!= NB_EV_PREPARE
)
987 unexpected_error
= true;
989 case NB_ERR_INCONSISTENCY
:
990 if (event
== NB_EV_VALIDATE
)
991 unexpected_error
= true;
994 unexpected_error
= true;
997 if (unexpected_error
)
998 DEBUGD(&nb_dbg_cbs_config
,
999 "northbound callback: unexpected return value: %s",
1005 static int nb_callback_destroy(struct nb_context
*context
,
1006 const struct nb_node
*nb_node
,
1007 enum nb_event event
,
1008 const struct lyd_node
*dnode
, char *errmsg
,
1011 struct nb_cb_destroy_args args
= {};
1012 bool unexpected_error
= false;
1015 nb_log_config_callback(event
, NB_OP_DESTROY
, dnode
);
1017 args
.context
= context
;
1020 args
.errmsg
= errmsg
;
1021 args
.errmsg_len
= errmsg_len
;
1022 ret
= nb_node
->cbs
.destroy(&args
);
1024 /* Detect and log unexpected errors. */
1029 case NB_ERR_VALIDATION
:
1030 if (event
!= NB_EV_VALIDATE
)
1031 unexpected_error
= true;
1033 case NB_ERR_INCONSISTENCY
:
1034 if (event
== NB_EV_VALIDATE
)
1035 unexpected_error
= true;
1038 unexpected_error
= true;
1041 if (unexpected_error
)
1042 DEBUGD(&nb_dbg_cbs_config
,
1043 "northbound callback: unexpected return value: %s",
1049 static int nb_callback_move(struct nb_context
*context
,
1050 const struct nb_node
*nb_node
, enum nb_event event
,
1051 const struct lyd_node
*dnode
, char *errmsg
,
1054 struct nb_cb_move_args args
= {};
1055 bool unexpected_error
= false;
1058 nb_log_config_callback(event
, NB_OP_MOVE
, dnode
);
1060 args
.context
= context
;
1063 args
.errmsg
= errmsg
;
1064 args
.errmsg_len
= errmsg_len
;
1065 ret
= nb_node
->cbs
.move(&args
);
1067 /* Detect and log unexpected errors. */
1072 case NB_ERR_VALIDATION
:
1073 if (event
!= NB_EV_VALIDATE
)
1074 unexpected_error
= true;
1076 case NB_ERR_INCONSISTENCY
:
1077 if (event
== NB_EV_VALIDATE
)
1078 unexpected_error
= true;
1081 unexpected_error
= true;
1084 if (unexpected_error
)
1085 DEBUGD(&nb_dbg_cbs_config
,
1086 "northbound callback: unexpected return value: %s",
1092 static int nb_callback_pre_validate(struct nb_context
*context
,
1093 const struct nb_node
*nb_node
,
1094 const struct lyd_node
*dnode
, char *errmsg
,
1097 struct nb_cb_pre_validate_args args
= {};
1098 bool unexpected_error
= false;
1101 nb_log_config_callback(NB_EV_VALIDATE
, NB_OP_PRE_VALIDATE
, dnode
);
1104 args
.errmsg
= errmsg
;
1105 args
.errmsg_len
= errmsg_len
;
1106 ret
= nb_node
->cbs
.pre_validate(&args
);
1108 /* Detect and log unexpected errors. */
1111 case NB_ERR_VALIDATION
:
1114 unexpected_error
= true;
1117 if (unexpected_error
)
1118 DEBUGD(&nb_dbg_cbs_config
,
1119 "northbound callback: unexpected return value: %s",
1125 static void nb_callback_apply_finish(struct nb_context
*context
,
1126 const struct nb_node
*nb_node
,
1127 const struct lyd_node
*dnode
, char *errmsg
,
1130 struct nb_cb_apply_finish_args args
= {};
1132 nb_log_config_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
, dnode
);
1134 args
.context
= context
;
1136 args
.errmsg
= errmsg
;
1137 args
.errmsg_len
= errmsg_len
;
1138 nb_node
->cbs
.apply_finish(&args
);
1141 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
1143 const void *list_entry
)
1145 struct nb_cb_get_elem_args args
= {};
1147 DEBUGD(&nb_dbg_cbs_state
,
1148 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1152 args
.list_entry
= list_entry
;
1153 return nb_node
->cbs
.get_elem(&args
);
1156 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
1157 const void *parent_list_entry
,
1158 const void *list_entry
)
1160 struct nb_cb_get_next_args args
= {};
1162 DEBUGD(&nb_dbg_cbs_state
,
1163 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1164 nb_node
->xpath
, parent_list_entry
, list_entry
);
1166 args
.parent_list_entry
= parent_list_entry
;
1167 args
.list_entry
= list_entry
;
1168 return nb_node
->cbs
.get_next(&args
);
1171 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
1172 struct yang_list_keys
*keys
)
1174 struct nb_cb_get_keys_args args
= {};
1176 DEBUGD(&nb_dbg_cbs_state
,
1177 "northbound callback (get_keys): node [%s] list_entry [%p]",
1178 nb_node
->xpath
, list_entry
);
1180 args
.list_entry
= list_entry
;
1182 return nb_node
->cbs
.get_keys(&args
);
1185 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
1186 const void *parent_list_entry
,
1187 const struct yang_list_keys
*keys
)
1189 struct nb_cb_lookup_entry_args args
= {};
1191 DEBUGD(&nb_dbg_cbs_state
,
1192 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1193 nb_node
->xpath
, parent_list_entry
);
1195 args
.parent_list_entry
= parent_list_entry
;
1197 return nb_node
->cbs
.lookup_entry(&args
);
1200 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
1201 const struct list
*input
, struct list
*output
, char *errmsg
,
1204 struct nb_cb_rpc_args args
= {};
1206 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
1210 args
.output
= output
;
1211 args
.errmsg
= errmsg
;
1212 args
.errmsg_len
= errmsg_len
;
1213 return nb_node
->cbs
.rpc(&args
);
1217 * Call the northbound configuration callback associated to a given
1218 * configuration change.
1220 static int nb_callback_configuration(struct nb_context
*context
,
1221 const enum nb_event event
,
1222 struct nb_config_change
*change
,
1223 char *errmsg
, size_t errmsg_len
)
1225 enum nb_operation operation
= change
->cb
.operation
;
1226 char xpath
[XPATH_MAXLEN
];
1227 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
1228 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1229 union nb_resource
*resource
;
1232 if (event
== NB_EV_VALIDATE
)
1235 resource
= &change
->resource
;
1237 switch (operation
) {
1239 ret
= nb_callback_create(context
, nb_node
, event
, dnode
,
1240 resource
, errmsg
, errmsg_len
);
1243 ret
= nb_callback_modify(context
, nb_node
, event
, dnode
,
1244 resource
, errmsg
, errmsg_len
);
1247 ret
= nb_callback_destroy(context
, nb_node
, event
, dnode
,
1248 errmsg
, errmsg_len
);
1251 ret
= nb_callback_move(context
, nb_node
, event
, dnode
, errmsg
,
1255 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1256 flog_err(EC_LIB_DEVELOPMENT
,
1257 "%s: unknown operation (%u) [xpath %s]", __func__
,
1264 enum lib_log_refs ref
;
1266 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1269 case NB_EV_VALIDATE
:
1270 priority
= LOG_WARNING
;
1271 ref
= EC_LIB_NB_CB_CONFIG_VALIDATE
;
1274 priority
= LOG_WARNING
;
1275 ref
= EC_LIB_NB_CB_CONFIG_PREPARE
;
1278 priority
= LOG_WARNING
;
1279 ref
= EC_LIB_NB_CB_CONFIG_ABORT
;
1283 ref
= EC_LIB_NB_CB_CONFIG_APPLY
;
1286 flog_err(EC_LIB_DEVELOPMENT
,
1287 "%s: unknown event (%u) [xpath %s]", __func__
,
1293 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
1294 nb_err_name(ret
), nb_event_name(event
),
1295 nb_operation_name(operation
), xpath
);
1296 if (strlen(errmsg
) > 0)
1298 "error processing configuration change: %s",
1305 static struct nb_transaction
*
1306 nb_transaction_new(struct nb_context
*context
, struct nb_config
*config
,
1307 struct nb_config_cbs
*changes
, const char *comment
,
1308 char *errmsg
, size_t errmsg_len
)
1310 struct nb_transaction
*transaction
;
1312 if (nb_running_lock_check(context
->client
, context
->user
)) {
1314 "running configuration is locked by another client",
1319 if (transaction_in_progress
) {
1321 "there's already another transaction in progress",
1325 transaction_in_progress
= true;
1327 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
1328 transaction
->context
= context
;
1330 strlcpy(transaction
->comment
, comment
,
1331 sizeof(transaction
->comment
));
1332 transaction
->config
= config
;
1333 transaction
->changes
= *changes
;
1338 static void nb_transaction_free(struct nb_transaction
*transaction
)
1340 nb_config_diff_del_changes(&transaction
->changes
);
1341 XFREE(MTYPE_TMP
, transaction
);
1342 transaction_in_progress
= false;
1345 /* Process all configuration changes associated to a transaction. */
1346 static int nb_transaction_process(enum nb_event event
,
1347 struct nb_transaction
*transaction
,
1348 char *errmsg
, size_t errmsg_len
)
1350 struct nb_config_cb
*cb
;
1352 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1353 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1357 * Only try to release resources that were allocated
1360 if (event
== NB_EV_ABORT
&& !change
->prepare_ok
)
1363 /* Call the appropriate callback. */
1364 ret
= nb_callback_configuration(transaction
->context
, event
,
1365 change
, errmsg
, errmsg_len
);
1370 change
->prepare_ok
= true;
1375 * At this point it's not possible to reject the
1376 * transaction anymore, so any failure here can lead to
1377 * inconsistencies and should be treated as a bug.
1378 * Operations prone to errors, like validations and
1379 * resource allocations, should be performed during the
1391 static struct nb_config_cb
*
1392 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const struct nb_node
*nb_node
,
1393 const struct lyd_node
*dnode
)
1395 struct nb_config_cb
*cb
;
1397 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1398 cb
->nb_node
= nb_node
;
1400 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1405 static struct nb_config_cb
*
1406 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
,
1407 const struct nb_node
*nb_node
,
1408 const struct lyd_node
*dnode
)
1410 struct nb_config_cb s
;
1413 s
.nb_node
= nb_node
;
1415 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1418 /* Call the 'apply_finish' callbacks. */
1419 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
1420 char *errmsg
, size_t errmsg_len
)
1422 struct nb_config_cbs cbs
;
1423 struct nb_config_cb
*cb
;
1425 /* Initialize tree of 'apply_finish' callbacks. */
1426 RB_INIT(nb_config_cbs
, &cbs
);
1428 /* Identify the 'apply_finish' callbacks that need to be called. */
1429 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1430 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1431 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1434 * Iterate up to the root of the data tree. When a node is being
1435 * deleted, skip its 'apply_finish' callback if one is defined
1436 * (the 'apply_finish' callbacks from the node ancestors should
1437 * be called though).
1439 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1440 char xpath
[XPATH_MAXLEN
];
1442 dnode
= dnode
->parent
;
1447 * The dnode from 'delete' callbacks point to elements
1448 * from the running configuration. Use yang_dnode_get()
1449 * to get the corresponding dnode from the candidate
1450 * configuration that is being committed.
1452 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1453 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1457 struct nb_node
*nb_node
;
1459 nb_node
= dnode
->schema
->priv
;
1460 if (!nb_node
|| !nb_node
->cbs
.apply_finish
)
1464 * Don't call the callback more than once for the same
1467 if (nb_apply_finish_cb_find(&cbs
, nb_node
, dnode
))
1470 nb_apply_finish_cb_new(&cbs
, nb_node
, dnode
);
1473 dnode
= dnode
->parent
;
1477 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1478 RB_FOREACH (cb
, nb_config_cbs
, &cbs
)
1479 nb_callback_apply_finish(transaction
->context
, cb
->nb_node
,
1480 cb
->dnode
, errmsg
, errmsg_len
);
1482 /* Release memory. */
1483 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1484 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1485 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1486 XFREE(MTYPE_TMP
, cb
);
1490 static int nb_oper_data_iter_children(const struct lys_node
*snode
,
1491 const char *xpath
, const void *list_entry
,
1492 const struct yang_list_keys
*list_keys
,
1493 struct yang_translator
*translator
,
1494 bool first
, uint32_t flags
,
1495 nb_oper_data_cb cb
, void *arg
)
1497 struct lys_node
*child
;
1499 LY_TREE_FOR (snode
->child
, child
) {
1502 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1503 list_keys
, translator
, false,
1512 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1513 const char *xpath
, const void *list_entry
,
1514 const struct yang_list_keys
*list_keys
,
1515 struct yang_translator
*translator
,
1516 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1518 struct yang_data
*data
;
1520 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1523 /* Ignore list keys. */
1524 if (lys_is_key((struct lys_node_leaf
*)nb_node
->snode
, NULL
))
1527 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1529 /* Leaf of type "empty" is not present. */
1532 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1535 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1537 const void *list_entry
,
1538 const struct yang_list_keys
*list_keys
,
1539 struct yang_translator
*translator
,
1540 uint32_t flags
, nb_oper_data_cb cb
,
1543 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1546 /* Presence containers. */
1547 if (nb_node
->cbs
.get_elem
) {
1548 struct yang_data
*data
;
1551 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1553 /* Presence container is not present. */
1556 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1561 /* Iterate over the child nodes. */
1562 return nb_oper_data_iter_children(nb_node
->snode
, xpath
, list_entry
,
1563 list_keys
, translator
, false, flags
,
1568 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1569 const void *parent_list_entry
,
1570 const struct yang_list_keys
*parent_list_keys
,
1571 struct yang_translator
*translator
, uint32_t flags
,
1572 nb_oper_data_cb cb
, void *arg
)
1574 const void *list_entry
= NULL
;
1576 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1580 struct yang_data
*data
;
1583 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1586 /* End of the list. */
1589 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1593 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1596 } while (list_entry
);
1601 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1602 const char *xpath_list
,
1603 const void *parent_list_entry
,
1604 const struct yang_list_keys
*parent_list_keys
,
1605 struct yang_translator
*translator
,
1606 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1608 struct lys_node_list
*slist
= (struct lys_node_list
*)nb_node
->snode
;
1609 const void *list_entry
= NULL
;
1610 uint32_t position
= 1;
1612 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1615 /* Iterate over all list entries. */
1617 struct yang_list_keys list_keys
;
1618 char xpath
[XPATH_MAXLEN
* 2];
1621 /* Obtain list entry. */
1622 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1625 /* End of the list. */
1628 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1629 /* Obtain the list entry keys. */
1630 if (nb_callback_get_keys(nb_node
, list_entry
,
1633 flog_warn(EC_LIB_NB_CB_STATE
,
1634 "%s: failed to get list keys",
1639 /* Build XPath of the list entry. */
1640 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1641 for (unsigned int i
= 0; i
< list_keys
.num
; i
++) {
1642 snprintf(xpath
+ strlen(xpath
),
1643 sizeof(xpath
) - strlen(xpath
),
1644 "[%s='%s']", slist
->keys
[i
]->name
,
1649 * Keyless list - build XPath using a positional index.
1651 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
1656 /* Iterate over the child nodes. */
1657 ret
= nb_oper_data_iter_children(
1658 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1659 translator
, false, flags
, cb
, arg
);
1662 } while (list_entry
);
1667 static int nb_oper_data_iter_node(const struct lys_node
*snode
,
1668 const char *xpath_parent
,
1669 const void *list_entry
,
1670 const struct yang_list_keys
*list_keys
,
1671 struct yang_translator
*translator
,
1672 bool first
, uint32_t flags
,
1673 nb_oper_data_cb cb
, void *arg
)
1675 struct nb_node
*nb_node
;
1676 char xpath
[XPATH_MAXLEN
];
1679 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
1680 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
1684 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
1685 if (!first
&& snode
->nodetype
!= LYS_USES
) {
1686 struct lys_node
*parent
;
1688 /* Get the real parent. */
1689 parent
= snode
->parent
;
1690 while (parent
&& parent
->nodetype
== LYS_USES
)
1691 parent
= parent
->parent
;
1694 * When necessary, include the namespace of the augmenting
1697 if (parent
&& parent
->nodetype
== LYS_AUGMENT
)
1698 snprintf(xpath
+ strlen(xpath
),
1699 sizeof(xpath
) - strlen(xpath
), "/%s:%s",
1700 snode
->module
->name
, snode
->name
);
1702 snprintf(xpath
+ strlen(xpath
),
1703 sizeof(xpath
) - strlen(xpath
), "/%s",
1707 nb_node
= snode
->priv
;
1708 switch (snode
->nodetype
) {
1710 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
1711 list_keys
, translator
, flags
,
1715 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
1716 list_keys
, translator
, flags
, cb
,
1720 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
1721 list_keys
, translator
, flags
,
1725 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
1726 list_keys
, translator
, flags
, cb
,
1730 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
1731 list_keys
, translator
, false,
1741 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
1742 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1744 struct nb_node
*nb_node
;
1745 const void *list_entry
= NULL
;
1746 struct yang_list_keys list_keys
;
1747 struct list
*list_dnodes
;
1748 struct lyd_node
*dnode
, *dn
;
1749 struct listnode
*ln
;
1752 nb_node
= nb_node_find(xpath
);
1754 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1755 "%s: unknown data path: %s", __func__
, xpath
);
1759 /* For now this function works only with containers and lists. */
1760 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
1762 EC_LIB_NB_OPERATIONAL_DATA
,
1763 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1769 * Create a data tree from the XPath so that we can parse the keys of
1770 * all YANG lists (if any).
1773 dnode
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
, 0,
1774 LYD_PATH_OPT_UPDATE
| LYD_PATH_OPT_NOPARENTRET
);
1776 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed",
1782 * Create a linked list to sort the data nodes starting from the root.
1784 list_dnodes
= list_new();
1785 for (dn
= dnode
; dn
; dn
= dn
->parent
) {
1786 if (dn
->schema
->nodetype
!= LYS_LIST
|| !dn
->child
)
1788 listnode_add_head(list_dnodes
, dn
);
1791 * Use the northbound callbacks to find list entry pointer corresponding
1792 * to the given XPath.
1794 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
1795 struct lyd_node
*child
;
1799 /* Obtain the list entry keys. */
1800 memset(&list_keys
, 0, sizeof(list_keys
));
1801 LY_TREE_FOR (dn
->child
, child
) {
1802 if (!lys_is_key((struct lys_node_leaf
*)child
->schema
,
1805 strlcpy(list_keys
.key
[n
],
1806 yang_dnode_get_string(child
, NULL
),
1807 sizeof(list_keys
.key
[n
]));
1812 != ((struct lys_node_list
*)dn
->schema
)->keys_size
) {
1813 list_delete(&list_dnodes
);
1814 yang_dnode_free(dnode
);
1815 return NB_ERR_NOT_FOUND
;
1818 /* Find the list entry pointer. */
1819 nn
= dn
->schema
->priv
;
1820 if (!nn
->cbs
.lookup_entry
) {
1822 EC_LIB_NB_OPERATIONAL_DATA
,
1823 "%s: data path doesn't support iteration over operational data: %s",
1825 list_delete(&list_dnodes
);
1826 yang_dnode_free(dnode
);
1831 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
1832 if (list_entry
== NULL
) {
1833 list_delete(&list_dnodes
);
1834 yang_dnode_free(dnode
);
1835 return NB_ERR_NOT_FOUND
;
1839 /* If a list entry was given, iterate over that list entry only. */
1840 if (dnode
->schema
->nodetype
== LYS_LIST
&& dnode
->child
)
1841 ret
= nb_oper_data_iter_children(
1842 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1843 translator
, true, flags
, cb
, arg
);
1845 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
1846 &list_keys
, translator
, true,
1849 list_delete(&list_dnodes
);
1850 yang_dnode_free(dnode
);
1855 bool nb_operation_is_valid(enum nb_operation operation
,
1856 const struct lys_node
*snode
)
1858 struct nb_node
*nb_node
= snode
->priv
;
1859 struct lys_node_container
*scontainer
;
1860 struct lys_node_leaf
*sleaf
;
1862 switch (operation
) {
1864 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1867 switch (snode
->nodetype
) {
1869 sleaf
= (struct lys_node_leaf
*)snode
;
1870 if (sleaf
->type
.base
!= LY_TYPE_EMPTY
)
1874 scontainer
= (struct lys_node_container
*)snode
;
1875 if (!scontainer
->presence
)
1886 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1889 switch (snode
->nodetype
) {
1891 sleaf
= (struct lys_node_leaf
*)snode
;
1892 if (sleaf
->type
.base
== LY_TYPE_EMPTY
)
1895 /* List keys can't be modified. */
1896 if (lys_is_key(sleaf
, NULL
))
1904 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1907 switch (snode
->nodetype
) {
1909 sleaf
= (struct lys_node_leaf
*)snode
;
1911 /* List keys can't be deleted. */
1912 if (lys_is_key(sleaf
, NULL
))
1916 * Only optional leafs can be deleted, or leafs whose
1917 * parent is a case statement.
1919 if (snode
->parent
->nodetype
== LYS_CASE
)
1923 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
1928 scontainer
= (struct lys_node_container
*)snode
;
1929 if (!scontainer
->presence
)
1940 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1943 switch (snode
->nodetype
) {
1946 if (!CHECK_FLAG(snode
->flags
, LYS_USERORDERED
))
1953 case NB_OP_PRE_VALIDATE
:
1954 case NB_OP_APPLY_FINISH
:
1955 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1958 case NB_OP_GET_ELEM
:
1959 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
1962 switch (snode
->nodetype
) {
1967 scontainer
= (struct lys_node_container
*)snode
;
1968 if (!scontainer
->presence
)
1975 case NB_OP_GET_NEXT
:
1976 switch (snode
->nodetype
) {
1978 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1982 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1989 case NB_OP_GET_KEYS
:
1990 case NB_OP_LOOKUP_ENTRY
:
1991 switch (snode
->nodetype
) {
1993 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1995 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
2003 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
2006 switch (snode
->nodetype
) {
2019 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
2020 (xpath
, arguments
));
2022 int nb_notification_send(const char *xpath
, struct list
*arguments
)
2026 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
2028 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
2030 list_delete(&arguments
);
2035 /* Running configuration user pointers management. */
2036 struct nb_config_entry
{
2037 char xpath
[XPATH_MAXLEN
];
2041 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
2043 const struct nb_config_entry
*c1
= value1
;
2044 const struct nb_config_entry
*c2
= value2
;
2046 return strmatch(c1
->xpath
, c2
->xpath
);
/*
 * Hash table key function.  The XPath is the first member of
 * struct nb_config_entry, so the entry pointer doubles as a string pointer.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
2054 static void *running_config_entry_alloc(void *p
)
2056 struct nb_config_entry
*new, *key
= p
;
2058 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
2059 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
2064 static void running_config_entry_free(void *arg
)
2066 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
2069 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
2071 struct nb_config_entry
*config
, s
;
2073 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2074 config
= hash_get(running_config_entries
, &s
,
2075 running_config_entry_alloc
);
2076 config
->entry
= entry
;
2079 void nb_running_move_tree(const char *xpath_from
, const char *xpath_to
)
2081 struct nb_config_entry
*entry
;
2082 struct list
*entries
= hash_to_list(running_config_entries
);
2083 struct listnode
*ln
;
2085 for (ALL_LIST_ELEMENTS_RO(entries
, ln
, entry
)) {
2086 if (!frrstr_startswith(entry
->xpath
, xpath_from
))
2089 hash_release(running_config_entries
, entry
);
2092 frrstr_replace(entry
->xpath
, xpath_from
, xpath_to
);
2093 strlcpy(entry
->xpath
, newpath
, sizeof(entry
->xpath
));
2094 XFREE(MTYPE_TMP
, newpath
);
2096 hash_get(running_config_entries
, entry
, hash_alloc_intern
);
2099 list_delete(&entries
);
2102 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
2104 struct nb_config_entry
*config
, s
;
2105 struct lyd_node
*child
;
2108 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2109 config
= hash_release(running_config_entries
, &s
);
2111 entry
= config
->entry
;
2112 running_config_entry_free(config
);
2115 /* Unset user pointers from the child nodes. */
2116 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
2117 LY_TREE_FOR (dnode
->child
, child
) {
2118 (void)nb_running_unset_entry_helper(child
);
2125 void *nb_running_unset_entry(const struct lyd_node
*dnode
)
2129 entry
= nb_running_unset_entry_helper(dnode
);
2135 static void *nb_running_get_entry_worker(const struct lyd_node
*dnode
,
2137 bool abort_if_not_found
,
2140 const struct lyd_node
*orig_dnode
= dnode
;
2141 char xpath_buf
[XPATH_MAXLEN
];
2142 bool rec_flag
= true;
2144 assert(dnode
|| xpath
);
2147 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
2149 while (rec_flag
&& dnode
) {
2150 struct nb_config_entry
*config
, s
;
2152 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2153 config
= hash_lookup(running_config_entries
, &s
);
2155 return config
->entry
;
2157 rec_flag
= rec_search
;
2159 dnode
= dnode
->parent
;
2162 if (!abort_if_not_found
)
2165 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
2166 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
2167 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
2168 zlog_backtrace(LOG_ERR
);
2172 void *nb_running_get_entry(const struct lyd_node
*dnode
, const char *xpath
,
2173 bool abort_if_not_found
)
2175 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2179 void *nb_running_get_entry_non_rec(const struct lyd_node
*dnode
,
2180 const char *xpath
, bool abort_if_not_found
)
2182 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2186 /* Logging functions. */
2187 const char *nb_event_name(enum nb_event event
)
2190 case NB_EV_VALIDATE
:
2203 const char *nb_operation_name(enum nb_operation operation
)
2205 switch (operation
) {
2214 case NB_OP_PRE_VALIDATE
:
2215 return "pre_validate";
2216 case NB_OP_APPLY_FINISH
:
2217 return "apply_finish";
2218 case NB_OP_GET_ELEM
:
2220 case NB_OP_GET_NEXT
:
2222 case NB_OP_GET_KEYS
:
2224 case NB_OP_LOOKUP_ENTRY
:
2225 return "lookup_entry";
2233 const char *nb_err_name(enum nb_error error
)
2239 return "generic error";
2240 case NB_ERR_NO_CHANGES
:
2241 return "no changes";
2242 case NB_ERR_NOT_FOUND
:
2243 return "element not found";
2245 return "resource is locked";
2246 case NB_ERR_VALIDATION
:
2247 return "validation";
2248 case NB_ERR_RESOURCE
:
2249 return "failed to allocate resource";
2250 case NB_ERR_INCONSISTENCY
:
2251 return "internal inconsistency";
2257 const char *nb_client_name(enum nb_client client
)
2262 case NB_CLIENT_CONFD
:
2264 case NB_CLIENT_SYSREPO
:
2266 case NB_CLIENT_GRPC
:
2273 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
2275 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
2276 struct nb_node
*nb_node
;
2279 if (i
> YANG_MODULE_MAX_NODES
) {
2281 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2282 __func__
, module
->name
, YANG_MODULE_MAX_NODES
);
2286 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
2288 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
2289 "%s: unknown data path: %s", __func__
,
2290 module
->nodes
[i
].xpath
);
2294 nb_node
->cbs
= module
->nodes
[i
].cbs
;
2295 priority
= module
->nodes
[i
].priority
;
2297 nb_node
->priority
= priority
;
2301 void nb_validate_callbacks(void)
2303 unsigned int errors
= 0;
2305 yang_snodes_iterate(NULL
, nb_node_validate
, 0, &errors
);
2308 EC_LIB_NB_CBS_VALIDATION
,
2309 "%s: failed to validate northbound callbacks: %u error(s)",
2315 void nb_load_module(const struct frr_yang_module_info
*module_info
)
2317 struct yang_module
*module
;
2319 DEBUGD(&nb_dbg_events
, "northbound: loading %s.yang",
2322 module
= yang_module_load(module_info
->name
);
2323 yang_snodes_iterate(module
->info
, nb_node_new_cb
, 0, NULL
);
2324 nb_load_callbacks(module_info
);
2327 void nb_init(struct thread_master
*tm
,
2328 const struct frr_yang_module_info
*const modules
[],
2329 size_t nmodules
, bool db_enabled
)
2331 nb_db_enabled
= db_enabled
;
2333 /* Load YANG modules and their corresponding northbound callbacks. */
2334 for (size_t i
= 0; i
< nmodules
; i
++)
2335 nb_load_module(modules
[i
]);
2337 /* Validate northbound callbacks. */
2338 nb_validate_callbacks();
2340 /* Create an empty running configuration. */
2341 running_config
= nb_config_new(NULL
);
2342 running_config_entries
= hash_create(running_config_entry_key_make
,
2343 running_config_entry_cmp
,
2344 "Running Configuration Entries");
2345 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
2347 /* Initialize the northbound CLI. */
2351 void nb_terminate(void)
2353 /* Terminate the northbound CLI. */
2356 /* Delete all nb_node's from all YANG modules. */
2359 /* Delete the running configuration. */
2360 hash_clean(running_config_entries
, running_config_entry_free
);
2361 hash_free(running_config_entries
);
2362 nb_config_free(running_config
);
2363 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);