// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018 NetDEF, Inc.
 */
11 #include "lib_errors.h"
16 #include "frr_pthread.h"
17 #include "northbound.h"
18 #include "northbound_cli.h"
19 #include "northbound_db.h"
22 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node");
23 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration");
24 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry");
26 /* Running configuration - shouldn't be modified directly. */
27 struct nb_config
*running_config
;
29 /* Hash table of user pointers associated with configuration entries. */
30 static struct hash
*running_config_entries
;
32 /* Management lock for the running configuration. */
34 /* Mutex protecting this structure. */
40 /* Northbound client who owns this lock. */
41 enum nb_client owner_client
;
43 /* Northbound user who owns this lock. */
44 const void *owner_user
;
45 } running_config_mgmt_lock
;
47 /* Knob to record config transaction */
48 static bool nb_db_enabled
;
50 * Global lock used to prevent multiple configuration transactions from
51 * happening concurrently.
53 static bool transaction_in_progress
;
55 static int nb_callback_pre_validate(struct nb_context
*context
,
56 const struct nb_node
*nb_node
,
57 const struct lyd_node
*dnode
, char *errmsg
,
59 static int nb_callback_configuration(struct nb_context
*context
,
60 const enum nb_event event
,
61 struct nb_config_change
*change
,
62 char *errmsg
, size_t errmsg_len
);
63 static struct nb_transaction
*
64 nb_transaction_new(struct nb_context context
, struct nb_config
*config
,
65 struct nb_config_cbs
*changes
, const char *comment
,
66 char *errmsg
, size_t errmsg_len
);
67 static void nb_transaction_free(struct nb_transaction
*transaction
);
68 static int nb_transaction_process(enum nb_event event
,
69 struct nb_transaction
*transaction
,
70 char *errmsg
, size_t errmsg_len
);
71 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
72 char *errmsg
, size_t errmsg_len
);
73 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
74 const char *xpath
, const void *list_entry
,
75 const struct yang_list_keys
*list_keys
,
76 struct yang_translator
*translator
,
77 bool first
, uint32_t flags
,
78 nb_oper_data_cb cb
, void *arg
);
80 static int nb_node_check_config_only(const struct lysc_node
*snode
, void *arg
)
82 bool *config_only
= arg
;
84 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
)) {
86 return YANG_ITER_STOP
;
89 return YANG_ITER_CONTINUE
;
92 static int nb_node_new_cb(const struct lysc_node
*snode
, void *arg
)
94 struct nb_node
*nb_node
;
95 struct lysc_node
*sparent
, *sparent_list
;
97 nb_node
= XCALLOC(MTYPE_NB_NODE
, sizeof(*nb_node
));
98 yang_snode_get_path(snode
, YANG_PATH_DATA
, nb_node
->xpath
,
99 sizeof(nb_node
->xpath
));
100 nb_node
->priority
= NB_DFLT_PRIORITY
;
101 sparent
= yang_snode_real_parent(snode
);
103 nb_node
->parent
= sparent
->priv
;
104 sparent_list
= yang_snode_parent_list(snode
);
106 nb_node
->parent_list
= sparent_list
->priv
;
109 if (CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
110 bool config_only
= true;
112 (void)yang_snodes_iterate_subtree(snode
, NULL
,
113 nb_node_check_config_only
, 0,
116 SET_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
);
118 if (CHECK_FLAG(snode
->nodetype
, LYS_LIST
)) {
119 if (yang_snode_num_keys(snode
) == 0)
120 SET_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
);
124 * Link the northbound node and the libyang schema node with one
127 nb_node
->snode
= snode
;
128 assert(snode
->priv
== NULL
);
129 ((struct lysc_node
*)snode
)->priv
= nb_node
;
131 return YANG_ITER_CONTINUE
;
134 static int nb_node_del_cb(const struct lysc_node
*snode
, void *arg
)
136 struct nb_node
*nb_node
;
138 nb_node
= snode
->priv
;
140 ((struct lysc_node
*)snode
)->priv
= NULL
;
141 XFREE(MTYPE_NB_NODE
, nb_node
);
144 return YANG_ITER_CONTINUE
;
147 void nb_nodes_create(void)
149 yang_snodes_iterate(NULL
, nb_node_new_cb
, 0, NULL
);
152 void nb_nodes_delete(void)
154 yang_snodes_iterate(NULL
, nb_node_del_cb
, 0, NULL
);
157 struct nb_node
*nb_node_find(const char *path
)
159 const struct lysc_node
*snode
;
162 * Use libyang to find the schema node associated to the path and get
163 * the northbound node from there (snode private pointer).
165 snode
= lys_find_path(ly_native_ctx
, NULL
, path
, 0);
172 void nb_node_set_dependency_cbs(const char *dependency_xpath
,
173 const char *dependant_xpath
,
174 struct nb_dependency_callbacks
*cbs
)
176 struct nb_node
*dependency
= nb_node_find(dependency_xpath
);
177 struct nb_node
*dependant
= nb_node_find(dependant_xpath
);
179 if (!dependency
|| !dependant
)
182 dependency
->dep_cbs
.get_dependant_xpath
= cbs
->get_dependant_xpath
;
183 dependant
->dep_cbs
.get_dependency_xpath
= cbs
->get_dependency_xpath
;
186 bool nb_node_has_dependency(struct nb_node
*node
)
188 return node
->dep_cbs
.get_dependency_xpath
!= NULL
;
191 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
192 enum nb_operation operation
,
193 int callback_implemented
, bool optional
)
197 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
200 * Add an exception for operational data callbacks. A rw list usually
201 * doesn't need any associated operational data callbacks. But if this
202 * rw list is augmented by another module which adds state nodes under
203 * it, then this list will need to have the 'get_next()', 'get_keys()'
204 * and 'lookup_entry()' callbacks. As such, never log a warning when
205 * these callbacks are implemented when they are not needed, since this
206 * depends on context (e.g. some daemons might augment "frr-interface"
207 * while others don't).
209 if (!valid
&& callback_implemented
&& operation
!= NB_OP_GET_NEXT
210 && operation
!= NB_OP_GET_KEYS
&& operation
!= NB_OP_LOOKUP_ENTRY
)
211 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
212 "unneeded '%s' callback for '%s'",
213 nb_operation_name(operation
), nb_node
->xpath
);
215 if (!optional
&& valid
&& !callback_implemented
) {
216 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
217 nb_operation_name(operation
), nb_node
->xpath
);
225 * Check if the required callbacks were implemented for the given northbound
228 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
231 unsigned int error
= 0;
233 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
234 !!nb_node
->cbs
.create
, false);
235 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
236 !!nb_node
->cbs
.modify
, false);
237 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
238 !!nb_node
->cbs
.destroy
, false);
239 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
241 error
+= nb_node_validate_cb(nb_node
, NB_OP_PRE_VALIDATE
,
242 !!nb_node
->cbs
.pre_validate
, true);
243 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
244 !!nb_node
->cbs
.apply_finish
, true);
245 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
246 !!nb_node
->cbs
.get_elem
, false);
247 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
248 !!nb_node
->cbs
.get_next
, false);
249 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
250 !!nb_node
->cbs
.get_keys
, false);
251 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
252 !!nb_node
->cbs
.lookup_entry
, false);
253 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
259 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
261 /* Top-level nodes can have any priority. */
262 if (!nb_node
->parent
)
265 if (nb_node
->priority
< nb_node
->parent
->priority
) {
266 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
267 "node has higher priority than its parent [xpath %s]",
275 static int nb_node_validate(const struct lysc_node
*snode
, void *arg
)
277 struct nb_node
*nb_node
= snode
->priv
;
278 unsigned int *errors
= arg
;
280 /* Validate callbacks and priority. */
282 *errors
+= nb_node_validate_cbs(nb_node
);
283 *errors
+= nb_node_validate_priority(nb_node
);
286 return YANG_ITER_CONTINUE
;
289 struct nb_config
*nb_config_new(struct lyd_node
*dnode
)
291 struct nb_config
*config
;
293 config
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*config
));
295 config
->dnode
= dnode
;
297 config
->dnode
= yang_dnode_new(ly_native_ctx
, true);
303 void nb_config_free(struct nb_config
*config
)
306 yang_dnode_free(config
->dnode
);
307 XFREE(MTYPE_NB_CONFIG
, config
);
310 struct nb_config
*nb_config_dup(const struct nb_config
*config
)
312 struct nb_config
*dup
;
314 dup
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*dup
));
315 dup
->dnode
= yang_dnode_dup(config
->dnode
);
316 dup
->version
= config
->version
;
321 int nb_config_merge(struct nb_config
*config_dst
, struct nb_config
*config_src
,
322 bool preserve_source
)
326 ret
= lyd_merge_siblings(&config_dst
->dnode
, config_src
->dnode
, 0);
328 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_merge() failed", __func__
);
330 if (!preserve_source
)
331 nb_config_free(config_src
);
333 return (ret
== 0) ? NB_OK
: NB_ERR
;
336 void nb_config_replace(struct nb_config
*config_dst
,
337 struct nb_config
*config_src
, bool preserve_source
)
339 /* Update version. */
340 if (config_src
->version
!= 0)
341 config_dst
->version
= config_src
->version
;
344 if (config_dst
->dnode
)
345 yang_dnode_free(config_dst
->dnode
);
346 if (preserve_source
) {
347 config_dst
->dnode
= yang_dnode_dup(config_src
->dnode
);
349 config_dst
->dnode
= config_src
->dnode
;
350 config_src
->dnode
= NULL
;
351 nb_config_free(config_src
);
355 /* Generate the nb_config_cbs tree. */
356 static inline int nb_config_cb_compare(const struct nb_config_cb
*a
,
357 const struct nb_config_cb
*b
)
359 /* Sort by priority first. */
360 if (a
->nb_node
->priority
< b
->nb_node
->priority
)
362 if (a
->nb_node
->priority
> b
->nb_node
->priority
)
366 * Preserve the order of the configuration changes as told by libyang.
374 * All 'apply_finish' callbacks have their sequence number set to zero.
375 * In this case, compare them using their dnode pointers (the order
376 * doesn't matter for callbacks that have the same priority).
378 if (a
->dnode
< b
->dnode
)
380 if (a
->dnode
> b
->dnode
)
385 RB_GENERATE(nb_config_cbs
, nb_config_cb
, entry
, nb_config_cb_compare
);
387 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
388 enum nb_operation operation
,
390 const struct lyd_node
*dnode
)
392 struct nb_config_change
*change
;
394 /* Ignore unimplemented nodes. */
395 if (!dnode
->schema
->priv
)
398 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
399 change
->cb
.operation
= operation
;
400 change
->cb
.seq
= *seq
;
402 change
->cb
.nb_node
= dnode
->schema
->priv
;
403 change
->cb
.dnode
= dnode
;
405 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
408 static void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
410 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
411 struct nb_config_change
*change
;
413 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
415 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
416 XFREE(MTYPE_TMP
, change
);
421 * Helper function used when calculating the delta between two different
422 * configurations. Given a new subtree, calculate all new YANG data nodes,
423 * excluding default leafs and leaf-lists. This is a recursive function.
425 static void nb_config_diff_created(const struct lyd_node
*dnode
, uint32_t *seq
,
426 struct nb_config_cbs
*changes
)
428 enum nb_operation operation
;
429 struct lyd_node
*child
;
431 /* Ignore unimplemented nodes. */
432 if (!dnode
->schema
->priv
)
435 switch (dnode
->schema
->nodetype
) {
438 if (lyd_is_default(dnode
))
441 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
442 operation
= NB_OP_CREATE
;
443 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
444 operation
= NB_OP_MODIFY
;
448 nb_config_diff_add_change(changes
, operation
, seq
, dnode
);
452 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
453 nb_config_diff_add_change(changes
, NB_OP_CREATE
, seq
,
456 /* Process child nodes recursively. */
457 LY_LIST_FOR (lyd_child(dnode
), child
) {
458 nb_config_diff_created(child
, seq
, changes
);
466 static void nb_config_diff_deleted(const struct lyd_node
*dnode
, uint32_t *seq
,
467 struct nb_config_cbs
*changes
)
469 /* Ignore unimplemented nodes. */
470 if (!dnode
->schema
->priv
)
473 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
474 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, seq
, dnode
);
475 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
476 struct lyd_node
*child
;
479 * Non-presence containers need special handling since they
480 * don't have "destroy" callbacks. In this case, what we need to
481 * do is to call the "destroy" callbacks of their child nodes
482 * when applicable (i.e. optional nodes).
484 LY_LIST_FOR (lyd_child(dnode
), child
) {
485 nb_config_diff_deleted(child
, seq
, changes
);
490 static int nb_lyd_diff_get_op(const struct lyd_node
*dnode
)
492 const struct lyd_meta
*meta
;
493 LY_LIST_FOR (dnode
->meta
, meta
) {
494 if (strcmp(meta
->name
, "operation")
495 || strcmp(meta
->annotation
->module
->name
, "yang"))
497 return lyd_get_meta_value(meta
)[0];
#if 0 /* Used below in nb_config_diff inside normally disabled code */
static inline void nb_config_diff_dnode_log_path(const char *context,
						 const char *path,
						 const struct lyd_node *dnode)
{
	if (dnode->schema->nodetype & LYD_NODE_TERM)
		zlog_debug("nb_config_diff: %s: %s: %s", context, path,
			   lyd_get_value(dnode));
	else
		zlog_debug("nb_config_diff: %s: %s", context, path);
}

static inline void nb_config_diff_dnode_log(const char *context,
					    const struct lyd_node *dnode)
{
	if (!dnode) {
		zlog_debug("nb_config_diff: %s: NULL", context);
		return;
	}

	char *path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
	nb_config_diff_dnode_log_path(context, path, dnode);
	free(path);
}
#endif
528 /* Calculate the delta between two different configurations. */
529 static void nb_config_diff(const struct nb_config
*config1
,
530 const struct nb_config
*config2
,
531 struct nb_config_cbs
*changes
)
533 struct lyd_node
*diff
= NULL
;
534 const struct lyd_node
*root
, *dnode
;
535 struct lyd_node
*target
;
540 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
541 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
542 LY_LIST_FOR(config1
->dnode
, root
) {
543 LYD_TREE_DFS_BEGIN(root
, dnode
) {
544 nb_config_diff_dnode_log("from", dnode
);
545 LYD_TREE_DFS_END(root
, dnode
);
548 LY_LIST_FOR(config2
->dnode
, root
) {
549 LYD_TREE_DFS_BEGIN(root
, dnode
) {
550 nb_config_diff_dnode_log("to", dnode
);
551 LYD_TREE_DFS_END(root
, dnode
);
557 err
= lyd_diff_siblings(config1
->dnode
, config2
->dnode
,
558 LYD_DIFF_DEFAULTS
, &diff
);
561 if (diff
&& DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
564 if (!lyd_print_mem(&s
, diff
, LYD_JSON
,
565 LYD_PRINT_WITHSIBLINGS
| LYD_PRINT_WD_ALL
)) {
566 zlog_debug("%s: %s", __func__
, s
);
573 LY_LIST_FOR (diff
, root
) {
574 LYD_TREE_DFS_BEGIN (root
, dnode
) {
575 op
= nb_lyd_diff_get_op(dnode
);
577 path
= lyd_path(dnode
, LYD_PATH_STD
, NULL
, 0);
579 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
580 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
582 snprintf(context
, sizeof(context
),
583 "iterating diff: oper: %c seq: %u", op
, seq
);
584 nb_config_diff_dnode_log_path(context
, path
, dnode
);
588 case 'c': /* create */
590 * This is rather inefficient, but when we use
591 * dnode from the diff instead of the
592 * candidate config node we get failures when
593 * looking up default values, etc, based on
596 target
= yang_dnode_get(config2
->dnode
, path
);
598 nb_config_diff_created(target
, &seq
, changes
);
600 /* Skip rest of sub-tree, move to next sibling
602 LYD_TREE_DFS_continue
= 1;
604 case 'd': /* delete */
605 target
= yang_dnode_get(config1
->dnode
, path
);
607 nb_config_diff_deleted(target
, &seq
, changes
);
609 /* Skip rest of sub-tree, move to next sibling
611 LYD_TREE_DFS_continue
= 1;
613 case 'r': /* replace */
614 /* either moving an entry or changing a value */
615 target
= yang_dnode_get(config2
->dnode
, path
);
617 nb_config_diff_add_change(changes
, NB_OP_MODIFY
,
625 LYD_TREE_DFS_END(root
, dnode
);
632 int nb_candidate_edit(struct nb_config
*candidate
,
633 const struct nb_node
*nb_node
,
634 enum nb_operation operation
, const char *xpath
,
635 const struct yang_data
*previous
,
636 const struct yang_data
*data
)
638 struct lyd_node
*dnode
, *dep_dnode
;
639 char xpath_edit
[XPATH_MAXLEN
];
640 char dep_xpath
[XPATH_MAXLEN
];
643 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
644 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
645 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
648 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
653 err
= lyd_new_path(candidate
->dnode
, ly_native_ctx
, xpath_edit
,
654 (void *)data
->value
, LYD_NEW_PATH_UPDATE
,
657 flog_warn(EC_LIB_LIBYANG
,
658 "%s: lyd_new_path(%s) failed: %d", __func__
,
662 /* Create default nodes */
663 LY_ERR err
= lyd_new_implicit_tree(
664 dnode
, LYD_IMPLICIT_NO_STATE
, NULL
);
666 flog_warn(EC_LIB_LIBYANG
,
667 "%s: lyd_new_implicit_all failed: %d",
673 * dnode returned by the lyd_new_path may be from a
674 * different schema, so we need to update the nb_node
676 nb_node
= dnode
->schema
->priv
;
677 if (nb_node
->dep_cbs
.get_dependency_xpath
) {
678 nb_node
->dep_cbs
.get_dependency_xpath(
681 err
= lyd_new_path(candidate
->dnode
,
682 ly_native_ctx
, dep_xpath
,
683 NULL
, LYD_NEW_PATH_UPDATE
,
685 /* Create default nodes */
686 if (!err
&& dep_dnode
)
687 err
= lyd_new_implicit_tree(
689 LYD_IMPLICIT_NO_STATE
, NULL
);
693 "%s: dependency: lyd_new_path(%s) failed: %d",
694 __func__
, dep_xpath
, err
);
701 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
704 * Return a special error code so the caller can choose
705 * whether to ignore it or not.
707 return NB_ERR_NOT_FOUND
;
708 /* destroy dependant */
709 if (nb_node
->dep_cbs
.get_dependant_xpath
) {
710 nb_node
->dep_cbs
.get_dependant_xpath(dnode
, dep_xpath
);
712 dep_dnode
= yang_dnode_get(candidate
->dnode
, dep_xpath
);
714 lyd_free_tree(dep_dnode
);
716 lyd_free_tree(dnode
);
719 /* TODO: update configuration. */
721 case NB_OP_PRE_VALIDATE
:
722 case NB_OP_APPLY_FINISH
:
726 case NB_OP_LOOKUP_ENTRY
:
728 flog_warn(EC_LIB_DEVELOPMENT
,
729 "%s: unknown operation (%u) [xpath %s]", __func__
,
730 operation
, xpath_edit
);
737 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
739 if (candidate
->version
< running_config
->version
)
745 int nb_candidate_update(struct nb_config
*candidate
)
747 struct nb_config
*updated_config
;
749 updated_config
= nb_config_dup(running_config
);
750 if (nb_config_merge(updated_config
, candidate
, true) != NB_OK
)
753 nb_config_replace(candidate
, updated_config
, false);
759 * Perform YANG syntactic and semantic validation.
761 * WARNING: lyd_validate() can change the configuration as part of the
762 * validation process.
764 static int nb_candidate_validate_yang(struct nb_config
*candidate
, char *errmsg
,
767 if (lyd_validate_all(&candidate
->dnode
, ly_native_ctx
,
768 LYD_VALIDATE_NO_STATE
, NULL
)
770 yang_print_errors(ly_native_ctx
, errmsg
, errmsg_len
);
771 return NB_ERR_VALIDATION
;
777 /* Perform code-level validation using the northbound callbacks. */
778 static int nb_candidate_validate_code(struct nb_context
*context
,
779 struct nb_config
*candidate
,
780 struct nb_config_cbs
*changes
,
781 char *errmsg
, size_t errmsg_len
)
783 struct nb_config_cb
*cb
;
784 struct lyd_node
*root
, *child
;
787 /* First validate the candidate as a whole. */
788 LY_LIST_FOR (candidate
->dnode
, root
) {
789 LYD_TREE_DFS_BEGIN (root
, child
) {
790 struct nb_node
*nb_node
;
792 nb_node
= child
->schema
->priv
;
793 if (!nb_node
|| !nb_node
->cbs
.pre_validate
)
796 ret
= nb_callback_pre_validate(context
, nb_node
, child
,
799 return NB_ERR_VALIDATION
;
802 LYD_TREE_DFS_END(root
, child
);
806 /* Now validate the configuration changes. */
807 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
808 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
810 ret
= nb_callback_configuration(context
, NB_EV_VALIDATE
, change
,
813 return NB_ERR_VALIDATION
;
819 int nb_candidate_validate(struct nb_context
*context
,
820 struct nb_config
*candidate
, char *errmsg
,
823 struct nb_config_cbs changes
;
826 if (nb_candidate_validate_yang(candidate
, errmsg
, errmsg_len
) != NB_OK
)
827 return NB_ERR_VALIDATION
;
829 RB_INIT(nb_config_cbs
, &changes
);
830 nb_config_diff(running_config
, candidate
, &changes
);
831 ret
= nb_candidate_validate_code(context
, candidate
, &changes
, errmsg
,
833 nb_config_diff_del_changes(&changes
);
838 int nb_candidate_commit_prepare(struct nb_context context
,
839 struct nb_config
*candidate
,
841 struct nb_transaction
**transaction
,
842 char *errmsg
, size_t errmsg_len
)
844 struct nb_config_cbs changes
;
846 if (nb_candidate_validate_yang(candidate
, errmsg
, errmsg_len
)
848 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
849 "%s: failed to validate candidate configuration",
851 return NB_ERR_VALIDATION
;
854 RB_INIT(nb_config_cbs
, &changes
);
855 nb_config_diff(running_config
, candidate
, &changes
);
856 if (RB_EMPTY(nb_config_cbs
, &changes
)) {
859 "No changes to apply were found during preparation phase");
860 return NB_ERR_NO_CHANGES
;
863 if (nb_candidate_validate_code(&context
, candidate
, &changes
, errmsg
,
864 errmsg_len
) != NB_OK
) {
865 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
866 "%s: failed to validate candidate configuration",
868 nb_config_diff_del_changes(&changes
);
869 return NB_ERR_VALIDATION
;
872 *transaction
= nb_transaction_new(context
, candidate
, &changes
, comment
,
874 if (*transaction
== NULL
) {
875 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
876 "%s: failed to create transaction: %s", __func__
,
878 nb_config_diff_del_changes(&changes
);
879 return NB_ERR_LOCKED
;
882 return nb_transaction_process(NB_EV_PREPARE
, *transaction
, errmsg
,
886 void nb_candidate_commit_abort(struct nb_transaction
*transaction
, char *errmsg
,
889 (void)nb_transaction_process(NB_EV_ABORT
, transaction
, errmsg
,
891 nb_transaction_free(transaction
);
894 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
895 bool save_transaction
, uint32_t *transaction_id
,
896 char *errmsg
, size_t errmsg_len
)
898 (void)nb_transaction_process(NB_EV_APPLY
, transaction
, errmsg
,
900 nb_transaction_apply_finish(transaction
, errmsg
, errmsg_len
);
902 /* Replace running by candidate. */
903 transaction
->config
->version
++;
904 nb_config_replace(running_config
, transaction
->config
, true);
906 /* Record transaction. */
907 if (save_transaction
&& nb_db_enabled
908 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
909 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
910 "%s: failed to record transaction", __func__
);
912 nb_transaction_free(transaction
);
915 int nb_candidate_commit(struct nb_context context
, struct nb_config
*candidate
,
916 bool save_transaction
, const char *comment
,
917 uint32_t *transaction_id
, char *errmsg
,
920 struct nb_transaction
*transaction
= NULL
;
923 ret
= nb_candidate_commit_prepare(context
, candidate
, comment
,
924 &transaction
, errmsg
, errmsg_len
);
926 * Apply the changes if the preparation phase succeeded. Otherwise abort
930 nb_candidate_commit_apply(transaction
, save_transaction
,
931 transaction_id
, errmsg
, errmsg_len
);
932 else if (transaction
!= NULL
)
933 nb_candidate_commit_abort(transaction
, errmsg
, errmsg_len
);
938 int nb_running_lock(enum nb_client client
, const void *user
)
942 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
943 if (!running_config_mgmt_lock
.locked
) {
944 running_config_mgmt_lock
.locked
= true;
945 running_config_mgmt_lock
.owner_client
= client
;
946 running_config_mgmt_lock
.owner_user
= user
;
954 int nb_running_unlock(enum nb_client client
, const void *user
)
958 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
959 if (running_config_mgmt_lock
.locked
960 && running_config_mgmt_lock
.owner_client
== client
961 && running_config_mgmt_lock
.owner_user
== user
) {
962 running_config_mgmt_lock
.locked
= false;
963 running_config_mgmt_lock
.owner_client
= NB_CLIENT_NONE
;
964 running_config_mgmt_lock
.owner_user
= NULL
;
972 int nb_running_lock_check(enum nb_client client
, const void *user
)
976 frr_with_mutex (&running_config_mgmt_lock
.mtx
) {
977 if (!running_config_mgmt_lock
.locked
978 || (running_config_mgmt_lock
.owner_client
== client
979 && running_config_mgmt_lock
.owner_user
== user
))
986 static void nb_log_config_callback(const enum nb_event event
,
987 enum nb_operation operation
,
988 const struct lyd_node
*dnode
)
991 char xpath
[XPATH_MAXLEN
];
993 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
996 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
997 if (yang_snode_is_typeless_data(dnode
->schema
))
1000 value
= yang_dnode_get_string(dnode
, NULL
);
1003 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
1004 nb_event_name(event
), nb_operation_name(operation
), xpath
,
1008 static int nb_callback_create(struct nb_context
*context
,
1009 const struct nb_node
*nb_node
,
1010 enum nb_event event
, const struct lyd_node
*dnode
,
1011 union nb_resource
*resource
, char *errmsg
,
1014 struct nb_cb_create_args args
= {};
1015 bool unexpected_error
= false;
1018 nb_log_config_callback(event
, NB_OP_CREATE
, dnode
);
1020 args
.context
= context
;
1023 args
.resource
= resource
;
1024 args
.errmsg
= errmsg
;
1025 args
.errmsg_len
= errmsg_len
;
1026 ret
= nb_node
->cbs
.create(&args
);
1028 /* Detect and log unexpected errors. */
1033 case NB_ERR_VALIDATION
:
1034 if (event
!= NB_EV_VALIDATE
)
1035 unexpected_error
= true;
1037 case NB_ERR_RESOURCE
:
1038 if (event
!= NB_EV_PREPARE
)
1039 unexpected_error
= true;
1041 case NB_ERR_INCONSISTENCY
:
1042 if (event
== NB_EV_VALIDATE
)
1043 unexpected_error
= true;
1046 unexpected_error
= true;
1049 if (unexpected_error
)
1050 DEBUGD(&nb_dbg_cbs_config
,
1051 "northbound callback: unexpected return value: %s",
1057 static int nb_callback_modify(struct nb_context
*context
,
1058 const struct nb_node
*nb_node
,
1059 enum nb_event event
, const struct lyd_node
*dnode
,
1060 union nb_resource
*resource
, char *errmsg
,
1063 struct nb_cb_modify_args args
= {};
1064 bool unexpected_error
= false;
1067 nb_log_config_callback(event
, NB_OP_MODIFY
, dnode
);
1069 args
.context
= context
;
1072 args
.resource
= resource
;
1073 args
.errmsg
= errmsg
;
1074 args
.errmsg_len
= errmsg_len
;
1075 ret
= nb_node
->cbs
.modify(&args
);
1077 /* Detect and log unexpected errors. */
1082 case NB_ERR_VALIDATION
:
1083 if (event
!= NB_EV_VALIDATE
)
1084 unexpected_error
= true;
1086 case NB_ERR_RESOURCE
:
1087 if (event
!= NB_EV_PREPARE
)
1088 unexpected_error
= true;
1090 case NB_ERR_INCONSISTENCY
:
1091 if (event
== NB_EV_VALIDATE
)
1092 unexpected_error
= true;
1095 unexpected_error
= true;
1098 if (unexpected_error
)
1099 DEBUGD(&nb_dbg_cbs_config
,
1100 "northbound callback: unexpected return value: %s",
1106 static int nb_callback_destroy(struct nb_context
*context
,
1107 const struct nb_node
*nb_node
,
1108 enum nb_event event
,
1109 const struct lyd_node
*dnode
, char *errmsg
,
1112 struct nb_cb_destroy_args args
= {};
1113 bool unexpected_error
= false;
1116 nb_log_config_callback(event
, NB_OP_DESTROY
, dnode
);
1118 args
.context
= context
;
1121 args
.errmsg
= errmsg
;
1122 args
.errmsg_len
= errmsg_len
;
1123 ret
= nb_node
->cbs
.destroy(&args
);
1125 /* Detect and log unexpected errors. */
1130 case NB_ERR_VALIDATION
:
1131 if (event
!= NB_EV_VALIDATE
)
1132 unexpected_error
= true;
1134 case NB_ERR_INCONSISTENCY
:
1135 if (event
== NB_EV_VALIDATE
)
1136 unexpected_error
= true;
1139 unexpected_error
= true;
1142 if (unexpected_error
)
1143 DEBUGD(&nb_dbg_cbs_config
,
1144 "northbound callback: unexpected return value: %s",
1150 static int nb_callback_move(struct nb_context
*context
,
1151 const struct nb_node
*nb_node
, enum nb_event event
,
1152 const struct lyd_node
*dnode
, char *errmsg
,
1155 struct nb_cb_move_args args
= {};
1156 bool unexpected_error
= false;
1159 nb_log_config_callback(event
, NB_OP_MOVE
, dnode
);
1161 args
.context
= context
;
1164 args
.errmsg
= errmsg
;
1165 args
.errmsg_len
= errmsg_len
;
1166 ret
= nb_node
->cbs
.move(&args
);
1168 /* Detect and log unexpected errors. */
1173 case NB_ERR_VALIDATION
:
1174 if (event
!= NB_EV_VALIDATE
)
1175 unexpected_error
= true;
1177 case NB_ERR_INCONSISTENCY
:
1178 if (event
== NB_EV_VALIDATE
)
1179 unexpected_error
= true;
1182 unexpected_error
= true;
1185 if (unexpected_error
)
1186 DEBUGD(&nb_dbg_cbs_config
,
1187 "northbound callback: unexpected return value: %s",
1193 static int nb_callback_pre_validate(struct nb_context
*context
,
1194 const struct nb_node
*nb_node
,
1195 const struct lyd_node
*dnode
, char *errmsg
,
1198 struct nb_cb_pre_validate_args args
= {};
1199 bool unexpected_error
= false;
1202 nb_log_config_callback(NB_EV_VALIDATE
, NB_OP_PRE_VALIDATE
, dnode
);
1205 args
.errmsg
= errmsg
;
1206 args
.errmsg_len
= errmsg_len
;
1207 ret
= nb_node
->cbs
.pre_validate(&args
);
1209 /* Detect and log unexpected errors. */
1212 case NB_ERR_VALIDATION
:
1215 unexpected_error
= true;
1218 if (unexpected_error
)
1219 DEBUGD(&nb_dbg_cbs_config
,
1220 "northbound callback: unexpected return value: %s",
1226 static void nb_callback_apply_finish(struct nb_context
*context
,
1227 const struct nb_node
*nb_node
,
1228 const struct lyd_node
*dnode
, char *errmsg
,
1231 struct nb_cb_apply_finish_args args
= {};
1233 nb_log_config_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
, dnode
);
1235 args
.context
= context
;
1237 args
.errmsg
= errmsg
;
1238 args
.errmsg_len
= errmsg_len
;
1239 nb_node
->cbs
.apply_finish(&args
);
1242 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
1244 const void *list_entry
)
1246 struct nb_cb_get_elem_args args
= {};
1248 DEBUGD(&nb_dbg_cbs_state
,
1249 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1253 args
.list_entry
= list_entry
;
1254 return nb_node
->cbs
.get_elem(&args
);
1257 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
1258 const void *parent_list_entry
,
1259 const void *list_entry
)
1261 struct nb_cb_get_next_args args
= {};
1263 DEBUGD(&nb_dbg_cbs_state
,
1264 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1265 nb_node
->xpath
, parent_list_entry
, list_entry
);
1267 args
.parent_list_entry
= parent_list_entry
;
1268 args
.list_entry
= list_entry
;
1269 return nb_node
->cbs
.get_next(&args
);
1272 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
1273 struct yang_list_keys
*keys
)
1275 struct nb_cb_get_keys_args args
= {};
1277 DEBUGD(&nb_dbg_cbs_state
,
1278 "northbound callback (get_keys): node [%s] list_entry [%p]",
1279 nb_node
->xpath
, list_entry
);
1281 args
.list_entry
= list_entry
;
1283 return nb_node
->cbs
.get_keys(&args
);
1286 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
1287 const void *parent_list_entry
,
1288 const struct yang_list_keys
*keys
)
1290 struct nb_cb_lookup_entry_args args
= {};
1292 DEBUGD(&nb_dbg_cbs_state
,
1293 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1294 nb_node
->xpath
, parent_list_entry
);
1296 args
.parent_list_entry
= parent_list_entry
;
1298 return nb_node
->cbs
.lookup_entry(&args
);
1301 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
1302 const struct list
*input
, struct list
*output
, char *errmsg
,
1305 struct nb_cb_rpc_args args
= {};
1307 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
1311 args
.output
= output
;
1312 args
.errmsg
= errmsg
;
1313 args
.errmsg_len
= errmsg_len
;
1314 return nb_node
->cbs
.rpc(&args
);
1318 * Call the northbound configuration callback associated to a given
1319 * configuration change.
1321 static int nb_callback_configuration(struct nb_context
*context
,
1322 const enum nb_event event
,
1323 struct nb_config_change
*change
,
1324 char *errmsg
, size_t errmsg_len
)
1326 enum nb_operation operation
= change
->cb
.operation
;
1327 char xpath
[XPATH_MAXLEN
];
1328 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
1329 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1330 union nb_resource
*resource
;
1333 if (event
== NB_EV_VALIDATE
)
1336 resource
= &change
->resource
;
1338 switch (operation
) {
1340 ret
= nb_callback_create(context
, nb_node
, event
, dnode
,
1341 resource
, errmsg
, errmsg_len
);
1344 ret
= nb_callback_modify(context
, nb_node
, event
, dnode
,
1345 resource
, errmsg
, errmsg_len
);
1348 ret
= nb_callback_destroy(context
, nb_node
, event
, dnode
,
1349 errmsg
, errmsg_len
);
1352 ret
= nb_callback_move(context
, nb_node
, event
, dnode
, errmsg
,
1355 case NB_OP_PRE_VALIDATE
:
1356 case NB_OP_APPLY_FINISH
:
1357 case NB_OP_GET_ELEM
:
1358 case NB_OP_GET_NEXT
:
1359 case NB_OP_GET_KEYS
:
1360 case NB_OP_LOOKUP_ENTRY
:
1362 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1363 flog_err(EC_LIB_DEVELOPMENT
,
1364 "%s: unknown operation (%u) [xpath %s]", __func__
,
1370 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1373 case NB_EV_VALIDATE
:
1374 flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE
,
1375 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1376 nb_err_name(ret
), nb_event_name(event
),
1377 nb_operation_name(operation
), xpath
,
1378 errmsg
[0] ? " message: " : "", errmsg
);
1381 flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE
,
1382 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1383 nb_err_name(ret
), nb_event_name(event
),
1384 nb_operation_name(operation
), xpath
,
1385 errmsg
[0] ? " message: " : "", errmsg
);
1388 flog_warn(EC_LIB_NB_CB_CONFIG_ABORT
,
1389 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1390 nb_err_name(ret
), nb_event_name(event
),
1391 nb_operation_name(operation
), xpath
,
1392 errmsg
[0] ? " message: " : "", errmsg
);
1395 flog_err(EC_LIB_NB_CB_CONFIG_APPLY
,
1396 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1397 nb_err_name(ret
), nb_event_name(event
),
1398 nb_operation_name(operation
), xpath
,
1399 errmsg
[0] ? " message: " : "", errmsg
);
1402 flog_err(EC_LIB_DEVELOPMENT
,
1403 "%s: unknown event (%u) [xpath %s]", __func__
,
1412 static struct nb_transaction
*
1413 nb_transaction_new(struct nb_context context
, struct nb_config
*config
,
1414 struct nb_config_cbs
*changes
, const char *comment
,
1415 char *errmsg
, size_t errmsg_len
)
1417 struct nb_transaction
*transaction
;
1419 if (nb_running_lock_check(context
.client
, context
.user
)) {
1421 "running configuration is locked by another client",
1426 if (transaction_in_progress
) {
1428 "there's already another transaction in progress",
1432 transaction_in_progress
= true;
1434 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
1435 transaction
->context
= context
;
1437 strlcpy(transaction
->comment
, comment
,
1438 sizeof(transaction
->comment
));
1439 transaction
->config
= config
;
1440 transaction
->changes
= *changes
;
1445 static void nb_transaction_free(struct nb_transaction
*transaction
)
1447 nb_config_diff_del_changes(&transaction
->changes
);
1448 XFREE(MTYPE_TMP
, transaction
);
1449 transaction_in_progress
= false;
1452 /* Process all configuration changes associated to a transaction. */
1453 static int nb_transaction_process(enum nb_event event
,
1454 struct nb_transaction
*transaction
,
1455 char *errmsg
, size_t errmsg_len
)
1457 struct nb_config_cb
*cb
;
1459 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1460 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1464 * Only try to release resources that were allocated
1467 if (event
== NB_EV_ABORT
&& !change
->prepare_ok
)
1470 /* Call the appropriate callback. */
1471 ret
= nb_callback_configuration(&transaction
->context
, event
,
1472 change
, errmsg
, errmsg_len
);
1477 change
->prepare_ok
= true;
1482 * At this point it's not possible to reject the
1483 * transaction anymore, so any failure here can lead to
1484 * inconsistencies and should be treated as a bug.
1485 * Operations prone to errors, like validations and
1486 * resource allocations, should be performed during the
1490 case NB_EV_VALIDATE
:
1498 static struct nb_config_cb
*
1499 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const struct nb_node
*nb_node
,
1500 const struct lyd_node
*dnode
)
1502 struct nb_config_cb
*cb
;
1504 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1505 cb
->nb_node
= nb_node
;
1507 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1512 static struct nb_config_cb
*
1513 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
,
1514 const struct nb_node
*nb_node
,
1515 const struct lyd_node
*dnode
)
1517 struct nb_config_cb s
;
1520 s
.nb_node
= nb_node
;
1522 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1525 /* Call the 'apply_finish' callbacks. */
1526 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
,
1527 char *errmsg
, size_t errmsg_len
)
1529 struct nb_config_cbs cbs
;
1530 struct nb_config_cb
*cb
;
1532 /* Initialize tree of 'apply_finish' callbacks. */
1533 RB_INIT(nb_config_cbs
, &cbs
);
1535 /* Identify the 'apply_finish' callbacks that need to be called. */
1536 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1537 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1538 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1541 * Iterate up to the root of the data tree. When a node is being
1542 * deleted, skip its 'apply_finish' callback if one is defined
1543 * (the 'apply_finish' callbacks from the node ancestors should
1544 * be called though).
1546 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1547 char xpath
[XPATH_MAXLEN
];
1549 dnode
= lyd_parent(dnode
);
1554 * The dnode from 'delete' callbacks point to elements
1555 * from the running configuration. Use yang_dnode_get()
1556 * to get the corresponding dnode from the candidate
1557 * configuration that is being committed.
1559 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1560 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1564 struct nb_node
*nb_node
;
1566 nb_node
= dnode
->schema
->priv
;
1567 if (!nb_node
|| !nb_node
->cbs
.apply_finish
)
1571 * Don't call the callback more than once for the same
1574 if (nb_apply_finish_cb_find(&cbs
, nb_node
, dnode
))
1577 nb_apply_finish_cb_new(&cbs
, nb_node
, dnode
);
1580 dnode
= lyd_parent(dnode
);
1584 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1585 RB_FOREACH (cb
, nb_config_cbs
, &cbs
)
1586 nb_callback_apply_finish(&transaction
->context
, cb
->nb_node
,
1587 cb
->dnode
, errmsg
, errmsg_len
);
1589 /* Release memory. */
1590 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1591 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1592 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1593 XFREE(MTYPE_TMP
, cb
);
1597 static int nb_oper_data_iter_children(const struct lysc_node
*snode
,
1598 const char *xpath
, const void *list_entry
,
1599 const struct yang_list_keys
*list_keys
,
1600 struct yang_translator
*translator
,
1601 bool first
, uint32_t flags
,
1602 nb_oper_data_cb cb
, void *arg
)
1604 const struct lysc_node
*child
;
1606 LY_LIST_FOR (lysc_node_child(snode
), child
) {
1609 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1610 list_keys
, translator
, false,
1619 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1620 const char *xpath
, const void *list_entry
,
1621 const struct yang_list_keys
*list_keys
,
1622 struct yang_translator
*translator
,
1623 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1625 struct yang_data
*data
;
1627 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1630 /* Ignore list keys. */
1631 if (lysc_is_key(nb_node
->snode
))
1634 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1636 /* Leaf of type "empty" is not present. */
1639 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1642 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1644 const void *list_entry
,
1645 const struct yang_list_keys
*list_keys
,
1646 struct yang_translator
*translator
,
1647 uint32_t flags
, nb_oper_data_cb cb
,
1650 const struct lysc_node
*snode
= nb_node
->snode
;
1652 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1655 /* Read-only presence containers. */
1656 if (nb_node
->cbs
.get_elem
) {
1657 struct yang_data
*data
;
1660 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1662 /* Presence container is not present. */
1665 ret
= (*cb
)(snode
, translator
, data
, arg
);
1670 /* Read-write presence containers. */
1671 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
)) {
1672 struct lysc_node_container
*scontainer
;
1674 scontainer
= (struct lysc_node_container
*)snode
;
1675 if (CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
)
1676 && !yang_dnode_get(running_config
->dnode
, xpath
))
1680 /* Iterate over the child nodes. */
1681 return nb_oper_data_iter_children(snode
, xpath
, list_entry
, list_keys
,
1682 translator
, false, flags
, cb
, arg
);
1686 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1687 const void *parent_list_entry
,
1688 const struct yang_list_keys
*parent_list_keys
,
1689 struct yang_translator
*translator
, uint32_t flags
,
1690 nb_oper_data_cb cb
, void *arg
)
1692 const void *list_entry
= NULL
;
1694 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1698 struct yang_data
*data
;
1701 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1704 /* End of the list. */
1707 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1711 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1714 } while (list_entry
);
1719 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1720 const char *xpath_list
,
1721 const void *parent_list_entry
,
1722 const struct yang_list_keys
*parent_list_keys
,
1723 struct yang_translator
*translator
,
1724 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1726 const struct lysc_node
*snode
= nb_node
->snode
;
1727 const void *list_entry
= NULL
;
1728 uint32_t position
= 1;
1730 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1733 /* Iterate over all list entries. */
1735 const struct lysc_node_leaf
*skey
;
1736 struct yang_list_keys list_keys
;
1737 char xpath
[XPATH_MAXLEN
* 2];
1740 /* Obtain list entry. */
1741 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1744 /* End of the list. */
1747 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1748 /* Obtain the list entry keys. */
1749 if (nb_callback_get_keys(nb_node
, list_entry
,
1752 flog_warn(EC_LIB_NB_CB_STATE
,
1753 "%s: failed to get list keys",
1758 /* Build XPath of the list entry. */
1759 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1761 LY_FOR_KEYS (snode
, skey
) {
1762 assert(i
< list_keys
.num
);
1763 snprintf(xpath
+ strlen(xpath
),
1764 sizeof(xpath
) - strlen(xpath
),
1765 "[%s='%s']", skey
->name
,
1769 assert(i
== list_keys
.num
);
1772 * Keyless list - build XPath using a positional index.
1774 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
1779 /* Iterate over the child nodes. */
1780 ret
= nb_oper_data_iter_children(
1781 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1782 translator
, false, flags
, cb
, arg
);
1785 } while (list_entry
);
1790 static int nb_oper_data_iter_node(const struct lysc_node
*snode
,
1791 const char *xpath_parent
,
1792 const void *list_entry
,
1793 const struct yang_list_keys
*list_keys
,
1794 struct yang_translator
*translator
,
1795 bool first
, uint32_t flags
,
1796 nb_oper_data_cb cb
, void *arg
)
1798 struct nb_node
*nb_node
;
1799 char xpath
[XPATH_MAXLEN
];
1802 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
1803 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
1807 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
1808 if (!first
&& snode
->nodetype
!= LYS_USES
) {
1809 struct lysc_node
*parent
;
1811 /* Get the real parent. */
1812 parent
= snode
->parent
;
1815 * When necessary, include the namespace of the augmenting
1818 if (parent
&& parent
->module
!= snode
->module
)
1819 snprintf(xpath
+ strlen(xpath
),
1820 sizeof(xpath
) - strlen(xpath
), "/%s:%s",
1821 snode
->module
->name
, snode
->name
);
1823 snprintf(xpath
+ strlen(xpath
),
1824 sizeof(xpath
) - strlen(xpath
), "/%s",
1828 nb_node
= snode
->priv
;
1829 switch (snode
->nodetype
) {
1831 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
1832 list_keys
, translator
, flags
,
1836 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
1837 list_keys
, translator
, flags
, cb
,
1841 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
1842 list_keys
, translator
, flags
,
1846 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
1847 list_keys
, translator
, flags
, cb
,
1851 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
1852 list_keys
, translator
, false,
1862 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
1863 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1865 struct nb_node
*nb_node
;
1866 const void *list_entry
= NULL
;
1867 struct yang_list_keys list_keys
;
1868 struct list
*list_dnodes
;
1869 struct lyd_node
*dnode
, *dn
;
1870 struct listnode
*ln
;
1873 nb_node
= nb_node_find(xpath
);
1875 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1876 "%s: unknown data path: %s", __func__
, xpath
);
1880 /* For now this function works only with containers and lists. */
1881 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
1883 EC_LIB_NB_OPERATIONAL_DATA
,
1884 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1890 * Create a data tree from the XPath so that we can parse the keys of
1891 * all YANG lists (if any).
1894 LY_ERR err
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
,
1895 LYD_NEW_PATH_UPDATE
, &dnode
);
1896 if (err
|| !dnode
) {
1897 const char *errmsg
=
1898 err
? ly_errmsg(ly_native_ctx
) : "node not found";
1899 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed %s",
1905 * Create a linked list to sort the data nodes starting from the root.
1907 list_dnodes
= list_new();
1908 for (dn
= dnode
; dn
; dn
= lyd_parent(dn
)) {
1909 if (dn
->schema
->nodetype
!= LYS_LIST
|| !lyd_child(dn
))
1911 listnode_add_head(list_dnodes
, dn
);
1914 * Use the northbound callbacks to find list entry pointer corresponding
1915 * to the given XPath.
1917 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
1918 struct lyd_node
*child
;
1922 /* Obtain the list entry keys. */
1923 memset(&list_keys
, 0, sizeof(list_keys
));
1924 LY_LIST_FOR (lyd_child(dn
), child
) {
1925 if (!lysc_is_key(child
->schema
))
1927 strlcpy(list_keys
.key
[n
],
1928 yang_dnode_get_string(child
, NULL
),
1929 sizeof(list_keys
.key
[n
]));
1933 if (list_keys
.num
!= yang_snode_num_keys(dn
->schema
)) {
1934 list_delete(&list_dnodes
);
1935 yang_dnode_free(dnode
);
1936 return NB_ERR_NOT_FOUND
;
1939 /* Find the list entry pointer. */
1940 nn
= dn
->schema
->priv
;
1941 if (!nn
->cbs
.lookup_entry
) {
1943 EC_LIB_NB_OPERATIONAL_DATA
,
1944 "%s: data path doesn't support iteration over operational data: %s",
1946 list_delete(&list_dnodes
);
1947 yang_dnode_free(dnode
);
1952 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
1953 if (list_entry
== NULL
) {
1954 list_delete(&list_dnodes
);
1955 yang_dnode_free(dnode
);
1956 return NB_ERR_NOT_FOUND
;
1960 /* If a list entry was given, iterate over that list entry only. */
1961 if (dnode
->schema
->nodetype
== LYS_LIST
&& lyd_child(dnode
))
1962 ret
= nb_oper_data_iter_children(
1963 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1964 translator
, true, flags
, cb
, arg
);
1966 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
1967 &list_keys
, translator
, true,
1970 list_delete(&list_dnodes
);
1971 yang_dnode_free(dnode
);
1976 bool nb_operation_is_valid(enum nb_operation operation
,
1977 const struct lysc_node
*snode
)
1979 struct nb_node
*nb_node
= snode
->priv
;
1980 struct lysc_node_container
*scontainer
;
1981 struct lysc_node_leaf
*sleaf
;
1983 switch (operation
) {
1985 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1988 switch (snode
->nodetype
) {
1990 sleaf
= (struct lysc_node_leaf
*)snode
;
1991 if (sleaf
->type
->basetype
!= LY_TYPE_EMPTY
)
1995 scontainer
= (struct lysc_node_container
*)snode
;
1996 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2007 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2010 switch (snode
->nodetype
) {
2012 sleaf
= (struct lysc_node_leaf
*)snode
;
2013 if (sleaf
->type
->basetype
== LY_TYPE_EMPTY
)
2016 /* List keys can't be modified. */
2017 if (lysc_is_key(sleaf
))
2025 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2028 switch (snode
->nodetype
) {
2030 sleaf
= (struct lysc_node_leaf
*)snode
;
2032 /* List keys can't be deleted. */
2033 if (lysc_is_key(sleaf
))
2037 * Only optional leafs can be deleted, or leafs whose
2038 * parent is a case statement.
2040 if (snode
->parent
->nodetype
== LYS_CASE
)
2044 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
2049 scontainer
= (struct lysc_node_container
*)snode
;
2050 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2061 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2064 switch (snode
->nodetype
) {
2067 if (!CHECK_FLAG(snode
->flags
, LYS_ORDBY_USER
))
2074 case NB_OP_PRE_VALIDATE
:
2075 case NB_OP_APPLY_FINISH
:
2076 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2079 case NB_OP_GET_ELEM
:
2080 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
2083 switch (snode
->nodetype
) {
2088 scontainer
= (struct lysc_node_container
*)snode
;
2089 if (!CHECK_FLAG(scontainer
->flags
, LYS_PRESENCE
))
2096 case NB_OP_GET_NEXT
:
2097 switch (snode
->nodetype
) {
2099 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2103 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
2110 case NB_OP_GET_KEYS
:
2111 case NB_OP_LOOKUP_ENTRY
:
2112 switch (snode
->nodetype
) {
2114 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
2116 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
2124 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
2127 switch (snode
->nodetype
) {
2140 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
2141 (xpath
, arguments
));
2143 int nb_notification_send(const char *xpath
, struct list
*arguments
)
2147 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
2149 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
2151 list_delete(&arguments
);
2156 /* Running configuration user pointers management. */
2157 struct nb_config_entry
{
2158 char xpath
[XPATH_MAXLEN
];
2162 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
2164 const struct nb_config_entry
*c1
= value1
;
2165 const struct nb_config_entry
*c2
= value2
;
2167 return strmatch(c1
->xpath
, c2
->xpath
);
/*
 * Hash key function. 'value' points at a struct nb_config_entry whose first
 * member is the xpath string, so it can be hashed directly as a string.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
2175 static void *running_config_entry_alloc(void *p
)
2177 struct nb_config_entry
*new, *key
= p
;
2179 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
2180 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
2185 static void running_config_entry_free(void *arg
)
2187 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
2190 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
2192 struct nb_config_entry
*config
, s
;
2194 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2195 config
= hash_get(running_config_entries
, &s
,
2196 running_config_entry_alloc
);
2197 config
->entry
= entry
;
2200 void nb_running_move_tree(const char *xpath_from
, const char *xpath_to
)
2202 struct nb_config_entry
*entry
;
2203 struct list
*entries
= hash_to_list(running_config_entries
);
2204 struct listnode
*ln
;
2206 for (ALL_LIST_ELEMENTS_RO(entries
, ln
, entry
)) {
2207 if (!frrstr_startswith(entry
->xpath
, xpath_from
))
2210 hash_release(running_config_entries
, entry
);
2213 frrstr_replace(entry
->xpath
, xpath_from
, xpath_to
);
2214 strlcpy(entry
->xpath
, newpath
, sizeof(entry
->xpath
));
2215 XFREE(MTYPE_TMP
, newpath
);
2217 (void)hash_get(running_config_entries
, entry
,
2221 list_delete(&entries
);
2224 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
2226 struct nb_config_entry
*config
, s
;
2227 struct lyd_node
*child
;
2230 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2231 config
= hash_release(running_config_entries
, &s
);
2233 entry
= config
->entry
;
2234 running_config_entry_free(config
);
2237 /* Unset user pointers from the child nodes. */
2238 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
2239 LY_LIST_FOR (lyd_child(dnode
), child
) {
2240 (void)nb_running_unset_entry_helper(child
);
/*
 * Remove and return the user pointer associated with 'dnode'. Asserts that
 * an entry existed — callers must only unset nodes they previously set.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry;

	entry = nb_running_unset_entry_helper(dnode);
	assert(entry);

	return entry;
}
2257 static void *nb_running_get_entry_worker(const struct lyd_node
*dnode
,
2259 bool abort_if_not_found
,
2262 const struct lyd_node
*orig_dnode
= dnode
;
2263 char xpath_buf
[XPATH_MAXLEN
];
2264 bool rec_flag
= true;
2266 assert(dnode
|| xpath
);
2269 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
2271 while (rec_flag
&& dnode
) {
2272 struct nb_config_entry
*config
, s
;
2274 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
2275 config
= hash_lookup(running_config_entries
, &s
);
2277 return config
->entry
;
2279 rec_flag
= rec_search
;
2281 dnode
= lyd_parent(dnode
);
2284 if (!abort_if_not_found
)
2287 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
2288 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
2289 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
2290 zlog_backtrace(LOG_ERR
);
2294 void *nb_running_get_entry(const struct lyd_node
*dnode
, const char *xpath
,
2295 bool abort_if_not_found
)
2297 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2301 void *nb_running_get_entry_non_rec(const struct lyd_node
*dnode
,
2302 const char *xpath
, bool abort_if_not_found
)
2304 return nb_running_get_entry_worker(dnode
, xpath
, abort_if_not_found
,
2308 /* Logging functions. */
2309 const char *nb_event_name(enum nb_event event
)
2312 case NB_EV_VALIDATE
:
2322 assert(!"Reached end of function we should never hit");
2325 const char *nb_operation_name(enum nb_operation operation
)
2327 switch (operation
) {
2336 case NB_OP_PRE_VALIDATE
:
2337 return "pre_validate";
2338 case NB_OP_APPLY_FINISH
:
2339 return "apply_finish";
2340 case NB_OP_GET_ELEM
:
2342 case NB_OP_GET_NEXT
:
2344 case NB_OP_GET_KEYS
:
2346 case NB_OP_LOOKUP_ENTRY
:
2347 return "lookup_entry";
2352 assert(!"Reached end of function we should never hit");
2355 const char *nb_err_name(enum nb_error error
)
2361 return "generic error";
2362 case NB_ERR_NO_CHANGES
:
2363 return "no changes";
2364 case NB_ERR_NOT_FOUND
:
2365 return "element not found";
2367 return "resource is locked";
2368 case NB_ERR_VALIDATION
:
2369 return "validation";
2370 case NB_ERR_RESOURCE
:
2371 return "failed to allocate resource";
2372 case NB_ERR_INCONSISTENCY
:
2373 return "internal inconsistency";
2376 assert(!"Reached end of function we should never hit");
2379 const char *nb_client_name(enum nb_client client
)
2384 case NB_CLIENT_CONFD
:
2386 case NB_CLIENT_SYSREPO
:
2388 case NB_CLIENT_GRPC
:
2390 case NB_CLIENT_PCEP
:
2392 case NB_CLIENT_NONE
:
2396 assert(!"Reached end of function we should never hit");
2399 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
2401 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
2402 struct nb_node
*nb_node
;
2405 if (i
> YANG_MODULE_MAX_NODES
) {
2407 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2408 __func__
, module
->name
, YANG_MODULE_MAX_NODES
);
2412 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
2414 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
2415 "%s: unknown data path: %s", __func__
,
2416 module
->nodes
[i
].xpath
);
2420 nb_node
->cbs
= module
->nodes
[i
].cbs
;
2421 priority
= module
->nodes
[i
].priority
;
2423 nb_node
->priority
= priority
;
2427 void nb_validate_callbacks(void)
2429 unsigned int errors
= 0;
2431 yang_snodes_iterate(NULL
, nb_node_validate
, 0, &errors
);
2434 EC_LIB_NB_CBS_VALIDATION
,
2435 "%s: failed to validate northbound callbacks: %u error(s)",
2442 void nb_init(struct thread_master
*tm
,
2443 const struct frr_yang_module_info
*const modules
[],
2444 size_t nmodules
, bool db_enabled
)
2446 struct yang_module
*loaded
[nmodules
], **loadedp
= loaded
;
2447 bool explicit_compile
;
2450 * Currently using this explicit compile feature in libyang2 leads to
2451 * incorrect behavior in FRR. The functionality suppresses the compiling
2452 * of modules until they have all been loaded into the context. This
2453 * avoids multiple recompiles of the same modules as they are
2454 * imported/augmented etc.
2456 explicit_compile
= false;
2458 nb_db_enabled
= db_enabled
;
2460 yang_init(true, explicit_compile
);
2462 /* Load YANG modules and their corresponding northbound callbacks. */
2463 for (size_t i
= 0; i
< nmodules
; i
++) {
2464 DEBUGD(&nb_dbg_events
, "northbound: loading %s.yang",
2466 *loadedp
++ = yang_module_load(modules
[i
]->name
);
2469 if (explicit_compile
)
2470 yang_init_loading_complete();
2472 /* Initialize the compiled nodes with northbound data */
2473 for (size_t i
= 0; i
< nmodules
; i
++) {
2474 yang_snodes_iterate(loaded
[i
]->info
, nb_node_new_cb
, 0, NULL
);
2475 nb_load_callbacks(modules
[i
]);
2478 /* Validate northbound callbacks. */
2479 nb_validate_callbacks();
2481 /* Create an empty running configuration. */
2482 running_config
= nb_config_new(NULL
);
2483 running_config_entries
= hash_create(running_config_entry_key_make
,
2484 running_config_entry_cmp
,
2485 "Running Configuration Entries");
2486 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
2488 /* Initialize the northbound CLI. */
2492 void nb_terminate(void)
2494 /* Terminate the northbound CLI. */
2497 /* Delete all nb_node's from all YANG modules. */
2500 /* Delete the running configuration. */
2501 hash_clean(running_config_entries
, running_config_entry_free
);
2502 hash_free(running_config_entries
);
2503 nb_config_free(running_config
);
2504 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);