2 * Copyright (C) 2018 NetDEF, Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "lib_errors.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
34 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node")
35 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration")
36 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry")
38 /* Running configuration - shouldn't be modified directly. */
39 struct nb_config
*running_config
;
41 /* Hash table of user pointers associated with configuration entries. */
42 static struct hash
*running_config_entries
;
44 /* Management lock for the running configuration. */
46 /* Mutex protecting this structure. */
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client
;
55 /* Northbound user who owns this lock. */
56 const void *owner_user
;
57 } running_config_mgmt_lock
;
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
63 static bool transaction_in_progress
;
65 static int nb_callback_configuration(const enum nb_event event
,
66 struct nb_config_change
*change
);
67 static void nb_log_callback(const enum nb_event event
,
68 enum nb_operation operation
, const char *xpath
,
70 static struct nb_transaction
*nb_transaction_new(struct nb_config
*config
,
71 struct nb_config_cbs
*changes
,
72 enum nb_client client
,
75 static void nb_transaction_free(struct nb_transaction
*transaction
);
76 static int nb_transaction_process(enum nb_event event
,
77 struct nb_transaction
*transaction
);
78 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
);
79 static int nb_oper_data_iter_node(const struct lys_node
*snode
,
80 const char *xpath
, const void *list_entry
,
81 const struct yang_list_keys
*list_keys
,
82 struct yang_translator
*translator
,
83 bool first
, uint32_t flags
,
84 nb_oper_data_cb cb
, void *arg
);
86 static int nb_node_check_config_only(const struct lys_node
*snode
, void *arg
)
88 bool *config_only
= arg
;
90 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
)) {
92 return YANG_ITER_STOP
;
95 return YANG_ITER_CONTINUE
;
98 static int nb_node_new_cb(const struct lys_node
*snode
, void *arg
)
100 struct nb_node
*nb_node
;
101 struct lys_node
*sparent
, *sparent_list
;
103 nb_node
= XCALLOC(MTYPE_NB_NODE
, sizeof(*nb_node
));
104 yang_snode_get_path(snode
, YANG_PATH_DATA
, nb_node
->xpath
,
105 sizeof(nb_node
->xpath
));
106 nb_node
->priority
= NB_DFLT_PRIORITY
;
107 sparent
= yang_snode_real_parent(snode
);
109 nb_node
->parent
= sparent
->priv
;
110 sparent_list
= yang_snode_parent_list(snode
);
112 nb_node
->parent_list
= sparent_list
->priv
;
115 if (CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
116 bool config_only
= true;
118 yang_snodes_iterate_subtree(snode
, nb_node_check_config_only
,
119 YANG_ITER_ALLOW_AUGMENTATIONS
,
122 SET_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
);
124 if (CHECK_FLAG(snode
->nodetype
, LYS_LIST
)) {
125 struct lys_node_list
*slist
;
127 slist
= (struct lys_node_list
*)snode
;
128 if (slist
->keys_size
== 0)
129 SET_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
);
133 * Link the northbound node and the libyang schema node with one
136 nb_node
->snode
= snode
;
137 lys_set_private(snode
, nb_node
);
139 return YANG_ITER_CONTINUE
;
142 static int nb_node_del_cb(const struct lys_node
*snode
, void *arg
)
144 struct nb_node
*nb_node
;
146 nb_node
= snode
->priv
;
147 lys_set_private(snode
, NULL
);
148 XFREE(MTYPE_NB_NODE
, nb_node
);
150 return YANG_ITER_CONTINUE
;
153 void nb_nodes_create(void)
155 yang_snodes_iterate_all(nb_node_new_cb
, 0, NULL
);
158 void nb_nodes_delete(void)
160 yang_snodes_iterate_all(nb_node_del_cb
, 0, NULL
);
163 struct nb_node
*nb_node_find(const char *xpath
)
165 const struct lys_node
*snode
;
168 * Use libyang to find the schema node associated to the xpath and get
169 * the northbound node from there (snode private pointer).
171 snode
= ly_ctx_get_node(ly_native_ctx
, NULL
, xpath
, 0);
178 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
179 enum nb_operation operation
,
180 int callback_implemented
, bool optional
)
184 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
186 if (!valid
&& callback_implemented
)
187 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
188 "unneeded '%s' callback for '%s'",
189 nb_operation_name(operation
), nb_node
->xpath
);
191 if (!optional
&& valid
&& !callback_implemented
) {
192 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
193 nb_operation_name(operation
), nb_node
->xpath
);
201 * Check if the required callbacks were implemented for the given northbound
204 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
207 unsigned int error
= 0;
209 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
210 !!nb_node
->cbs
.create
, false);
211 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
212 !!nb_node
->cbs
.modify
, false);
213 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
214 !!nb_node
->cbs
.destroy
, false);
215 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
217 error
+= nb_node_validate_cb(nb_node
, NB_OP_PRE_VALIDATE
,
218 !!nb_node
->cbs
.pre_validate
, true);
219 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
220 !!nb_node
->cbs
.apply_finish
, true);
221 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
222 !!nb_node
->cbs
.get_elem
, false);
223 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
224 !!nb_node
->cbs
.get_next
, false);
225 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
226 !!nb_node
->cbs
.get_keys
, false);
227 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
228 !!nb_node
->cbs
.lookup_entry
, false);
229 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
235 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
237 /* Top-level nodes can have any priority. */
238 if (!nb_node
->parent
)
241 if (nb_node
->priority
< nb_node
->parent
->priority
) {
242 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
243 "node has higher priority than its parent [xpath %s]",
251 static int nb_node_validate(const struct lys_node
*snode
, void *arg
)
253 struct nb_node
*nb_node
= snode
->priv
;
254 unsigned int *errors
= arg
;
256 /* Validate callbacks and priority. */
257 *errors
+= nb_node_validate_cbs(nb_node
);
258 *errors
+= nb_node_validate_priority(nb_node
);
260 return YANG_ITER_CONTINUE
;
263 struct nb_config
*nb_config_new(struct lyd_node
*dnode
)
265 struct nb_config
*config
;
267 config
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*config
));
269 config
->dnode
= dnode
;
271 config
->dnode
= yang_dnode_new(ly_native_ctx
, true);
273 pthread_rwlock_init(&config
->lock
, NULL
);
278 void nb_config_free(struct nb_config
*config
)
281 yang_dnode_free(config
->dnode
);
282 pthread_rwlock_destroy(&config
->lock
);
283 XFREE(MTYPE_NB_CONFIG
, config
);
286 struct nb_config
*nb_config_dup(const struct nb_config
*config
)
288 struct nb_config
*dup
;
290 dup
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*dup
));
291 dup
->dnode
= yang_dnode_dup(config
->dnode
);
292 dup
->version
= config
->version
;
293 pthread_rwlock_init(&dup
->lock
, NULL
);
298 int nb_config_merge(struct nb_config
*config_dst
, struct nb_config
*config_src
,
299 bool preserve_source
)
303 ret
= lyd_merge(config_dst
->dnode
, config_src
->dnode
, LYD_OPT_EXPLICIT
);
305 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_merge() failed", __func__
);
307 if (!preserve_source
)
308 nb_config_free(config_src
);
310 return (ret
== 0) ? NB_OK
: NB_ERR
;
313 void nb_config_replace(struct nb_config
*config_dst
,
314 struct nb_config
*config_src
, bool preserve_source
)
316 /* Update version. */
317 if (config_src
->version
!= 0)
318 config_dst
->version
= config_src
->version
;
321 if (config_dst
->dnode
)
322 yang_dnode_free(config_dst
->dnode
);
323 if (preserve_source
) {
324 config_dst
->dnode
= yang_dnode_dup(config_src
->dnode
);
326 config_dst
->dnode
= config_src
->dnode
;
327 config_src
->dnode
= NULL
;
328 nb_config_free(config_src
);
332 /* Generate the nb_config_cbs tree. */
333 static inline int nb_config_cb_compare(const struct nb_config_cb
*a
,
334 const struct nb_config_cb
*b
)
336 /* Sort by priority first. */
337 if (a
->nb_node
->priority
< b
->nb_node
->priority
)
339 if (a
->nb_node
->priority
> b
->nb_node
->priority
)
343 * Use XPath as a tie-breaker. This will naturally sort parent nodes
344 * before their children.
346 return strcmp(a
->xpath
, b
->xpath
);
348 RB_GENERATE(nb_config_cbs
, nb_config_cb
, entry
, nb_config_cb_compare
);
350 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
351 enum nb_operation operation
,
352 const struct lyd_node
*dnode
)
354 struct nb_config_change
*change
;
356 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
357 change
->cb
.operation
= operation
;
358 change
->cb
.nb_node
= dnode
->schema
->priv
;
359 yang_dnode_get_path(dnode
, change
->cb
.xpath
, sizeof(change
->cb
.xpath
));
360 change
->cb
.dnode
= dnode
;
362 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
365 static void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
367 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
368 struct nb_config_change
*change
;
370 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
372 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
373 XFREE(MTYPE_TMP
, change
);
378 * Helper function used when calculating the delta between two different
379 * configurations. Given a new subtree, calculate all new YANG data nodes,
380 * excluding default leafs and leaf-lists. This is a recursive function.
382 static void nb_config_diff_created(const struct lyd_node
*dnode
,
383 struct nb_config_cbs
*changes
)
385 enum nb_operation operation
;
386 struct lyd_node
*child
;
388 switch (dnode
->schema
->nodetype
) {
391 if (lyd_wd_default((struct lyd_node_leaf_list
*)dnode
))
394 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
395 operation
= NB_OP_CREATE
;
396 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
397 operation
= NB_OP_MODIFY
;
401 nb_config_diff_add_change(changes
, operation
, dnode
);
405 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
406 nb_config_diff_add_change(changes
, NB_OP_CREATE
, dnode
);
408 /* Process child nodes recursively. */
409 LY_TREE_FOR (dnode
->child
, child
) {
410 nb_config_diff_created(child
, changes
);
418 static void nb_config_diff_deleted(const struct lyd_node
*dnode
,
419 struct nb_config_cbs
*changes
)
421 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
422 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, dnode
);
423 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
424 struct lyd_node
*child
;
427 * Non-presence containers need special handling since they
428 * don't have "destroy" callbacks. In this case, what we need to
429 * do is to call the "destroy" callbacks of their child nodes
430 * when applicable (i.e. optional nodes).
432 LY_TREE_FOR (dnode
->child
, child
) {
433 nb_config_diff_deleted(child
, changes
);
438 /* Calculate the delta between two different configurations. */
439 static void nb_config_diff(const struct nb_config
*config1
,
440 const struct nb_config
*config2
,
441 struct nb_config_cbs
*changes
)
443 struct lyd_difflist
*diff
;
445 diff
= lyd_diff(config1
->dnode
, config2
->dnode
,
446 LYD_DIFFOPT_WITHDEFAULTS
);
449 for (int i
= 0; diff
->type
[i
] != LYD_DIFF_END
; i
++) {
451 struct lyd_node
*dnode
;
453 type
= diff
->type
[i
];
456 case LYD_DIFF_CREATED
:
457 dnode
= diff
->second
[i
];
458 nb_config_diff_created(dnode
, changes
);
460 case LYD_DIFF_DELETED
:
461 dnode
= diff
->first
[i
];
462 nb_config_diff_deleted(dnode
, changes
);
464 case LYD_DIFF_CHANGED
:
465 dnode
= diff
->second
[i
];
466 nb_config_diff_add_change(changes
, NB_OP_MODIFY
, dnode
);
468 case LYD_DIFF_MOVEDAFTER1
:
469 case LYD_DIFF_MOVEDAFTER2
:
478 int nb_candidate_edit(struct nb_config
*candidate
,
479 const struct nb_node
*nb_node
,
480 enum nb_operation operation
, const char *xpath
,
481 const struct yang_data
*previous
,
482 const struct yang_data
*data
)
484 struct lyd_node
*dnode
;
485 char xpath_edit
[XPATH_MAXLEN
];
487 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
488 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
489 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
492 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
498 dnode
= lyd_new_path(candidate
->dnode
, ly_native_ctx
,
499 xpath_edit
, (void *)data
->value
, 0,
500 LYD_PATH_OPT_UPDATE
);
501 if (!dnode
&& ly_errno
) {
502 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed",
508 * If a new node was created, call lyd_validate() only to create
509 * default child nodes.
512 lyd_schema_sort(dnode
, 0);
514 LYD_OPT_CONFIG
| LYD_OPT_WHENAUTODEL
,
519 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
522 * Return a special error code so the caller can choose
523 * whether to ignore it or not.
525 return NB_ERR_NOT_FOUND
;
529 /* TODO: update configuration. */
532 flog_warn(EC_LIB_DEVELOPMENT
,
533 "%s: unknown operation (%u) [xpath %s]", __func__
,
534 operation
, xpath_edit
);
541 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
545 pthread_rwlock_rdlock(&running_config
->lock
);
547 if (candidate
->version
< running_config
->version
)
550 pthread_rwlock_unlock(&running_config
->lock
);
555 int nb_candidate_update(struct nb_config
*candidate
)
557 struct nb_config
*updated_config
;
559 pthread_rwlock_rdlock(&running_config
->lock
);
561 updated_config
= nb_config_dup(running_config
);
563 pthread_rwlock_unlock(&running_config
->lock
);
565 if (nb_config_merge(updated_config
, candidate
, true) != NB_OK
)
568 nb_config_replace(candidate
, updated_config
, false);
574 * Perform YANG syntactic and semantic validation.
576 * WARNING: lyd_validate() can change the configuration as part of the
577 * validation process.
579 static int nb_candidate_validate_yang(struct nb_config
*candidate
)
581 if (lyd_validate(&candidate
->dnode
,
582 LYD_OPT_STRICT
| LYD_OPT_CONFIG
| LYD_OPT_WHENAUTODEL
,
585 return NB_ERR_VALIDATION
;
590 /* Perform code-level validation using the northbound callbacks. */
591 static int nb_candidate_validate_code(struct nb_config
*candidate
,
592 struct nb_config_cbs
*changes
)
594 struct nb_config_cb
*cb
;
595 struct lyd_node
*root
, *next
, *child
;
598 /* First validate the candidate as a whole. */
599 LY_TREE_FOR (candidate
->dnode
, root
) {
600 LY_TREE_DFS_BEGIN (root
, next
, child
) {
601 struct nb_node
*nb_node
;
603 nb_node
= child
->schema
->priv
;
604 if (!nb_node
->cbs
.pre_validate
)
607 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
,
609 char xpath
[XPATH_MAXLEN
];
611 yang_dnode_get_path(child
, xpath
,
613 nb_log_callback(NB_EV_VALIDATE
,
614 NB_OP_PRE_VALIDATE
, xpath
,
618 ret
= (*nb_node
->cbs
.pre_validate
)(child
);
620 return NB_ERR_VALIDATION
;
623 LY_TREE_DFS_END(root
, next
, child
);
627 /* Now validate the configuration changes. */
628 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
629 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
631 ret
= nb_callback_configuration(NB_EV_VALIDATE
, change
);
633 return NB_ERR_VALIDATION
;
639 int nb_candidate_validate(struct nb_config
*candidate
)
641 struct nb_config_cbs changes
;
644 if (nb_candidate_validate_yang(candidate
) != NB_OK
)
645 return NB_ERR_VALIDATION
;
647 RB_INIT(nb_config_cbs
, &changes
);
648 pthread_rwlock_rdlock(&running_config
->lock
);
650 nb_config_diff(running_config
, candidate
, &changes
);
651 ret
= nb_candidate_validate_code(candidate
, &changes
);
652 nb_config_diff_del_changes(&changes
);
654 pthread_rwlock_unlock(&running_config
->lock
);
659 int nb_candidate_commit_prepare(struct nb_config
*candidate
,
660 enum nb_client client
, const void *user
,
662 struct nb_transaction
**transaction
)
664 struct nb_config_cbs changes
;
666 if (nb_candidate_validate_yang(candidate
) != NB_OK
) {
667 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
668 "%s: failed to validate candidate configuration",
670 return NB_ERR_VALIDATION
;
673 RB_INIT(nb_config_cbs
, &changes
);
674 pthread_rwlock_rdlock(&running_config
->lock
);
676 nb_config_diff(running_config
, candidate
, &changes
);
677 if (RB_EMPTY(nb_config_cbs
, &changes
)) {
678 pthread_rwlock_unlock(&running_config
->lock
);
679 return NB_ERR_NO_CHANGES
;
682 if (nb_candidate_validate_code(candidate
, &changes
) != NB_OK
) {
684 EC_LIB_NB_CANDIDATE_INVALID
,
685 "%s: failed to validate candidate configuration",
687 nb_config_diff_del_changes(&changes
);
688 pthread_rwlock_unlock(&running_config
->lock
);
689 return NB_ERR_VALIDATION
;
692 *transaction
= nb_transaction_new(candidate
, &changes
, client
,
694 if (*transaction
== NULL
) {
695 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
696 "%s: failed to create transaction", __func__
);
697 nb_config_diff_del_changes(&changes
);
698 pthread_rwlock_unlock(&running_config
->lock
);
699 return NB_ERR_LOCKED
;
702 pthread_rwlock_unlock(&running_config
->lock
);
704 return nb_transaction_process(NB_EV_PREPARE
, *transaction
);
707 void nb_candidate_commit_abort(struct nb_transaction
*transaction
)
709 (void)nb_transaction_process(NB_EV_ABORT
, transaction
);
710 nb_transaction_free(transaction
);
713 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
714 bool save_transaction
, uint32_t *transaction_id
)
716 (void)nb_transaction_process(NB_EV_APPLY
, transaction
);
717 nb_transaction_apply_finish(transaction
);
719 /* Replace running by candidate. */
720 transaction
->config
->version
++;
721 pthread_rwlock_wrlock(&running_config
->lock
);
723 nb_config_replace(running_config
, transaction
->config
, true);
725 pthread_rwlock_unlock(&running_config
->lock
);
727 /* Record transaction. */
729 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
730 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
731 "%s: failed to record transaction", __func__
);
733 nb_transaction_free(transaction
);
736 int nb_candidate_commit(struct nb_config
*candidate
, enum nb_client client
,
737 const void *user
, bool save_transaction
,
738 const char *comment
, uint32_t *transaction_id
)
740 struct nb_transaction
*transaction
= NULL
;
743 ret
= nb_candidate_commit_prepare(candidate
, client
, user
, comment
,
746 * Apply the changes if the preparation phase succeeded. Otherwise abort
750 nb_candidate_commit_apply(transaction
, save_transaction
,
752 else if (transaction
!= NULL
)
753 nb_candidate_commit_abort(transaction
);
758 int nb_running_lock(enum nb_client client
, const void *user
)
762 frr_with_mutex(&running_config_mgmt_lock
.mtx
) {
763 if (!running_config_mgmt_lock
.locked
) {
764 running_config_mgmt_lock
.locked
= true;
765 running_config_mgmt_lock
.owner_client
= client
;
766 running_config_mgmt_lock
.owner_user
= user
;
774 int nb_running_unlock(enum nb_client client
, const void *user
)
778 frr_with_mutex(&running_config_mgmt_lock
.mtx
) {
779 if (running_config_mgmt_lock
.locked
780 && running_config_mgmt_lock
.owner_client
== client
781 && running_config_mgmt_lock
.owner_user
== user
) {
782 running_config_mgmt_lock
.locked
= false;
783 running_config_mgmt_lock
.owner_client
= NB_CLIENT_NONE
;
784 running_config_mgmt_lock
.owner_user
= NULL
;
792 int nb_running_lock_check(enum nb_client client
, const void *user
)
796 frr_with_mutex(&running_config_mgmt_lock
.mtx
) {
797 if (!running_config_mgmt_lock
.locked
798 || (running_config_mgmt_lock
.owner_client
== client
799 && running_config_mgmt_lock
.owner_user
== user
))
806 static void nb_log_callback(const enum nb_event event
,
807 enum nb_operation operation
, const char *xpath
,
811 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
812 nb_event_name(event
), nb_operation_name(operation
), xpath
,
813 value
? value
: "(NULL)");
817 * Call the northbound configuration callback associated to a given
818 * configuration change.
820 static int nb_callback_configuration(const enum nb_event event
,
821 struct nb_config_change
*change
)
823 enum nb_operation operation
= change
->cb
.operation
;
824 const char *xpath
= change
->cb
.xpath
;
825 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
826 const struct lyd_node
*dnode
= change
->cb
.dnode
;
827 union nb_resource
*resource
;
830 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
831 const char *value
= "(none)";
833 if (dnode
&& !yang_snode_is_typeless_data(dnode
->schema
))
834 value
= yang_dnode_get_string(dnode
, NULL
);
836 nb_log_callback(event
, operation
, xpath
, value
);
839 if (event
== NB_EV_VALIDATE
)
842 resource
= &change
->resource
;
846 ret
= (*nb_node
->cbs
.create
)(event
, dnode
, resource
);
849 ret
= (*nb_node
->cbs
.modify
)(event
, dnode
, resource
);
852 ret
= (*nb_node
->cbs
.destroy
)(event
, dnode
);
855 ret
= (*nb_node
->cbs
.move
)(event
, dnode
);
858 flog_err(EC_LIB_DEVELOPMENT
,
859 "%s: unknown operation (%u) [xpath %s]", __func__
,
866 enum lib_log_refs ref
;
870 priority
= LOG_WARNING
;
871 ref
= EC_LIB_NB_CB_CONFIG_VALIDATE
;
874 priority
= LOG_WARNING
;
875 ref
= EC_LIB_NB_CB_CONFIG_PREPARE
;
878 priority
= LOG_WARNING
;
879 ref
= EC_LIB_NB_CB_CONFIG_ABORT
;
883 ref
= EC_LIB_NB_CB_CONFIG_APPLY
;
886 flog_err(EC_LIB_DEVELOPMENT
,
887 "%s: unknown event (%u) [xpath %s]",
888 __func__
, event
, xpath
);
893 "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
894 __func__
, nb_err_name(ret
), nb_event_name(event
),
895 nb_operation_name(operation
), xpath
);
901 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
903 const void *list_entry
)
905 DEBUGD(&nb_dbg_cbs_state
,
906 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
909 return nb_node
->cbs
.get_elem(xpath
, list_entry
);
912 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
913 const void *parent_list_entry
,
914 const void *list_entry
)
916 DEBUGD(&nb_dbg_cbs_state
,
917 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
918 nb_node
->xpath
, parent_list_entry
, list_entry
);
920 return nb_node
->cbs
.get_next(parent_list_entry
, list_entry
);
923 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
924 struct yang_list_keys
*keys
)
926 DEBUGD(&nb_dbg_cbs_state
,
927 "northbound callback (get_keys): node [%s] list_entry [%p]",
928 nb_node
->xpath
, list_entry
);
930 return nb_node
->cbs
.get_keys(list_entry
, keys
);
933 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
934 const void *parent_list_entry
,
935 const struct yang_list_keys
*keys
)
937 DEBUGD(&nb_dbg_cbs_state
,
938 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
939 nb_node
->xpath
, parent_list_entry
);
941 return nb_node
->cbs
.lookup_entry(parent_list_entry
, keys
);
944 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
945 const struct list
*input
, struct list
*output
)
947 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
949 return nb_node
->cbs
.rpc(xpath
, input
, output
);
952 static struct nb_transaction
*
953 nb_transaction_new(struct nb_config
*config
, struct nb_config_cbs
*changes
,
954 enum nb_client client
, const void *user
, const char *comment
)
956 struct nb_transaction
*transaction
;
958 if (nb_running_lock_check(client
, user
)) {
960 EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
961 "%s: running configuration is locked by another client",
966 if (transaction_in_progress
) {
968 EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
969 "%s: error - there's already another transaction in progress",
973 transaction_in_progress
= true;
975 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
976 transaction
->client
= client
;
978 strlcpy(transaction
->comment
, comment
,
979 sizeof(transaction
->comment
));
980 transaction
->config
= config
;
981 transaction
->changes
= *changes
;
986 static void nb_transaction_free(struct nb_transaction
*transaction
)
988 nb_config_diff_del_changes(&transaction
->changes
);
989 XFREE(MTYPE_TMP
, transaction
);
990 transaction_in_progress
= false;
993 /* Process all configuration changes associated to a transaction. */
994 static int nb_transaction_process(enum nb_event event
,
995 struct nb_transaction
*transaction
)
997 struct nb_config_cb
*cb
;
1000 * Need to lock the running configuration since transaction->changes
1001 * can contain pointers to data nodes from the running configuration.
1003 pthread_rwlock_rdlock(&running_config
->lock
);
1005 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1006 struct nb_config_change
*change
=
1007 (struct nb_config_change
*)cb
;
1011 * Only try to release resources that were allocated
1014 if (event
== NB_EV_ABORT
&& change
->prepare_ok
== false)
1017 /* Call the appropriate callback. */
1018 ret
= nb_callback_configuration(event
, change
);
1022 pthread_rwlock_unlock(
1023 &running_config
->lock
);
1026 change
->prepare_ok
= true;
1031 * At this point it's not possible to reject the
1032 * transaction anymore, so any failure here can
1033 * lead to inconsistencies and should be treated
1034 * as a bug. Operations prone to errors, like
1035 * validations and resource allocations, should
1036 * be performed during the 'prepare' phase.
1044 pthread_rwlock_unlock(&running_config
->lock
);
1049 static struct nb_config_cb
*
1050 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const char *xpath
,
1051 const struct nb_node
*nb_node
,
1052 const struct lyd_node
*dnode
)
1054 struct nb_config_cb
*cb
;
1056 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1057 strlcpy(cb
->xpath
, xpath
, sizeof(cb
->xpath
));
1058 cb
->nb_node
= nb_node
;
1060 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1065 static struct nb_config_cb
*
1066 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
, const char *xpath
,
1067 const struct nb_node
*nb_node
)
1069 struct nb_config_cb s
;
1071 strlcpy(s
.xpath
, xpath
, sizeof(s
.xpath
));
1072 s
.nb_node
= nb_node
;
1073 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1076 /* Call the 'apply_finish' callbacks. */
1077 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
)
1079 struct nb_config_cbs cbs
;
1080 struct nb_config_cb
*cb
;
1082 /* Initialize tree of 'apply_finish' callbacks. */
1083 RB_INIT(nb_config_cbs
, &cbs
);
1085 /* Identify the 'apply_finish' callbacks that need to be called. */
1086 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1087 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1088 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1091 * Iterate up to the root of the data tree. When a node is being
1092 * deleted, skip its 'apply_finish' callback if one is defined
1093 * (the 'apply_finish' callbacks from the node ancestors should
1094 * be called though).
1096 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1097 char xpath
[XPATH_MAXLEN
];
1099 dnode
= dnode
->parent
;
1104 * The dnode from 'delete' callbacks point to elements
1105 * from the running configuration. Use yang_dnode_get()
1106 * to get the corresponding dnode from the candidate
1107 * configuration that is being committed.
1109 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1110 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1114 char xpath
[XPATH_MAXLEN
];
1115 struct nb_node
*nb_node
;
1117 nb_node
= dnode
->schema
->priv
;
1118 if (!nb_node
->cbs
.apply_finish
)
1122 * Don't call the callback more than once for the same
1125 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1126 if (nb_apply_finish_cb_find(&cbs
, xpath
, nb_node
))
1129 nb_apply_finish_cb_new(&cbs
, xpath
, nb_node
, dnode
);
1132 dnode
= dnode
->parent
;
1136 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1137 RB_FOREACH (cb
, nb_config_cbs
, &cbs
) {
1138 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
1139 nb_log_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
,
1142 (*cb
->nb_node
->cbs
.apply_finish
)(cb
->dnode
);
1145 /* Release memory. */
1146 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1147 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1148 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1149 XFREE(MTYPE_TMP
, cb
);
1153 static int nb_oper_data_iter_children(const struct lys_node
*snode
,
1154 const char *xpath
, const void *list_entry
,
1155 const struct yang_list_keys
*list_keys
,
1156 struct yang_translator
*translator
,
1157 bool first
, uint32_t flags
,
1158 nb_oper_data_cb cb
, void *arg
)
1160 struct lys_node
*child
;
1162 LY_TREE_FOR (snode
->child
, child
) {
1165 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1166 list_keys
, translator
, false,
1175 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1176 const char *xpath
, const void *list_entry
,
1177 const struct yang_list_keys
*list_keys
,
1178 struct yang_translator
*translator
,
1179 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1181 struct yang_data
*data
;
1183 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1186 /* Ignore list keys. */
1187 if (lys_is_key((struct lys_node_leaf
*)nb_node
->snode
, NULL
))
1190 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1192 /* Leaf of type "empty" is not present. */
1195 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1198 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1200 const void *list_entry
,
1201 const struct yang_list_keys
*list_keys
,
1202 struct yang_translator
*translator
,
1203 uint32_t flags
, nb_oper_data_cb cb
,
1206 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1209 /* Presence containers. */
1210 if (nb_node
->cbs
.get_elem
) {
1211 struct yang_data
*data
;
1214 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1216 /* Presence container is not present. */
1219 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1224 /* Iterate over the child nodes. */
1225 return nb_oper_data_iter_children(nb_node
->snode
, xpath
, list_entry
,
1226 list_keys
, translator
, false, flags
,
1231 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1232 const void *parent_list_entry
,
1233 const struct yang_list_keys
*parent_list_keys
,
1234 struct yang_translator
*translator
, uint32_t flags
,
1235 nb_oper_data_cb cb
, void *arg
)
1237 const void *list_entry
= NULL
;
1239 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1243 struct yang_data
*data
;
1246 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1249 /* End of the list. */
1252 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1256 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1259 } while (list_entry
);
/*
 * Iterate over the operational data of a YANG list node.
 *
 * Config-only lists are skipped.  For each entry obtained via get_next,
 * the entry's XPath is built from xpath_list: keyed lists append one
 * "[key='value']" predicate per key (keys fetched with get_keys); keyless
 * lists (F_NB_NODE_KEYLESS_LIST) append a positional "[index]" predicate
 * instead.  The children of each entry are then iterated recursively.
 */
1264 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1265 const char *xpath_list
,
1266 const void *parent_list_entry
,
1267 const struct yang_list_keys
*parent_list_keys
,
1268 struct yang_translator
*translator
,
1269 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1271 struct lys_node_list
*slist
= (struct lys_node_list
*)nb_node
->snode
;
1272 const void *list_entry
= NULL
;
/* 1-based positional index, used only for keyless lists. */
1273 uint32_t position
= 1;
1275 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1278 /* Iterate over all list entries. */
1280 struct yang_list_keys list_keys
;
/* Room for the list XPath plus the appended key predicates. */
1281 char xpath
[XPATH_MAXLEN
* 2];
1284 /* Obtain list entry. */
1285 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1288 /* End of the list. */
1291 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1292 /* Obtain the list entry keys. */
1293 if (nb_callback_get_keys(nb_node
, list_entry
,
1296 flog_warn(EC_LIB_NB_CB_STATE
,
1297 "%s: failed to get list keys",
1302 /* Build XPath of the list entry. */
1303 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1304 for (unsigned int i
= 0; i
< list_keys
.num
; i
++) {
1305 snprintf(xpath
+ strlen(xpath
),
1306 sizeof(xpath
) - strlen(xpath
),
1307 "[%s='%s']", slist
->keys
[i
]->name
,
1312 * Keyless list - build XPath using a positional index.
1314 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
1319 /* Iterate over the child nodes. */
1320 ret
= nb_oper_data_iter_children(
1321 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1322 translator
, false, flags
, cb
, arg
);
1325 } while (list_entry
);
/*
 * Iterate over the operational data of a single schema node, dispatching
 * by node type to the container/leaf/leaf-list/list iterators.
 *
 * When NB_OPER_DATA_ITER_NORECURSE is set, containers and lists below
 * the starting node ("first" == false) are not descended into.  The
 * node's XPath is derived from xpath_parent by appending "/<name>"
 * (except for the starting node and LYS_USES nodes, which do not add a
 * path component).
 */
1330 static int nb_oper_data_iter_node(const struct lys_node
*snode
,
1331 const char *xpath_parent
,
1332 const void *list_entry
,
1333 const struct yang_list_keys
*list_keys
,
1334 struct yang_translator
*translator
,
1335 bool first
, uint32_t flags
,
1336 nb_oper_data_cb cb
, void *arg
)
1338 struct nb_node
*nb_node
;
1339 char xpath
[XPATH_MAXLEN
];
/* Honor the no-recurse flag for nested containers/lists. */
1342 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
1343 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
/* Build the node's XPath from its parent's XPath. */
1347 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
1348 if (!first
&& snode
->nodetype
!= LYS_USES
)
1349 snprintf(xpath
+ strlen(xpath
), sizeof(xpath
) - strlen(xpath
),
1350 "/%s", snode
->name
);
/* The northbound node was stored in the schema node's priv pointer. */
1352 nb_node
= snode
->priv
;
1353 switch (snode
->nodetype
) {
1355 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
1356 list_keys
, translator
, flags
,
1360 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
1361 list_keys
, translator
, flags
, cb
,
1365 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
1366 list_keys
, translator
, flags
,
1370 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
1371 list_keys
, translator
, flags
, cb
,
/* Default: iterate straight into the children (e.g. choice/case). */
1375 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
1376 list_keys
, translator
, false,
/*
 * Public entry point: iterate over operational data starting at "xpath",
 * invoking "cb" for every data element found.
 *
 * Works only on containers and lists (leafs/leaf-lists are rejected with
 * a warning).  A temporary libyang data tree is instantiated from the
 * XPath so the keys of all ancestor lists can be parsed; the ancestor
 * list nodes are then sorted root-first into "list_dnodes" and resolved
 * to list-entry pointers through the lookup_entry callbacks.  If any
 * lookup fails, NB_ERR_NOT_FOUND is returned.  Finally the iteration is
 * started either on the given list entry only (when the XPath addressed
 * a specific keyed entry) or on the node itself.
 *
 * The temporary data tree and the helper list are freed on all exit
 * paths visible here.
 */
1386 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
1387 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1389 struct nb_node
*nb_node
;
1390 const void *list_entry
= NULL
;
1391 struct yang_list_keys list_keys
;
1392 struct list
*list_dnodes
;
1393 struct lyd_node
*dnode
, *dn
;
1394 struct listnode
*ln
;
/* Resolve the schema node backing the given XPath. */
1397 nb_node
= nb_node_find(xpath
);
1399 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1400 "%s: unknown data path: %s", __func__
, xpath
);
1404 /* For now this function works only with containers and lists. */
1405 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
1407 EC_LIB_NB_OPERATIONAL_DATA
,
1408 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1414 * Create a data tree from the XPath so that we can parse the keys of
1415 * all YANG lists (if any).
1418 dnode
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
, 0,
1419 LYD_PATH_OPT_UPDATE
| LYD_PATH_OPT_NOPARENTRET
);
1421 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed",
1427 * Create a linked list to sort the data nodes starting from the root.
1429 list_dnodes
= list_new();
/* Walk up the ancestry, collecting keyed list nodes root-first. */
1430 for (dn
= dnode
; dn
; dn
= dn
->parent
) {
1431 if (dn
->schema
->nodetype
!= LYS_LIST
|| !dn
->child
)
1433 listnode_add_head(list_dnodes
, dn
);
1436 * Use the northbound callbacks to find list entry pointer corresponding
1437 * to the given XPath.
1439 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
1440 struct lyd_node
*child
;
1444 /* Obtain the list entry keys. */
1445 memset(&list_keys
, 0, sizeof(list_keys
));
1446 LY_TREE_FOR (dn
->child
, child
) {
1447 if (!lys_is_key((struct lys_node_leaf
*)child
->schema
,
1450 strlcpy(list_keys
.key
[n
],
1451 yang_dnode_get_string(child
, NULL
),
1452 sizeof(list_keys
.key
[n
]));
/* All keys must have been provided; bail out otherwise. */
1457 != ((struct lys_node_list
*)dn
->schema
)->keys_size
) {
1458 list_delete(&list_dnodes
);
1459 yang_dnode_free(dnode
);
1460 return NB_ERR_NOT_FOUND
;
1463 /* Find the list entry pointer. */
1464 nn
= dn
->schema
->priv
;
1466 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
1467 if (list_entry
== NULL
) {
1468 list_delete(&list_dnodes
);
1469 yang_dnode_free(dnode
);
1470 return NB_ERR_NOT_FOUND
;
1474 /* If a list entry was given, iterate over that list entry only. */
1475 if (dnode
->schema
->nodetype
== LYS_LIST
&& dnode
->child
)
1476 ret
= nb_oper_data_iter_children(
1477 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1478 translator
, true, flags
, cb
, arg
);
1480 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
1481 &list_keys
, translator
, true,
/* Clean up the temporary structures before returning. */
1484 list_delete(&list_dnodes
);
1485 yang_dnode_free(dnode
);
/*
 * Check whether a given northbound operation makes sense for a given
 * schema node.
 *
 * The checks combine the node's config/state nature (LYS_CONFIG_W vs
 * LYS_CONFIG_R) with its type: e.g. create applies only to empty leafs
 * and presence containers; modify excludes list keys; destroy excludes
 * mandatory leafs and list keys; move requires user-ordered lists;
 * get_next/get_keys/lookup_entry apply to (keyed, non-config-only)
 * state lists.  Used to validate the callbacks registered per node.
 */
1490 bool nb_operation_is_valid(enum nb_operation operation
,
1491 const struct lys_node
*snode
)
1493 struct nb_node
*nb_node
= snode
->priv
;
1494 struct lys_node_container
*scontainer
;
1495 struct lys_node_leaf
*sleaf
;
1497 switch (operation
) {
/* Create: configuration nodes only. */
1499 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1502 switch (snode
->nodetype
) {
/* Only leafs of type "empty" can be created. */
1504 sleaf
= (struct lys_node_leaf
*)snode
;
1505 if (sleaf
->type
.base
!= LY_TYPE_EMPTY
)
/* Only presence containers can be created. */
1509 scontainer
= (struct lys_node_container
*)snode
;
1510 if (!scontainer
->presence
)
/* Modify: configuration nodes only. */
1521 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1524 switch (snode
->nodetype
) {
1526 sleaf
= (struct lys_node_leaf
*)snode
;
1527 if (sleaf
->type
.base
== LY_TYPE_EMPTY
)
1530 /* List keys can't be modified. */
1531 if (lys_is_key(sleaf
, NULL
))
/* Destroy: configuration nodes only. */
1539 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1542 switch (snode
->nodetype
) {
1544 sleaf
= (struct lys_node_leaf
*)snode
;
1546 /* List keys can't be deleted. */
1547 if (lys_is_key(sleaf
, NULL
))
1551 * Only optional leafs can be deleted, or leafs whose
1552 * parent is a case statement.
1554 if (snode
->parent
->nodetype
== LYS_CASE
)
1558 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
/* Only presence containers can be destroyed. */
1563 scontainer
= (struct lys_node_container
*)snode
;
1564 if (!scontainer
->presence
)
/* Move: user-ordered configuration lists/leaf-lists only. */
1575 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1578 switch (snode
->nodetype
) {
1581 if (!CHECK_FLAG(snode
->flags
, LYS_USERORDERED
))
1588 case NB_OP_PRE_VALIDATE
:
1589 case NB_OP_APPLY_FINISH
:
/* Pre-validate/apply-finish: configuration nodes only. */
1590 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1593 case NB_OP_GET_ELEM
:
/* Get-elem: state (operational) nodes only. */
1594 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
1597 switch (snode
->nodetype
) {
1602 scontainer
= (struct lys_node_container
*)snode
;
1603 if (!scontainer
->presence
)
1610 case NB_OP_GET_NEXT
:
1611 switch (snode
->nodetype
) {
1613 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1617 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1624 case NB_OP_GET_KEYS
:
1625 case NB_OP_LOOKUP_ENTRY
:
1626 switch (snode
->nodetype
) {
/* Keys/lookup make no sense for config-only or keyless lists. */
1628 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1630 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
/* RPC/notification nodes carry neither config nor state flags. */
1638 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
1641 switch (snode
->nodetype
) {
/*
 * Hook point invoked for every northbound notification; northbound
 * clients register handlers here to forward notifications onward.
 */
1654 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
1655 (xpath
, arguments
));
/*
 * Send a YANG notification identified by "xpath" with the given list of
 * arguments.  The registered nb_notification_send hooks are called and
 * the arguments list is deleted afterwards (ownership is taken here).
 */
1657 int nb_notification_send(const char *xpath
, struct list
*arguments
)
1661 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
1663 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
/* The arguments list is consumed by this function. */
1665 list_delete(&arguments
);
1670 /* Running configuration user pointers management. */
/*
 * Hash table element associating a data node's XPath with a user
 * pointer.  NOTE(review): the extraction dropped the struct's remaining
 * member line(s); an "entry" user pointer is referenced by the accessor
 * functions below.
 */
1671 struct nb_config_entry
{
/* XPath of the data node this entry is attached to (hash key). */
1672 char xpath
[XPATH_MAXLEN
];
1676 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
1678 const struct nb_config_entry
*c1
= value1
;
1679 const struct nb_config_entry
*c2
= value2
;
1681 return strmatch(c1
->xpath
, c2
->xpath
);
/*
 * Hash table key function: hash the entry's XPath string (the XPath is
 * the first member of struct nb_config_entry, so the entry pointer can
 * be hashed directly as a string).
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
/*
 * Hash table allocation function: create a new running-config entry,
 * copying the XPath from the lookup key "p".
 *
 * NOTE(review): the extraction dropped the trailing line(s) of this
 * function (presumably "return new;"); the fragment below is incomplete.
 */
1689 static void *running_config_entry_alloc(void *p
)
1691 struct nb_config_entry
*new, *key
= p
;
1693 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
1694 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
1699 static void running_config_entry_free(void *arg
)
1701 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
/*
 * Associate a user pointer ("entry") with a data node of the running
 * configuration.  The node's XPath is used as the hash key; an entry is
 * created on first use (hash_get with running_config_entry_alloc) and
 * its user pointer is overwritten on subsequent calls.
 */
1704 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
1706 struct nb_config_entry
*config
, s
;
/* Use a stack-allocated entry "s" as the hash lookup key. */
1708 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
1709 config
= hash_get(running_config_entries
, &s
,
1710 running_config_entry_alloc
);
1711 config
->entry
= entry
;
/*
 * Remove the user pointer associated with "dnode" (if any) from the
 * running-config hash and free the hash entry.  For lists and
 * containers, recursively unsets the user pointers of all child nodes
 * as well.  Returns the user pointer that was removed.
 */
1714 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
1716 struct nb_config_entry
*config
, s
;
1717 struct lyd_node
*child
;
/* Look up and detach the entry keyed by the node's XPath. */
1720 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
1721 config
= hash_release(running_config_entries
, &s
);
1723 entry
= config
->entry
;
1724 running_config_entry_free(config
);
1727 /* Unset user pointers from the child nodes. */
1728 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
1729 LY_TREE_FOR (dnode
->child
, child
) {
1730 (void)nb_running_unset_entry_helper(child
);
/*
 * Public wrapper around nb_running_unset_entry_helper(): unset and
 * return the user pointer associated with the given data node.
 */
1737 void *nb_running_unset_entry(const struct lyd_node
*dnode
)
1741 entry
= nb_running_unset_entry_helper(dnode
);
/*
 * Find the user pointer associated with a data node of the running
 * configuration.
 *
 * Either "dnode" or "xpath" must be given (asserted); when only the
 * XPath is provided, the node is first resolved in running_config.  The
 * lookup then walks up the node's ancestry until a hash entry is found.
 * If nothing is found: returns quietly when abort_if_not_found is
 * false; otherwise logs an error with a backtrace (and, per the
 * function's name, presumably aborts -- the trailing lines were dropped
 * by the extraction, so confirm against the original source).
 */
1747 void *nb_running_get_entry(const struct lyd_node
*dnode
, const char *xpath
,
1748 bool abort_if_not_found
)
1750 const struct lyd_node
*orig_dnode
= dnode
;
1751 char xpath_buf
[XPATH_MAXLEN
];
1753 assert(dnode
|| xpath
);
/* Resolve the data node from the XPath when necessary. */
1756 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
/* Walk up the ancestry looking for an associated user pointer. */
1759 struct nb_config_entry
*config
, s
;
1761 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
1762 config
= hash_lookup(running_config_entries
, &s
);
1764 return config
->entry
;
1766 dnode
= dnode
->parent
;
1769 if (!abort_if_not_found
)
/* Report the failure using the original node's XPath. */
1772 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
1773 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
1774 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
1775 zlog_backtrace(LOG_ERR
);
1779 /* Logging functions. */
/*
 * Return a human-readable name for a northbound event.  NOTE(review):
 * only the NB_EV_VALIDATE case survived the extraction; the remaining
 * cases and return strings are missing from this view.
 */
1780 const char *nb_event_name(enum nb_event event
)
1783 case NB_EV_VALIDATE
:
/*
 * Return a human-readable name for a northbound operation (used in log
 * and debug messages).  Several case labels/returns were dropped by the
 * extraction; the visible mapping is operation enum -> short string.
 */
1796 const char *nb_operation_name(enum nb_operation operation
)
1798 switch (operation
) {
1807 case NB_OP_PRE_VALIDATE
:
1808 return "pre_validate";
1809 case NB_OP_APPLY_FINISH
:
1810 return "apply_finish";
1811 case NB_OP_GET_ELEM
:
1813 case NB_OP_GET_NEXT
:
1815 case NB_OP_GET_KEYS
:
1817 case NB_OP_LOOKUP_ENTRY
:
1818 return "lookup_entry";
/*
 * Return a human-readable description for a northbound error code.
 */
1826 const char *nb_err_name(enum nb_error error
)
1832 return "generic error";
1833 case NB_ERR_NO_CHANGES
:
1834 return "no changes";
1835 case NB_ERR_NOT_FOUND
:
1836 return "element not found";
1838 return "resource is locked";
1839 case NB_ERR_VALIDATION
:
1840 return "validation error";
1841 case NB_ERR_RESOURCE
:
1842 return "failed to allocate resource";
1843 case NB_ERR_INCONSISTENCY
:
1844 return "internal inconsistency";
/*
 * Return a human-readable name for a northbound client (CLI, ConfD,
 * Sysrepo, gRPC, ...).  The return strings for the visible cases were
 * dropped by the extraction.
 */
1850 const char *nb_client_name(enum nb_client client
)
1855 case NB_CLIENT_CONFD
:
1857 case NB_CLIENT_SYSREPO
:
1859 case NB_CLIENT_GRPC
:
/*
 * Load the northbound callbacks declared by a FRR YANG module into the
 * corresponding nb_node structures.
 *
 * For each node definition in the module (terminated by a NULL xpath),
 * the matching nb_node is looked up by XPath; unknown paths are logged
 * and skipped.  The node's callback set and priority are then copied in.
 */
1866 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
1868 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
1869 struct nb_node
*nb_node
;
1872 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
1874 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1875 "%s: unknown data path: %s", __func__
,
1876 module
->nodes
[i
].xpath
);
/* Copy the module-provided callbacks and priority into the node. */
1880 nb_node
->cbs
= module
->nodes
[i
].cbs
;
1881 priority
= module
->nodes
[i
].priority
;
1883 nb_node
->priority
= priority
;
/*
 * Initialize the northbound layer.
 *
 * Steps visible here: load the given YANG modules; create nb_node
 * structures for all schema nodes; load each module's northbound
 * callbacks; validate all callbacks (aborting with an error log if any
 * validation errors were counted); create the empty running
 * configuration and its user-pointer hash; initialize the management
 * lock mutex; and initialize the northbound CLI.
 */
1887 void nb_init(struct thread_master
*tm
,
1888 const struct frr_yang_module_info
*modules
[], size_t nmodules
)
1890 unsigned int errors
= 0;
1892 /* Load YANG modules. */
1893 for (size_t i
= 0; i
< nmodules
; i
++)
1894 yang_module_load(modules
[i
]->name
);
1896 /* Create a nb_node for all YANG schema nodes. */
1899 /* Load northbound callbacks. */
1900 for (size_t i
= 0; i
< nmodules
; i
++)
1901 nb_load_callbacks(modules
[i
]);
1903 /* Validate northbound callbacks. */
1904 yang_snodes_iterate_all(nb_node_validate
, 0, &errors
);
1907 EC_LIB_NB_CBS_VALIDATION
,
1908 "%s: failed to validate northbound callbacks: %u error(s)",
1913 /* Create an empty running configuration. */
1914 running_config
= nb_config_new(NULL
);
1915 running_config_entries
= hash_create(running_config_entry_key_make
,
1916 running_config_entry_cmp
,
1917 "Running Configuration Entries");
1918 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
1920 /* Initialize the northbound CLI. */
/*
 * Tear down the northbound layer: shut down the northbound CLI, delete
 * all nb_node structures, then free the running configuration -- its
 * user-pointer hash (entries freed with running_config_entry_free), the
 * configuration itself, and the management-lock mutex.
 */
1924 void nb_terminate(void)
1926 /* Terminate the northbound CLI. */
1929 /* Delete all nb_node's from all YANG modules. */
1932 /* Delete the running configuration. */
1933 hash_clean(running_config_entries
, running_config_entry_free
);
1934 hash_free(running_config_entries
);
1935 nb_config_free(running_config
);
1936 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);