/*
 * Copyright (C) 2018  NetDEF, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include "lib_errors.h"
29 #include "northbound.h"
30 #include "northbound_cli.h"
31 #include "northbound_db.h"
33 DEFINE_MTYPE_STATIC(LIB
, NB_NODE
, "Northbound Node")
34 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG
, "Northbound Configuration")
35 DEFINE_MTYPE_STATIC(LIB
, NB_CONFIG_ENTRY
, "Northbound Configuration Entry")
37 /* Running configuration - shouldn't be modified directly. */
38 struct nb_config
*running_config
;
40 /* Hash table of user pointers associated with configuration entries. */
41 static struct hash
*running_config_entries
;
43 /* Management lock for the running configuration. */
45 /* Mutex protecting this structure. */
51 /* Northbound client who owns this lock. */
52 enum nb_client owner_client
;
54 /* Northbound user who owns this lock. */
55 const void *owner_user
;
56 } running_config_mgmt_lock
;
59 * Global lock used to prevent multiple configuration transactions from
60 * happening concurrently.
62 static bool transaction_in_progress
;
64 static int nb_callback_configuration(const enum nb_event event
,
65 struct nb_config_change
*change
);
66 static struct nb_transaction
*nb_transaction_new(struct nb_config
*config
,
67 struct nb_config_cbs
*changes
,
68 enum nb_client client
,
71 static void nb_transaction_free(struct nb_transaction
*transaction
);
72 static int nb_transaction_process(enum nb_event event
,
73 struct nb_transaction
*transaction
);
74 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
);
75 static int nb_oper_data_iter_node(const struct lys_node
*snode
,
76 const char *xpath
, const void *list_entry
,
77 const struct yang_list_keys
*list_keys
,
78 struct yang_translator
*translator
,
79 bool first
, uint32_t flags
,
80 nb_oper_data_cb cb
, void *arg
);
82 static int nb_node_check_config_only(const struct lys_node
*snode
, void *arg
)
84 bool *config_only
= arg
;
86 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
)) {
88 return YANG_ITER_STOP
;
91 return YANG_ITER_CONTINUE
;
94 static int nb_node_new_cb(const struct lys_node
*snode
, void *arg
)
96 struct nb_node
*nb_node
;
97 struct lys_node
*sparent
, *sparent_list
;
99 nb_node
= XCALLOC(MTYPE_NB_NODE
, sizeof(*nb_node
));
100 yang_snode_get_path(snode
, YANG_PATH_DATA
, nb_node
->xpath
,
101 sizeof(nb_node
->xpath
));
102 nb_node
->priority
= NB_DFLT_PRIORITY
;
103 sparent
= yang_snode_real_parent(snode
);
105 nb_node
->parent
= sparent
->priv
;
106 sparent_list
= yang_snode_parent_list(snode
);
108 nb_node
->parent_list
= sparent_list
->priv
;
111 if (CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
112 bool config_only
= true;
114 yang_snodes_iterate_subtree(snode
, nb_node_check_config_only
,
115 YANG_ITER_ALLOW_AUGMENTATIONS
,
118 SET_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
);
120 if (CHECK_FLAG(snode
->nodetype
, LYS_LIST
)) {
121 struct lys_node_list
*slist
;
123 slist
= (struct lys_node_list
*)snode
;
124 if (slist
->keys_size
== 0)
125 SET_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
);
129 * Link the northbound node and the libyang schema node with one
132 nb_node
->snode
= snode
;
133 lys_set_private(snode
, nb_node
);
135 return YANG_ITER_CONTINUE
;
138 static int nb_node_del_cb(const struct lys_node
*snode
, void *arg
)
140 struct nb_node
*nb_node
;
142 nb_node
= snode
->priv
;
143 lys_set_private(snode
, NULL
);
144 XFREE(MTYPE_NB_NODE
, nb_node
);
146 return YANG_ITER_CONTINUE
;
149 void nb_nodes_create(void)
151 yang_snodes_iterate_all(nb_node_new_cb
, 0, NULL
);
154 void nb_nodes_delete(void)
156 yang_snodes_iterate_all(nb_node_del_cb
, 0, NULL
);
159 struct nb_node
*nb_node_find(const char *xpath
)
161 const struct lys_node
*snode
;
164 * Use libyang to find the schema node associated to the xpath and get
165 * the northbound node from there (snode private pointer).
167 snode
= ly_ctx_get_node(ly_native_ctx
, NULL
, xpath
, 0);
174 static int nb_node_validate_cb(const struct nb_node
*nb_node
,
175 enum nb_operation operation
,
176 int callback_implemented
, bool optional
)
180 valid
= nb_operation_is_valid(operation
, nb_node
->snode
);
182 if (!valid
&& callback_implemented
)
183 flog_warn(EC_LIB_NB_CB_UNNEEDED
,
184 "unneeded '%s' callback for '%s'",
185 nb_operation_name(operation
), nb_node
->xpath
);
187 if (!optional
&& valid
&& !callback_implemented
) {
188 flog_err(EC_LIB_NB_CB_MISSING
, "missing '%s' callback for '%s'",
189 nb_operation_name(operation
), nb_node
->xpath
);
197 * Check if the required callbacks were implemented for the given northbound
200 static unsigned int nb_node_validate_cbs(const struct nb_node
*nb_node
)
203 unsigned int error
= 0;
205 error
+= nb_node_validate_cb(nb_node
, NB_OP_CREATE
,
206 !!nb_node
->cbs
.create
, false);
207 error
+= nb_node_validate_cb(nb_node
, NB_OP_MODIFY
,
208 !!nb_node
->cbs
.modify
, false);
209 error
+= nb_node_validate_cb(nb_node
, NB_OP_DESTROY
,
210 !!nb_node
->cbs
.destroy
, false);
211 error
+= nb_node_validate_cb(nb_node
, NB_OP_MOVE
, !!nb_node
->cbs
.move
,
213 error
+= nb_node_validate_cb(nb_node
, NB_OP_APPLY_FINISH
,
214 !!nb_node
->cbs
.apply_finish
, true);
215 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_ELEM
,
216 !!nb_node
->cbs
.get_elem
, false);
217 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_NEXT
,
218 !!nb_node
->cbs
.get_next
, false);
219 error
+= nb_node_validate_cb(nb_node
, NB_OP_GET_KEYS
,
220 !!nb_node
->cbs
.get_keys
, false);
221 error
+= nb_node_validate_cb(nb_node
, NB_OP_LOOKUP_ENTRY
,
222 !!nb_node
->cbs
.lookup_entry
, false);
223 error
+= nb_node_validate_cb(nb_node
, NB_OP_RPC
, !!nb_node
->cbs
.rpc
,
229 static unsigned int nb_node_validate_priority(const struct nb_node
*nb_node
)
231 /* Top-level nodes can have any priority. */
232 if (!nb_node
->parent
)
235 if (nb_node
->priority
< nb_node
->parent
->priority
) {
236 flog_err(EC_LIB_NB_CB_INVALID_PRIO
,
237 "node has higher priority than its parent [xpath %s]",
245 static int nb_node_validate(const struct lys_node
*snode
, void *arg
)
247 struct nb_node
*nb_node
= snode
->priv
;
248 unsigned int *errors
= arg
;
250 /* Validate callbacks and priority. */
251 *errors
+= nb_node_validate_cbs(nb_node
);
252 *errors
+= nb_node_validate_priority(nb_node
);
254 return YANG_ITER_CONTINUE
;
257 struct nb_config
*nb_config_new(struct lyd_node
*dnode
)
259 struct nb_config
*config
;
261 config
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*config
));
263 config
->dnode
= dnode
;
265 config
->dnode
= yang_dnode_new(ly_native_ctx
, true);
267 pthread_rwlock_init(&config
->lock
, NULL
);
272 void nb_config_free(struct nb_config
*config
)
275 yang_dnode_free(config
->dnode
);
276 pthread_rwlock_destroy(&config
->lock
);
277 XFREE(MTYPE_NB_CONFIG
, config
);
280 struct nb_config
*nb_config_dup(const struct nb_config
*config
)
282 struct nb_config
*dup
;
284 dup
= XCALLOC(MTYPE_NB_CONFIG
, sizeof(*dup
));
285 dup
->dnode
= yang_dnode_dup(config
->dnode
);
286 dup
->version
= config
->version
;
287 pthread_rwlock_init(&dup
->lock
, NULL
);
292 int nb_config_merge(struct nb_config
*config_dst
, struct nb_config
*config_src
,
293 bool preserve_source
)
297 ret
= lyd_merge(config_dst
->dnode
, config_src
->dnode
, LYD_OPT_EXPLICIT
);
299 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_merge() failed", __func__
);
301 if (!preserve_source
)
302 nb_config_free(config_src
);
304 return (ret
== 0) ? NB_OK
: NB_ERR
;
307 void nb_config_replace(struct nb_config
*config_dst
,
308 struct nb_config
*config_src
, bool preserve_source
)
310 /* Update version. */
311 if (config_src
->version
!= 0)
312 config_dst
->version
= config_src
->version
;
315 if (config_dst
->dnode
)
316 yang_dnode_free(config_dst
->dnode
);
317 if (preserve_source
) {
318 config_dst
->dnode
= yang_dnode_dup(config_src
->dnode
);
320 config_dst
->dnode
= config_src
->dnode
;
321 config_src
->dnode
= NULL
;
322 nb_config_free(config_src
);
326 /* Generate the nb_config_cbs tree. */
327 static inline int nb_config_cb_compare(const struct nb_config_cb
*a
,
328 const struct nb_config_cb
*b
)
330 /* Sort by priority first. */
331 if (a
->nb_node
->priority
< b
->nb_node
->priority
)
333 if (a
->nb_node
->priority
> b
->nb_node
->priority
)
337 * Use XPath as a tie-breaker. This will naturally sort parent nodes
338 * before their children.
340 return strcmp(a
->xpath
, b
->xpath
);
RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
344 static void nb_config_diff_add_change(struct nb_config_cbs
*changes
,
345 enum nb_operation operation
,
346 const struct lyd_node
*dnode
)
348 struct nb_config_change
*change
;
350 change
= XCALLOC(MTYPE_TMP
, sizeof(*change
));
351 change
->cb
.operation
= operation
;
352 change
->cb
.nb_node
= dnode
->schema
->priv
;
353 yang_dnode_get_path(dnode
, change
->cb
.xpath
, sizeof(change
->cb
.xpath
));
354 change
->cb
.dnode
= dnode
;
356 RB_INSERT(nb_config_cbs
, changes
, &change
->cb
);
359 static void nb_config_diff_del_changes(struct nb_config_cbs
*changes
)
361 while (!RB_EMPTY(nb_config_cbs
, changes
)) {
362 struct nb_config_change
*change
;
364 change
= (struct nb_config_change
*)RB_ROOT(nb_config_cbs
,
366 RB_REMOVE(nb_config_cbs
, changes
, &change
->cb
);
367 XFREE(MTYPE_TMP
, change
);
372 * Helper function used when calculating the delta between two different
373 * configurations. Given a new subtree, calculate all new YANG data nodes,
374 * excluding default leafs and leaf-lists. This is a recursive function.
376 static void nb_config_diff_created(const struct lyd_node
*dnode
,
377 struct nb_config_cbs
*changes
)
379 enum nb_operation operation
;
380 struct lyd_node
*child
;
382 switch (dnode
->schema
->nodetype
) {
385 if (lyd_wd_default((struct lyd_node_leaf_list
*)dnode
))
388 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
389 operation
= NB_OP_CREATE
;
390 else if (nb_operation_is_valid(NB_OP_MODIFY
, dnode
->schema
))
391 operation
= NB_OP_MODIFY
;
395 nb_config_diff_add_change(changes
, operation
, dnode
);
399 if (nb_operation_is_valid(NB_OP_CREATE
, dnode
->schema
))
400 nb_config_diff_add_change(changes
, NB_OP_CREATE
, dnode
);
402 /* Process child nodes recursively. */
403 LY_TREE_FOR (dnode
->child
, child
) {
404 nb_config_diff_created(child
, changes
);
412 static void nb_config_diff_deleted(const struct lyd_node
*dnode
,
413 struct nb_config_cbs
*changes
)
415 if (nb_operation_is_valid(NB_OP_DESTROY
, dnode
->schema
))
416 nb_config_diff_add_change(changes
, NB_OP_DESTROY
, dnode
);
417 else if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_CONTAINER
)) {
418 struct lyd_node
*child
;
421 * Non-presence containers need special handling since they
422 * don't have "destroy" callbacks. In this case, what we need to
423 * do is to call the "destroy" callbacks of their child nodes
424 * when applicable (i.e. optional nodes).
426 LY_TREE_FOR (dnode
->child
, child
) {
427 nb_config_diff_deleted(child
, changes
);
432 /* Calculate the delta between two different configurations. */
433 static void nb_config_diff(const struct nb_config
*config1
,
434 const struct nb_config
*config2
,
435 struct nb_config_cbs
*changes
)
437 struct lyd_difflist
*diff
;
439 diff
= lyd_diff(config1
->dnode
, config2
->dnode
,
440 LYD_DIFFOPT_WITHDEFAULTS
);
443 for (int i
= 0; diff
->type
[i
] != LYD_DIFF_END
; i
++) {
445 struct lyd_node
*dnode
;
447 type
= diff
->type
[i
];
450 case LYD_DIFF_CREATED
:
451 dnode
= diff
->second
[i
];
452 nb_config_diff_created(dnode
, changes
);
454 case LYD_DIFF_DELETED
:
455 dnode
= diff
->first
[i
];
456 nb_config_diff_deleted(dnode
, changes
);
458 case LYD_DIFF_CHANGED
:
459 dnode
= diff
->second
[i
];
460 nb_config_diff_add_change(changes
, NB_OP_MODIFY
, dnode
);
462 case LYD_DIFF_MOVEDAFTER1
:
463 case LYD_DIFF_MOVEDAFTER2
:
472 int nb_candidate_edit(struct nb_config
*candidate
,
473 const struct nb_node
*nb_node
,
474 enum nb_operation operation
, const char *xpath
,
475 const struct yang_data
*previous
,
476 const struct yang_data
*data
)
478 struct lyd_node
*dnode
;
479 char xpath_edit
[XPATH_MAXLEN
];
481 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
482 if (nb_node
->snode
->nodetype
== LYS_LEAFLIST
)
483 snprintf(xpath_edit
, sizeof(xpath_edit
), "%s[.='%s']", xpath
,
486 strlcpy(xpath_edit
, xpath
, sizeof(xpath_edit
));
492 dnode
= lyd_new_path(candidate
->dnode
, ly_native_ctx
,
493 xpath_edit
, (void *)data
->value
, 0,
494 LYD_PATH_OPT_UPDATE
);
495 if (!dnode
&& ly_errno
) {
496 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed",
502 * If a new node was created, call lyd_validate() only to create
503 * default child nodes.
506 lyd_schema_sort(dnode
, 0);
507 lyd_validate(&dnode
, LYD_OPT_CONFIG
, ly_native_ctx
);
511 dnode
= yang_dnode_get(candidate
->dnode
, xpath_edit
);
514 * Return a special error code so the caller can choose
515 * whether to ignore it or not.
517 return NB_ERR_NOT_FOUND
;
521 /* TODO: update configuration. */
524 flog_warn(EC_LIB_DEVELOPMENT
,
525 "%s: unknown operation (%u) [xpath %s]", __func__
,
526 operation
, xpath_edit
);
533 bool nb_candidate_needs_update(const struct nb_config
*candidate
)
537 pthread_rwlock_rdlock(&running_config
->lock
);
539 if (candidate
->version
< running_config
->version
)
542 pthread_rwlock_unlock(&running_config
->lock
);
547 int nb_candidate_update(struct nb_config
*candidate
)
549 struct nb_config
*updated_config
;
551 pthread_rwlock_rdlock(&running_config
->lock
);
553 updated_config
= nb_config_dup(running_config
);
555 pthread_rwlock_unlock(&running_config
->lock
);
557 if (nb_config_merge(updated_config
, candidate
, true) != NB_OK
)
560 nb_config_replace(candidate
, updated_config
, false);
566 * Perform YANG syntactic and semantic validation.
568 * WARNING: lyd_validate() can change the configuration as part of the
569 * validation process.
571 static int nb_candidate_validate_yang(struct nb_config
*candidate
)
573 if (lyd_validate(&candidate
->dnode
, LYD_OPT_STRICT
| LYD_OPT_CONFIG
,
576 return NB_ERR_VALIDATION
;
581 /* Perform code-level validation using the northbound callbacks. */
582 static int nb_candidate_validate_changes(struct nb_config
*candidate
,
583 struct nb_config_cbs
*changes
)
585 struct nb_config_cb
*cb
;
587 RB_FOREACH (cb
, nb_config_cbs
, changes
) {
588 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
591 ret
= nb_callback_configuration(NB_EV_VALIDATE
, change
);
593 return NB_ERR_VALIDATION
;
599 int nb_candidate_validate(struct nb_config
*candidate
)
601 struct nb_config_cbs changes
;
604 if (nb_candidate_validate_yang(candidate
) != NB_OK
)
605 return NB_ERR_VALIDATION
;
607 RB_INIT(nb_config_cbs
, &changes
);
608 pthread_rwlock_rdlock(&running_config
->lock
);
610 nb_config_diff(running_config
, candidate
, &changes
);
611 ret
= nb_candidate_validate_changes(candidate
, &changes
);
612 nb_config_diff_del_changes(&changes
);
614 pthread_rwlock_unlock(&running_config
->lock
);
619 int nb_candidate_commit_prepare(struct nb_config
*candidate
,
620 enum nb_client client
, const void *user
,
622 struct nb_transaction
**transaction
)
624 struct nb_config_cbs changes
;
626 if (nb_candidate_validate_yang(candidate
) != NB_OK
) {
627 flog_warn(EC_LIB_NB_CANDIDATE_INVALID
,
628 "%s: failed to validate candidate configuration",
630 return NB_ERR_VALIDATION
;
633 RB_INIT(nb_config_cbs
, &changes
);
634 pthread_rwlock_rdlock(&running_config
->lock
);
636 nb_config_diff(running_config
, candidate
, &changes
);
637 if (RB_EMPTY(nb_config_cbs
, &changes
)) {
638 pthread_rwlock_unlock(&running_config
->lock
);
639 return NB_ERR_NO_CHANGES
;
642 if (nb_candidate_validate_changes(candidate
, &changes
)
645 EC_LIB_NB_CANDIDATE_INVALID
,
646 "%s: failed to validate candidate configuration",
648 nb_config_diff_del_changes(&changes
);
649 pthread_rwlock_unlock(&running_config
->lock
);
650 return NB_ERR_VALIDATION
;
653 *transaction
= nb_transaction_new(candidate
, &changes
, client
,
655 if (*transaction
== NULL
) {
656 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
657 "%s: failed to create transaction", __func__
);
658 nb_config_diff_del_changes(&changes
);
659 pthread_rwlock_unlock(&running_config
->lock
);
660 return NB_ERR_LOCKED
;
663 pthread_rwlock_unlock(&running_config
->lock
);
665 return nb_transaction_process(NB_EV_PREPARE
, *transaction
);
668 void nb_candidate_commit_abort(struct nb_transaction
*transaction
)
670 (void)nb_transaction_process(NB_EV_ABORT
, transaction
);
671 nb_transaction_free(transaction
);
674 void nb_candidate_commit_apply(struct nb_transaction
*transaction
,
675 bool save_transaction
, uint32_t *transaction_id
)
677 (void)nb_transaction_process(NB_EV_APPLY
, transaction
);
678 nb_transaction_apply_finish(transaction
);
680 /* Replace running by candidate. */
681 transaction
->config
->version
++;
682 pthread_rwlock_wrlock(&running_config
->lock
);
684 nb_config_replace(running_config
, transaction
->config
, true);
686 pthread_rwlock_unlock(&running_config
->lock
);
688 /* Record transaction. */
690 && nb_db_transaction_save(transaction
, transaction_id
) != NB_OK
)
691 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED
,
692 "%s: failed to record transaction", __func__
);
694 nb_transaction_free(transaction
);
697 int nb_candidate_commit(struct nb_config
*candidate
, enum nb_client client
,
698 const void *user
, bool save_transaction
,
699 const char *comment
, uint32_t *transaction_id
)
701 struct nb_transaction
*transaction
= NULL
;
704 ret
= nb_candidate_commit_prepare(candidate
, client
, user
, comment
,
707 * Apply the changes if the preparation phase succeeded. Otherwise abort
711 nb_candidate_commit_apply(transaction
, save_transaction
,
713 else if (transaction
!= NULL
)
714 nb_candidate_commit_abort(transaction
);
719 int nb_running_lock(enum nb_client client
, const void *user
)
723 pthread_mutex_lock(&running_config_mgmt_lock
.mtx
);
725 if (!running_config_mgmt_lock
.locked
) {
726 running_config_mgmt_lock
.locked
= true;
727 running_config_mgmt_lock
.owner_client
= client
;
728 running_config_mgmt_lock
.owner_user
= user
;
732 pthread_mutex_unlock(&running_config_mgmt_lock
.mtx
);
737 int nb_running_unlock(enum nb_client client
, const void *user
)
741 pthread_mutex_lock(&running_config_mgmt_lock
.mtx
);
743 if (running_config_mgmt_lock
.locked
744 && running_config_mgmt_lock
.owner_client
== client
745 && running_config_mgmt_lock
.owner_user
== user
) {
746 running_config_mgmt_lock
.locked
= false;
747 running_config_mgmt_lock
.owner_client
= NB_CLIENT_NONE
;
748 running_config_mgmt_lock
.owner_user
= NULL
;
752 pthread_mutex_unlock(&running_config_mgmt_lock
.mtx
);
757 int nb_running_lock_check(enum nb_client client
, const void *user
)
761 pthread_mutex_lock(&running_config_mgmt_lock
.mtx
);
763 if (!running_config_mgmt_lock
.locked
764 || (running_config_mgmt_lock
.owner_client
== client
765 && running_config_mgmt_lock
.owner_user
== user
))
768 pthread_mutex_unlock(&running_config_mgmt_lock
.mtx
);
773 static void nb_log_callback(const enum nb_event event
,
774 enum nb_operation operation
, const char *xpath
,
778 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
779 nb_event_name(event
), nb_operation_name(operation
), xpath
,
780 value
? value
: "(NULL)");
784 * Call the northbound configuration callback associated to a given
785 * configuration change.
787 static int nb_callback_configuration(const enum nb_event event
,
788 struct nb_config_change
*change
)
790 enum nb_operation operation
= change
->cb
.operation
;
791 const char *xpath
= change
->cb
.xpath
;
792 const struct nb_node
*nb_node
= change
->cb
.nb_node
;
793 const struct lyd_node
*dnode
= change
->cb
.dnode
;
794 union nb_resource
*resource
;
797 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
)) {
798 const char *value
= "(none)";
800 if (dnode
&& !yang_snode_is_typeless_data(dnode
->schema
))
801 value
= yang_dnode_get_string(dnode
, NULL
);
803 nb_log_callback(event
, operation
, xpath
, value
);
806 if (event
== NB_EV_VALIDATE
)
809 resource
= &change
->resource
;
813 ret
= (*nb_node
->cbs
.create
)(event
, dnode
, resource
);
816 ret
= (*nb_node
->cbs
.modify
)(event
, dnode
, resource
);
819 ret
= (*nb_node
->cbs
.destroy
)(event
, dnode
);
822 ret
= (*nb_node
->cbs
.move
)(event
, dnode
);
825 flog_err(EC_LIB_DEVELOPMENT
,
826 "%s: unknown operation (%u) [xpath %s]", __func__
,
833 enum lib_log_refs ref
;
837 priority
= LOG_WARNING
;
838 ref
= EC_LIB_NB_CB_CONFIG_VALIDATE
;
841 priority
= LOG_WARNING
;
842 ref
= EC_LIB_NB_CB_CONFIG_PREPARE
;
845 priority
= LOG_WARNING
;
846 ref
= EC_LIB_NB_CB_CONFIG_ABORT
;
850 ref
= EC_LIB_NB_CB_CONFIG_APPLY
;
853 flog_err(EC_LIB_DEVELOPMENT
,
854 "%s: unknown event (%u) [xpath %s]",
855 __func__
, event
, xpath
);
860 "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
861 __func__
, nb_err_name(ret
), nb_event_name(event
),
862 nb_operation_name(operation
), xpath
);
868 struct yang_data
*nb_callback_get_elem(const struct nb_node
*nb_node
,
870 const void *list_entry
)
872 DEBUGD(&nb_dbg_cbs_state
,
873 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
876 return nb_node
->cbs
.get_elem(xpath
, list_entry
);
879 const void *nb_callback_get_next(const struct nb_node
*nb_node
,
880 const void *parent_list_entry
,
881 const void *list_entry
)
883 DEBUGD(&nb_dbg_cbs_state
,
884 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
885 nb_node
->xpath
, parent_list_entry
, list_entry
);
887 return nb_node
->cbs
.get_next(parent_list_entry
, list_entry
);
890 int nb_callback_get_keys(const struct nb_node
*nb_node
, const void *list_entry
,
891 struct yang_list_keys
*keys
)
893 DEBUGD(&nb_dbg_cbs_state
,
894 "northbound callback (get_keys): node [%s] list_entry [%p]",
895 nb_node
->xpath
, list_entry
);
897 return nb_node
->cbs
.get_keys(list_entry
, keys
);
900 const void *nb_callback_lookup_entry(const struct nb_node
*nb_node
,
901 const void *parent_list_entry
,
902 const struct yang_list_keys
*keys
)
904 DEBUGD(&nb_dbg_cbs_state
,
905 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
906 nb_node
->xpath
, parent_list_entry
);
908 return nb_node
->cbs
.lookup_entry(parent_list_entry
, keys
);
911 int nb_callback_rpc(const struct nb_node
*nb_node
, const char *xpath
,
912 const struct list
*input
, struct list
*output
)
914 DEBUGD(&nb_dbg_cbs_rpc
, "northbound RPC: %s", xpath
);
916 return nb_node
->cbs
.rpc(xpath
, input
, output
);
919 static struct nb_transaction
*
920 nb_transaction_new(struct nb_config
*config
, struct nb_config_cbs
*changes
,
921 enum nb_client client
, const void *user
, const char *comment
)
923 struct nb_transaction
*transaction
;
925 if (nb_running_lock_check(client
, user
)) {
927 EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
928 "%s: running configuration is locked by another client",
933 if (transaction_in_progress
) {
935 EC_LIB_NB_TRANSACTION_CREATION_FAILED
,
936 "%s: error - there's already another transaction in progress",
940 transaction_in_progress
= true;
942 transaction
= XCALLOC(MTYPE_TMP
, sizeof(*transaction
));
943 transaction
->client
= client
;
945 strlcpy(transaction
->comment
, comment
,
946 sizeof(transaction
->comment
));
947 transaction
->config
= config
;
948 transaction
->changes
= *changes
;
953 static void nb_transaction_free(struct nb_transaction
*transaction
)
955 nb_config_diff_del_changes(&transaction
->changes
);
956 XFREE(MTYPE_TMP
, transaction
);
957 transaction_in_progress
= false;
960 /* Process all configuration changes associated to a transaction. */
961 static int nb_transaction_process(enum nb_event event
,
962 struct nb_transaction
*transaction
)
964 struct nb_config_cb
*cb
;
967 * Need to lock the running configuration since transaction->changes
968 * can contain pointers to data nodes from the running configuration.
970 pthread_rwlock_rdlock(&running_config
->lock
);
972 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
973 struct nb_config_change
*change
=
974 (struct nb_config_change
*)cb
;
978 * Only try to release resources that were allocated
981 if (event
== NB_EV_ABORT
&& change
->prepare_ok
== false)
984 /* Call the appropriate callback. */
985 ret
= nb_callback_configuration(event
, change
);
989 pthread_rwlock_unlock(
990 &running_config
->lock
);
993 change
->prepare_ok
= true;
998 * At this point it's not possible to reject the
999 * transaction anymore, so any failure here can
1000 * lead to inconsistencies and should be treated
1001 * as a bug. Operations prone to errors, like
1002 * validations and resource allocations, should
1003 * be performed during the 'prepare' phase.
1011 pthread_rwlock_unlock(&running_config
->lock
);
1016 static struct nb_config_cb
*
1017 nb_apply_finish_cb_new(struct nb_config_cbs
*cbs
, const char *xpath
,
1018 const struct nb_node
*nb_node
,
1019 const struct lyd_node
*dnode
)
1021 struct nb_config_cb
*cb
;
1023 cb
= XCALLOC(MTYPE_TMP
, sizeof(*cb
));
1024 strlcpy(cb
->xpath
, xpath
, sizeof(cb
->xpath
));
1025 cb
->nb_node
= nb_node
;
1027 RB_INSERT(nb_config_cbs
, cbs
, cb
);
1032 static struct nb_config_cb
*
1033 nb_apply_finish_cb_find(struct nb_config_cbs
*cbs
, const char *xpath
,
1034 const struct nb_node
*nb_node
)
1036 struct nb_config_cb s
;
1038 strlcpy(s
.xpath
, xpath
, sizeof(s
.xpath
));
1039 s
.nb_node
= nb_node
;
1040 return RB_FIND(nb_config_cbs
, cbs
, &s
);
1043 /* Call the 'apply_finish' callbacks. */
1044 static void nb_transaction_apply_finish(struct nb_transaction
*transaction
)
1046 struct nb_config_cbs cbs
;
1047 struct nb_config_cb
*cb
;
1049 /* Initialize tree of 'apply_finish' callbacks. */
1050 RB_INIT(nb_config_cbs
, &cbs
);
1052 /* Identify the 'apply_finish' callbacks that need to be called. */
1053 RB_FOREACH (cb
, nb_config_cbs
, &transaction
->changes
) {
1054 struct nb_config_change
*change
= (struct nb_config_change
*)cb
;
1055 const struct lyd_node
*dnode
= change
->cb
.dnode
;
1058 * Iterate up to the root of the data tree. When a node is being
1059 * deleted, skip its 'apply_finish' callback if one is defined
1060 * (the 'apply_finish' callbacks from the node ancestors should
1061 * be called though).
1063 if (change
->cb
.operation
== NB_OP_DESTROY
) {
1064 char xpath
[XPATH_MAXLEN
];
1066 dnode
= dnode
->parent
;
1071 * The dnode from 'delete' callbacks point to elements
1072 * from the running configuration. Use yang_dnode_get()
1073 * to get the corresponding dnode from the candidate
1074 * configuration that is being committed.
1076 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1077 dnode
= yang_dnode_get(transaction
->config
->dnode
,
1081 char xpath
[XPATH_MAXLEN
];
1082 struct nb_node
*nb_node
;
1084 nb_node
= dnode
->schema
->priv
;
1085 if (!nb_node
->cbs
.apply_finish
)
1089 * Don't call the callback more than once for the same
1092 yang_dnode_get_path(dnode
, xpath
, sizeof(xpath
));
1093 if (nb_apply_finish_cb_find(&cbs
, xpath
, nb_node
))
1096 nb_apply_finish_cb_new(&cbs
, xpath
, nb_node
, dnode
);
1099 dnode
= dnode
->parent
;
1103 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1104 RB_FOREACH (cb
, nb_config_cbs
, &cbs
) {
1105 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config
, DEBUG_MODE_ALL
))
1106 nb_log_callback(NB_EV_APPLY
, NB_OP_APPLY_FINISH
,
1109 (*cb
->nb_node
->cbs
.apply_finish
)(cb
->dnode
);
1112 /* Release memory. */
1113 while (!RB_EMPTY(nb_config_cbs
, &cbs
)) {
1114 cb
= RB_ROOT(nb_config_cbs
, &cbs
);
1115 RB_REMOVE(nb_config_cbs
, &cbs
, cb
);
1116 XFREE(MTYPE_TMP
, cb
);
1120 static int nb_oper_data_iter_children(const struct lys_node
*snode
,
1121 const char *xpath
, const void *list_entry
,
1122 const struct yang_list_keys
*list_keys
,
1123 struct yang_translator
*translator
,
1124 bool first
, uint32_t flags
,
1125 nb_oper_data_cb cb
, void *arg
)
1127 struct lys_node
*child
;
1129 LY_TREE_FOR (snode
->child
, child
) {
1132 ret
= nb_oper_data_iter_node(child
, xpath
, list_entry
,
1133 list_keys
, translator
, false,
1142 static int nb_oper_data_iter_leaf(const struct nb_node
*nb_node
,
1143 const char *xpath
, const void *list_entry
,
1144 const struct yang_list_keys
*list_keys
,
1145 struct yang_translator
*translator
,
1146 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1148 struct yang_data
*data
;
1150 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1153 /* Ignore list keys. */
1154 if (lys_is_key((struct lys_node_leaf
*)nb_node
->snode
, NULL
))
1157 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1159 /* Leaf of type "empty" is not present. */
1162 return (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1165 static int nb_oper_data_iter_container(const struct nb_node
*nb_node
,
1167 const void *list_entry
,
1168 const struct yang_list_keys
*list_keys
,
1169 struct yang_translator
*translator
,
1170 uint32_t flags
, nb_oper_data_cb cb
,
1173 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1176 /* Presence containers. */
1177 if (nb_node
->cbs
.get_elem
) {
1178 struct yang_data
*data
;
1181 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1183 /* Presence container is not present. */
1186 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1191 /* Iterate over the child nodes. */
1192 return nb_oper_data_iter_children(nb_node
->snode
, xpath
, list_entry
,
1193 list_keys
, translator
, false, flags
,
1198 nb_oper_data_iter_leaflist(const struct nb_node
*nb_node
, const char *xpath
,
1199 const void *parent_list_entry
,
1200 const struct yang_list_keys
*parent_list_keys
,
1201 struct yang_translator
*translator
, uint32_t flags
,
1202 nb_oper_data_cb cb
, void *arg
)
1204 const void *list_entry
= NULL
;
1206 if (CHECK_FLAG(nb_node
->snode
->flags
, LYS_CONFIG_W
))
1210 struct yang_data
*data
;
1213 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1216 /* End of the list. */
1219 data
= nb_callback_get_elem(nb_node
, xpath
, list_entry
);
1223 ret
= (*cb
)(nb_node
->snode
, translator
, data
, arg
);
1226 } while (list_entry
);
1231 static int nb_oper_data_iter_list(const struct nb_node
*nb_node
,
1232 const char *xpath_list
,
1233 const void *parent_list_entry
,
1234 const struct yang_list_keys
*parent_list_keys
,
1235 struct yang_translator
*translator
,
1236 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1238 struct lys_node_list
*slist
= (struct lys_node_list
*)nb_node
->snode
;
1239 const void *list_entry
= NULL
;
1240 uint32_t position
= 1;
1242 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1245 /* Iterate over all list entries. */
1247 struct yang_list_keys list_keys
;
1248 char xpath
[XPATH_MAXLEN
* 2];
1251 /* Obtain list entry. */
1252 list_entry
= nb_callback_get_next(nb_node
, parent_list_entry
,
1255 /* End of the list. */
1258 if (!CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
)) {
1259 /* Obtain the list entry keys. */
1260 if (nb_callback_get_keys(nb_node
, list_entry
,
1263 flog_warn(EC_LIB_NB_CB_STATE
,
1264 "%s: failed to get list keys",
1269 /* Build XPath of the list entry. */
1270 strlcpy(xpath
, xpath_list
, sizeof(xpath
));
1271 for (unsigned int i
= 0; i
< list_keys
.num
; i
++) {
1272 snprintf(xpath
+ strlen(xpath
),
1273 sizeof(xpath
) - strlen(xpath
),
1274 "[%s='%s']", slist
->keys
[i
]->name
,
1279 * Keyless list - build XPath using a positional index.
1281 snprintf(xpath
, sizeof(xpath
), "%s[%u]", xpath_list
,
1286 /* Iterate over the child nodes. */
1287 ret
= nb_oper_data_iter_children(
1288 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1289 translator
, false, flags
, cb
, arg
);
1292 } while (list_entry
);
1297 static int nb_oper_data_iter_node(const struct lys_node
*snode
,
1298 const char *xpath_parent
,
1299 const void *list_entry
,
1300 const struct yang_list_keys
*list_keys
,
1301 struct yang_translator
*translator
,
1302 bool first
, uint32_t flags
,
1303 nb_oper_data_cb cb
, void *arg
)
1305 struct nb_node
*nb_node
;
1306 char xpath
[XPATH_MAXLEN
];
1309 if (!first
&& CHECK_FLAG(flags
, NB_OPER_DATA_ITER_NORECURSE
)
1310 && CHECK_FLAG(snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
))
1314 strlcpy(xpath
, xpath_parent
, sizeof(xpath
));
1315 if (!first
&& snode
->nodetype
!= LYS_USES
)
1316 snprintf(xpath
+ strlen(xpath
), sizeof(xpath
) - strlen(xpath
),
1317 "/%s", snode
->name
);
1319 nb_node
= snode
->priv
;
1320 switch (snode
->nodetype
) {
1322 ret
= nb_oper_data_iter_container(nb_node
, xpath
, list_entry
,
1323 list_keys
, translator
, flags
,
1327 ret
= nb_oper_data_iter_leaf(nb_node
, xpath
, list_entry
,
1328 list_keys
, translator
, flags
, cb
,
1332 ret
= nb_oper_data_iter_leaflist(nb_node
, xpath
, list_entry
,
1333 list_keys
, translator
, flags
,
1337 ret
= nb_oper_data_iter_list(nb_node
, xpath
, list_entry
,
1338 list_keys
, translator
, flags
, cb
,
1342 ret
= nb_oper_data_iter_children(snode
, xpath
, list_entry
,
1343 list_keys
, translator
, false,
1353 int nb_oper_data_iterate(const char *xpath
, struct yang_translator
*translator
,
1354 uint32_t flags
, nb_oper_data_cb cb
, void *arg
)
1356 struct nb_node
*nb_node
;
1357 const void *list_entry
= NULL
;
1358 struct yang_list_keys list_keys
;
1359 struct list
*list_dnodes
;
1360 struct lyd_node
*dnode
, *dn
;
1361 struct listnode
*ln
;
1364 nb_node
= nb_node_find(xpath
);
1366 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1367 "%s: unknown data path: %s", __func__
, xpath
);
1371 /* For now this function works only with containers and lists. */
1372 if (!CHECK_FLAG(nb_node
->snode
->nodetype
, LYS_CONTAINER
| LYS_LIST
)) {
1374 EC_LIB_NB_OPERATIONAL_DATA
,
1375 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1381 * Create a data tree from the XPath so that we can parse the keys of
1382 * all YANG lists (if any).
1385 dnode
= lyd_new_path(NULL
, ly_native_ctx
, xpath
, NULL
, 0,
1386 LYD_PATH_OPT_UPDATE
);
1387 if (!dnode
&& ly_errno
) {
1388 flog_warn(EC_LIB_LIBYANG
, "%s: lyd_new_path() failed",
1393 * We can remove the following two lines once we depend on
1394 * libyang-v0.16-r2, which has the LYD_PATH_OPT_NOPARENTRET flag for
1397 dnode
= yang_dnode_get(dnode
, xpath
);
1401 * Create a linked list to sort the data nodes starting from the root.
1403 list_dnodes
= list_new();
1404 for (dn
= dnode
; dn
; dn
= dn
->parent
) {
1405 if (dn
->schema
->nodetype
!= LYS_LIST
|| !dn
->child
)
1407 listnode_add_head(list_dnodes
, dn
);
1410 * Use the northbound callbacks to find list entry pointer corresponding
1411 * to the given XPath.
1413 for (ALL_LIST_ELEMENTS_RO(list_dnodes
, ln
, dn
)) {
1414 struct lyd_node
*child
;
1418 /* Obtain the list entry keys. */
1419 memset(&list_keys
, 0, sizeof(list_keys
));
1420 LY_TREE_FOR (dn
->child
, child
) {
1421 if (!lys_is_key((struct lys_node_leaf
*)child
->schema
,
1424 strlcpy(list_keys
.key
[n
],
1425 yang_dnode_get_string(child
, NULL
),
1426 sizeof(list_keys
.key
[n
]));
1431 != ((struct lys_node_list
*)dn
->schema
)->keys_size
) {
1432 list_delete(&list_dnodes
);
1433 yang_dnode_free(dnode
);
1434 return NB_ERR_NOT_FOUND
;
1437 /* Find the list entry pointer. */
1438 nn
= dn
->schema
->priv
;
1440 nb_callback_lookup_entry(nn
, list_entry
, &list_keys
);
1441 if (list_entry
== NULL
) {
1442 list_delete(&list_dnodes
);
1443 yang_dnode_free(dnode
);
1444 return NB_ERR_NOT_FOUND
;
1448 /* If a list entry was given, iterate over that list entry only. */
1449 if (dnode
->schema
->nodetype
== LYS_LIST
&& dnode
->child
)
1450 ret
= nb_oper_data_iter_children(
1451 nb_node
->snode
, xpath
, list_entry
, &list_keys
,
1452 translator
, true, flags
, cb
, arg
);
1454 ret
= nb_oper_data_iter_node(nb_node
->snode
, xpath
, list_entry
,
1455 &list_keys
, translator
, true,
1458 list_delete(&list_dnodes
);
1459 yang_dnode_free(dnode
);
1464 bool nb_operation_is_valid(enum nb_operation operation
,
1465 const struct lys_node
*snode
)
1467 struct nb_node
*nb_node
= snode
->priv
;
1468 struct lys_node_container
*scontainer
;
1469 struct lys_node_leaf
*sleaf
;
1471 switch (operation
) {
1473 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1476 switch (snode
->nodetype
) {
1478 sleaf
= (struct lys_node_leaf
*)snode
;
1479 if (sleaf
->type
.base
!= LY_TYPE_EMPTY
)
1483 scontainer
= (struct lys_node_container
*)snode
;
1484 if (!scontainer
->presence
)
1495 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1498 switch (snode
->nodetype
) {
1500 sleaf
= (struct lys_node_leaf
*)snode
;
1501 if (sleaf
->type
.base
== LY_TYPE_EMPTY
)
1504 /* List keys can't be modified. */
1505 if (lys_is_key(sleaf
, NULL
))
1513 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1516 switch (snode
->nodetype
) {
1518 sleaf
= (struct lys_node_leaf
*)snode
;
1520 /* List keys can't be deleted. */
1521 if (lys_is_key(sleaf
, NULL
))
1525 * Only optional leafs can be deleted, or leafs whose
1526 * parent is a case statement.
1528 if (snode
->parent
->nodetype
== LYS_CASE
)
1532 if (CHECK_FLAG(sleaf
->flags
, LYS_MAND_TRUE
)
1537 scontainer
= (struct lys_node_container
*)snode
;
1538 if (!scontainer
->presence
)
1549 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1552 switch (snode
->nodetype
) {
1555 if (!CHECK_FLAG(snode
->flags
, LYS_USERORDERED
))
1562 case NB_OP_APPLY_FINISH
:
1563 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1566 case NB_OP_GET_ELEM
:
1567 if (!CHECK_FLAG(snode
->flags
, LYS_CONFIG_R
))
1570 switch (snode
->nodetype
) {
1575 scontainer
= (struct lys_node_container
*)snode
;
1576 if (!scontainer
->presence
)
1583 case NB_OP_GET_NEXT
:
1584 switch (snode
->nodetype
) {
1586 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1590 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
))
1597 case NB_OP_GET_KEYS
:
1598 case NB_OP_LOOKUP_ENTRY
:
1599 switch (snode
->nodetype
) {
1601 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_CONFIG_ONLY
))
1603 if (CHECK_FLAG(nb_node
->flags
, F_NB_NODE_KEYLESS_LIST
))
1611 if (CHECK_FLAG(snode
->flags
, LYS_CONFIG_W
| LYS_CONFIG_R
))
1614 switch (snode
->nodetype
) {
1627 DEFINE_HOOK(nb_notification_send
, (const char *xpath
, struct list
*arguments
),
1628 (xpath
, arguments
));
1630 int nb_notification_send(const char *xpath
, struct list
*arguments
)
1634 DEBUGD(&nb_dbg_notif
, "northbound notification: %s", xpath
);
1636 ret
= hook_call(nb_notification_send
, xpath
, arguments
);
1638 list_delete(&arguments
);
1643 /* Running configuration user pointers management. */
1644 struct nb_config_entry
{
1645 char xpath
[XPATH_MAXLEN
];
1649 static bool running_config_entry_cmp(const void *value1
, const void *value2
)
1651 const struct nb_config_entry
*c1
= value1
;
1652 const struct nb_config_entry
*c2
= value2
;
1654 return strmatch(c1
->xpath
, c2
->xpath
);
/*
 * Hash table key function. Relies on the xpath buffer being the first
 * member of struct nb_config_entry, so the struct pointer can be hashed
 * directly as a string.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
1662 static void *running_config_entry_alloc(void *p
)
1664 struct nb_config_entry
*new, *key
= p
;
1666 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY
, sizeof(*new));
1667 strlcpy(new->xpath
, key
->xpath
, sizeof(new->xpath
));
1672 static void running_config_entry_free(void *arg
)
1674 XFREE(MTYPE_NB_CONFIG_ENTRY
, arg
);
1677 void nb_running_set_entry(const struct lyd_node
*dnode
, void *entry
)
1679 struct nb_config_entry
*config
, s
;
1681 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
1682 config
= hash_get(running_config_entries
, &s
,
1683 running_config_entry_alloc
);
1684 config
->entry
= entry
;
1687 static void *nb_running_unset_entry_helper(const struct lyd_node
*dnode
)
1689 struct nb_config_entry
*config
, s
;
1690 struct lyd_node
*child
;
1693 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
1694 config
= hash_release(running_config_entries
, &s
);
1696 entry
= config
->entry
;
1697 running_config_entry_free(config
);
1700 /* Unset user pointers from the child nodes. */
1701 if (CHECK_FLAG(dnode
->schema
->nodetype
, LYS_LIST
| LYS_CONTAINER
)) {
1702 LY_TREE_FOR (dnode
->child
, child
) {
1703 (void)nb_running_unset_entry_helper(child
);
/*
 * Remove and return the user pointer associated with 'dnode'. Aborts if
 * no user pointer was set - callers must only unset entries they set.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry;

	entry = nb_running_unset_entry_helper(dnode);
	assert(entry);

	return entry;
}
1720 void *nb_running_get_entry(const struct lyd_node
*dnode
, const char *xpath
,
1721 bool abort_if_not_found
)
1723 const struct lyd_node
*orig_dnode
= dnode
;
1724 char xpath_buf
[XPATH_MAXLEN
];
1726 assert(dnode
|| xpath
);
1729 dnode
= yang_dnode_get(running_config
->dnode
, xpath
);
1732 struct nb_config_entry
*config
, s
;
1734 yang_dnode_get_path(dnode
, s
.xpath
, sizeof(s
.xpath
));
1735 config
= hash_lookup(running_config_entries
, &s
);
1737 return config
->entry
;
1739 dnode
= dnode
->parent
;
1742 if (!abort_if_not_found
)
1745 yang_dnode_get_path(orig_dnode
, xpath_buf
, sizeof(xpath_buf
));
1746 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND
,
1747 "%s: failed to find entry [xpath %s]", __func__
, xpath_buf
);
1748 zlog_backtrace(LOG_ERR
);
1752 /* Logging functions. */
1753 const char *nb_event_name(enum nb_event event
)
1756 case NB_EV_VALIDATE
:
1769 const char *nb_operation_name(enum nb_operation operation
)
1771 switch (operation
) {
1780 case NB_OP_APPLY_FINISH
:
1781 return "apply_finish";
1782 case NB_OP_GET_ELEM
:
1784 case NB_OP_GET_NEXT
:
1786 case NB_OP_GET_KEYS
:
1788 case NB_OP_LOOKUP_ENTRY
:
1789 return "lookup_entry";
1797 const char *nb_err_name(enum nb_error error
)
1803 return "generic error";
1804 case NB_ERR_NO_CHANGES
:
1805 return "no changes";
1806 case NB_ERR_NOT_FOUND
:
1807 return "element not found";
1809 return "resource is locked";
1810 case NB_ERR_VALIDATION
:
1811 return "validation error";
1812 case NB_ERR_RESOURCE
:
1813 return "failed to allocate resource";
1814 case NB_ERR_INCONSISTENCY
:
1815 return "internal inconsistency";
1821 const char *nb_client_name(enum nb_client client
)
1826 case NB_CLIENT_CONFD
:
1828 case NB_CLIENT_SYSREPO
:
1830 case NB_CLIENT_GRPC
:
1837 static void nb_load_callbacks(const struct frr_yang_module_info
*module
)
1839 for (size_t i
= 0; module
->nodes
[i
].xpath
; i
++) {
1840 struct nb_node
*nb_node
;
1843 nb_node
= nb_node_find(module
->nodes
[i
].xpath
);
1845 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH
,
1846 "%s: unknown data path: %s", __func__
,
1847 module
->nodes
[i
].xpath
);
1851 nb_node
->cbs
= module
->nodes
[i
].cbs
;
1852 priority
= module
->nodes
[i
].priority
;
1854 nb_node
->priority
= priority
;
1858 void nb_init(struct thread_master
*tm
,
1859 const struct frr_yang_module_info
*modules
[], size_t nmodules
)
1861 unsigned int errors
= 0;
1863 /* Load YANG modules. */
1864 for (size_t i
= 0; i
< nmodules
; i
++)
1865 yang_module_load(modules
[i
]->name
);
1867 /* Create a nb_node for all YANG schema nodes. */
1870 /* Load northbound callbacks. */
1871 for (size_t i
= 0; i
< nmodules
; i
++)
1872 nb_load_callbacks(modules
[i
]);
1874 /* Validate northbound callbacks. */
1875 yang_snodes_iterate_all(nb_node_validate
, 0, &errors
);
1878 EC_LIB_NB_CBS_VALIDATION
,
1879 "%s: failed to validate northbound callbacks: %u error(s)",
1884 /* Create an empty running configuration. */
1885 running_config
= nb_config_new(NULL
);
1886 running_config_entries
= hash_create(running_config_entry_key_make
,
1887 running_config_entry_cmp
,
1888 "Running Configuration Entries");
1889 pthread_mutex_init(&running_config_mgmt_lock
.mtx
, NULL
);
1891 /* Initialize the northbound CLI. */
1895 void nb_terminate(void)
1897 /* Terminate the northbound CLI. */
1900 /* Delete all nb_node's from all YANG modules. */
1903 /* Delete the running configuration. */
1904 hash_clean(running_config_entries
, running_config_entry_free
);
1905 hash_free(running_config_entries
);
1906 nb_config_free(running_config
);
1907 pthread_mutex_destroy(&running_config_mgmt_lock
.mtx
);