// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018 NetDEF, Inc.
 * Renato Westphal
 */

#include <zebra.h>

#include "libfrr.h"
#include "log.h"
#include "lib_errors.h"
#include "hash.h"
#include "command.h"
#include "debug.h"
#include "db.h"
#include "frr_pthread.h"
#include "northbound.h"
#include "northbound_cli.h"
#include "northbound_db.h"
#include "frrstr.h"

DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node");
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration");
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry");

/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;

/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;

/* Management lock for the running configuration. */
static struct {
	/* Mutex protecting this structure. */
	pthread_mutex_t mtx;

	/* Actual lock. */
	bool locked;

	/* Northbound client who owns this lock. */
	enum nb_client owner_client;

	/* Northbound user who owns this lock. */
	const void *owner_user;
} running_config_mgmt_lock;

/* Knob to record config transaction */
static bool nb_db_enabled;
/*
 * Global lock used to prevent multiple configuration transactions from
 * happening concurrently.
 */
static bool transaction_in_progress;

static int nb_callback_pre_validate(struct nb_context *context,
				    const struct nb_node *nb_node,
				    const struct lyd_node *dnode, char *errmsg,
				    size_t errmsg_len);
static int nb_callback_configuration(struct nb_context *context,
				     const enum nb_event event,
				     struct nb_config_change *change,
				     char *errmsg, size_t errmsg_len);
static struct nb_transaction *
nb_transaction_new(struct nb_context context, struct nb_config *config,
		   struct nb_config_cbs *changes, const char *comment,
		   char *errmsg, size_t errmsg_len);
static void nb_transaction_free(struct nb_transaction *transaction);
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction,
				  char *errmsg, size_t errmsg_len);
static void nb_transaction_apply_finish(struct nb_transaction *transaction,
					char *errmsg, size_t errmsg_len);
static int nb_oper_data_iter_node(const struct lysc_node *snode,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg);

static int nb_node_check_config_only(const struct lysc_node *snode, void *arg)
{
	bool *config_only = arg;

	if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
		*config_only = false;
		return YANG_ITER_STOP;
	}

	return YANG_ITER_CONTINUE;
}

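/*
 * Schema node iteration callback: allocate a northbound node for every
 * libyang schema node and link the two together via the snode private
 * pointer.
 */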
static int nb_node_new_cb(const struct lysc_node *snode, void *arg)
{
	struct nb_node *nb_node;
	struct lysc_node *sparent, *sparent_list;

	nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
	yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
			    sizeof(nb_node->xpath));
	nb_node->priority = NB_DFLT_PRIORITY;
	sparent = yang_snode_real_parent(snode);
	if (sparent)
		nb_node->parent = sparent->priv;
	sparent_list = yang_snode_parent_list(snode);
	if (sparent_list)
		nb_node->parent_list = sparent_list->priv;

	/* Set flags. */
	if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		bool config_only = true;

		(void)yang_snodes_iterate_subtree(snode, NULL,
						  nb_node_check_config_only, 0,
						  &config_only);
		if (config_only)
			SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
	}
	if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
		if (yang_snode_num_keys(snode) == 0)
			SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
	}

	/*
	 * Link the northbound node and the libyang schema node with one
	 * another.
	 */
	nb_node->snode = snode;
	assert(snode->priv == NULL);
	((struct lysc_node *)snode)->priv = nb_node;

	return YANG_ITER_CONTINUE;
}

static int nb_node_del_cb(const struct lysc_node *snode, void *arg)
{
	struct nb_node *nb_node;

	nb_node = snode->priv;
	if (nb_node) {
		((struct lysc_node *)snode)->priv = NULL;
		XFREE(MTYPE_NB_NODE, nb_node);
	}

	return YANG_ITER_CONTINUE;
}

void nb_nodes_create(void)
{
	yang_snodes_iterate(NULL, nb_node_new_cb, 0, NULL);
}

void nb_nodes_delete(void)
{
	yang_snodes_iterate(NULL, nb_node_del_cb, 0, NULL);
}

struct nb_node *nb_node_find(const char *path)
{
	const struct lysc_node *snode;

	/*
	 * Use libyang to find the schema node associated to the path and get
	 * the northbound node from there (snode private pointer).
	 */
	snode = lys_find_path(ly_native_ctx, NULL, path, 0);
	if (!snode)
		return NULL;

	return snode->priv;
}

void nb_node_set_dependency_cbs(const char *dependency_xpath,
				const char *dependant_xpath,
				struct nb_dependency_callbacks *cbs)
{
	struct nb_node *dependency = nb_node_find(dependency_xpath);
	struct nb_node *dependant = nb_node_find(dependant_xpath);

	if (!dependency || !dependant)
		return;

	dependency->dep_cbs.get_dependant_xpath = cbs->get_dependant_xpath;
	dependant->dep_cbs.get_dependency_xpath = cbs->get_dependency_xpath;
}

bool nb_node_has_dependency(struct nb_node *node)
{
	return node->dep_cbs.get_dependency_xpath != NULL;
}

static int nb_node_validate_cb(const struct nb_node *nb_node,
			       enum nb_operation operation,
			       int callback_implemented, bool optional)
{
	bool valid;

	valid = nb_operation_is_valid(operation, nb_node->snode);

	/*
	 * Add an exception for operational data callbacks. A rw list usually
	 * doesn't need any associated operational data callbacks. But if this
	 * rw list is augmented by another module which adds state nodes under
	 * it, then this list will need to have the 'get_next()', 'get_keys()'
	 * and 'lookup_entry()' callbacks. As such, never log a warning when
	 * these callbacks are implemented when they are not needed, since this
	 * depends on context (e.g. some daemons might augment "frr-interface"
	 * while others don't).
	 */
	if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
	    && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
		flog_warn(EC_LIB_NB_CB_UNNEEDED,
			  "unneeded '%s' callback for '%s'",
			  nb_operation_name(operation), nb_node->xpath);

	if (!optional && valid && !callback_implemented) {
		flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
			 nb_operation_name(operation), nb_node->xpath);
		return 1;
	}

	return 0;
}

/*
 * Check if the required callbacks were implemented for the given northbound
 * node.
 */
static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)

{
	unsigned int error = 0;

	error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
				     !!nb_node->cbs.create, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
				     !!nb_node->cbs.modify, false);
	error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
				     !!nb_node->cbs.destroy, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
				     false);
	error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
				     !!nb_node->cbs.pre_validate, true);
	error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
				     !!nb_node->cbs.apply_finish, true);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
				     !!nb_node->cbs.get_elem, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
				     !!nb_node->cbs.get_next, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
				     !!nb_node->cbs.get_keys, false);
	error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
				     !!nb_node->cbs.lookup_entry, false);
	error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
				     false);

	return error;
}

static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
{
	/* Top-level nodes can have any priority. */
	if (!nb_node->parent)
		return 0;

	if (nb_node->priority < nb_node->parent->priority) {
		flog_err(EC_LIB_NB_CB_INVALID_PRIO,
			 "node has higher priority than its parent [xpath %s]",
			 nb_node->xpath);
		return 1;
	}

	return 0;
}

static int nb_node_validate(const struct lysc_node *snode, void *arg)
{
	struct nb_node *nb_node = snode->priv;
	unsigned int *errors = arg;

	/* Validate callbacks and priority. */
	if (nb_node) {
		*errors += nb_node_validate_cbs(nb_node);
		*errors += nb_node_validate_priority(nb_node);
	}

	return YANG_ITER_CONTINUE;
}

struct nb_config *nb_config_new(struct lyd_node *dnode)
{
	struct nb_config *config;

	config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
	if (dnode)
		config->dnode = dnode;
	else
		config->dnode = yang_dnode_new(ly_native_ctx, true);
	config->version = 0;

	return config;
}

void nb_config_free(struct nb_config *config)
{
	if (config->dnode)
		yang_dnode_free(config->dnode);
	XFREE(MTYPE_NB_CONFIG, config);
}

struct nb_config *nb_config_dup(const struct nb_config *config)
{
	struct nb_config *dup;

	dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
	dup->dnode = yang_dnode_dup(config->dnode);
	dup->version = config->version;

	return dup;
}

int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
		    bool preserve_source)
{
	int ret;

	ret = lyd_merge_siblings(&config_dst->dnode, config_src->dnode, 0);
	if (ret != 0)
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);

	if (!preserve_source)
		nb_config_free(config_src);

	return (ret == 0) ? NB_OK : NB_ERR;
}

void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		config_dst->dnode = config_src->dnode;
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}

/* Generate the nb_config_cbs tree. */
static inline int nb_config_cb_compare(const struct nb_config_cb *a,
				       const struct nb_config_cb *b)
{
	/* Sort by priority first. */
	if (a->nb_node->priority < b->nb_node->priority)
		return -1;
	if (a->nb_node->priority > b->nb_node->priority)
		return 1;

	/*
	 * Preserve the order of the configuration changes as told by libyang.
	 */
	if (a->seq < b->seq)
		return -1;
	if (a->seq > b->seq)
		return 1;

	/*
	 * All 'apply_finish' callbacks have their sequence number set to zero.
	 * In this case, compare them using their dnode pointers (the order
	 * doesn't matter for callbacks that have the same priority).
	 */
	if (a->dnode < b->dnode)
		return -1;
	if (a->dnode > b->dnode)
		return 1;

	return 0;
}
RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);

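/*
 * Enqueue a single configuration change. Changes are kept in an RB tree
 * ordered by the comparison function above (priority, then sequence).
 */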
static void nb_config_diff_add_change(struct nb_config_cbs *changes,
				      enum nb_operation operation,
				      uint32_t *seq,
				      const struct lyd_node *dnode)
{
	struct nb_config_change *change;

	/* Ignore unimplemented nodes. */
	if (!dnode->schema->priv)
		return;

	change = XCALLOC(MTYPE_TMP, sizeof(*change));
	change->cb.operation = operation;
	change->cb.seq = *seq;
	*seq = *seq + 1;
	change->cb.nb_node = dnode->schema->priv;
	change->cb.dnode = dnode;

	RB_INSERT(nb_config_cbs, changes, &change->cb);
}

static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
{
	while (!RB_EMPTY(nb_config_cbs, changes)) {
		struct nb_config_change *change;

		change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
							    changes);
		RB_REMOVE(nb_config_cbs, changes, &change->cb);
		XFREE(MTYPE_TMP, change);
	}
}

/*
 * Helper function used when calculating the delta between two different
 * configurations. Given a new subtree, calculate all new YANG data nodes,
 * excluding default leafs and leaf-lists. This is a recursive function.
 */
static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	enum nb_operation operation;
	struct lyd_node *child;

	/* Ignore unimplemented nodes. */
	if (!dnode->schema->priv)
		return;

	switch (dnode->schema->nodetype) {
	case LYS_LEAF:
	case LYS_LEAFLIST:
		if (lyd_is_default(dnode))
			break;

		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			operation = NB_OP_CREATE;
		else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
			operation = NB_OP_MODIFY;
		else
			return;

		nb_config_diff_add_change(changes, operation, seq, dnode);
		break;
	case LYS_CONTAINER:
	case LYS_LIST:
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
						  dnode);

		/* Process child nodes recursively. */
		LY_LIST_FOR (lyd_child(dnode), child) {
			nb_config_diff_created(child, seq, changes);
		}
		break;
	default:
		break;
	}
}

static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	/* Ignore unimplemented nodes. */
	if (!dnode->schema->priv)
		return;

	if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
		nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
	else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
		struct lyd_node *child;

		/*
		 * Non-presence containers need special handling since they
		 * don't have "destroy" callbacks. In this case, what we need to
		 * do is to call the "destroy" callbacks of their child nodes
		 * when applicable (i.e. optional nodes).
		 */
		LY_LIST_FOR (lyd_child(dnode), child) {
			nb_config_diff_deleted(child, seq, changes);
		}
	}
}

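/*
 * Return the libyang diff operation ('c'reate, 'd'elete, 'r'eplace or
 * 'n'one) attached to a diff node via the "yang:operation" metadata.
 */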
static int nb_lyd_diff_get_op(const struct lyd_node *dnode)
{
	const struct lyd_meta *meta;
	LY_LIST_FOR (dnode->meta, meta) {
		if (strcmp(meta->name, "operation")
		    || strcmp(meta->annotation->module->name, "yang"))
			continue;
		return lyd_get_meta_value(meta)[0];
	}
	return 'n';
}

#if 0 /* Used below in nb_config_diff inside normally disabled code */
static inline void nb_config_diff_dnode_log_path(const char *context,
						 const char *path,
						 const struct lyd_node *dnode)
{
	if (dnode->schema->nodetype & LYD_NODE_TERM)
		zlog_debug("nb_config_diff: %s: %s: %s", context, path,
			   lyd_get_value(dnode));
	else
		zlog_debug("nb_config_diff: %s: %s", context, path);
}

static inline void nb_config_diff_dnode_log(const char *context,
					    const struct lyd_node *dnode)
{
	if (!dnode) {
		zlog_debug("nb_config_diff: %s: NULL", context);
		return;
	}

	char *path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
	nb_config_diff_dnode_log_path(context, path, dnode);
	free(path);
}
#endif

/* Calculate the delta between two different configurations. */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_node *diff = NULL;
	const struct lyd_node *root, *dnode;
	struct lyd_node *target;
	int op;
	LY_ERR err;
	char *path;

#if 0 /* Useful (noisy) when debugging diff code, and for improving later */
	if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		LY_LIST_FOR(config1->dnode, root) {
			LYD_TREE_DFS_BEGIN(root, dnode) {
				nb_config_diff_dnode_log("from", dnode);
				LYD_TREE_DFS_END(root, dnode);
			}
		}
		LY_LIST_FOR(config2->dnode, root) {
			LYD_TREE_DFS_BEGIN(root, dnode) {
				nb_config_diff_dnode_log("to", dnode);
				LYD_TREE_DFS_END(root, dnode);
			}
		}
	}
#endif

	err = lyd_diff_siblings(config1->dnode, config2->dnode,
				LYD_DIFF_DEFAULTS, &diff);
	assert(!err);

	if (diff && DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		char *s;

		if (!lyd_print_mem(&s, diff, LYD_JSON,
				   LYD_PRINT_WITHSIBLINGS | LYD_PRINT_WD_ALL)) {
			zlog_debug("%s: %s", __func__, s);
			free(s);
		}
	}

	uint32_t seq = 0;

	LY_LIST_FOR (diff, root) {
		LYD_TREE_DFS_BEGIN (root, dnode) {
			op = nb_lyd_diff_get_op(dnode);

			path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);

#if 0 /* Useful (noisy) when debugging diff code, and for improving later */
			if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
				char context[80];
				snprintf(context, sizeof(context),
					 "iterating diff: oper: %c seq: %u", op, seq);
				nb_config_diff_dnode_log_path(context, path, dnode);
			}
#endif
			switch (op) {
			case 'c': /* create */
				/*
				 * This is rather inefficient, but when we use
				 * dnode from the diff instead of the
				 * candidate config node we get failures when
				 * looking up default values, etc, based on
				 * the diff tree.
				 */
				target = yang_dnode_get(config2->dnode, path);
				assert(target);
				nb_config_diff_created(target, &seq, changes);

				/* Skip rest of sub-tree, move to next sibling
				 */
				LYD_TREE_DFS_continue = 1;
				break;
			case 'd': /* delete */
				target = yang_dnode_get(config1->dnode, path);
				assert(target);
				nb_config_diff_deleted(target, &seq, changes);

				/* Skip rest of sub-tree, move to next sibling
				 */
				LYD_TREE_DFS_continue = 1;
				break;
			case 'r': /* replace */
				/* either moving an entry or changing a value */
				target = yang_dnode_get(config2->dnode, path);
				assert(target);
				nb_config_diff_add_change(changes, NB_OP_MODIFY,
							  &seq, target);
				break;
			case 'n': /* none */
			default:
				break;
			}
			free(path);
			LYD_TREE_DFS_END(root, dnode);
		}
	}

	lyd_free_all(diff);
}

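/*
 * Edit a candidate configuration: create, modify, destroy or move the node
 * identified by the given XPath, keeping dependency/dependant nodes in sync.
 */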
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode, *dep_dnode;
	char xpath_edit[XPATH_MAXLEN];
	char dep_xpath[XPATH_MAXLEN];
	LY_ERR err;

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		err = lyd_new_path(candidate->dnode, ly_native_ctx, xpath_edit,
				   (void *)data->value, LYD_NEW_PATH_UPDATE,
				   &dnode);
		if (err) {
			flog_warn(EC_LIB_LIBYANG,
				  "%s: lyd_new_path(%s) failed: %d", __func__,
				  xpath_edit, err);
			return NB_ERR;
		} else if (dnode) {
			/* Create default nodes */
			LY_ERR err = lyd_new_implicit_tree(
				dnode, LYD_IMPLICIT_NO_STATE, NULL);
			if (err) {
				flog_warn(EC_LIB_LIBYANG,
					  "%s: lyd_new_implicit_all failed: %d",
					  __func__, err);
			}
			/*
			 * create dependency
			 *
			 * dnode returned by the lyd_new_path may be from a
			 * different schema, so we need to update the nb_node
			 */
			nb_node = dnode->schema->priv;
			if (nb_node->dep_cbs.get_dependency_xpath) {
				nb_node->dep_cbs.get_dependency_xpath(
					dnode, dep_xpath);

				err = lyd_new_path(candidate->dnode,
						   ly_native_ctx, dep_xpath,
						   NULL, LYD_NEW_PATH_UPDATE,
						   &dep_dnode);
				/* Create default nodes */
				if (!err && dep_dnode)
					err = lyd_new_implicit_tree(
						dep_dnode,
						LYD_IMPLICIT_NO_STATE, NULL);
				if (err) {
					flog_warn(
						EC_LIB_LIBYANG,
						"%s: dependency: lyd_new_path(%s) failed: %d",
						__func__, dep_xpath, err);
					return NB_ERR;
				}
			}
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		/* destroy dependant */
		if (nb_node->dep_cbs.get_dependant_xpath) {
			nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);

			dep_dnode = yang_dnode_get(candidate->dnode, dep_xpath);
			if (dep_dnode)
				lyd_free_tree(dep_dnode);
		}
		lyd_free_tree(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
	case NB_OP_GET_ELEM:
	case NB_OP_GET_NEXT:
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
	case NB_OP_RPC:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}

bool nb_candidate_needs_update(const struct nb_config *candidate)
{
	if (candidate->version < running_config->version)
		return true;

	return false;
}

int nb_candidate_update(struct nb_config *candidate)
{
	struct nb_config *updated_config;

	updated_config = nb_config_dup(running_config);
	if (nb_config_merge(updated_config, candidate, true) != NB_OK)
		return NB_ERR;

	nb_config_replace(candidate, updated_config, false);

	return NB_OK;
}

/*
 * Perform YANG syntactic and semantic validation.
 *
 * WARNING: lyd_validate() can change the configuration as part of the
 * validation process.
 */
static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
				      size_t errmsg_len)
{
	if (lyd_validate_all(&candidate->dnode, ly_native_ctx,
			     LYD_VALIDATE_NO_STATE, NULL)
	    != 0) {
		yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
		return NB_ERR_VALIDATION;
	}

	return NB_OK;
}

/* Perform code-level validation using the northbound callbacks. */
static int nb_candidate_validate_code(struct nb_context *context,
				      struct nb_config *candidate,
				      struct nb_config_cbs *changes,
				      char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;
	struct lyd_node *root, *child;
	int ret;

	/* First validate the candidate as a whole. */
	LY_LIST_FOR (candidate->dnode, root) {
		LYD_TREE_DFS_BEGIN (root, child) {
			struct nb_node *nb_node;

			nb_node = child->schema->priv;
			if (!nb_node || !nb_node->cbs.pre_validate)
				goto next;

			ret = nb_callback_pre_validate(context, nb_node, child,
						       errmsg, errmsg_len);
			if (ret != NB_OK)
				return NB_ERR_VALIDATION;

		next:
			LYD_TREE_DFS_END(root, child);
		}
	}

	/* Now validate the configuration changes. */
	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;

		ret = nb_callback_configuration(context, NB_EV_VALIDATE, change,
						errmsg, errmsg_len);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}

int nb_candidate_validate(struct nb_context *context,
			  struct nb_config *candidate, char *errmsg,
			  size_t errmsg_len)
{
	struct nb_config_cbs changes;
	int ret;

	if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len) != NB_OK)
		return NB_ERR_VALIDATION;

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
					 errmsg_len);
	nb_config_diff_del_changes(&changes);

	return ret;
}

int nb_candidate_commit_prepare(struct nb_context context,
				struct nb_config *candidate,
				const char *comment,
				struct nb_transaction **transaction,
				char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	if (RB_EMPTY(nb_config_cbs, &changes)) {
		snprintf(
			errmsg, errmsg_len,
			"No changes to apply were found during preparation phase");
		return NB_ERR_NO_CHANGES;
	}

	if (nb_candidate_validate_code(&context, candidate, &changes, errmsg,
				       errmsg_len) != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_VALIDATION;
	}

	*transaction = nb_transaction_new(context, candidate, &changes, comment,
					  errmsg, errmsg_len);
	if (*transaction == NULL) {
		flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			  "%s: failed to create transaction: %s", __func__,
			  errmsg);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_LOCKED;
	}

	return nb_transaction_process(NB_EV_PREPARE, *transaction, errmsg,
				      errmsg_len);
}

void nb_candidate_commit_abort(struct nb_transaction *transaction, char *errmsg,
			       size_t errmsg_len)
{
	(void)nb_transaction_process(NB_EV_ABORT, transaction, errmsg,
				     errmsg_len);
	nb_transaction_free(transaction);
}

void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id,
			       char *errmsg, size_t errmsg_len)
{
	(void)nb_transaction_process(NB_EV_APPLY, transaction, errmsg,
				     errmsg_len);
	nb_transaction_apply_finish(transaction, errmsg, errmsg_len);

	/* Replace running by candidate. */
	transaction->config->version++;
	nb_config_replace(running_config, transaction->config, true);

	/* Record transaction. */
	if (save_transaction && nb_db_enabled
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}

int nb_candidate_commit(struct nb_context context, struct nb_config *candidate,
			bool save_transaction, const char *comment,
			uint32_t *transaction_id, char *errmsg,
			size_t errmsg_len)
{
	struct nb_transaction *transaction = NULL;
	int ret;

	ret = nb_candidate_commit_prepare(context, candidate, comment,
					  &transaction, errmsg, errmsg_len);
	/*
	 * Apply the changes if the preparation phase succeeded. Otherwise abort
	 * the transaction.
	 */
	if (ret == NB_OK)
		nb_candidate_commit_apply(transaction, save_transaction,
					  transaction_id, errmsg, errmsg_len);
	else if (transaction != NULL)
		nb_candidate_commit_abort(transaction, errmsg, errmsg_len);

	return ret;
}

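/*
 * Running configuration lock API: only the client/user pair that took the
 * lock may modify the running configuration until the lock is released.
 */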
int nb_running_lock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked) {
			running_config_mgmt_lock.locked = true;
			running_config_mgmt_lock.owner_client = client;
			running_config_mgmt_lock.owner_user = user;
			ret = 0;
		}
	}

	return ret;
}

int nb_running_unlock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (running_config_mgmt_lock.locked
		    && running_config_mgmt_lock.owner_client == client
		    && running_config_mgmt_lock.owner_user == user) {
			running_config_mgmt_lock.locked = false;
			running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
			running_config_mgmt_lock.owner_user = NULL;
			ret = 0;
		}
	}

	return ret;
}

int nb_running_lock_check(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked
		    || (running_config_mgmt_lock.owner_client == client
			&& running_config_mgmt_lock.owner_user == user))
			ret = 0;
	}

	return ret;
}

static void nb_log_config_callback(const enum nb_event event,
				   enum nb_operation operation,
				   const struct lyd_node *dnode)
{
	const char *value;
	char xpath[XPATH_MAXLEN];

	if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
		return;

	yang_dnode_get_path(dnode, xpath, sizeof(xpath));
	if (yang_snode_is_typeless_data(dnode->schema))
		value = "(none)";
	else
		value = yang_dnode_get_string(dnode, NULL);

	zlog_debug(
		"northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
		nb_event_name(event), nb_operation_name(operation), xpath,
		value);
}

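/*
 * The nb_callback_*() wrappers below invoke the daemon-provided callbacks
 * and log return values that are unexpected for the given commit phase.
 */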
static int nb_callback_create(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_create_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_CREATE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.create(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_modify(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_modify_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MODIFY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.modify(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_destroy(struct nb_context *context,
			       const struct nb_node *nb_node,
			       enum nb_event event,
			       const struct lyd_node *dnode, char *errmsg,
			       size_t errmsg_len)
{
	struct nb_cb_destroy_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_DESTROY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.destroy(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_move(struct nb_context *context,
			    const struct nb_node *nb_node, enum nb_event event,
			    const struct lyd_node *dnode, char *errmsg,
			    size_t errmsg_len)
{
	struct nb_cb_move_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MOVE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.move(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_pre_validate(struct nb_context *context,
				    const struct nb_node *nb_node,
				    const struct lyd_node *dnode, char *errmsg,
				    size_t errmsg_len)
{
	struct nb_cb_pre_validate_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);

	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.pre_validate(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR_VALIDATION:
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static void nb_callback_apply_finish(struct nb_context *context,
				     const struct nb_node *nb_node,
				     const struct lyd_node *dnode, char *errmsg,
				     size_t errmsg_len)
{
	struct nb_cb_apply_finish_args args = {};

	nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);

	args.context = context;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	nb_node->cbs.apply_finish(&args);
}

struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry)
{
	struct nb_cb_get_elem_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_elem): xpath [%s] list_entry [%p]",
	       xpath, list_entry);

	args.xpath = xpath;
	args.list_entry = list_entry;
	return nb_node->cbs.get_elem(&args);
}

const void *nb_callback_get_next(const struct nb_node *nb_node,
				 const void *parent_list_entry,
				 const void *list_entry)
{
	struct nb_cb_get_next_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
	       nb_node->xpath, parent_list_entry, list_entry);

	args.parent_list_entry = parent_list_entry;
	args.list_entry = list_entry;
	return nb_node->cbs.get_next(&args);
}

int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
			 struct yang_list_keys *keys)
{
	struct nb_cb_get_keys_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_keys): node [%s] list_entry [%p]",
	       nb_node->xpath, list_entry);

	args.list_entry = list_entry;
	args.keys = keys;
	return nb_node->cbs.get_keys(&args);
}

const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
				     const void *parent_list_entry,
				     const struct yang_list_keys *keys)
{
	struct nb_cb_lookup_entry_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
	       nb_node->xpath, parent_list_entry);

	args.parent_list_entry = parent_list_entry;
	args.keys = keys;
	return nb_node->cbs.lookup_entry(&args);
}

int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
		    const struct list *input, struct list *output, char *errmsg,
		    size_t errmsg_len)
{
	struct nb_cb_rpc_args args = {};

	DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);

	args.xpath = xpath;
	args.input = input;
	args.output = output;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	return nb_node->cbs.rpc(&args);
}

/*
 * Call the northbound configuration callback associated to a given
 * configuration change.
 */
static int nb_callback_configuration(struct nb_context *context,
				     const enum nb_event event,
				     struct nb_config_change *change,
				     char *errmsg, size_t errmsg_len)
{
	enum nb_operation operation = change->cb.operation;
	char xpath[XPATH_MAXLEN];
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	switch (operation) {
	case NB_OP_CREATE:
		ret = nb_callback_create(context, nb_node, event, dnode,
					 resource, errmsg, errmsg_len);
		break;
	case NB_OP_MODIFY:
		ret = nb_callback_modify(context, nb_node, event, dnode,
					 resource, errmsg, errmsg_len);
		break;
	case NB_OP_DESTROY:
		ret = nb_callback_destroy(context, nb_node, event, dnode,
					  errmsg, errmsg_len);
		break;
	case NB_OP_MOVE:
		ret = nb_callback_move(context, nb_node, event, dnode, errmsg,
				       errmsg_len);
		break;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
	case NB_OP_GET_ELEM:
	case NB_OP_GET_NEXT:
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
	case NB_OP_RPC:
		yang_dnode_get_path(dnode, xpath, sizeof(xpath));
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		yang_dnode_get_path(dnode, xpath, sizeof(xpath));

		switch (event) {
		case NB_EV_VALIDATE:
			flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE,
				  "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				  nb_err_name(ret), nb_event_name(event),
				  nb_operation_name(operation), xpath,
				  errmsg[0] ? " message: " : "", errmsg);
			break;
		case NB_EV_PREPARE:
			flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE,
				  "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				  nb_err_name(ret), nb_event_name(event),
				  nb_operation_name(operation), xpath,
				  errmsg[0] ? " message: " : "", errmsg);
			break;
		case NB_EV_ABORT:
			flog_warn(EC_LIB_NB_CB_CONFIG_ABORT,
				  "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				  nb_err_name(ret), nb_event_name(event),
				  nb_operation_name(operation), xpath,
				  errmsg[0] ? " message: " : "", errmsg);
			break;
		case NB_EV_APPLY:
			flog_err(EC_LIB_NB_CB_CONFIG_APPLY,
				 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				 nb_err_name(ret), nb_event_name(event),
				 nb_operation_name(operation), xpath,
				 errmsg[0] ? " message: " : "", errmsg);
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]", __func__,
				 event, xpath);
			exit(1);
		}
	}

	return ret;
}

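/*
 * Create a new configuration transaction. Fails when the running
 * configuration is locked by another client or when another transaction is
 * already in progress.
 */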
static struct nb_transaction *
nb_transaction_new(struct nb_context context, struct nb_config *config,
		   struct nb_config_cbs *changes, const char *comment,
		   char *errmsg, size_t errmsg_len)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(context.client, context.user)) {
		strlcpy(errmsg,
			"running configuration is locked by another client",
			errmsg_len);
		return NULL;
	}

	if (transaction_in_progress) {
		strlcpy(errmsg,
			"there's already another transaction in progress",
			errmsg_len);
		return NULL;
	}
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->context = context;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	transaction->changes = *changes;

	return transaction;
}

static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	transaction_in_progress = false;
}

/* Process all configuration changes associated to a transaction. */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction,
				  char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;

	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		int ret;

		/*
		 * Only try to release resources that were allocated
		 * successfully.
		 */
		if (event == NB_EV_ABORT && !change->prepare_ok)
			break;

		/* Call the appropriate callback. */
		ret = nb_callback_configuration(&transaction->context, event,
						change, errmsg, errmsg_len);
		switch (event) {
		case NB_EV_PREPARE:
			if (ret != NB_OK)
				return ret;
			change->prepare_ok = true;
			break;
		case NB_EV_ABORT:
		case NB_EV_APPLY:
			/*
			 * At this point it's not possible to reject the
			 * transaction anymore, so any failure here can lead to
			 * inconsistencies and should be treated as a bug.
			 * Operations prone to errors, like validations and
			 * resource allocations, should be performed during the
			 * 'prepare' phase.
			 */
			break;
		case NB_EV_VALIDATE:
			break;
		}
	}

	return NB_OK;
}

static struct nb_config_cb *
nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
		       const struct lyd_node *dnode)
{
	struct nb_config_cb *cb;

	cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
	cb->nb_node = nb_node;
	cb->dnode = dnode;
	RB_INSERT(nb_config_cbs, cbs, cb);

	return cb;
}

static struct nb_config_cb *
nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
			const struct nb_node *nb_node,
			const struct lyd_node *dnode)
{
	struct nb_config_cb s;

	s.seq = 0;
	s.nb_node = nb_node;
	s.dnode = dnode;
	return RB_FIND(nb_config_cbs, cbs, &s);
}

/* Call the 'apply_finish' callbacks. */
static void nb_transaction_apply_finish(struct nb_transaction *transaction,
					char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = lyd_parent(dnode);
			if (!dnode)
				break;

			/*
			 * The dnode from 'delete' callbacks point to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node || !nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
				goto next;

			nb_apply_finish_cb_new(&cbs, nb_node, dnode);

		next:
			dnode = lyd_parent(dnode);
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs)
		nb_callback_apply_finish(&transaction->context, cb->nb_node,
					 cb->dnode, errmsg, errmsg_len);

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}

static int nb_oper_data_iter_children(const struct lysc_node *snode,
				      const char *xpath, const void *list_entry,
				      const struct yang_list_keys *list_keys,
				      struct yang_translator *translator,
				      bool first, uint32_t flags,
				      nb_oper_data_cb cb, void *arg)
{
	const struct lysc_node *child;

	LY_LIST_FOR (lysc_node_child(snode), child) {
		int ret;

		ret = nb_oper_data_iter_node(child, xpath, list_entry,
					     list_keys, translator, false,
					     flags, cb, arg);
		if (ret != NB_OK)
			return ret;
	}

	return NB_OK;
}

static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct yang_data *data;

	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	/* Ignore list keys. */
	if (lysc_is_key(nb_node->snode))
		return NB_OK;

	data = nb_callback_get_elem(nb_node, xpath, list_entry);
	if (data == NULL)
		/* Leaf of type "empty" is not present. */
		return NB_OK;

	return (*cb)(nb_node->snode, translator, data, arg);
}

static int nb_oper_data_iter_container(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry,
				       const struct yang_list_keys *list_keys,
				       struct yang_translator *translator,
				       uint32_t flags, nb_oper_data_cb cb,
				       void *arg)
{
	const struct lysc_node *snode = nb_node->snode;

	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
		return NB_OK;

	/* Read-only presence containers. */
	if (nb_node->cbs.get_elem) {
		struct yang_data *data;
		int ret;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			/* Presence container is not present. */
			return NB_OK;

		ret = (*cb)(snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	}

	/* Read-write presence containers. */
	if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)) {
		struct lysc_node_container *scontainer;

		scontainer = (struct lysc_node_container *)snode;
		if (CHECK_FLAG(scontainer->flags, LYS_PRESENCE)
		    && !yang_dnode_get(running_config->dnode, xpath))
			return NB_OK;
	}

	/* Iterate over the child nodes. */
	return nb_oper_data_iter_children(snode, xpath, list_entry, list_keys,
					  translator, false, flags, cb, arg);
}

static int
nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
			   const void *parent_list_entry,
			   const struct yang_list_keys *parent_list_keys,
			   struct yang_translator *translator, uint32_t flags,
			   nb_oper_data_cb cb, void *arg)
{
	const void *list_entry = NULL;

	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	do {
		struct yang_data *data;
		int ret;

		list_entry = nb_callback_get_next(nb_node, parent_list_entry,
						  list_entry);
		if (!list_entry)
			/* End of the list. */
			break;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			continue;

		ret = (*cb)(nb_node->snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	} while (list_entry);

	return NB_OK;
}

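/*
 * Iterate over all entries of a YANG list using the daemon's get_next() and
 * get_keys() callbacks, building the XPath of each entry (or a positional
 * index for keyless lists) before recursing into its children.
 */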
1719static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1720 const char *xpath_list,
1721 const void *parent_list_entry,
1722 const struct yang_list_keys *parent_list_keys,
1723 struct yang_translator *translator,
1724 uint32_t flags, nb_oper_data_cb cb, void *arg)
1725{
3bb513c3 1726 const struct lysc_node *snode = nb_node->snode;
1a4bc045 1727 const void *list_entry = NULL;
99fb518f 1728 uint32_t position = 1;
1a4bc045
RW
1729
1730 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1731 return NB_OK;
1732
1733 /* Iterate over all list entries. */
1734 do {
3bb513c3 1735 const struct lysc_node_leaf *skey;
1a4bc045 1736 struct yang_list_keys list_keys;
f999f11e 1737 char xpath[XPATH_MAXLEN * 2];
1a4bc045
RW
1738 int ret;
1739
1740 /* Obtain list entry. */
9eb2c0a1
RW
1741 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1742 list_entry);
1a4bc045
RW
1743 if (!list_entry)
1744 /* End of the list. */
1745 break;
1746
99fb518f
RW
1747 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1748 /* Obtain the list entry keys. */
9eb2c0a1
RW
1749 if (nb_callback_get_keys(nb_node, list_entry,
1750 &list_keys)
99fb518f
RW
1751 != NB_OK) {
1752 flog_warn(EC_LIB_NB_CB_STATE,
1753 "%s: failed to get list keys",
1754 __func__);
1755 return NB_ERR;
1756 }
1757
1758 /* Build XPath of the list entry. */
1759 strlcpy(xpath, xpath_list, sizeof(xpath));
3bb513c3
CH
1760 unsigned int i = 0;
1761 LY_FOR_KEYS (snode, skey) {
1762 assert(i < list_keys.num);
99fb518f
RW
1763 snprintf(xpath + strlen(xpath),
1764 sizeof(xpath) - strlen(xpath),
3bb513c3 1765 "[%s='%s']", skey->name,
99fb518f 1766 list_keys.key[i]);
3bb513c3 1767 i++;
99fb518f 1768 }
3bb513c3 1769 assert(i == list_keys.num);
99fb518f
RW
1770 } else {
1771 /*
1772 * Keyless list - build XPath using a positional index.
1773 */
1774 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1775 position);
1776 position++;
1a4bc045
RW
1777 }
1778
1779 /* Iterate over the child nodes. */
1780 ret = nb_oper_data_iter_children(
1781 nb_node->snode, xpath, list_entry, &list_keys,
1782 translator, false, flags, cb, arg);
1783 if (ret != NB_OK)
1784 return ret;
1785 } while (list_entry);
1786
1787 return NB_OK;
1788}
1789
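/*
 * Iterate over a single schema node. The node name is appended to the
 * parent XPath (prefixed with its module name when the node comes from
 * an augmenting module) and iteration is then dispatched to the handler
 * matching the node type (container, leaf, leaf-list, list or uses).
 */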
3bb513c3 1790static int nb_oper_data_iter_node(const struct lysc_node *snode,
1a4bc045
RW
1791 const char *xpath_parent,
1792 const void *list_entry,
1793 const struct yang_list_keys *list_keys,
1794 struct yang_translator *translator,
1795 bool first, uint32_t flags,
1796 nb_oper_data_cb cb, void *arg)
1797{
1798 struct nb_node *nb_node;
1799 char xpath[XPATH_MAXLEN];
1800 int ret = NB_OK;
1801
1802 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1803 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1804 return NB_OK;
1805
1806 /* Update XPath. */
1807 strlcpy(xpath, xpath_parent, sizeof(xpath));
6cd301e0 1808 if (!first && snode->nodetype != LYS_USES) {
3bb513c3 1809 struct lysc_node *parent;
6cd301e0
RW
1810
1811 /* Get the real parent. */
1812 parent = snode->parent;
6cd301e0
RW
1813
1814 /*
1815 * When necessary, include the namespace of the augmenting
1816 * module.
1817 */
3bb513c3 1818 if (parent && parent->module != snode->module)
6cd301e0
RW
1819 snprintf(xpath + strlen(xpath),
1820 sizeof(xpath) - strlen(xpath), "/%s:%s",
1821 snode->module->name, snode->name);
1822 else
1823 snprintf(xpath + strlen(xpath),
1824 sizeof(xpath) - strlen(xpath), "/%s",
1825 snode->name);
1826 }
1a4bc045
RW
1827
1828 nb_node = snode->priv;
1829 switch (snode->nodetype) {
1830 case LYS_CONTAINER:
1831 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1832 list_keys, translator, flags,
1833 cb, arg);
1834 break;
1835 case LYS_LEAF:
1836 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1837 list_keys, translator, flags, cb,
1838 arg);
1839 break;
1840 case LYS_LEAFLIST:
1841 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1842 list_keys, translator, flags,
1843 cb, arg);
1844 break;
1845 case LYS_LIST:
1846 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1847 list_keys, translator, flags, cb,
1848 arg);
1849 break;
1850 case LYS_USES:
1851 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1852 list_keys, translator, false,
1853 flags, cb, arg);
1854 break;
1855 default:
1856 break;
1857 }
1858
1859 return ret;
1860}
1861
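/*
 * Entry point for iterating over operational data. A minimal usage
 * sketch (hypothetical caller, for illustration only; assumes struct
 * yang_data exposes the element's xpath and string value):
 *
 *   static int dump_cb(const struct lysc_node *snode,
 *                      struct yang_translator *translator,
 *                      struct yang_data *data, void *arg)
 *   {
 *           zlog_debug("%s = %s", data->xpath,
 *                      data->value ? data->value : "");
 *           return NB_OK;
 *   }
 *
 *   (void)nb_oper_data_iterate("/frr-interface:lib", NULL, 0, dump_cb,
 *                              NULL);
 */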
1862int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1863 uint32_t flags, nb_oper_data_cb cb, void *arg)
1864{
1865 struct nb_node *nb_node;
1866 const void *list_entry = NULL;
1867 struct yang_list_keys list_keys;
1868 struct list *list_dnodes;
1869 struct lyd_node *dnode, *dn;
1870 struct listnode *ln;
1871 int ret;
1872
1873 nb_node = nb_node_find(xpath);
1874 if (!nb_node) {
1875 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1876 "%s: unknown data path: %s", __func__, xpath);
1877 return NB_ERR;
1878 }
1879
1880 /* For now this function works only with containers and lists. */
1881 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1882 flog_warn(
1883 EC_LIB_NB_OPERATIONAL_DATA,
1884 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1885 __func__, xpath);
1886 return NB_ERR;
1887 }
1888
1889 /*
1890 * Create a data tree from the XPath so that we can parse the keys of
1891 * all YANG lists (if any).
1892 */
3bb513c3
CH
1893
1894 LY_ERR err = lyd_new_path(NULL, ly_native_ctx, xpath, NULL,
1895 LYD_NEW_PATH_UPDATE, &dnode);
1896 if (err || !dnode) {
1897 const char *errmsg =
1898 err ? ly_errmsg(ly_native_ctx) : "node not found";
1899 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed %s",
1900 __func__, errmsg);
1a4bc045
RW
1901 return NB_ERR;
1902 }
1a4bc045
RW
1903
1904 /*
1905 * Create a linked list of the list data nodes, ordered from the root down.
1906 */
1907 list_dnodes = list_new();
3bb513c3
CH
1908 for (dn = dnode; dn; dn = lyd_parent(dn)) {
1909 if (dn->schema->nodetype != LYS_LIST || !lyd_child(dn))
1a4bc045
RW
1910 continue;
1911 listnode_add_head(list_dnodes, dn);
1912 }
1913 /*
1914 * Use the northbound callbacks to find the list entry pointer
1915 * corresponding to the given XPath.
1916 */
1917 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1918 struct lyd_node *child;
1919 struct nb_node *nn;
1920 unsigned int n = 0;
1921
1922 /* Obtain the list entry keys. */
1923 memset(&list_keys, 0, sizeof(list_keys));
3bb513c3
CH
1924 LY_LIST_FOR (lyd_child(dn), child) {
1925 if (!lysc_is_key(child->schema))
1926 break;
1a4bc045
RW
1927 strlcpy(list_keys.key[n],
1928 yang_dnode_get_string(child, NULL),
1929 sizeof(list_keys.key[n]));
1930 n++;
1931 }
1932 list_keys.num = n;
3bb513c3 1933 if (list_keys.num != yang_snode_num_keys(dn->schema)) {
9f6de299
RW
1934 list_delete(&list_dnodes);
1935 yang_dnode_free(dnode);
1936 return NB_ERR_NOT_FOUND;
1937 }
1a4bc045
RW
1938
1939 /* Find the list entry pointer. */
1940 nn = dn->schema->priv;
baa1d4af
IR
1941 if (!nn->cbs.lookup_entry) {
1942 flog_warn(
1943 EC_LIB_NB_OPERATIONAL_DATA,
1944 "%s: data path doesn't support iteration over operational data: %s",
1945 __func__, xpath);
1946 list_delete(&list_dnodes);
1947 yang_dnode_free(dnode);
1948 return NB_ERR;
1949 }
1950
9eb2c0a1
RW
1951 list_entry =
1952 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1a4bc045
RW
1953 if (list_entry == NULL) {
1954 list_delete(&list_dnodes);
1955 yang_dnode_free(dnode);
1956 return NB_ERR_NOT_FOUND;
1957 }
1958 }
1959
1960 /* If a list entry was given, iterate over that list entry only. */
3bb513c3 1961 if (dnode->schema->nodetype == LYS_LIST && lyd_child(dnode))
1a4bc045
RW
1962 ret = nb_oper_data_iter_children(
1963 nb_node->snode, xpath, list_entry, &list_keys,
1964 translator, true, flags, cb, arg);
1965 else
1966 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1967 &list_keys, translator, true,
1968 flags, cb, arg);
1969
1970 list_delete(&list_dnodes);
1971 yang_dnode_free(dnode);
1972
1973 return ret;
1974}
1975
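/*
 * Check whether a northbound operation is applicable to a schema node.
 * For example, "create" only makes sense for writable presence
 * containers, leafs of type empty, lists and leaf-lists, while
 * "get_elem" is restricted to read-only (state) data.
 */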
1c2facd1 1976bool nb_operation_is_valid(enum nb_operation operation,
3bb513c3 1977 const struct lysc_node *snode)
1c2facd1 1978{
544ca69a 1979 struct nb_node *nb_node = snode->priv;
3bb513c3
CH
1980 struct lysc_node_container *scontainer;
1981 struct lysc_node_leaf *sleaf;
1c2facd1
RW
1982
1983 switch (operation) {
1984 case NB_OP_CREATE:
db452508 1985 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1986 return false;
1987
1988 switch (snode->nodetype) {
1989 case LYS_LEAF:
3bb513c3
CH
1990 sleaf = (struct lysc_node_leaf *)snode;
1991 if (sleaf->type->basetype != LY_TYPE_EMPTY)
1c2facd1
RW
1992 return false;
1993 break;
1994 case LYS_CONTAINER:
3bb513c3
CH
1995 scontainer = (struct lysc_node_container *)snode;
1996 if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
1c2facd1
RW
1997 return false;
1998 break;
1999 case LYS_LIST:
2000 case LYS_LEAFLIST:
2001 break;
2002 default:
2003 return false;
2004 }
2005 return true;
2006 case NB_OP_MODIFY:
db452508 2007 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
2008 return false;
2009
2010 switch (snode->nodetype) {
2011 case LYS_LEAF:
3bb513c3
CH
2012 sleaf = (struct lysc_node_leaf *)snode;
2013 if (sleaf->type->basetype == LY_TYPE_EMPTY)
1c2facd1
RW
2014 return false;
2015
2016 /* List keys can't be modified. */
3bb513c3 2017 if (lysc_is_key(sleaf))
1c2facd1
RW
2018 return false;
2019 break;
2020 default:
2021 return false;
2022 }
2023 return true;
95ce849b 2024 case NB_OP_DESTROY:
db452508 2025 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
2026 return false;
2027
2028 switch (snode->nodetype) {
2029 case LYS_LEAF:
3bb513c3 2030 sleaf = (struct lysc_node_leaf *)snode;
1c2facd1
RW
2031
2032 /* List keys can't be deleted. */
3bb513c3 2033 if (lysc_is_key(sleaf))
1c2facd1
RW
2034 return false;
2035
2036 /*
2037 * Only optional leafs can be deleted: those under a case statement,
2038 * those with a "when" condition, and non-mandatory leafs without a default.
2039 */
2040 if (snode->parent->nodetype == LYS_CASE)
2041 return true;
2042 if (sleaf->when)
2043 return true;
db452508
RW
2044 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
2045 || sleaf->dflt)
1c2facd1
RW
2046 return false;
2047 break;
2048 case LYS_CONTAINER:
3bb513c3
CH
2049 scontainer = (struct lysc_node_container *)snode;
2050 if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
1c2facd1
RW
2051 return false;
2052 break;
2053 case LYS_LIST:
2054 case LYS_LEAFLIST:
2055 break;
2056 default:
2057 return false;
2058 }
2059 return true;
2060 case NB_OP_MOVE:
db452508 2061 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
2062 return false;
2063
2064 switch (snode->nodetype) {
2065 case LYS_LIST:
2066 case LYS_LEAFLIST:
3bb513c3 2067 if (!CHECK_FLAG(snode->flags, LYS_ORDBY_USER))
1c2facd1
RW
2068 return false;
2069 break;
2070 default:
2071 return false;
2072 }
2073 return true;
34224f0c 2074 case NB_OP_PRE_VALIDATE:
1c2facd1 2075 case NB_OP_APPLY_FINISH:
db452508 2076 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
2077 return false;
2078 return true;
2079 case NB_OP_GET_ELEM:
db452508 2080 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1c2facd1
RW
2081 return false;
2082
2083 switch (snode->nodetype) {
2084 case LYS_LEAF:
1a4bc045 2085 case LYS_LEAFLIST:
1c2facd1
RW
2086 break;
2087 case LYS_CONTAINER:
3bb513c3
CH
2088 scontainer = (struct lysc_node_container *)snode;
2089 if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
1c2facd1
RW
2090 return false;
2091 break;
2092 default:
2093 return false;
2094 }
2095 return true;
2096 case NB_OP_GET_NEXT:
1a4bc045
RW
2097 switch (snode->nodetype) {
2098 case LYS_LIST:
2099 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
2100 return false;
2101 break;
2102 case LYS_LEAFLIST:
2103 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2104 return false;
2105 break;
2106 default:
2107 return false;
2108 }
2109 return true;
1c2facd1
RW
2110 case NB_OP_GET_KEYS:
2111 case NB_OP_LOOKUP_ENTRY:
1c2facd1
RW
2112 switch (snode->nodetype) {
2113 case LYS_LIST:
544ca69a
RW
2114 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
2115 return false;
99fb518f
RW
2116 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
2117 return false;
1c2facd1
RW
2118 break;
2119 default:
2120 return false;
2121 }
2122 return true;
2123 case NB_OP_RPC:
db452508 2124 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1c2facd1
RW
2125 return false;
2126
2127 switch (snode->nodetype) {
2128 case LYS_RPC:
2129 case LYS_ACTION:
2130 break;
2131 default:
2132 return false;
2133 }
2134 return true;
2135 default:
2136 return false;
2137 }
2138}
2139
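/*
 * Send a YANG notification to the northbound plugins registered on the
 * hook below. The arguments list is consumed: it is freed before this
 * function returns. A minimal sketch of a typical caller (hypothetical
 * notification xpath; assumes the yang_data_list_new()/yang_data_new()
 * helpers from lib/yang.h):
 *
 *   struct list *arguments = yang_data_list_new();
 *
 *   listnode_add(arguments, yang_data_new(
 *                "/frr-example:event/interface", "eth0"));
 *   nb_notification_send("/frr-example:event", arguments);
 */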
2140DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
2141 (xpath, arguments));
2142
2143int nb_notification_send(const char *xpath, struct list *arguments)
2144{
2145 int ret;
2146
9eb2c0a1
RW
2147 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
2148
1c2facd1
RW
2149 ret = hook_call(nb_notification_send, xpath, arguments);
2150 if (arguments)
2151 list_delete(&arguments);
2152
2153 return ret;
2154}
2155
ccd43ada
RW
2156/* Running configuration user pointer management. */
2157struct nb_config_entry {
2158 char xpath[XPATH_MAXLEN];
2159 void *entry;
2160};
2161
2162static bool running_config_entry_cmp(const void *value1, const void *value2)
2163{
2164 const struct nb_config_entry *c1 = value1;
2165 const struct nb_config_entry *c2 = value2;
2166
2167 return strmatch(c1->xpath, c2->xpath);
2168}
2169
d8b87afe 2170static unsigned int running_config_entry_key_make(const void *value)
ccd43ada
RW
2171{
2172 return string_hash_make(value);
2173}
2174
2175static void *running_config_entry_alloc(void *p)
2176{
2177 struct nb_config_entry *new, *key = p;
2178
2179 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
2180 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
2181
2182 return new;
2183}
2184
2185static void running_config_entry_free(void *arg)
2186{
2187 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
2188}
2189
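/*
 * Associate a user pointer with a configuration node. The usual pattern
 * is to store a backend object from the "create" callback, look it up
 * from callbacks operating on the same node or its descendants, and
 * release it from the matching "destroy" callback. A minimal sketch
 * (hypothetical daemon type "struct foo" and helpers foo_new()/foo_free();
 * args->dnode is the data node handed to the callback):
 *
 *   // In the create callback, during NB_EV_APPLY:
 *   struct foo *foo = foo_new();
 *   nb_running_set_entry(args->dnode, foo);
 *
 *   // In a child node's modify callback:
 *   struct foo *foo = nb_running_get_entry(args->dnode, NULL, true);
 *
 *   // In the destroy callback, during NB_EV_APPLY:
 *   struct foo *foo = nb_running_unset_entry(args->dnode);
 *   foo_free(foo);
 */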
2190void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
2191{
2192 struct nb_config_entry *config, s;
2193
2194 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2195 config = hash_get(running_config_entries, &s,
2196 running_config_entry_alloc);
2197 config->entry = entry;
2198}
2199
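/*
 * Rewrite the XPath of every user pointer stored under "xpath_from" so
 * that it is registered under "xpath_to" instead. Useful when an entire
 * configuration subtree is moved, for example after renaming a list
 * entry.
 */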
f7c20aa1
QY
2200void nb_running_move_tree(const char *xpath_from, const char *xpath_to)
2201{
2202 struct nb_config_entry *entry;
2203 struct list *entries = hash_to_list(running_config_entries);
2204 struct listnode *ln;
2205
2206 for (ALL_LIST_ELEMENTS_RO(entries, ln, entry)) {
2207 if (!frrstr_startswith(entry->xpath, xpath_from))
2208 continue;
2209
2210 hash_release(running_config_entries, entry);
2211
2212 char *newpath =
2213 frrstr_replace(entry->xpath, xpath_from, xpath_to);
2214 strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
2215 XFREE(MTYPE_TMP, newpath);
2216
8e3aae66 2217 (void)hash_get(running_config_entries, entry,
2218 hash_alloc_intern);
f7c20aa1
QY
2219 }
2220
2221 list_delete(&entries);
2222}
2223
ccd43ada
RW
2224static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
2225{
2226 struct nb_config_entry *config, s;
2227 struct lyd_node *child;
2228 void *entry = NULL;
2229
2230 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2231 config = hash_release(running_config_entries, &s);
2232 if (config) {
2233 entry = config->entry;
2234 running_config_entry_free(config);
2235 }
2236
2237 /* Unset user pointers from the child nodes. */
2238 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
3bb513c3 2239 LY_LIST_FOR (lyd_child(dnode), child) {
ccd43ada
RW
2240 (void)nb_running_unset_entry_helper(child);
2241 }
2242 }
2243
2244 return entry;
2245}
2246
2247void *nb_running_unset_entry(const struct lyd_node *dnode)
2248{
2249 void *entry;
2250
2251 entry = nb_running_unset_entry_helper(dnode);
2252 assert(entry);
2253
2254 return entry;
2255}
2256
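/*
 * Worker shared by the lookup functions below: search for a user pointer
 * attached to the given node and, when "rec_search" is set, keep walking
 * up the parent nodes until one is found. Depending on
 * "abort_if_not_found", a failed lookup either returns NULL or aborts
 * the daemon.
 */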
b112b1ab
G
2257static void *nb_running_get_entry_worker(const struct lyd_node *dnode,
2258 const char *xpath,
2259 bool abort_if_not_found,
2260 bool rec_search)
ccd43ada
RW
2261{
2262 const struct lyd_node *orig_dnode = dnode;
2263 char xpath_buf[XPATH_MAXLEN];
b112b1ab 2264 bool rec_flag = true;
ccd43ada
RW
2265
2266 assert(dnode || xpath);
2267
2268 if (!dnode)
2269 dnode = yang_dnode_get(running_config->dnode, xpath);
2270
b112b1ab 2271 while (rec_flag && dnode) {
ccd43ada
RW
2272 struct nb_config_entry *config, s;
2273
2274 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2275 config = hash_lookup(running_config_entries, &s);
2276 if (config)
2277 return config->entry;
2278
b112b1ab
G
2279 rec_flag = rec_search;
2280
3bb513c3 2281 dnode = lyd_parent(dnode);
ccd43ada
RW
2282 }
2283
2284 if (!abort_if_not_found)
2285 return NULL;
2286
2287 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
2288 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
2289 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
2290 zlog_backtrace(LOG_ERR);
2291 abort();
2292}
2293
b112b1ab
G
2294void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
2295 bool abort_if_not_found)
2296{
2297 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2298 true);
2299}
2300
2301void *nb_running_get_entry_non_rec(const struct lyd_node *dnode,
2302 const char *xpath, bool abort_if_not_found)
2303{
2304 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2305 false);
2306}
2307
ccd43ada 2308/* Logging functions. */
1c2facd1
RW
2309const char *nb_event_name(enum nb_event event)
2310{
2311 switch (event) {
2312 case NB_EV_VALIDATE:
2313 return "validate";
2314 case NB_EV_PREPARE:
2315 return "prepare";
2316 case NB_EV_ABORT:
2317 return "abort";
2318 case NB_EV_APPLY:
2319 return "apply";
1c2facd1 2320 }
ca411e38
DS
2321
2322 assert(!"Reached end of function we should never hit");
1c2facd1
RW
2323}
2324
2325const char *nb_operation_name(enum nb_operation operation)
2326{
2327 switch (operation) {
2328 case NB_OP_CREATE:
2329 return "create";
2330 case NB_OP_MODIFY:
2331 return "modify";
95ce849b
MS
2332 case NB_OP_DESTROY:
2333 return "destroy";
1c2facd1
RW
2334 case NB_OP_MOVE:
2335 return "move";
34224f0c
RW
2336 case NB_OP_PRE_VALIDATE:
2337 return "pre_validate";
1c2facd1
RW
2338 case NB_OP_APPLY_FINISH:
2339 return "apply_finish";
2340 case NB_OP_GET_ELEM:
2341 return "get_elem";
2342 case NB_OP_GET_NEXT:
2343 return "get_next";
2344 case NB_OP_GET_KEYS:
2345 return "get_keys";
2346 case NB_OP_LOOKUP_ENTRY:
2347 return "lookup_entry";
2348 case NB_OP_RPC:
2349 return "rpc";
1c2facd1 2350 }
ca411e38
DS
2351
2352 assert(!"Reached end of function we should never hit");
1c2facd1
RW
2353}
2354
2355const char *nb_err_name(enum nb_error error)
2356{
2357 switch (error) {
2358 case NB_OK:
2359 return "ok";
2360 case NB_ERR:
2361 return "generic error";
2362 case NB_ERR_NO_CHANGES:
2363 return "no changes";
2364 case NB_ERR_NOT_FOUND:
2365 return "element not found";
2366 case NB_ERR_LOCKED:
2367 return "resource is locked";
2368 case NB_ERR_VALIDATION:
df5eda3d 2369 return "validation";
1c2facd1
RW
2370 case NB_ERR_RESOURCE:
2371 return "failed to allocate resource";
2372 case NB_ERR_INCONSISTENCY:
2373 return "internal inconsistency";
1c2facd1 2374 }
ca411e38
DS
2375
2376 assert(!"Reached end of function we should never hit");
1c2facd1
RW
2377}
2378
2379const char *nb_client_name(enum nb_client client)
2380{
2381 switch (client) {
2382 case NB_CLIENT_CLI:
2383 return "CLI";
5bce33b3
RW
2384 case NB_CLIENT_CONFD:
2385 return "ConfD";
a7ca2199
RW
2386 case NB_CLIENT_SYSREPO:
2387 return "Sysrepo";
ec2ac5f2
RW
2388 case NB_CLIENT_GRPC:
2389 return "gRPC";
ca411e38
DS
2390 case NB_CLIENT_PCEP:
2391 return "Pcep";
2392 case NB_CLIENT_NONE:
2393 return "None";
1c2facd1 2394 }
ca411e38
DS
2395
2396 assert(!"Reached end of function we should never hit");
1c2facd1
RW
2397}
2398
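/*
 * Copy the callbacks (and optional priorities) declared in the daemon's
 * frr_yang_module_info array into the nb_node attached to each schema
 * node.
 */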
2399static void nb_load_callbacks(const struct frr_yang_module_info *module)
2400{
2401 for (size_t i = 0; module->nodes[i].xpath; i++) {
2402 struct nb_node *nb_node;
2403 uint32_t priority;
2404
dc397e4c
RW
2405 if (i > YANG_MODULE_MAX_NODES) {
2406 zlog_err(
2407 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2408 __func__, module->name, YANG_MODULE_MAX_NODES);
2409 exit(1);
2410 }
2411
1c2facd1
RW
2412 nb_node = nb_node_find(module->nodes[i].xpath);
2413 if (!nb_node) {
2414 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
2415 "%s: unknown data path: %s", __func__,
2416 module->nodes[i].xpath);
2417 continue;
2418 }
2419
2420 nb_node->cbs = module->nodes[i].cbs;
2421 priority = module->nodes[i].priority;
2422 if (priority != 0)
2423 nb_node->priority = priority;
2424 }
2425}
2426
59e85ca1 2427void nb_validate_callbacks(void)
1c2facd1
RW
2428{
2429 unsigned int errors = 0;
2430
8d869d37 2431 yang_snodes_iterate(NULL, nb_node_validate, 0, &errors);
1c2facd1
RW
2432 if (errors > 0) {
2433 flog_err(
2434 EC_LIB_NB_CBS_VALIDATION,
2435 "%s: failed to validate northbound callbacks: %u error(s)",
2436 __func__, errors);
2437 exit(1);
2438 }
59e85ca1
RW
2439}
2440
59e85ca1
RW
2441
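/*
 * Initialize the northbound layer. Daemons normally reach this through
 * libfrr, which passes the list of YANG modules the daemon implements.
 * A minimal sketch (hypothetical module list; in practice it comes from
 * the daemon's FRR_DAEMON_INFO registration):
 *
 *   static const struct frr_yang_module_info *const my_yang_modules[] = {
 *           &frr_interface_info,
 *           &frr_vrf_info,
 *   };
 *
 *   nb_init(master, my_yang_modules, array_size(my_yang_modules), true);
 */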
2442void nb_init(struct thread_master *tm,
2443 const struct frr_yang_module_info *const modules[],
2444 size_t nmodules, bool db_enabled)
2445{
3bb513c3
CH
2446 struct yang_module *loaded[nmodules], **loadedp = loaded;
2447 bool explicit_compile;
2448
2449 /*
2450 * libyang2's explicit compile feature defers compiling modules until
2451 * they have all been loaded into the context, which avoids recompiling
2452 * the same modules multiple times as they are imported/augmented.
2453 * However, using it currently leads to incorrect behavior in FRR, so
2454 * it is disabled here.
2455 */
2456 explicit_compile = false;
2457
390a8862
CS
2458 nb_db_enabled = db_enabled;
2459
3bb513c3
CH
2460 yang_init(true, explicit_compile);
2461
59e85ca1 2462 /* Load YANG modules and their corresponding northbound callbacks. */
3bb513c3
CH
2463 for (size_t i = 0; i < nmodules; i++) {
2464 DEBUGD(&nb_dbg_events, "northbound: loading %s.yang",
2465 modules[i]->name);
2466 *loadedp++ = yang_module_load(modules[i]->name);
2467 }
2468
2469 if (explicit_compile)
2470 yang_init_loading_complete();
2471
2472 /* Initialize the compiled nodes with northbound data. */
2473 for (size_t i = 0; i < nmodules; i++) {
2474 yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0, NULL);
2475 nb_load_callbacks(modules[i]);
2476 }
59e85ca1
RW
2477
2478 /* Validate northbound callbacks. */
2479 nb_validate_callbacks();
2480
1c2facd1
RW
2481 /* Create an empty running configuration. */
2482 running_config = nb_config_new(NULL);
ccd43ada
RW
2483 running_config_entries = hash_create(running_config_entry_key_make,
2484 running_config_entry_cmp,
2485 "Running Configuration Entries");
364ad673 2486 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
1c2facd1
RW
2487
2488 /* Initialize the northbound CLI. */
fbdc1c0a 2489 nb_cli_init(tm);
1c2facd1
RW
2490}
2491
2492void nb_terminate(void)
2493{
2494 /* Terminate the northbound CLI. */
2495 nb_cli_terminate();
2496
2497 /* Delete all nb_node's from all YANG modules. */
544ca69a 2498 nb_nodes_delete();
1c2facd1
RW
2499
2500 /* Delete the running configuration. */
ccd43ada
RW
2501 hash_clean(running_config_entries, running_config_entry_free);
2502 hash_free(running_config_entries);
1c2facd1 2503 nb_config_free(running_config);
364ad673 2504 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
1c2facd1 2505}