]> git.proxmox.com Git - mirror_frr.git/blob - lib/northbound.c
Merge pull request #12798 from donaldsharp/rib_match_multicast
[mirror_frr.git] / lib / northbound.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2018 NetDEF, Inc.
4 * Renato Westphal
5 */
6
7 #include <zebra.h>
8
9 #include "libfrr.h"
10 #include "log.h"
11 #include "lib_errors.h"
12 #include "hash.h"
13 #include "command.h"
14 #include "debug.h"
15 #include "db.h"
16 #include "frr_pthread.h"
17 #include "northbound.h"
18 #include "northbound_cli.h"
19 #include "northbound_db.h"
20 #include "frrstr.h"
21
DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node");
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration");
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry");

/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;

/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;

/* Management lock for the running configuration. */
static struct {
	/* Mutex protecting this structure. */
	pthread_mutex_t mtx;

	/* Actual lock. */
	bool locked;

	/* Northbound client who owns this lock. */
	enum nb_client owner_client;

	/* Northbound user who owns this lock. */
	const void *owner_user;
} running_config_mgmt_lock;

/* Knob controlling whether configuration transactions are recorded. */
static bool nb_db_enabled;

/*
 * Global lock used to prevent multiple configuration transactions from
 * happening concurrently.
 */
static bool transaction_in_progress;
54
55 static int nb_callback_pre_validate(struct nb_context *context,
56 const struct nb_node *nb_node,
57 const struct lyd_node *dnode, char *errmsg,
58 size_t errmsg_len);
59 static int nb_callback_configuration(struct nb_context *context,
60 const enum nb_event event,
61 struct nb_config_change *change,
62 char *errmsg, size_t errmsg_len);
63 static struct nb_transaction *
64 nb_transaction_new(struct nb_context *context, struct nb_config *config,
65 struct nb_config_cbs *changes, const char *comment,
66 char *errmsg, size_t errmsg_len);
67 static void nb_transaction_free(struct nb_transaction *transaction);
68 static int nb_transaction_process(enum nb_event event,
69 struct nb_transaction *transaction,
70 char *errmsg, size_t errmsg_len);
71 static void nb_transaction_apply_finish(struct nb_transaction *transaction,
72 char *errmsg, size_t errmsg_len);
73 static int nb_oper_data_iter_node(const struct lysc_node *snode,
74 const char *xpath, const void *list_entry,
75 const struct yang_list_keys *list_keys,
76 struct yang_translator *translator,
77 bool first, uint32_t flags,
78 nb_oper_data_cb cb, void *arg);
79
80 static int nb_node_check_config_only(const struct lysc_node *snode, void *arg)
81 {
82 bool *config_only = arg;
83
84 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
85 *config_only = false;
86 return YANG_ITER_STOP;
87 }
88
89 return YANG_ITER_CONTINUE;
90 }
91
92 static int nb_node_new_cb(const struct lysc_node *snode, void *arg)
93 {
94 struct nb_node *nb_node;
95 struct lysc_node *sparent, *sparent_list;
96
97 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
98 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
99 sizeof(nb_node->xpath));
100 nb_node->priority = NB_DFLT_PRIORITY;
101 sparent = yang_snode_real_parent(snode);
102 if (sparent)
103 nb_node->parent = sparent->priv;
104 sparent_list = yang_snode_parent_list(snode);
105 if (sparent_list)
106 nb_node->parent_list = sparent_list->priv;
107
108 /* Set flags. */
109 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
110 bool config_only = true;
111
112 (void)yang_snodes_iterate_subtree(snode, NULL,
113 nb_node_check_config_only, 0,
114 &config_only);
115 if (config_only)
116 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
117 }
118 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
119 if (yang_snode_num_keys(snode) == 0)
120 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
121 }
122
123 /*
124 * Link the northbound node and the libyang schema node with one
125 * another.
126 */
127 nb_node->snode = snode;
128 assert(snode->priv == NULL);
129 ((struct lysc_node *)snode)->priv = nb_node;
130
131 return YANG_ITER_CONTINUE;
132 }
133
134 static int nb_node_del_cb(const struct lysc_node *snode, void *arg)
135 {
136 struct nb_node *nb_node;
137
138 nb_node = snode->priv;
139 if (nb_node) {
140 ((struct lysc_node *)snode)->priv = NULL;
141 XFREE(MTYPE_NB_NODE, nb_node);
142 }
143
144 return YANG_ITER_CONTINUE;
145 }
146
147 void nb_nodes_create(void)
148 {
149 yang_snodes_iterate(NULL, nb_node_new_cb, 0, NULL);
150 }
151
152 void nb_nodes_delete(void)
153 {
154 yang_snodes_iterate(NULL, nb_node_del_cb, 0, NULL);
155 }
156
157 struct nb_node *nb_node_find(const char *path)
158 {
159 const struct lysc_node *snode;
160
161 /*
162 * Use libyang to find the schema node associated to the path and get
163 * the northbound node from there (snode private pointer).
164 */
165 snode = lys_find_path(ly_native_ctx, NULL, path, 0);
166 if (!snode)
167 return NULL;
168
169 return snode->priv;
170 }
171
172 void nb_node_set_dependency_cbs(const char *dependency_xpath,
173 const char *dependant_xpath,
174 struct nb_dependency_callbacks *cbs)
175 {
176 struct nb_node *dependency = nb_node_find(dependency_xpath);
177 struct nb_node *dependant = nb_node_find(dependant_xpath);
178
179 if (!dependency || !dependant)
180 return;
181
182 dependency->dep_cbs.get_dependant_xpath = cbs->get_dependant_xpath;
183 dependant->dep_cbs.get_dependency_xpath = cbs->get_dependency_xpath;
184 }
185
186 bool nb_node_has_dependency(struct nb_node *node)
187 {
188 return node->dep_cbs.get_dependency_xpath != NULL;
189 }
190
191 static int nb_node_validate_cb(const struct nb_node *nb_node,
192 enum nb_operation operation,
193 int callback_implemented, bool optional)
194 {
195 bool valid;
196
197 valid = nb_operation_is_valid(operation, nb_node->snode);
198
199 /*
200 * Add an exception for operational data callbacks. A rw list usually
201 * doesn't need any associated operational data callbacks. But if this
202 * rw list is augmented by another module which adds state nodes under
203 * it, then this list will need to have the 'get_next()', 'get_keys()'
204 * and 'lookup_entry()' callbacks. As such, never log a warning when
205 * these callbacks are implemented when they are not needed, since this
206 * depends on context (e.g. some daemons might augment "frr-interface"
207 * while others don't).
208 */
209 if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
210 && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
211 flog_warn(EC_LIB_NB_CB_UNNEEDED,
212 "unneeded '%s' callback for '%s'",
213 nb_operation_name(operation), nb_node->xpath);
214
215 if (!optional && valid && !callback_implemented) {
216 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
217 nb_operation_name(operation), nb_node->xpath);
218 return 1;
219 }
220
221 return 0;
222 }
223
224 /*
225 * Check if the required callbacks were implemented for the given northbound
226 * node.
227 */
228 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
229
230 {
231 unsigned int error = 0;
232
233 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
234 !!nb_node->cbs.create, false);
235 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
236 !!nb_node->cbs.modify, false);
237 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
238 !!nb_node->cbs.destroy, false);
239 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
240 false);
241 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
242 !!nb_node->cbs.pre_validate, true);
243 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
244 !!nb_node->cbs.apply_finish, true);
245 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
246 !!nb_node->cbs.get_elem, false);
247 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
248 !!nb_node->cbs.get_next, false);
249 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
250 !!nb_node->cbs.get_keys, false);
251 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
252 !!nb_node->cbs.lookup_entry, false);
253 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
254 false);
255
256 return error;
257 }
258
259 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
260 {
261 /* Top-level nodes can have any priority. */
262 if (!nb_node->parent)
263 return 0;
264
265 if (nb_node->priority < nb_node->parent->priority) {
266 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
267 "node has higher priority than its parent [xpath %s]",
268 nb_node->xpath);
269 return 1;
270 }
271
272 return 0;
273 }
274
275 static int nb_node_validate(const struct lysc_node *snode, void *arg)
276 {
277 struct nb_node *nb_node = snode->priv;
278 unsigned int *errors = arg;
279
280 /* Validate callbacks and priority. */
281 if (nb_node) {
282 *errors += nb_node_validate_cbs(nb_node);
283 *errors += nb_node_validate_priority(nb_node);
284 }
285
286 return YANG_ITER_CONTINUE;
287 }
288
289 struct nb_config *nb_config_new(struct lyd_node *dnode)
290 {
291 struct nb_config *config;
292
293 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
294 if (dnode)
295 config->dnode = dnode;
296 else
297 config->dnode = yang_dnode_new(ly_native_ctx, true);
298 config->version = 0;
299
300 return config;
301 }
302
303 void nb_config_free(struct nb_config *config)
304 {
305 if (config->dnode)
306 yang_dnode_free(config->dnode);
307 XFREE(MTYPE_NB_CONFIG, config);
308 }
309
310 struct nb_config *nb_config_dup(const struct nb_config *config)
311 {
312 struct nb_config *dup;
313
314 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
315 dup->dnode = yang_dnode_dup(config->dnode);
316 dup->version = config->version;
317
318 return dup;
319 }
320
321 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
322 bool preserve_source)
323 {
324 int ret;
325
326 ret = lyd_merge_siblings(&config_dst->dnode, config_src->dnode, 0);
327 if (ret != 0)
328 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
329
330 if (!preserve_source)
331 nb_config_free(config_src);
332
333 return (ret == 0) ? NB_OK : NB_ERR;
334 }
335
336 void nb_config_replace(struct nb_config *config_dst,
337 struct nb_config *config_src, bool preserve_source)
338 {
339 /* Update version. */
340 if (config_src->version != 0)
341 config_dst->version = config_src->version;
342
343 /* Update dnode. */
344 if (config_dst->dnode)
345 yang_dnode_free(config_dst->dnode);
346 if (preserve_source) {
347 config_dst->dnode = yang_dnode_dup(config_src->dnode);
348 } else {
349 config_dst->dnode = config_src->dnode;
350 config_src->dnode = NULL;
351 nb_config_free(config_src);
352 }
353 }
354
355 /* Generate the nb_config_cbs tree. */
356 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
357 const struct nb_config_cb *b)
358 {
359 /* Sort by priority first. */
360 if (a->nb_node->priority < b->nb_node->priority)
361 return -1;
362 if (a->nb_node->priority > b->nb_node->priority)
363 return 1;
364
365 /*
366 * Preserve the order of the configuration changes as told by libyang.
367 */
368 if (a->seq < b->seq)
369 return -1;
370 if (a->seq > b->seq)
371 return 1;
372
373 /*
374 * All 'apply_finish' callbacks have their sequence number set to zero.
375 * In this case, compare them using their dnode pointers (the order
376 * doesn't matter for callbacks that have the same priority).
377 */
378 if (a->dnode < b->dnode)
379 return -1;
380 if (a->dnode > b->dnode)
381 return 1;
382
383 return 0;
384 }
385 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
386
387 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
388 enum nb_operation operation,
389 uint32_t *seq,
390 const struct lyd_node *dnode)
391 {
392 struct nb_config_change *change;
393
394 /* Ignore unimplemented nodes. */
395 if (!dnode->schema->priv)
396 return;
397
398 change = XCALLOC(MTYPE_TMP, sizeof(*change));
399 change->cb.operation = operation;
400 change->cb.seq = *seq;
401 *seq = *seq + 1;
402 change->cb.nb_node = dnode->schema->priv;
403 change->cb.dnode = dnode;
404
405 RB_INSERT(nb_config_cbs, changes, &change->cb);
406 }
407
408 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
409 {
410 while (!RB_EMPTY(nb_config_cbs, changes)) {
411 struct nb_config_change *change;
412
413 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
414 changes);
415 RB_REMOVE(nb_config_cbs, changes, &change->cb);
416 XFREE(MTYPE_TMP, change);
417 }
418 }
419
420 /*
421 * Helper function used when calculating the delta between two different
422 * configurations. Given a new subtree, calculate all new YANG data nodes,
423 * excluding default leafs and leaf-lists. This is a recursive function.
424 */
425 static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
426 struct nb_config_cbs *changes)
427 {
428 enum nb_operation operation;
429 struct lyd_node *child;
430
431 /* Ignore unimplemented nodes. */
432 if (!dnode->schema->priv)
433 return;
434
435 switch (dnode->schema->nodetype) {
436 case LYS_LEAF:
437 case LYS_LEAFLIST:
438 if (lyd_is_default(dnode))
439 break;
440
441 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
442 operation = NB_OP_CREATE;
443 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
444 operation = NB_OP_MODIFY;
445 else
446 return;
447
448 nb_config_diff_add_change(changes, operation, seq, dnode);
449 break;
450 case LYS_CONTAINER:
451 case LYS_LIST:
452 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
453 nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
454 dnode);
455
456 /* Process child nodes recursively. */
457 LY_LIST_FOR (lyd_child(dnode), child) {
458 nb_config_diff_created(child, seq, changes);
459 }
460 break;
461 default:
462 break;
463 }
464 }
465
466 static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
467 struct nb_config_cbs *changes)
468 {
469 /* Ignore unimplemented nodes. */
470 if (!dnode->schema->priv)
471 return;
472
473 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
474 nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
475 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
476 struct lyd_node *child;
477
478 /*
479 * Non-presence containers need special handling since they
480 * don't have "destroy" callbacks. In this case, what we need to
481 * do is to call the "destroy" callbacks of their child nodes
482 * when applicable (i.e. optional nodes).
483 */
484 LY_LIST_FOR (lyd_child(dnode), child) {
485 nb_config_diff_deleted(child, seq, changes);
486 }
487 }
488 }
489
490 static int nb_lyd_diff_get_op(const struct lyd_node *dnode)
491 {
492 const struct lyd_meta *meta;
493 LY_LIST_FOR (dnode->meta, meta) {
494 if (strcmp(meta->name, "operation")
495 || strcmp(meta->annotation->module->name, "yang"))
496 continue;
497 return lyd_get_meta_value(meta)[0];
498 }
499 return 'n';
500 }
501
#if 0 /* Used below in nb_config_diff inside normally disabled code */
/* Log one diff node: "context: path[: value]" (value only for term nodes). */
static inline void nb_config_diff_dnode_log_path(const char *context,
						 const char *path,
						 const struct lyd_node *dnode)
{
	if (dnode->schema->nodetype & LYD_NODE_TERM)
		zlog_debug("nb_config_diff: %s: %s: %s", context, path,
			   lyd_get_value(dnode));
	else
		zlog_debug("nb_config_diff: %s: %s", context, path);
}

/* Convenience wrapper: derive the node's path, then log it as above. */
static inline void nb_config_diff_dnode_log(const char *context,
					    const struct lyd_node *dnode)
{
	if (!dnode) {
		zlog_debug("nb_config_diff: %s: NULL", context);
		return;
	}

	char *path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
	nb_config_diff_dnode_log_path(context, path, dnode);
	free(path);
}
#endif
527
/*
 * Calculate the delta between two different configurations.
 *
 * The libyang diff of config1 (old) against config2 (new) is walked and
 * translated into a tree of northbound configuration changes ('changes'),
 * ordered by callback priority and discovery sequence (see
 * nb_config_cb_compare).
 */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_node *diff = NULL;
	const struct lyd_node *root, *dnode;
	struct lyd_node *target;
	int op;
	LY_ERR err;
	char *path;

#if 0 /* Useful (noisy) when debugging diff code, and for improving later */
	if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		LY_LIST_FOR(config1->dnode, root) {
			LYD_TREE_DFS_BEGIN(root, dnode) {
				nb_config_diff_dnode_log("from", dnode);
				LYD_TREE_DFS_END(root, dnode);
			}
		}
		LY_LIST_FOR(config2->dnode, root) {
			LYD_TREE_DFS_BEGIN(root, dnode) {
				nb_config_diff_dnode_log("to", dnode);
				LYD_TREE_DFS_END(root, dnode);
			}
		}
	}
#endif

	err = lyd_diff_siblings(config1->dnode, config2->dnode,
				LYD_DIFF_DEFAULTS, &diff);
	assert(!err);

	/* Dump the raw diff in JSON when config-callback debugging is on. */
	if (diff && DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		char *s;

		if (!lyd_print_mem(&s, diff, LYD_JSON,
				   LYD_PRINT_WITHSIBLINGS | LYD_PRINT_WD_ALL)) {
			zlog_debug("%s: %s", __func__, s);
			free(s);
		}
	}

	uint32_t seq = 0;

	/* DFS over every tree in the diff's sibling list. */
	LY_LIST_FOR (diff, root) {
		LYD_TREE_DFS_BEGIN (root, dnode) {
			op = nb_lyd_diff_get_op(dnode);

			path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);

#if 0 /* Useful (noisy) when debugging diff code, and for improving later */
			if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
				char context[80];
				snprintf(context, sizeof(context),
					 "iterating diff: oper: %c seq: %u", op, seq);
				nb_config_diff_dnode_log_path(context, path, dnode);
			}
#endif
			switch (op) {
			case 'c': /* create */
				/*
				 * This is rather inefficient, but when we use
				 * dnode from the diff instead of the
				 * candidate config node we get failures when
				 * looking up default values, etc, based on
				 * the diff tree.
				 */
				target = yang_dnode_get(config2->dnode, path);
				assert(target);
				nb_config_diff_created(target, &seq, changes);

				/* Skip rest of sub-tree, move to next sibling
				 */
				LYD_TREE_DFS_continue = 1;
				break;
			case 'd': /* delete */
				/* Deleted nodes are looked up in the OLD tree. */
				target = yang_dnode_get(config1->dnode, path);
				assert(target);
				nb_config_diff_deleted(target, &seq, changes);

				/* Skip rest of sub-tree, move to next sibling
				 */
				LYD_TREE_DFS_continue = 1;
				break;
			case 'r': /* replace */
				/* either moving an entry or changing a value */
				target = yang_dnode_get(config2->dnode, path);
				assert(target);
				nb_config_diff_add_change(changes, NB_OP_MODIFY,
							  &seq, target);
				break;
			case 'n': /* none */
			default:
				break;
			}
			free(path);
			LYD_TREE_DFS_END(root, dnode);
		}
	}

	lyd_free_all(diff);
}
631
/*
 * Edit a candidate configuration in place.
 *
 * candidate: configuration to modify.
 * nb_node:   northbound node matching 'xpath'.
 * operation: NB_OP_CREATE / NB_OP_MODIFY / NB_OP_DESTROY / NB_OP_MOVE
 *            (other operations are rejected with a warning).
 * xpath:     path of the node being edited.
 * previous:  currently unused by this function.
 * data:      new value (its 'value' member is passed to libyang).
 *
 * Returns NB_OK on success, NB_ERR_NOT_FOUND when destroying a node that
 * doesn't exist, NB_ERR on other failures.
 */
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode, *dep_dnode;
	char xpath_edit[XPATH_MAXLEN];
	char dep_xpath[XPATH_MAXLEN];
	LY_ERR err;

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		/* LYD_NEW_PATH_UPDATE makes this an upsert. */
		err = lyd_new_path(candidate->dnode, ly_native_ctx, xpath_edit,
				   (void *)data->value, LYD_NEW_PATH_UPDATE,
				   &dnode);
		if (err) {
			flog_warn(EC_LIB_LIBYANG,
				  "%s: lyd_new_path(%s) failed: %d", __func__,
				  xpath_edit, err);
			return NB_ERR;
		} else if (dnode) {
			/* Create default nodes */
			LY_ERR err = lyd_new_implicit_tree(
				dnode, LYD_IMPLICIT_NO_STATE, NULL);
			if (err) {
				flog_warn(EC_LIB_LIBYANG,
					  "%s: lyd_new_implicit_all failed: %d",
					  __func__, err);
			}
			/*
			 * create dependency
			 *
			 * dnode returned by the lyd_new_path may be from a
			 * different schema, so we need to update the nb_node
			 */
			nb_node = dnode->schema->priv;
			if (nb_node->dep_cbs.get_dependency_xpath) {
				nb_node->dep_cbs.get_dependency_xpath(
					dnode, dep_xpath);

				err = lyd_new_path(candidate->dnode,
						   ly_native_ctx, dep_xpath,
						   NULL, LYD_NEW_PATH_UPDATE,
						   &dep_dnode);
				/* Create default nodes */
				if (!err && dep_dnode)
					err = lyd_new_implicit_tree(
						dep_dnode,
						LYD_IMPLICIT_NO_STATE, NULL);
				if (err) {
					flog_warn(
						EC_LIB_LIBYANG,
						"%s: dependency: lyd_new_path(%s) failed: %d",
						__func__, dep_xpath, err);
					return NB_ERR;
				}
			}
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		/* destroy dependant */
		if (nb_node->dep_cbs.get_dependant_xpath) {
			nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);

			dep_dnode = yang_dnode_get(candidate->dnode, dep_xpath);
			if (dep_dnode)
				lyd_free_tree(dep_dnode);
		}
		lyd_free_tree(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
	case NB_OP_GET_ELEM:
	case NB_OP_GET_NEXT:
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
	case NB_OP_RPC:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}
736
737 bool nb_candidate_needs_update(const struct nb_config *candidate)
738 {
739 if (candidate->version < running_config->version)
740 return true;
741
742 return false;
743 }
744
745 int nb_candidate_update(struct nb_config *candidate)
746 {
747 struct nb_config *updated_config;
748
749 updated_config = nb_config_dup(running_config);
750 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
751 return NB_ERR;
752
753 nb_config_replace(candidate, updated_config, false);
754
755 return NB_OK;
756 }
757
758 /*
759 * Perform YANG syntactic and semantic validation.
760 *
761 * WARNING: lyd_validate() can change the configuration as part of the
762 * validation process.
763 */
764 static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
765 size_t errmsg_len)
766 {
767 if (lyd_validate_all(&candidate->dnode, ly_native_ctx,
768 LYD_VALIDATE_NO_STATE, NULL)
769 != 0) {
770 yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
771 return NB_ERR_VALIDATION;
772 }
773
774 return NB_OK;
775 }
776
/*
 * Perform code-level validation using the northbound callbacks.
 *
 * Two passes: first every node of the candidate tree that implements a
 * 'pre_validate' callback is invoked; then every computed configuration
 * change is run through its callback with NB_EV_VALIDATE. Returns
 * NB_ERR_VALIDATION on the first failure, NB_OK otherwise.
 */
static int nb_candidate_validate_code(struct nb_context *context,
				      struct nb_config *candidate,
				      struct nb_config_cbs *changes,
				      char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;
	struct lyd_node *root, *child;
	int ret;

	/* First validate the candidate as a whole. */
	LY_LIST_FOR (candidate->dnode, root) {
		LYD_TREE_DFS_BEGIN (root, child) {
			struct nb_node *nb_node;

			/* Skip nodes without a pre_validate callback. */
			nb_node = child->schema->priv;
			if (!nb_node || !nb_node->cbs.pre_validate)
				goto next;

			ret = nb_callback_pre_validate(context, nb_node, child,
						       errmsg, errmsg_len);
			if (ret != NB_OK)
				return NB_ERR_VALIDATION;

		next:
			LYD_TREE_DFS_END(root, child);
		}
	}

	/* Now validate the configuration changes. */
	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;

		ret = nb_callback_configuration(context, NB_EV_VALIDATE, change,
						errmsg, errmsg_len);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}
818
819 int nb_candidate_validate(struct nb_context *context,
820 struct nb_config *candidate, char *errmsg,
821 size_t errmsg_len)
822 {
823 struct nb_config_cbs changes;
824 int ret;
825
826 if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len) != NB_OK)
827 return NB_ERR_VALIDATION;
828
829 RB_INIT(nb_config_cbs, &changes);
830 nb_config_diff(running_config, candidate, &changes);
831 ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
832 errmsg_len);
833 nb_config_diff_del_changes(&changes);
834
835 return ret;
836 }
837
/*
 * First phase of a two-phase commit: validate the candidate, compute the
 * diff against running, create a transaction and run the NB_EV_PREPARE
 * callbacks.
 *
 * On success, *transaction is set and the caller must later finish with
 * nb_candidate_commit_apply() or nb_candidate_commit_abort(). Possible
 * returns: NB_OK, NB_ERR_VALIDATION, NB_ERR_NO_CHANGES, NB_ERR_LOCKED
 * (transaction creation failed; NOTE(review): presumably because another
 * transaction is in progress — confirm in nb_transaction_new), or whatever
 * nb_transaction_process() returns for the prepare phase.
 */
int nb_candidate_commit_prepare(struct nb_context *context,
				struct nb_config *candidate,
				const char *comment,
				struct nb_transaction **transaction,
				char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	if (RB_EMPTY(nb_config_cbs, &changes)) {
		snprintf(
			errmsg, errmsg_len,
			"No changes to apply were found during preparation phase");
		return NB_ERR_NO_CHANGES;
	}

	if (nb_candidate_validate_code(context, candidate, &changes, errmsg,
				       errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_VALIDATION;
	}

	/* The transaction takes over the computed changes on success. */
	*transaction = nb_transaction_new(context, candidate, &changes, comment,
					  errmsg, errmsg_len);
	if (*transaction == NULL) {
		flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			  "%s: failed to create transaction: %s", __func__,
			  errmsg);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_LOCKED;
	}

	return nb_transaction_process(NB_EV_PREPARE, *transaction, errmsg,
				      errmsg_len);
}
886
887 void nb_candidate_commit_abort(struct nb_transaction *transaction, char *errmsg,
888 size_t errmsg_len)
889 {
890 (void)nb_transaction_process(NB_EV_ABORT, transaction, errmsg,
891 errmsg_len);
892 nb_transaction_free(transaction);
893 }
894
/*
 * Second phase of a two-phase commit: run the NB_EV_APPLY callbacks plus the
 * 'apply_finish' callbacks, promote the transaction's config to running,
 * optionally record the transaction, and free it. NB_EV_APPLY errors are
 * intentionally discarded (the prepare phase already succeeded).
 */
void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id,
			       char *errmsg, size_t errmsg_len)
{
	(void)nb_transaction_process(NB_EV_APPLY, transaction, errmsg,
				     errmsg_len);
	nb_transaction_apply_finish(transaction, errmsg, errmsg_len);

	/* Replace running by candidate. */
	transaction->config->version++;
	nb_config_replace(running_config, transaction->config, true);

	/* Record transaction. */
	if (save_transaction && nb_db_enabled
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}
915
916 int nb_candidate_commit(struct nb_context *context, struct nb_config *candidate,
917 bool save_transaction, const char *comment,
918 uint32_t *transaction_id, char *errmsg,
919 size_t errmsg_len)
920 {
921 struct nb_transaction *transaction = NULL;
922 int ret;
923
924 ret = nb_candidate_commit_prepare(context, candidate, comment,
925 &transaction, errmsg, errmsg_len);
926 /*
927 * Apply the changes if the preparation phase succeeded. Otherwise abort
928 * the transaction.
929 */
930 if (ret == NB_OK)
931 nb_candidate_commit_apply(transaction, save_transaction,
932 transaction_id, errmsg, errmsg_len);
933 else if (transaction != NULL)
934 nb_candidate_commit_abort(transaction, errmsg, errmsg_len);
935
936 return ret;
937 }
938
939 int nb_running_lock(enum nb_client client, const void *user)
940 {
941 int ret = -1;
942
943 frr_with_mutex (&running_config_mgmt_lock.mtx) {
944 if (!running_config_mgmt_lock.locked) {
945 running_config_mgmt_lock.locked = true;
946 running_config_mgmt_lock.owner_client = client;
947 running_config_mgmt_lock.owner_user = user;
948 ret = 0;
949 }
950 }
951
952 return ret;
953 }
954
955 int nb_running_unlock(enum nb_client client, const void *user)
956 {
957 int ret = -1;
958
959 frr_with_mutex (&running_config_mgmt_lock.mtx) {
960 if (running_config_mgmt_lock.locked
961 && running_config_mgmt_lock.owner_client == client
962 && running_config_mgmt_lock.owner_user == user) {
963 running_config_mgmt_lock.locked = false;
964 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
965 running_config_mgmt_lock.owner_user = NULL;
966 ret = 0;
967 }
968 }
969
970 return ret;
971 }
972
973 int nb_running_lock_check(enum nb_client client, const void *user)
974 {
975 int ret = -1;
976
977 frr_with_mutex (&running_config_mgmt_lock.mtx) {
978 if (!running_config_mgmt_lock.locked
979 || (running_config_mgmt_lock.owner_client == client
980 && running_config_mgmt_lock.owner_user == user))
981 ret = 0;
982 }
983
984 return ret;
985 }
986
987 static void nb_log_config_callback(const enum nb_event event,
988 enum nb_operation operation,
989 const struct lyd_node *dnode)
990 {
991 const char *value;
992 char xpath[XPATH_MAXLEN];
993
994 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
995 return;
996
997 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
998 if (yang_snode_is_typeless_data(dnode->schema))
999 value = "(none)";
1000 else
1001 value = yang_dnode_get_string(dnode, NULL);
1002
1003 zlog_debug(
1004 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
1005 nb_event_name(event), nb_operation_name(operation), xpath,
1006 value);
1007 }
1008
1009 static int nb_callback_create(struct nb_context *context,
1010 const struct nb_node *nb_node,
1011 enum nb_event event, const struct lyd_node *dnode,
1012 union nb_resource *resource, char *errmsg,
1013 size_t errmsg_len)
1014 {
1015 struct nb_cb_create_args args = {};
1016 bool unexpected_error = false;
1017 int ret;
1018
1019 nb_log_config_callback(event, NB_OP_CREATE, dnode);
1020
1021 args.context = context;
1022 args.event = event;
1023 args.dnode = dnode;
1024 args.resource = resource;
1025 args.errmsg = errmsg;
1026 args.errmsg_len = errmsg_len;
1027 ret = nb_node->cbs.create(&args);
1028
1029 /* Detect and log unexpected errors. */
1030 switch (ret) {
1031 case NB_OK:
1032 case NB_ERR:
1033 break;
1034 case NB_ERR_VALIDATION:
1035 if (event != NB_EV_VALIDATE)
1036 unexpected_error = true;
1037 break;
1038 case NB_ERR_RESOURCE:
1039 if (event != NB_EV_PREPARE)
1040 unexpected_error = true;
1041 break;
1042 case NB_ERR_INCONSISTENCY:
1043 if (event == NB_EV_VALIDATE)
1044 unexpected_error = true;
1045 break;
1046 default:
1047 unexpected_error = true;
1048 break;
1049 }
1050 if (unexpected_error)
1051 DEBUGD(&nb_dbg_cbs_config,
1052 "northbound callback: unexpected return value: %s",
1053 nb_err_name(ret));
1054
1055 return ret;
1056 }
1057
/*
 * Invoke the 'modify' callback of the given northbound node for the given
 * transaction phase ('event'). Return values that don't make sense for the
 * current phase are flagged and logged as unexpected.
 */
static int nb_callback_modify(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_modify_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MODIFY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.modify(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		/* Validation errors may only occur in the 'validate' phase. */
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		/* Resource allocation errors belong to the 'prepare' phase. */
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		/* Inconsistencies can't be reported while validating. */
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}
1106
/*
 * Invoke the 'destroy' callback of the given northbound node for the given
 * transaction phase. Unlike 'create'/'modify', no resource is passed here:
 * destroy callbacks don't allocate resources, so NB_ERR_RESOURCE is never
 * an expected return value.
 */
static int nb_callback_destroy(struct nb_context *context,
			       const struct nb_node *nb_node,
			       enum nb_event event,
			       const struct lyd_node *dnode, char *errmsg,
			       size_t errmsg_len)
{
	struct nb_cb_destroy_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_DESTROY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.destroy(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		/* Validation errors may only occur in the 'validate' phase. */
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		/* Inconsistencies can't be reported while validating. */
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}
1150
/*
 * Invoke the 'move' callback of the given northbound node (reordering of
 * "ordered-by user" lists/leaf-lists) for the given transaction phase.
 */
static int nb_callback_move(struct nb_context *context,
			    const struct nb_node *nb_node, enum nb_event event,
			    const struct lyd_node *dnode, char *errmsg,
			    size_t errmsg_len)
{
	struct nb_cb_move_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MOVE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.move(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		/* Validation errors may only occur in the 'validate' phase. */
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		/* Inconsistencies can't be reported while validating. */
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}
1193
/*
 * Invoke the 'pre_validate' callback of the given northbound node. This
 * callback runs only in the 'validate' phase, hence the only acceptable
 * return values are NB_OK and NB_ERR_VALIDATION.
 *
 * NOTE: 'context' is accepted for signature symmetry with the other config
 * callbacks but is not forwarded — the pre-validate args carry no context
 * field here.
 */
static int nb_callback_pre_validate(struct nb_context *context,
				    const struct nb_node *nb_node,
				    const struct lyd_node *dnode, char *errmsg,
				    size_t errmsg_len)
{
	struct nb_cb_pre_validate_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);

	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.pre_validate(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR_VALIDATION:
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}
1226
/*
 * Invoke the 'apply_finish' callback of the given northbound node. This runs
 * after the 'apply' phase has committed all changes, so it returns nothing:
 * failures can no longer be reported at this point.
 */
static void nb_callback_apply_finish(struct nb_context *context,
				     const struct nb_node *nb_node,
				     const struct lyd_node *dnode, char *errmsg,
				     size_t errmsg_len)
{
	struct nb_cb_apply_finish_args args = {};

	nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);

	args.context = context;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	nb_node->cbs.apply_finish(&args);
}
1242
/*
 * Invoke the 'get_elem' operational-state callback: return the value of the
 * given leaf/presence-container, or NULL when the node is not present.
 */
struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry)
{
	struct nb_cb_get_elem_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_elem): xpath [%s] list_entry [%p]",
	       xpath, list_entry);

	args.xpath = xpath;
	args.list_entry = list_entry;
	return nb_node->cbs.get_elem(&args);
}

/*
 * Invoke the 'get_next' callback: return the list entry that follows
 * 'list_entry' (NULL starts the iteration; a NULL return ends it).
 */
const void *nb_callback_get_next(const struct nb_node *nb_node,
				 const void *parent_list_entry,
				 const void *list_entry)
{
	struct nb_cb_get_next_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
	       nb_node->xpath, parent_list_entry, list_entry);

	args.parent_list_entry = parent_list_entry;
	args.list_entry = list_entry;
	return nb_node->cbs.get_next(&args);
}

/*
 * Invoke the 'get_keys' callback: fill 'keys' with the key values of the
 * given list entry. Returns NB_OK on success.
 */
int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
			 struct yang_list_keys *keys)
{
	struct nb_cb_get_keys_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_keys): node [%s] list_entry [%p]",
	       nb_node->xpath, list_entry);

	args.list_entry = list_entry;
	args.keys = keys;
	return nb_node->cbs.get_keys(&args);
}

/*
 * Invoke the 'lookup_entry' callback: find the list entry matching 'keys'
 * under 'parent_list_entry', or NULL if no such entry exists.
 */
const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
				     const void *parent_list_entry,
				     const struct yang_list_keys *keys)
{
	struct nb_cb_lookup_entry_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
	       nb_node->xpath, parent_list_entry);

	args.parent_list_entry = parent_list_entry;
	args.keys = keys;
	return nb_node->cbs.lookup_entry(&args);
}

/*
 * Invoke the 'rpc' callback for the given RPC/action node. 'input' holds the
 * RPC input parameters; the callback fills 'output' and may set 'errmsg'.
 */
int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
		    const struct list *input, struct list *output, char *errmsg,
		    size_t errmsg_len)
{
	struct nb_cb_rpc_args args = {};

	DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);

	args.xpath = xpath;
	args.input = input;
	args.output = output;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	return nb_node->cbs.rpc(&args);
}
1317
/*
 * Call the northbound configuration callback associated to a given
 * configuration change.
 *
 * Dispatches on the change's operation (create/modify/destroy/move) and logs
 * any callback failure with a severity that depends on the transaction phase:
 * failures during 'validate'/'prepare'/'abort' are warnings, while failures
 * during 'apply' are errors (the transaction can't be rejected anymore).
 * Non-configuration operations reaching this function indicate a programming
 * error and terminate the daemon.
 */
static int nb_callback_configuration(struct nb_context *context,
				     const enum nb_event event,
				     struct nb_config_change *change,
				     char *errmsg, size_t errmsg_len)
{
	enum nb_operation operation = change->cb.operation;
	char xpath[XPATH_MAXLEN];
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	/* No resources are allocated/released while validating. */
	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	switch (operation) {
	case NB_OP_CREATE:
		ret = nb_callback_create(context, nb_node, event, dnode,
					 resource, errmsg, errmsg_len);
		break;
	case NB_OP_MODIFY:
		ret = nb_callback_modify(context, nb_node, event, dnode,
					 resource, errmsg, errmsg_len);
		break;
	case NB_OP_DESTROY:
		ret = nb_callback_destroy(context, nb_node, event, dnode,
					  errmsg, errmsg_len);
		break;
	case NB_OP_MOVE:
		ret = nb_callback_move(context, nb_node, event, dnode, errmsg,
				       errmsg_len);
		break;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
	case NB_OP_GET_ELEM:
	case NB_OP_GET_NEXT:
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
	case NB_OP_RPC:
		/* These operations are never queued as config changes. */
		yang_dnode_get_path(dnode, xpath, sizeof(xpath));
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		yang_dnode_get_path(dnode, xpath, sizeof(xpath));

		switch (event) {
		case NB_EV_VALIDATE:
			flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE,
				  "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				  nb_err_name(ret), nb_event_name(event),
				  nb_operation_name(operation), xpath,
				  errmsg[0] ? " message: " : "", errmsg);
			break;
		case NB_EV_PREPARE:
			flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE,
				  "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				  nb_err_name(ret), nb_event_name(event),
				  nb_operation_name(operation), xpath,
				  errmsg[0] ? " message: " : "", errmsg);
			break;
		case NB_EV_ABORT:
			flog_warn(EC_LIB_NB_CB_CONFIG_ABORT,
				  "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				  nb_err_name(ret), nb_event_name(event),
				  nb_operation_name(operation), xpath,
				  errmsg[0] ? " message: " : "", errmsg);
			break;
		case NB_EV_APPLY:
			/* Failure while applying is unrecoverable — log as
			 * an error, not a warning. */
			flog_err(EC_LIB_NB_CB_CONFIG_APPLY,
				 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
				 nb_err_name(ret), nb_event_name(event),
				 nb_operation_name(operation), xpath,
				 errmsg[0] ? " message: " : "", errmsg);
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]", __func__,
				 event, xpath);
			exit(1);
		}
	}

	return ret;
}
1412
/*
 * Allocate a new configuration transaction.
 *
 * Fails (returns NULL and fills 'errmsg') when the running configuration is
 * locked by another client or when another transaction is already in
 * progress — only one transaction may exist at a time (see the global
 * 'transaction_in_progress' flag, released by nb_transaction_free()).
 *
 * The transaction stores the caller's 'context' pointer and takes over the
 * 'changes' tree by value; 'comment' (if any) is copied.
 */
static struct nb_transaction *
nb_transaction_new(struct nb_context *context, struct nb_config *config,
		   struct nb_config_cbs *changes, const char *comment,
		   char *errmsg, size_t errmsg_len)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(context->client, context->user)) {
		strlcpy(errmsg,
			"running configuration is locked by another client",
			errmsg_len);
		return NULL;
	}

	if (transaction_in_progress) {
		strlcpy(errmsg,
			"there's already another transaction in progress",
			errmsg_len);
		return NULL;
	}
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->context = context;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	transaction->changes = *changes;

	return transaction;
}
1445
1446 static void nb_transaction_free(struct nb_transaction *transaction)
1447 {
1448 nb_config_diff_del_changes(&transaction->changes);
1449 XFREE(MTYPE_TMP, transaction);
1450 transaction_in_progress = false;
1451 }
1452
/*
 * Process all configuration changes associated to a transaction.
 *
 * Runs every queued change through nb_callback_configuration() for the given
 * phase ('event'). During 'prepare', the first failure aborts immediately and
 * its return code is propagated; during 'abort'/'apply', failures can't stop
 * the transaction anymore. Changes are visited in RB-tree order, which is the
 * same order in every phase.
 */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction,
				  char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;

	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		int ret;

		/*
		 * Only try to release resources that were allocated
		 * successfully. Since 'prepare' visited the changes in the
		 * same order, the first non-prepared change means none of the
		 * following ones were prepared either.
		 */
		if (event == NB_EV_ABORT && !change->prepare_ok)
			break;

		/* Call the appropriate callback. */
		ret = nb_callback_configuration(transaction->context, event,
						change, errmsg, errmsg_len);
		switch (event) {
		case NB_EV_PREPARE:
			if (ret != NB_OK)
				return ret;
			change->prepare_ok = true;
			break;
		case NB_EV_ABORT:
		case NB_EV_APPLY:
			/*
			 * At this point it's not possible to reject the
			 * transaction anymore, so any failure here can lead to
			 * inconsistencies and should be treated as a bug.
			 * Operations prone to errors, like validations and
			 * resource allocations, should be performed during the
			 * 'prepare' phase.
			 */
			break;
		case NB_EV_VALIDATE:
			break;
		}
	}

	return NB_OK;
}
1498
/*
 * Allocate a new 'apply_finish' callback record and insert it into the given
 * RB tree. The returned record is owned by the tree (freed by the caller of
 * nb_transaction_apply_finish()).
 */
static struct nb_config_cb *
nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
		       const struct lyd_node *dnode)
{
	struct nb_config_cb *cb;

	cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
	cb->nb_node = nb_node;
	cb->dnode = dnode;
	RB_INSERT(nb_config_cbs, cbs, cb);

	return cb;
}

/*
 * Look up an 'apply_finish' callback record for the given (nb_node, dnode)
 * pair. Used to avoid calling the same callback twice for one data node.
 */
static struct nb_config_cb *
nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
			const struct nb_node *nb_node,
			const struct lyd_node *dnode)
{
	struct nb_config_cb s;

	/* 'seq' must be zeroed: it participates in the tree comparison. */
	s.seq = 0;
	s.nb_node = nb_node;
	s.dnode = dnode;
	return RB_FIND(nb_config_cbs, cbs, &s);
}
1525
/* Call the 'apply_finish' callbacks. */
static void nb_transaction_apply_finish(struct nb_transaction *transaction,
					char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = lyd_parent(dnode);
			if (!dnode)
				/*
				 * NOTE(review): 'break' stops scanning ALL
				 * remaining changes when a destroyed node has
				 * no parent; 'continue' looks like the
				 * intended behavior — confirm upstream.
				 */
				break;

			/*
			 * The dnode from 'delete' callbacks point to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node || !nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
				goto next;

			nb_apply_finish_cb_new(&cbs, nb_node, dnode);

		next:
			dnode = lyd_parent(dnode);
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs)
		nb_callback_apply_finish(transaction->context, cb->nb_node,
					 cb->dnode, errmsg, errmsg_len);

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}
1597
/*
 * Iterate over the direct children of 'snode', recursing into each one via
 * nb_oper_data_iter_node(). Stops and propagates the first non-NB_OK return.
 */
static int nb_oper_data_iter_children(const struct lysc_node *snode,
				      const char *xpath, const void *list_entry,
				      const struct yang_list_keys *list_keys,
				      struct yang_translator *translator,
				      bool first, uint32_t flags,
				      nb_oper_data_cb cb, void *arg)
{
	const struct lysc_node *child;

	LY_LIST_FOR (lysc_node_child(snode), child) {
		int ret;

		ret = nb_oper_data_iter_node(child, xpath, list_entry,
					     list_keys, translator, false,
					     flags, cb, arg);
		if (ret != NB_OK)
			return ret;
	}

	return NB_OK;
}
1619
/*
 * Fetch the operational value of a single leaf through the 'get_elem'
 * callback and hand it to 'cb'. Configuration leafs and list keys are
 * skipped (keys are already part of the list entry's XPath).
 */
static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct yang_data *data;

	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	/* Ignore list keys. */
	if (lysc_is_key(nb_node->snode))
		return NB_OK;

	data = nb_callback_get_elem(nb_node, xpath, list_entry);
	if (data == NULL)
		/* Leaf of type "empty" is not present. */
		return NB_OK;

	return (*cb)(nb_node->snode, translator, data, arg);
}
1642
/*
 * Iterate over a YANG container: report its own presence (for read-only
 * presence containers with a 'get_elem' callback), skip read-write presence
 * containers that aren't present in the running configuration, then recurse
 * into the children.
 */
static int nb_oper_data_iter_container(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry,
				       const struct yang_list_keys *list_keys,
				       struct yang_translator *translator,
				       uint32_t flags, nb_oper_data_cb cb,
				       void *arg)
{
	const struct lysc_node *snode = nb_node->snode;

	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
		return NB_OK;

	/* Read-only presence containers. */
	if (nb_node->cbs.get_elem) {
		struct yang_data *data;
		int ret;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			/* Presence container is not present. */
			return NB_OK;

		ret = (*cb)(snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	}

	/* Read-write presence containers. */
	if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)) {
		struct lysc_node_container *scontainer;

		scontainer = (struct lysc_node_container *)snode;
		if (CHECK_FLAG(scontainer->flags, LYS_PRESENCE)
		    && !yang_dnode_get(running_config->dnode, xpath))
			return NB_OK;
	}

	/* Iterate over the child nodes. */
	return nb_oper_data_iter_children(snode, xpath, list_entry, list_keys,
					  translator, false, flags, cb, arg);
}
1685
/*
 * Iterate over all entries of an operational-state leaf-list using the
 * 'get_next'/'get_elem' callbacks, passing each value to 'cb'.
 * Configuration leaf-lists are skipped.
 */
static int
nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
			   const void *parent_list_entry,
			   const struct yang_list_keys *parent_list_keys,
			   struct yang_translator *translator, uint32_t flags,
			   nb_oper_data_cb cb, void *arg)
{
	const void *list_entry = NULL;

	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	do {
		struct yang_data *data;
		int ret;

		list_entry = nb_callback_get_next(nb_node, parent_list_entry,
						  list_entry);
		if (!list_entry)
			/* End of the list. */
			break;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			continue;

		ret = (*cb)(nb_node->snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	} while (list_entry);

	return NB_OK;
}
1719
1720 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1721 const char *xpath_list,
1722 const void *parent_list_entry,
1723 const struct yang_list_keys *parent_list_keys,
1724 struct yang_translator *translator,
1725 uint32_t flags, nb_oper_data_cb cb, void *arg)
1726 {
1727 const struct lysc_node *snode = nb_node->snode;
1728 const void *list_entry = NULL;
1729 uint32_t position = 1;
1730
1731 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1732 return NB_OK;
1733
1734 /* Iterate over all list entries. */
1735 do {
1736 const struct lysc_node_leaf *skey;
1737 struct yang_list_keys list_keys;
1738 char xpath[XPATH_MAXLEN * 2];
1739 int ret;
1740
1741 /* Obtain list entry. */
1742 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1743 list_entry);
1744 if (!list_entry)
1745 /* End of the list. */
1746 break;
1747
1748 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1749 /* Obtain the list entry keys. */
1750 if (nb_callback_get_keys(nb_node, list_entry,
1751 &list_keys)
1752 != NB_OK) {
1753 flog_warn(EC_LIB_NB_CB_STATE,
1754 "%s: failed to get list keys",
1755 __func__);
1756 return NB_ERR;
1757 }
1758
1759 /* Build XPath of the list entry. */
1760 strlcpy(xpath, xpath_list, sizeof(xpath));
1761 unsigned int i = 0;
1762 LY_FOR_KEYS (snode, skey) {
1763 assert(i < list_keys.num);
1764 snprintf(xpath + strlen(xpath),
1765 sizeof(xpath) - strlen(xpath),
1766 "[%s='%s']", skey->name,
1767 list_keys.key[i]);
1768 i++;
1769 }
1770 assert(i == list_keys.num);
1771 } else {
1772 /*
1773 * Keyless list - build XPath using a positional index.
1774 */
1775 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1776 position);
1777 position++;
1778 }
1779
1780 /* Iterate over the child nodes. */
1781 ret = nb_oper_data_iter_children(
1782 nb_node->snode, xpath, list_entry, &list_keys,
1783 translator, false, flags, cb, arg);
1784 if (ret != NB_OK)
1785 return ret;
1786 } while (list_entry);
1787
1788 return NB_OK;
1789 }
1790
/*
 * Recursive worker for the operational-data iteration: extend the parent's
 * XPath with this schema node (including the augmenting module's namespace
 * when needed) and dispatch to the handler matching the node type.
 */
static int nb_oper_data_iter_node(const struct lysc_node *snode,
				  const char *xpath_parent,
				  const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	char xpath[XPATH_MAXLEN];
	int ret = NB_OK;

	/* Honor the no-recurse flag for nested containers and lists. */
	if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
	    && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
		return NB_OK;

	/* Update XPath. */
	strlcpy(xpath, xpath_parent, sizeof(xpath));
	if (!first && snode->nodetype != LYS_USES) {
		struct lysc_node *parent;

		/* Get the real parent. */
		parent = snode->parent;

		/*
		 * When necessary, include the namespace of the augmenting
		 * module.
		 */
		if (parent && parent->module != snode->module)
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s:%s",
				 snode->module->name, snode->name);
		else
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s",
				 snode->name);
	}

	nb_node = snode->priv;
	switch (snode->nodetype) {
	case LYS_CONTAINER:
		ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
						  list_keys, translator, flags,
						  cb, arg);
		break;
	case LYS_LEAF:
		ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_LEAFLIST:
		ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
						 list_keys, translator, flags,
						 cb, arg);
		break;
	case LYS_LIST:
		ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_USES:
		ret = nb_oper_data_iter_children(snode, xpath, list_entry,
						 list_keys, translator, false,
						 flags, cb, arg);
		break;
	default:
		break;
	}

	return ret;
}
1862
1863 int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1864 uint32_t flags, nb_oper_data_cb cb, void *arg)
1865 {
1866 struct nb_node *nb_node;
1867 const void *list_entry = NULL;
1868 struct yang_list_keys list_keys;
1869 struct list *list_dnodes;
1870 struct lyd_node *dnode, *dn;
1871 struct listnode *ln;
1872 int ret;
1873
1874 nb_node = nb_node_find(xpath);
1875 if (!nb_node) {
1876 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1877 "%s: unknown data path: %s", __func__, xpath);
1878 return NB_ERR;
1879 }
1880
1881 /* For now this function works only with containers and lists. */
1882 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1883 flog_warn(
1884 EC_LIB_NB_OPERATIONAL_DATA,
1885 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1886 __func__, xpath);
1887 return NB_ERR;
1888 }
1889
1890 /*
1891 * Create a data tree from the XPath so that we can parse the keys of
1892 * all YANG lists (if any).
1893 */
1894
1895 LY_ERR err = lyd_new_path(NULL, ly_native_ctx, xpath, NULL,
1896 LYD_NEW_PATH_UPDATE, &dnode);
1897 if (err || !dnode) {
1898 const char *errmsg =
1899 err ? ly_errmsg(ly_native_ctx) : "node not found";
1900 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed %s",
1901 __func__, errmsg);
1902 return NB_ERR;
1903 }
1904
1905 /*
1906 * Create a linked list to sort the data nodes starting from the root.
1907 */
1908 list_dnodes = list_new();
1909 for (dn = dnode; dn; dn = lyd_parent(dn)) {
1910 if (dn->schema->nodetype != LYS_LIST || !lyd_child(dn))
1911 continue;
1912 listnode_add_head(list_dnodes, dn);
1913 }
1914 /*
1915 * Use the northbound callbacks to find list entry pointer corresponding
1916 * to the given XPath.
1917 */
1918 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1919 struct lyd_node *child;
1920 struct nb_node *nn;
1921 unsigned int n = 0;
1922
1923 /* Obtain the list entry keys. */
1924 memset(&list_keys, 0, sizeof(list_keys));
1925 LY_LIST_FOR (lyd_child(dn), child) {
1926 if (!lysc_is_key(child->schema))
1927 break;
1928 strlcpy(list_keys.key[n],
1929 yang_dnode_get_string(child, NULL),
1930 sizeof(list_keys.key[n]));
1931 n++;
1932 }
1933 list_keys.num = n;
1934 if (list_keys.num != yang_snode_num_keys(dn->schema)) {
1935 list_delete(&list_dnodes);
1936 yang_dnode_free(dnode);
1937 return NB_ERR_NOT_FOUND;
1938 }
1939
1940 /* Find the list entry pointer. */
1941 nn = dn->schema->priv;
1942 if (!nn->cbs.lookup_entry) {
1943 flog_warn(
1944 EC_LIB_NB_OPERATIONAL_DATA,
1945 "%s: data path doesn't support iteration over operational data: %s",
1946 __func__, xpath);
1947 list_delete(&list_dnodes);
1948 yang_dnode_free(dnode);
1949 return NB_ERR;
1950 }
1951
1952 list_entry =
1953 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1954 if (list_entry == NULL) {
1955 list_delete(&list_dnodes);
1956 yang_dnode_free(dnode);
1957 return NB_ERR_NOT_FOUND;
1958 }
1959 }
1960
1961 /* If a list entry was given, iterate over that list entry only. */
1962 if (dnode->schema->nodetype == LYS_LIST && lyd_child(dnode))
1963 ret = nb_oper_data_iter_children(
1964 nb_node->snode, xpath, list_entry, &list_keys,
1965 translator, true, flags, cb, arg);
1966 else
1967 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1968 &list_keys, translator, true,
1969 flags, cb, arg);
1970
1971 list_delete(&list_dnodes);
1972 yang_dnode_free(dnode);
1973
1974 return ret;
1975 }
1976
/*
 * Return true when the given northbound operation makes sense for the given
 * schema node, based on the node's type and YANG properties (config
 * true/false, presence, mandatory, default, ordered-by user, etc). Used to
 * validate the callbacks registered for each node.
 */
bool nb_operation_is_valid(enum nb_operation operation,
			   const struct lysc_node *snode)
{
	struct nb_node *nb_node = snode->priv;
	struct lysc_node_container *scontainer;
	struct lysc_node_leaf *sleaf;

	switch (operation) {
	case NB_OP_CREATE:
		/* Only configuration nodes can be created. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Only leafs of type "empty" are "created". */
			sleaf = (struct lysc_node_leaf *)snode;
			if (sleaf->type->basetype != LY_TYPE_EMPTY)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be created. */
			scontainer = (struct lysc_node_container *)snode;
			if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MODIFY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Leafs of type "empty" are created, not modified. */
			sleaf = (struct lysc_node_leaf *)snode;
			if (sleaf->type->basetype == LY_TYPE_EMPTY)
				return false;

			/* List keys can't be modified. */
			if (lysc_is_key(sleaf))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_DESTROY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lysc_node_leaf *)snode;

			/* List keys can't be deleted. */
			if (lysc_is_key(sleaf))
				return false;

			/*
			 * Only optional leafs can be deleted, or leafs whose
			 * parent is a case statement.
			 */
			if (snode->parent->nodetype == LYS_CASE)
				return true;
			if (sleaf->when)
				return true;
			if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
			    || sleaf->dflt)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be destroyed. */
			scontainer = (struct lysc_node_container *)snode;
			if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MOVE:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LIST:
		case LYS_LEAFLIST:
			/* Only "ordered-by user" nodes can be moved. */
			if (!CHECK_FLAG(snode->flags, LYS_ORDBY_USER))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;
		return true;
	case NB_OP_GET_ELEM:
		/* Only operational-state nodes can be fetched. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
		case LYS_LEAFLIST:
			break;
		case LYS_CONTAINER:
			scontainer = (struct lysc_node_container *)snode;
			if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_NEXT:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			break;
		case LYS_LEAFLIST:
			if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			/* Keyless lists have no keys to get or look up. */
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_RPC:
		/* RPC nodes are neither config nor state. */
		if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_RPC:
		case LYS_ACTION:
			break;
		default:
			return false;
		}
		return true;
	default:
		return false;
	}
}
2140
/* Hook called for each YANG notification; northbound plugins subscribe to it. */
DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
	    (xpath, arguments));

/*
 * Send a YANG notification identified by 'xpath' with the given list of
 * arguments to all registered hook subscribers. Takes ownership of
 * 'arguments': the list is deleted before returning.
 */
int nb_notification_send(const char *xpath, struct list *arguments)
{
	int ret;

	DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);

	ret = hook_call(nb_notification_send, xpath, arguments);
	if (arguments)
		list_delete(&arguments);

	return ret;
}
2156
/* Running configuration user pointers management. */
struct nb_config_entry {
	/* XPath of the data node this entry is attached to (hash key).
	 * NOTE: must remain the first member — see
	 * running_config_entry_key_make(). */
	char xpath[XPATH_MAXLEN];

	/* Opaque user pointer supplied via nb_running_set_entry(). */
	void *entry;
};
2162
/* Hash equality callback: two entries match when their XPaths match. */
static bool running_config_entry_cmp(const void *value1, const void *value2)
{
	const struct nb_config_entry *c1 = value1;
	const struct nb_config_entry *c2 = value2;

	return strmatch(c1->xpath, c2->xpath);
}

/*
 * Hash key callback. 'value' is a struct nb_config_entry *, but since
 * 'xpath' is its first member the pointer can be passed directly to
 * string_hash_make().
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}

/* Hash allocation callback: copy the lookup key's XPath into a new entry. */
static void *running_config_entry_alloc(void *p)
{
	struct nb_config_entry *new, *key = p;

	new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
	strlcpy(new->xpath, key->xpath, sizeof(new->xpath));

	return new;
}

/* Free a hash entry previously allocated by running_config_entry_alloc(). */
static void running_config_entry_free(void *arg)
{
	XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
}
2190
/*
 * Associate a user pointer with the given data node of the running
 * configuration, creating the hash entry (keyed by the node's XPath) if it
 * doesn't exist yet, or overwriting the previous pointer if it does.
 */
void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
{
	struct nb_config_entry *config, s;

	yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
	config = hash_get(running_config_entries, &s,
			  running_config_entry_alloc);
	config->entry = entry;
}
2200
/*
 * Rewrite the XPath prefix of all user-pointer entries under 'xpath_from' to
 * 'xpath_to'. Each matching entry is removed from the hash, its key (XPath)
 * rewritten in place, and then re-inserted — the hash bucket depends on the
 * key, so mutating it while still in the table would corrupt the hash.
 */
void nb_running_move_tree(const char *xpath_from, const char *xpath_to)
{
	struct nb_config_entry *entry;
	struct list *entries = hash_to_list(running_config_entries);
	struct listnode *ln;

	for (ALL_LIST_ELEMENTS_RO(entries, ln, entry)) {
		if (!frrstr_startswith(entry->xpath, xpath_from))
			continue;

		hash_release(running_config_entries, entry);

		/* frrstr_replace() allocates; free after copying the result. */
		char *newpath =
			frrstr_replace(entry->xpath, xpath_from, xpath_to);
		strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
		XFREE(MTYPE_TMP, newpath);

		(void)hash_get(running_config_entries, entry,
			       hash_alloc_intern);
	}

	list_delete(&entries);
}
2224
/*
 * Remove and return the user pointer associated with 'dnode' (NULL if none),
 * recursively removing the user pointers of all descendant nodes as well
 * (their values are discarded — only the top node's pointer is returned).
 */
static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
{
	struct nb_config_entry *config, s;
	struct lyd_node *child;
	void *entry = NULL;

	yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
	config = hash_release(running_config_entries, &s);
	if (config) {
		entry = config->entry;
		running_config_entry_free(config);
	}

	/* Unset user pointers from the child nodes. */
	if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
		LY_LIST_FOR (lyd_child(dnode), child) {
			(void)nb_running_unset_entry_helper(child);
		}
	}

	return entry;
}
2247
/*
 * Remove and return the user pointer associated with "dnode", also
 * removing any user pointers attached to its child nodes.  Aborts if
 * no entry is registered for "dnode" itself.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);
	return entry;
}
2257
2258 static void *nb_running_get_entry_worker(const struct lyd_node *dnode,
2259 const char *xpath,
2260 bool abort_if_not_found,
2261 bool rec_search)
2262 {
2263 const struct lyd_node *orig_dnode = dnode;
2264 char xpath_buf[XPATH_MAXLEN];
2265 bool rec_flag = true;
2266
2267 assert(dnode || xpath);
2268
2269 if (!dnode)
2270 dnode = yang_dnode_get(running_config->dnode, xpath);
2271
2272 while (rec_flag && dnode) {
2273 struct nb_config_entry *config, s;
2274
2275 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2276 config = hash_lookup(running_config_entries, &s);
2277 if (config)
2278 return config->entry;
2279
2280 rec_flag = rec_search;
2281
2282 dnode = lyd_parent(dnode);
2283 }
2284
2285 if (!abort_if_not_found)
2286 return NULL;
2287
2288 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
2289 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
2290 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
2291 zlog_backtrace(LOG_ERR);
2292 abort();
2293 }
2294
2295 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
2296 bool abort_if_not_found)
2297 {
2298 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2299 true);
2300 }
2301
2302 void *nb_running_get_entry_non_rec(const struct lyd_node *dnode,
2303 const char *xpath, bool abort_if_not_found)
2304 {
2305 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2306 false);
2307 }
2308
2309 /* Logging functions. */
2310 const char *nb_event_name(enum nb_event event)
2311 {
2312 switch (event) {
2313 case NB_EV_VALIDATE:
2314 return "validate";
2315 case NB_EV_PREPARE:
2316 return "prepare";
2317 case NB_EV_ABORT:
2318 return "abort";
2319 case NB_EV_APPLY:
2320 return "apply";
2321 }
2322
2323 assert(!"Reached end of function we should never hit");
2324 }
2325
2326 const char *nb_operation_name(enum nb_operation operation)
2327 {
2328 switch (operation) {
2329 case NB_OP_CREATE:
2330 return "create";
2331 case NB_OP_MODIFY:
2332 return "modify";
2333 case NB_OP_DESTROY:
2334 return "destroy";
2335 case NB_OP_MOVE:
2336 return "move";
2337 case NB_OP_PRE_VALIDATE:
2338 return "pre_validate";
2339 case NB_OP_APPLY_FINISH:
2340 return "apply_finish";
2341 case NB_OP_GET_ELEM:
2342 return "get_elem";
2343 case NB_OP_GET_NEXT:
2344 return "get_next";
2345 case NB_OP_GET_KEYS:
2346 return "get_keys";
2347 case NB_OP_LOOKUP_ENTRY:
2348 return "lookup_entry";
2349 case NB_OP_RPC:
2350 return "rpc";
2351 }
2352
2353 assert(!"Reached end of function we should never hit");
2354 }
2355
2356 const char *nb_err_name(enum nb_error error)
2357 {
2358 switch (error) {
2359 case NB_OK:
2360 return "ok";
2361 case NB_ERR:
2362 return "generic error";
2363 case NB_ERR_NO_CHANGES:
2364 return "no changes";
2365 case NB_ERR_NOT_FOUND:
2366 return "element not found";
2367 case NB_ERR_LOCKED:
2368 return "resource is locked";
2369 case NB_ERR_VALIDATION:
2370 return "validation";
2371 case NB_ERR_RESOURCE:
2372 return "failed to allocate resource";
2373 case NB_ERR_INCONSISTENCY:
2374 return "internal inconsistency";
2375 }
2376
2377 assert(!"Reached end of function we should never hit");
2378 }
2379
2380 const char *nb_client_name(enum nb_client client)
2381 {
2382 switch (client) {
2383 case NB_CLIENT_CLI:
2384 return "CLI";
2385 case NB_CLIENT_CONFD:
2386 return "ConfD";
2387 case NB_CLIENT_SYSREPO:
2388 return "Sysrepo";
2389 case NB_CLIENT_GRPC:
2390 return "gRPC";
2391 case NB_CLIENT_PCEP:
2392 return "Pcep";
2393 case NB_CLIENT_NONE:
2394 return "None";
2395 }
2396
2397 assert(!"Reached end of function we should never hit");
2398 }
2399
/*
 * Copy the northbound callbacks (and optional non-default priority)
 * declared in a daemon's frr_yang_module_info table into the matching
 * nb_node of each schema node.  Exits the daemon when the module table
 * exceeds the supported size; unknown XPaths are only warned about.
 */
static void nb_load_callbacks(const struct frr_yang_module_info *module)
{
	/* The nodes array is terminated by an entry with a NULL xpath. */
	for (size_t i = 0; module->nodes[i].xpath; i++) {
		struct nb_node *nb_node;
		uint32_t priority;

		/*
		 * Guard against a missing NULL terminator.  NOTE(review):
		 * the bound check uses '>' and runs after nodes[i].xpath
		 * was already read by the loop condition, so it looks like
		 * it could be off by one -- confirm against the declared
		 * size of the nodes array.
		 */
		if (i > YANG_MODULE_MAX_NODES) {
			zlog_err(
				"%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
				__func__, module->name, YANG_MODULE_MAX_NODES);
			exit(1);
		}

		nb_node = nb_node_find(module->nodes[i].xpath);
		if (!nb_node) {
			flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
				  "%s: unknown data path: %s", __func__,
				  module->nodes[i].xpath);
			continue;
		}

		nb_node->cbs = module->nodes[i].cbs;
		/* A zero priority means "keep the node's default". */
		priority = module->nodes[i].priority;
		if (priority != 0)
			nb_node->priority = priority;
	}
}
2427
2428 void nb_validate_callbacks(void)
2429 {
2430 unsigned int errors = 0;
2431
2432 yang_snodes_iterate(NULL, nb_node_validate, 0, &errors);
2433 if (errors > 0) {
2434 flog_err(
2435 EC_LIB_NB_CBS_VALIDATION,
2436 "%s: failed to validate northbound callbacks: %u error(s)",
2437 __func__, errors);
2438 exit(1);
2439 }
2440 }
2441
2442
2443 void nb_init(struct thread_master *tm,
2444 const struct frr_yang_module_info *const modules[],
2445 size_t nmodules, bool db_enabled)
2446 {
2447 struct yang_module *loaded[nmodules], **loadedp = loaded;
2448 bool explicit_compile;
2449
2450 /*
2451 * Currently using this explicit compile feature in libyang2 leads to
2452 * incorrect behavior in FRR. The functionality suppresses the compiling
2453 * of modules until they have all been loaded into the context. This
2454 * avoids multiple recompiles of the same modules as they are
2455 * imported/augmented etc.
2456 */
2457 explicit_compile = false;
2458
2459 nb_db_enabled = db_enabled;
2460
2461 yang_init(true, explicit_compile);
2462
2463 /* Load YANG modules and their corresponding northbound callbacks. */
2464 for (size_t i = 0; i < nmodules; i++) {
2465 DEBUGD(&nb_dbg_events, "northbound: loading %s.yang",
2466 modules[i]->name);
2467 *loadedp++ = yang_module_load(modules[i]->name);
2468 }
2469
2470 if (explicit_compile)
2471 yang_init_loading_complete();
2472
2473 /* Initialize the compiled nodes with northbound data */
2474 for (size_t i = 0; i < nmodules; i++) {
2475 yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0, NULL);
2476 nb_load_callbacks(modules[i]);
2477 }
2478
2479 /* Validate northbound callbacks. */
2480 nb_validate_callbacks();
2481
2482 /* Create an empty running configuration. */
2483 running_config = nb_config_new(NULL);
2484 running_config_entries = hash_create(running_config_entry_key_make,
2485 running_config_entry_cmp,
2486 "Running Configuration Entries");
2487 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
2488
2489 /* Initialize the northbound CLI. */
2490 nb_cli_init(tm);
2491 }
2492
/*
 * Tear down the northbound layer: the CLI, all nb_node allocations, the
 * running configuration with its user-pointer hash table, and the
 * running-config management lock.  Counterpart of nb_init().
 */
void nb_terminate(void)
{
	/* Terminate the northbound CLI. */
	nb_cli_terminate();

	/* Delete all nb_node's from all YANG modules. */
	nb_nodes_delete();

	/* Delete the running configuration. */
	hash_clean(running_config_entries, running_config_entry_free);
	hash_free(running_config_entries);
	nb_config_free(running_config);
	pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}