lib/northbound.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2018 NetDEF, Inc.
4 * Renato Westphal
5 */
6
7 #include <zebra.h>
8
9 #include "libfrr.h"
10 #include "log.h"
11 #include "lib_errors.h"
12 #include "hash.h"
13 #include "command.h"
14 #include "debug.h"
15 #include "db.h"
16 #include "frr_pthread.h"
17 #include "northbound.h"
18 #include "northbound_cli.h"
19 #include "northbound_db.h"
20 #include "frrstr.h"
21
22 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node");
23 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration");
24 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry");
25
26 /* Running configuration - shouldn't be modified directly. */
27 struct nb_config *running_config;
28
29 /* Hash table of user pointers associated with configuration entries. */
30 static struct hash *running_config_entries;
31
32 /* Management lock for the running configuration. */
33 static struct {
34 /* Mutex protecting this structure. */
35 pthread_mutex_t mtx;
36
37 /* Actual lock. */
38 bool locked;
39
40 /* Northbound client who owns this lock. */
41 enum nb_client owner_client;
42
43 /* Northbound user who owns this lock. */
44 const void *owner_user;
45 } running_config_mgmt_lock;
46
47 /* Knob to record config transaction */
48 static bool nb_db_enabled;
49 /*
50 * Global lock used to prevent multiple configuration transactions from
51 * happening concurrently.
52 */
53 static bool transaction_in_progress;
54
55 static int nb_callback_pre_validate(struct nb_context *context,
56 const struct nb_node *nb_node,
57 const struct lyd_node *dnode, char *errmsg,
58 size_t errmsg_len);
59 static int nb_callback_configuration(struct nb_context *context,
60 const enum nb_event event,
61 struct nb_config_change *change,
62 char *errmsg, size_t errmsg_len);
63 static struct nb_transaction *
64 nb_transaction_new(struct nb_context context, struct nb_config *config,
65 struct nb_config_cbs *changes, const char *comment,
66 char *errmsg, size_t errmsg_len);
67 static void nb_transaction_free(struct nb_transaction *transaction);
68 static int nb_transaction_process(enum nb_event event,
69 struct nb_transaction *transaction,
70 char *errmsg, size_t errmsg_len);
71 static void nb_transaction_apply_finish(struct nb_transaction *transaction,
72 char *errmsg, size_t errmsg_len);
73 static int nb_oper_data_iter_node(const struct lysc_node *snode,
74 const char *xpath, const void *list_entry,
75 const struct yang_list_keys *list_keys,
76 struct yang_translator *translator,
77 bool first, uint32_t flags,
78 nb_oper_data_cb cb, void *arg);
79
80 static int nb_node_check_config_only(const struct lysc_node *snode, void *arg)
81 {
82 bool *config_only = arg;
83
84 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
85 *config_only = false;
86 return YANG_ITER_STOP;
87 }
88
89 return YANG_ITER_CONTINUE;
90 }
91
92 static int nb_node_new_cb(const struct lysc_node *snode, void *arg)
93 {
94 struct nb_node *nb_node;
95 struct lysc_node *sparent, *sparent_list;
96 struct frr_yang_module_info *module;
97
98 module = (struct frr_yang_module_info *)arg;
99 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
100 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
101 sizeof(nb_node->xpath));
102 nb_node->priority = NB_DFLT_PRIORITY;
103 sparent = yang_snode_real_parent(snode);
104 if (sparent)
105 nb_node->parent = sparent->priv;
106 sparent_list = yang_snode_parent_list(snode);
107 if (sparent_list)
108 nb_node->parent_list = sparent_list->priv;
109
110 /* Set flags. */
111 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
112 bool config_only = true;
113
114 (void)yang_snodes_iterate_subtree(snode, NULL,
115 nb_node_check_config_only, 0,
116 &config_only);
117 if (config_only)
118 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
119 }
120 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
121 if (yang_snode_num_keys(snode) == 0)
122 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
123 }
124
125 /*
126 * Link the northbound node and the libyang schema node with one
127 * another.
128 */
129 nb_node->snode = snode;
130 assert(snode->priv == NULL);
131 ((struct lysc_node *)snode)->priv = nb_node;
132
133 if (module && module->ignore_cbs)
134 SET_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS);
135
136 return YANG_ITER_CONTINUE;
137 }
138
139 static int nb_node_del_cb(const struct lysc_node *snode, void *arg)
140 {
141 struct nb_node *nb_node;
142
143 nb_node = snode->priv;
144 if (nb_node) {
145 ((struct lysc_node *)snode)->priv = NULL;
146 XFREE(MTYPE_NB_NODE, nb_node);
147 }
148
149 return YANG_ITER_CONTINUE;
150 }
151
152 void nb_nodes_create(void)
153 {
154 yang_snodes_iterate(NULL, nb_node_new_cb, 0, NULL);
155 }
156
157 void nb_nodes_delete(void)
158 {
159 yang_snodes_iterate(NULL, nb_node_del_cb, 0, NULL);
160 }
161
162 struct nb_node *nb_node_find(const char *path)
163 {
164 const struct lysc_node *snode;
165
166 /*
167 * Use libyang to find the schema node associated to the path and get
168 * the northbound node from there (snode private pointer).
169 */
170 snode = yang_find_snode(ly_native_ctx, path, 0);
171 if (!snode)
172 return NULL;
173
174 return snode->priv;
175 }
176
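/*
 * Tie two schema nodes together: creating the dependant node implicitly
 * creates the node it depends on, and destroying the dependency node
 * implicitly destroys its dependant (see nb_candidate_edit()). The
 * callbacks map a data node of one to the XPath of the other.
 */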
177 void nb_node_set_dependency_cbs(const char *dependency_xpath,
178 const char *dependant_xpath,
179 struct nb_dependency_callbacks *cbs)
180 {
181 struct nb_node *dependency = nb_node_find(dependency_xpath);
182 struct nb_node *dependant = nb_node_find(dependant_xpath);
183
184 if (!dependency || !dependant)
185 return;
186
187 dependency->dep_cbs.get_dependant_xpath = cbs->get_dependant_xpath;
188 dependant->dep_cbs.get_dependency_xpath = cbs->get_dependency_xpath;
189 }
190
191 bool nb_node_has_dependency(struct nb_node *node)
192 {
193 return node->dep_cbs.get_dependency_xpath != NULL;
194 }
195
196 static int nb_node_validate_cb(const struct nb_node *nb_node,
197 enum nb_operation operation,
198 int callback_implemented, bool optional)
199 {
200 bool valid;
201
202 valid = nb_operation_is_valid(operation, nb_node->snode);
203
204 /*
205 * Add an exception for operational data callbacks. A rw list usually
206 * doesn't need any associated operational data callbacks. But if this
207 * rw list is augmented by another module which adds state nodes under
208 * it, then this list will need to have the 'get_next()', 'get_keys()'
209 * and 'lookup_entry()' callbacks. As such, never log a warning when
210 * these callbacks are implemented when they are not needed, since this
211 * depends on context (e.g. some daemons might augment "frr-interface"
212 * while others don't).
213 */
214 if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
215 && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
216 flog_warn(EC_LIB_NB_CB_UNNEEDED,
217 "unneeded '%s' callback for '%s'",
218 nb_operation_name(operation), nb_node->xpath);
219
220 if (!optional && valid && !callback_implemented) {
221 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
222 nb_operation_name(operation), nb_node->xpath);
223 return 1;
224 }
225
226 return 0;
227 }
228
229 /*
230 * Check if the required callbacks were implemented for the given northbound
231 * node.
232 */
233 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
234
235 {
236 unsigned int error = 0;
237
238 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
239 return error;
240
241 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
242 !!nb_node->cbs.create, false);
243 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
244 !!nb_node->cbs.modify, false);
245 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
246 !!nb_node->cbs.destroy, false);
247 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
248 false);
249 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
250 !!nb_node->cbs.pre_validate, true);
251 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
252 !!nb_node->cbs.apply_finish, true);
253 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
254 !!nb_node->cbs.get_elem, false);
255 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
256 !!nb_node->cbs.get_next, false);
257 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
258 !!nb_node->cbs.get_keys, false);
259 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
260 !!nb_node->cbs.lookup_entry, false);
261 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
262 false);
263
264 return error;
265 }
266
267 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
268 {
269 /* Top-level nodes can have any priority. */
270 if (!nb_node->parent)
271 return 0;
272
273 if (nb_node->priority < nb_node->parent->priority) {
274 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
275 "node has higher priority than its parent [xpath %s]",
276 nb_node->xpath);
277 return 1;
278 }
279
280 return 0;
281 }
282
283 static int nb_node_validate(const struct lysc_node *snode, void *arg)
284 {
285 struct nb_node *nb_node = snode->priv;
286 unsigned int *errors = arg;
287
288 /* Validate callbacks and priority. */
289 if (nb_node) {
290 *errors += nb_node_validate_cbs(nb_node);
291 *errors += nb_node_validate_priority(nb_node);
292 }
293
294 return YANG_ITER_CONTINUE;
295 }
296
297 struct nb_config *nb_config_new(struct lyd_node *dnode)
298 {
299 struct nb_config *config;
300
301 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
302 if (dnode)
303 config->dnode = dnode;
304 else
305 config->dnode = yang_dnode_new(ly_native_ctx, true);
306 config->version = 0;
307
308 RB_INIT(nb_config_cbs, &config->cfg_chgs);
309
310 return config;
311 }
312
313 void nb_config_free(struct nb_config *config)
314 {
315 if (config->dnode)
316 yang_dnode_free(config->dnode);
317 nb_config_diff_del_changes(&config->cfg_chgs);
318 XFREE(MTYPE_NB_CONFIG, config);
319 }
320
321 struct nb_config *nb_config_dup(const struct nb_config *config)
322 {
323 struct nb_config *dup;
324
325 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
326 dup->dnode = yang_dnode_dup(config->dnode);
327 dup->version = config->version;
328
329 RB_INIT(nb_config_cbs, &dup->cfg_chgs);
330
331 return dup;
332 }
333
334 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
335 bool preserve_source)
336 {
337 int ret;
338
339 ret = lyd_merge_siblings(&config_dst->dnode, config_src->dnode, 0);
340 if (ret != 0)
341 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge_siblings() failed", __func__);
342
343 if (!preserve_source)
344 nb_config_free(config_src);
345
346 return (ret == 0) ? NB_OK : NB_ERR;
347 }
348
349 void nb_config_replace(struct nb_config *config_dst,
350 struct nb_config *config_src, bool preserve_source)
351 {
352 /* Update version. */
353 if (config_src->version != 0)
354 config_dst->version = config_src->version;
355
356 /* Update dnode. */
357 if (config_dst->dnode)
358 yang_dnode_free(config_dst->dnode);
359 if (preserve_source) {
360 config_dst->dnode = yang_dnode_dup(config_src->dnode);
361 } else {
362 config_dst->dnode = config_src->dnode;
363 config_src->dnode = NULL;
364 nb_config_free(config_src);
365 }
366 }
367
368 /* Generate the nb_config_cbs tree. */
369 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
370 const struct nb_config_cb *b)
371 {
372 /* Sort by priority first. */
373 if (a->nb_node->priority < b->nb_node->priority)
374 return -1;
375 if (a->nb_node->priority > b->nb_node->priority)
376 return 1;
377
378 /*
379 * Preserve the order of the configuration changes as told by libyang.
380 */
381 if (a->seq < b->seq)
382 return -1;
383 if (a->seq > b->seq)
384 return 1;
385
386 /*
387 * All 'apply_finish' callbacks have their sequence number set to zero.
388 * In this case, compare them using their dnode pointers (the order
389 * doesn't matter for callbacks that have the same priority).
390 */
391 if (a->dnode < b->dnode)
392 return -1;
393 if (a->dnode > b->dnode)
394 return 1;
395
396 return 0;
397 }
398 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
399
400 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
401 enum nb_operation operation,
402 uint32_t *seq,
403 const struct lyd_node *dnode)
404 {
405 struct nb_config_change *change;
406
407 /* Ignore unimplemented nodes. */
408 if (!dnode->schema->priv)
409 return;
410
411 change = XCALLOC(MTYPE_TMP, sizeof(*change));
412 change->cb.operation = operation;
413 change->cb.seq = *seq;
414 *seq = *seq + 1;
415 change->cb.nb_node = dnode->schema->priv;
416 change->cb.dnode = dnode;
417
418 RB_INSERT(nb_config_cbs, changes, &change->cb);
419 }
420
421 void nb_config_diff_del_changes(struct nb_config_cbs *changes)
422 {
423 while (!RB_EMPTY(nb_config_cbs, changes)) {
424 struct nb_config_change *change;
425
426 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
427 changes);
428 RB_REMOVE(nb_config_cbs, changes, &change->cb);
429 XFREE(MTYPE_TMP, change);
430 }
431 }
432
433 /*
434 * Helper function used when calculating the delta between two different
435 * configurations. Given a new subtree, calculate all new YANG data nodes,
436 * excluding default leafs and leaf-lists. This is a recursive function.
437 */
438 void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
439 struct nb_config_cbs *changes)
440 {
441 enum nb_operation operation;
442 struct lyd_node *child;
443
444 /* Ignore unimplemented nodes. */
445 if (!dnode->schema->priv)
446 return;
447
448 switch (dnode->schema->nodetype) {
449 case LYS_LEAF:
450 case LYS_LEAFLIST:
451 if (lyd_is_default(dnode))
452 break;
453
454 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
455 operation = NB_OP_CREATE;
456 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
457 operation = NB_OP_MODIFY;
458 else
459 return;
460
461 nb_config_diff_add_change(changes, operation, seq, dnode);
462 break;
463 case LYS_CONTAINER:
464 case LYS_LIST:
465 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
466 nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
467 dnode);
468
469 /* Process child nodes recursively. */
470 LY_LIST_FOR (lyd_child(dnode), child) {
471 nb_config_diff_created(child, seq, changes);
472 }
473 break;
474 default:
475 break;
476 }
477 }
478
479 static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
480 struct nb_config_cbs *changes)
481 {
482 /* Ignore unimplemented nodes. */
483 if (!dnode->schema->priv)
484 return;
485
486 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
487 nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
488 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
489 struct lyd_node *child;
490
491 /*
492 * Non-presence containers need special handling since they
493 * don't have "destroy" callbacks. In this case, what we need to
494 * do is to call the "destroy" callbacks of their child nodes
495 * when applicable (i.e. optional nodes).
496 */
497 LY_LIST_FOR (lyd_child(dnode), child) {
498 nb_config_diff_deleted(child, seq, changes);
499 }
500 }
501 }
502
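/*
 * Return the libyang diff operation attached to a data node via the
 * "yang:operation" metadata: 'c' (create), 'd' (delete), 'r' (replace) or
 * 'n' (none) when no such metadata is present (i.e. the node itself is
 * unchanged and only its descendants carry changes).
 */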
503 static int nb_lyd_diff_get_op(const struct lyd_node *dnode)
504 {
505 const struct lyd_meta *meta;
506 LY_LIST_FOR (dnode->meta, meta) {
507 if (strcmp(meta->name, "operation")
508 || strcmp(meta->annotation->module->name, "yang"))
509 continue;
510 return lyd_get_meta_value(meta)[0];
511 }
512 return 'n';
513 }
514
515 #if 0 /* Used below in nb_config_diff inside normally disabled code */
516 static inline void nb_config_diff_dnode_log_path(const char *context,
517 const char *path,
518 const struct lyd_node *dnode)
519 {
520 if (dnode->schema->nodetype & LYD_NODE_TERM)
521 zlog_debug("nb_config_diff: %s: %s: %s", context, path,
522 lyd_get_value(dnode));
523 else
524 zlog_debug("nb_config_diff: %s: %s", context, path);
525 }
526
527 static inline void nb_config_diff_dnode_log(const char *context,
528 const struct lyd_node *dnode)
529 {
530 if (!dnode) {
531 zlog_debug("nb_config_diff: %s: NULL", context);
532 return;
533 }
534
535 char *path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
536 nb_config_diff_dnode_log_path(context, path, dnode);
537 free(path);
538 }
539 #endif
540
541 /*
542 * Calculate the delta between two different configurations.
543 *
544 * NOTE: 'config1' is the reference DB, while 'config2' is
545 * the DB being compared against 'config1'. Typically 'config1'
546 * should be the Running DB and 'config2' is the Candidate DB.
547 */
548 void nb_config_diff(const struct nb_config *config1,
549 const struct nb_config *config2,
550 struct nb_config_cbs *changes)
551 {
552 struct lyd_node *diff = NULL;
553 const struct lyd_node *root, *dnode;
554 struct lyd_node *target;
555 int op;
556 LY_ERR err;
557 char *path;
558
559 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
560 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
561 LY_LIST_FOR(config1->dnode, root) {
562 LYD_TREE_DFS_BEGIN(root, dnode) {
563 nb_config_diff_dnode_log("from", dnode);
564 LYD_TREE_DFS_END(root, dnode);
565 }
566 }
567 LY_LIST_FOR(config2->dnode, root) {
568 LYD_TREE_DFS_BEGIN(root, dnode) {
569 nb_config_diff_dnode_log("to", dnode);
570 LYD_TREE_DFS_END(root, dnode);
571 }
572 }
573 }
574 #endif
575
576 err = lyd_diff_siblings(config1->dnode, config2->dnode,
577 LYD_DIFF_DEFAULTS, &diff);
578 assert(!err);
579
580 if (diff && DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
581 char *s;
582
583 if (!lyd_print_mem(&s, diff, LYD_JSON,
584 LYD_PRINT_WITHSIBLINGS | LYD_PRINT_WD_ALL)) {
585 zlog_debug("%s: %s", __func__, s);
586 free(s);
587 }
588 }
589
590 uint32_t seq = 0;
591
592 LY_LIST_FOR (diff, root) {
593 LYD_TREE_DFS_BEGIN (root, dnode) {
594 op = nb_lyd_diff_get_op(dnode);
595
596 path = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
597
598 #if 0 /* Useful (noisy) when debugging diff code, and for improving later */
599 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
600 char context[80];
601 snprintf(context, sizeof(context),
602 "iterating diff: oper: %c seq: %u", op, seq);
603 nb_config_diff_dnode_log_path(context, path, dnode);
604 }
605 #endif
606 switch (op) {
607 case 'c': /* create */
608 /*
609 * This is rather inefficient, but when we use
610 * dnode from the diff instead of the
611 * candidate config node we get failures when
612 * looking up default values, etc, based on
613 * the diff tree.
614 */
615 target = yang_dnode_get(config2->dnode, path);
616 assert(target);
617 nb_config_diff_created(target, &seq, changes);
618
619 /* Skip rest of sub-tree, move to next sibling
620 */
621 LYD_TREE_DFS_continue = 1;
622 break;
623 case 'd': /* delete */
624 target = yang_dnode_get(config1->dnode, path);
625 assert(target);
626 nb_config_diff_deleted(target, &seq, changes);
627
628 /* Skip rest of sub-tree, move to next sibling
629 */
630 LYD_TREE_DFS_continue = 1;
631 break;
632 case 'r': /* replace */
633 /* either moving an entry or changing a value */
634 target = yang_dnode_get(config2->dnode, path);
635 assert(target);
636 nb_config_diff_add_change(changes, NB_OP_MODIFY,
637 &seq, target);
638 break;
639 case 'n': /* none */
640 default:
641 break;
642 }
643 free(path);
644 LYD_TREE_DFS_END(root, dnode);
645 }
646 }
647
648 lyd_free_all(diff);
649 }
650
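/*
 * Illustrative usage (mirrors what nb_candidate_edit_config_changes() does
 * below):
 *
 *	nb_node = nb_node_find(xpath);
 *	data = yang_data_new(xpath, value);
 *	ret = nb_candidate_edit(candidate, nb_node, NB_OP_MODIFY, xpath,
 *				NULL, data);
 *	yang_data_free(data);
 *
 * A NB_ERR_NOT_FOUND return from a destroy operation means the node did not
 * exist in the candidate and may be ignored by the caller.
 */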
651 int nb_candidate_edit(struct nb_config *candidate,
652 const struct nb_node *nb_node,
653 enum nb_operation operation, const char *xpath,
654 const struct yang_data *previous,
655 const struct yang_data *data)
656 {
657 struct lyd_node *dnode, *dep_dnode;
658 char xpath_edit[XPATH_MAXLEN];
659 char dep_xpath[XPATH_MAXLEN];
660 LY_ERR err;
661
662 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
663 if (nb_node->snode->nodetype == LYS_LEAFLIST)
664 snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
665 data->value);
666 else
667 strlcpy(xpath_edit, xpath, sizeof(xpath_edit));
668
669 switch (operation) {
670 case NB_OP_CREATE:
671 case NB_OP_MODIFY:
672 err = lyd_new_path(candidate->dnode, ly_native_ctx, xpath_edit,
673 (void *)data->value, LYD_NEW_PATH_UPDATE,
674 &dnode);
675 if (err) {
676 flog_warn(EC_LIB_LIBYANG,
677 "%s: lyd_new_path(%s) failed: %d", __func__,
678 xpath_edit, err);
679 return NB_ERR;
680 } else if (dnode) {
681 /* Create default nodes */
682 LY_ERR err = lyd_new_implicit_tree(
683 dnode, LYD_IMPLICIT_NO_STATE, NULL);
684 if (err) {
685 flog_warn(EC_LIB_LIBYANG,
686 "%s: lyd_new_implicit_tree() failed: %d",
687 __func__, err);
688 }
689 /*
690 * create dependency
691 *
692 * dnode returned by the lyd_new_path may be from a
693 * different schema, so we need to update the nb_node
694 */
695 nb_node = dnode->schema->priv;
696 if (nb_node->dep_cbs.get_dependency_xpath) {
697 nb_node->dep_cbs.get_dependency_xpath(
698 dnode, dep_xpath);
699
700 err = lyd_new_path(candidate->dnode,
701 ly_native_ctx, dep_xpath,
702 NULL, LYD_NEW_PATH_UPDATE,
703 &dep_dnode);
704 /* Create default nodes */
705 if (!err && dep_dnode)
706 err = lyd_new_implicit_tree(
707 dep_dnode,
708 LYD_IMPLICIT_NO_STATE, NULL);
709 if (err) {
710 flog_warn(
711 EC_LIB_LIBYANG,
712 "%s: dependency: lyd_new_path(%s) failed: %d",
713 __func__, dep_xpath, err);
714 return NB_ERR;
715 }
716 }
717 }
718 break;
719 case NB_OP_DESTROY:
720 dnode = yang_dnode_get(candidate->dnode, xpath_edit);
721 if (!dnode)
722 /*
723 * Return a special error code so the caller can choose
724 * whether to ignore it or not.
725 */
726 return NB_ERR_NOT_FOUND;
727 /* destroy dependant */
728 if (nb_node->dep_cbs.get_dependant_xpath) {
729 nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);
730
731 dep_dnode = yang_dnode_get(candidate->dnode, dep_xpath);
732 if (dep_dnode)
733 lyd_free_tree(dep_dnode);
734 }
735 lyd_free_tree(dnode);
736 break;
737 case NB_OP_MOVE:
738 /* TODO: update configuration. */
739 break;
740 case NB_OP_PRE_VALIDATE:
741 case NB_OP_APPLY_FINISH:
742 case NB_OP_GET_ELEM:
743 case NB_OP_GET_NEXT:
744 case NB_OP_GET_KEYS:
745 case NB_OP_LOOKUP_ENTRY:
746 case NB_OP_RPC:
747 flog_warn(EC_LIB_DEVELOPMENT,
748 "%s: unknown operation (%u) [xpath %s]", __func__,
749 operation, xpath_edit);
750 return NB_ERR;
751 }
752
753 return NB_OK;
754 }
755
756 static void nb_update_candidate_changes(struct nb_config *candidate,
757 struct nb_cfg_change *change,
758 uint32_t *seq)
759 {
760 enum nb_operation oper = change->operation;
761 char *xpath = change->xpath;
762 struct lyd_node *root = NULL;
763 struct lyd_node *dnode;
764 struct nb_config_cbs *cfg_chgs = &candidate->cfg_chgs;
765 int op;
766
767 switch (oper) {
768 case NB_OP_CREATE:
769 case NB_OP_MODIFY:
770 root = yang_dnode_get(candidate->dnode, xpath);
771 break;
772 case NB_OP_DESTROY:
773 root = yang_dnode_get(running_config->dnode, xpath);
774 /* code */
775 break;
776 case NB_OP_MOVE:
777 case NB_OP_PRE_VALIDATE:
778 case NB_OP_APPLY_FINISH:
779 case NB_OP_GET_ELEM:
780 case NB_OP_GET_NEXT:
781 case NB_OP_GET_KEYS:
782 case NB_OP_LOOKUP_ENTRY:
783 case NB_OP_RPC:
784 break;
785 default:
786 assert(!"non-enum value, invalid");
787 }
788
789 if (!root)
790 return;
791
792 LYD_TREE_DFS_BEGIN (root, dnode) {
793 op = nb_lyd_diff_get_op(dnode);
794 switch (op) {
795 case 'c':
796 nb_config_diff_created(dnode, seq, cfg_chgs);
797 LYD_TREE_DFS_continue = 1;
798 break;
799 case 'd':
800 nb_config_diff_deleted(dnode, seq, cfg_chgs);
801 LYD_TREE_DFS_continue = 1;
802 break;
803 case 'r':
804 nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq,
805 dnode);
806 break;
807 default:
808 break;
809 }
810 LYD_TREE_DFS_END(root, dnode);
811 }
812 }
813
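/*
 * List key leafs cannot be modified or destroyed on their own: changing a
 * key effectively means destroying the list entry and creating a new one,
 * so such edits are rejected.
 */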
814 static bool nb_is_operation_allowed(struct nb_node *nb_node,
815 struct nb_cfg_change *change)
816 {
817 enum nb_operation oper = change->operation;
818
819 if (lysc_is_key(nb_node->snode)) {
820 if (oper == NB_OP_MODIFY || oper == NB_OP_DESTROY)
821 return false;
822 }
823 return true;
824 }
825
826 void nb_candidate_edit_config_changes(
827 struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
828 size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
829 int xpath_index, char *err_buf, int err_bufsize, bool *error)
830 {
831 uint32_t seq = 0;
832
833 if (error)
834 *error = false;
835
836 if (xpath_base == NULL)
837 xpath_base = "";
838
839 /* Edit candidate configuration. */
840 for (size_t i = 0; i < num_cfg_changes; i++) {
841 struct nb_cfg_change *change = &cfg_changes[i];
842 struct nb_node *nb_node;
843 char xpath[XPATH_MAXLEN];
844 struct yang_data *data;
845 int ret;
846
847 /* Handle relative XPaths. */
848 memset(xpath, 0, sizeof(xpath));
849 if (xpath_index > 0 &&
850 (xpath_base[0] == '.' || change->xpath[0] == '.'))
851 strlcpy(xpath, curr_xpath, sizeof(xpath));
852 if (xpath_base[0]) {
853 if (xpath_base[0] == '.')
854 strlcat(xpath, xpath_base + 1, sizeof(xpath));
855 else
856 strlcat(xpath, xpath_base, sizeof(xpath));
857 }
858 if (change->xpath[0] == '.')
859 strlcat(xpath, change->xpath + 1, sizeof(xpath));
860 else
861 strlcpy(xpath, change->xpath, sizeof(xpath));
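/*
 * Example (illustrative): with xpath_index > 0, a curr_xpath of
 * "/frr-interface:lib/interface[name='eth0']", an xpath_base of "." and a
 * change xpath of "./description", the resulting absolute xpath is
 * "/frr-interface:lib/interface[name='eth0']/description".
 */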
862
863 /* Find the northbound node associated to the data path. */
864 nb_node = nb_node_find(xpath);
865 if (!nb_node) {
866 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
867 "%s: unknown data path: %s", __func__, xpath);
868 if (error)
869 *error = true;
870 continue;
871 }
872 /* Reject edits that target list key nodes. */
873 if (!nb_is_operation_allowed(nb_node, change)) {
874 zlog_err("xpath %s points to a list key node", xpath);
875 if (error)
876 *error = true;
877 break;
878 }
879
880 /* If the value is not set, get the default if it exists. */
881 if (change->value == NULL)
882 change->value = yang_snode_get_default(nb_node->snode);
883 data = yang_data_new(xpath, change->value);
884
885 /*
886 * Ignore "not found" errors when editing the candidate
887 * configuration.
888 */
889 ret = nb_candidate_edit(candidate_config, nb_node,
890 change->operation, xpath, NULL, data);
891 yang_data_free(data);
892 if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
893 flog_warn(
894 EC_LIB_NB_CANDIDATE_EDIT_ERROR,
895 "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
896 __func__, nb_operation_name(change->operation),
897 xpath);
898 if (error)
899 *error = true;
900 continue;
901 }
902 nb_update_candidate_changes(candidate_config, change, &seq);
903 }
904
905 if (error && *error) {
906 char buf[BUFSIZ];
907
908 /*
909 * Failure to edit the candidate configuration should never
910 * happen in practice, unless there's a bug in the code. When
911 * that happens, log the error but otherwise ignore it.
912 */
913 snprintf(err_buf, err_bufsize,
914 "%% Failed to edit configuration.\n\n%s",
915 yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
916 }
917 }
918
919 bool nb_candidate_needs_update(const struct nb_config *candidate)
920 {
921 if (candidate->version < running_config->version)
922 return true;
923
924 return false;
925 }
926
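/*
 * Rebase the candidate onto the current running configuration: duplicate
 * the running config, merge the candidate on top of it and make the result
 * the new candidate.
 */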
927 int nb_candidate_update(struct nb_config *candidate)
928 {
929 struct nb_config *updated_config;
930
931 updated_config = nb_config_dup(running_config);
932 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
933 return NB_ERR;
934
935 nb_config_replace(candidate, updated_config, false);
936
937 return NB_OK;
938 }
939
940 /*
941 * Perform YANG syntactic and semantic validation.
942 *
943 * WARNING: lyd_validate() can change the configuration as part of the
944 * validation process.
945 */
946 int nb_candidate_validate_yang(struct nb_config *candidate, bool no_state,
947 char *errmsg, size_t errmsg_len)
948 {
949 if (lyd_validate_all(&candidate->dnode, ly_native_ctx,
950 no_state ? LYD_VALIDATE_NO_STATE
951 : LYD_VALIDATE_PRESENT,
952 NULL) != 0) {
953 yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
954 return NB_ERR_VALIDATION;
955 }
956
957 return NB_OK;
958 }
959
960 /* Perform code-level validation using the northbound callbacks. */
961 int nb_candidate_validate_code(struct nb_context *context,
962 struct nb_config *candidate,
963 struct nb_config_cbs *changes, char *errmsg,
964 size_t errmsg_len)
965 {
966 struct nb_config_cb *cb;
967 struct lyd_node *root, *child;
968 int ret;
969
970 /* First validate the candidate as a whole. */
971 LY_LIST_FOR (candidate->dnode, root) {
972 LYD_TREE_DFS_BEGIN (root, child) {
973 struct nb_node *nb_node;
974
975 nb_node = child->schema->priv;
976 if (!nb_node || !nb_node->cbs.pre_validate)
977 goto next;
978
979 ret = nb_callback_pre_validate(context, nb_node, child,
980 errmsg, errmsg_len);
981 if (ret != NB_OK)
982 return NB_ERR_VALIDATION;
983
984 next:
985 LYD_TREE_DFS_END(root, child);
986 }
987 }
988
989 /* Now validate the configuration changes. */
990 RB_FOREACH (cb, nb_config_cbs, changes) {
991 struct nb_config_change *change = (struct nb_config_change *)cb;
992
993 ret = nb_callback_configuration(context, NB_EV_VALIDATE, change,
994 errmsg, errmsg_len);
995 if (ret != NB_OK)
996 return NB_ERR_VALIDATION;
997 }
998
999 return NB_OK;
1000 }
1001
1002 int nb_candidate_diff_and_validate_yang(struct nb_context *context,
1003 struct nb_config *candidate,
1004 struct nb_config_cbs *changes,
1005 char *errmsg, size_t errmsg_len)
1006 {
1007 if (nb_candidate_validate_yang(candidate, true, errmsg,
1008 errmsg_len) != NB_OK)
1009 return NB_ERR_VALIDATION;
1010
1011 RB_INIT(nb_config_cbs, changes);
1012 nb_config_diff(running_config, candidate, changes);
1013
1014 return NB_OK;
1015 }
1016
1017 int nb_candidate_validate(struct nb_context *context,
1018 struct nb_config *candidate, char *errmsg,
1019 size_t errmsg_len)
1020 {
1021 struct nb_config_cbs changes;
1022 int ret;
1023
1024 ret = nb_candidate_diff_and_validate_yang(context, candidate, &changes,
1025 errmsg, errmsg_len);
1026 if (ret != NB_OK)
1027 return ret;
1028
1029 ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
1030 errmsg_len);
1031 nb_config_diff_del_changes(&changes);
1032
1033 return ret;
1034 }
1035
1036 int nb_candidate_commit_prepare(struct nb_context context,
1037 struct nb_config *candidate,
1038 const char *comment,
1039 struct nb_transaction **transaction,
1040 bool skip_validate, bool ignore_zero_change,
1041 char *errmsg, size_t errmsg_len)
1042 {
1043 struct nb_config_cbs changes;
1044
1045 if (!skip_validate &&
1046 nb_candidate_validate_yang(candidate, true, errmsg, errmsg_len) !=
1047 NB_OK) {
1048 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
1049 "%s: failed to validate candidate configuration",
1050 __func__);
1051 return NB_ERR_VALIDATION;
1052 }
1053
1054 RB_INIT(nb_config_cbs, &changes);
1055 nb_config_diff(running_config, candidate, &changes);
1056 if (!ignore_zero_change && RB_EMPTY(nb_config_cbs, &changes)) {
1057 snprintf(
1058 errmsg, errmsg_len,
1059 "No changes to apply were found during preparation phase");
1060 return NB_ERR_NO_CHANGES;
1061 }
1062
1063 if (!skip_validate &&
1064 nb_candidate_validate_code(&context, candidate, &changes, errmsg,
1065 errmsg_len) != NB_OK) {
1066 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
1067 "%s: failed to validate candidate configuration",
1068 __func__);
1069 nb_config_diff_del_changes(&changes);
1070 return NB_ERR_VALIDATION;
1071 }
1072
1073 /*
1074 * Re-use an existing transaction if provided. Else allocate a new one.
1075 */
1076 if (!*transaction)
1077 *transaction = nb_transaction_new(context, candidate, &changes,
1078 comment, errmsg, errmsg_len);
1079 if (*transaction == NULL) {
1080 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
1081 "%s: failed to create transaction: %s", __func__,
1082 errmsg);
1083 nb_config_diff_del_changes(&changes);
1084 return NB_ERR_LOCKED;
1085 }
1086
1087 return nb_transaction_process(NB_EV_PREPARE, *transaction, errmsg,
1088 errmsg_len);
1089 }
1090
1091 void nb_candidate_commit_abort(struct nb_transaction *transaction, char *errmsg,
1092 size_t errmsg_len)
1093 {
1094 (void)nb_transaction_process(NB_EV_ABORT, transaction, errmsg,
1095 errmsg_len);
1096 nb_transaction_free(transaction);
1097 }
1098
1099 void nb_candidate_commit_apply(struct nb_transaction *transaction,
1100 bool save_transaction, uint32_t *transaction_id,
1101 char *errmsg, size_t errmsg_len)
1102 {
1103 (void)nb_transaction_process(NB_EV_APPLY, transaction, errmsg,
1104 errmsg_len);
1105 nb_transaction_apply_finish(transaction, errmsg, errmsg_len);
1106
1107 /* Replace running by candidate. */
1108 transaction->config->version++;
1109 nb_config_replace(running_config, transaction->config, true);
1110
1111 /* Record transaction. */
1112 if (save_transaction && nb_db_enabled
1113 && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
1114 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
1115 "%s: failed to record transaction", __func__);
1116
1117 nb_transaction_free(transaction);
1118 }
1119
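/*
 * Full two-phase commit: run the 'prepare' phase for all changes and, on
 * success, apply them and promote the candidate into the new running
 * configuration; on failure, abort the transaction so daemons can release
 * any resources allocated during 'prepare'.
 */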
1120 int nb_candidate_commit(struct nb_context context, struct nb_config *candidate,
1121 bool save_transaction, const char *comment,
1122 uint32_t *transaction_id, char *errmsg,
1123 size_t errmsg_len)
1124 {
1125 struct nb_transaction *transaction = NULL;
1126 int ret;
1127
1128 ret = nb_candidate_commit_prepare(context, candidate, comment,
1129 &transaction, false, false, errmsg,
1130 errmsg_len);
1131 /*
1132 * Apply the changes if the preparation phase succeeded. Otherwise abort
1133 * the transaction.
1134 */
1135 if (ret == NB_OK)
1136 nb_candidate_commit_apply(transaction, save_transaction,
1137 transaction_id, errmsg, errmsg_len);
1138 else if (transaction != NULL)
1139 nb_candidate_commit_abort(transaction, errmsg, errmsg_len);
1140
1141 return ret;
1142 }
1143
1144 int nb_running_lock(enum nb_client client, const void *user)
1145 {
1146 int ret = -1;
1147
1148 frr_with_mutex (&running_config_mgmt_lock.mtx) {
1149 if (!running_config_mgmt_lock.locked) {
1150 running_config_mgmt_lock.locked = true;
1151 running_config_mgmt_lock.owner_client = client;
1152 running_config_mgmt_lock.owner_user = user;
1153 ret = 0;
1154 }
1155 }
1156
1157 return ret;
1158 }
1159
1160 int nb_running_unlock(enum nb_client client, const void *user)
1161 {
1162 int ret = -1;
1163
1164 frr_with_mutex (&running_config_mgmt_lock.mtx) {
1165 if (running_config_mgmt_lock.locked
1166 && running_config_mgmt_lock.owner_client == client
1167 && running_config_mgmt_lock.owner_user == user) {
1168 running_config_mgmt_lock.locked = false;
1169 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
1170 running_config_mgmt_lock.owner_user = NULL;
1171 ret = 0;
1172 }
1173 }
1174
1175 return ret;
1176 }
1177
1178 int nb_running_lock_check(enum nb_client client, const void *user)
1179 {
1180 int ret = -1;
1181
1182 frr_with_mutex (&running_config_mgmt_lock.mtx) {
1183 if (!running_config_mgmt_lock.locked
1184 || (running_config_mgmt_lock.owner_client == client
1185 && running_config_mgmt_lock.owner_user == user))
1186 ret = 0;
1187 }
1188
1189 return ret;
1190 }
1191
1192 static void nb_log_config_callback(const enum nb_event event,
1193 enum nb_operation operation,
1194 const struct lyd_node *dnode)
1195 {
1196 const char *value;
1197 char xpath[XPATH_MAXLEN];
1198
1199 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
1200 return;
1201
1202 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1203 if (yang_snode_is_typeless_data(dnode->schema))
1204 value = "(none)";
1205 else
1206 value = yang_dnode_get_string(dnode, NULL);
1207
1208 zlog_debug(
1209 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
1210 nb_event_name(event), nb_operation_name(operation), xpath,
1211 value);
1212 }
1213
1214 static int nb_callback_create(struct nb_context *context,
1215 const struct nb_node *nb_node,
1216 enum nb_event event, const struct lyd_node *dnode,
1217 union nb_resource *resource, char *errmsg,
1218 size_t errmsg_len)
1219 {
1220 struct nb_cb_create_args args = {};
1221 bool unexpected_error = false;
1222 int ret;
1223
1224 assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
1225
1226 nb_log_config_callback(event, NB_OP_CREATE, dnode);
1227
1228 args.context = context;
1229 args.event = event;
1230 args.dnode = dnode;
1231 args.resource = resource;
1232 args.errmsg = errmsg;
1233 args.errmsg_len = errmsg_len;
1234 ret = nb_node->cbs.create(&args);
1235
1236 /* Detect and log unexpected errors. */
1237 switch (ret) {
1238 case NB_OK:
1239 case NB_ERR:
1240 break;
1241 case NB_ERR_VALIDATION:
1242 if (event != NB_EV_VALIDATE)
1243 unexpected_error = true;
1244 break;
1245 case NB_ERR_RESOURCE:
1246 if (event != NB_EV_PREPARE)
1247 unexpected_error = true;
1248 break;
1249 case NB_ERR_INCONSISTENCY:
1250 if (event == NB_EV_VALIDATE)
1251 unexpected_error = true;
1252 break;
1253 default:
1254 unexpected_error = true;
1255 break;
1256 }
1257 if (unexpected_error)
1258 DEBUGD(&nb_dbg_cbs_config,
1259 "northbound callback: unexpected return value: %s",
1260 nb_err_name(ret));
1261
1262 return ret;
1263 }
1264
1265 static int nb_callback_modify(struct nb_context *context,
1266 const struct nb_node *nb_node,
1267 enum nb_event event, const struct lyd_node *dnode,
1268 union nb_resource *resource, char *errmsg,
1269 size_t errmsg_len)
1270 {
1271 struct nb_cb_modify_args args = {};
1272 bool unexpected_error = false;
1273 int ret;
1274
1275 assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
1276
1277 nb_log_config_callback(event, NB_OP_MODIFY, dnode);
1278
1279 args.context = context;
1280 args.event = event;
1281 args.dnode = dnode;
1282 args.resource = resource;
1283 args.errmsg = errmsg;
1284 args.errmsg_len = errmsg_len;
1285 ret = nb_node->cbs.modify(&args);
1286
1287 /* Detect and log unexpected errors. */
1288 switch (ret) {
1289 case NB_OK:
1290 case NB_ERR:
1291 break;
1292 case NB_ERR_VALIDATION:
1293 if (event != NB_EV_VALIDATE)
1294 unexpected_error = true;
1295 break;
1296 case NB_ERR_RESOURCE:
1297 if (event != NB_EV_PREPARE)
1298 unexpected_error = true;
1299 break;
1300 case NB_ERR_INCONSISTENCY:
1301 if (event == NB_EV_VALIDATE)
1302 unexpected_error = true;
1303 break;
1304 default:
1305 unexpected_error = true;
1306 break;
1307 }
1308 if (unexpected_error)
1309 DEBUGD(&nb_dbg_cbs_config,
1310 "northbound callback: unexpected return value: %s",
1311 nb_err_name(ret));
1312
1313 return ret;
1314 }
1315
1316 static int nb_callback_destroy(struct nb_context *context,
1317 const struct nb_node *nb_node,
1318 enum nb_event event,
1319 const struct lyd_node *dnode, char *errmsg,
1320 size_t errmsg_len)
1321 {
1322 struct nb_cb_destroy_args args = {};
1323 bool unexpected_error = false;
1324 int ret;
1325
1326 assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
1327
1328 nb_log_config_callback(event, NB_OP_DESTROY, dnode);
1329
1330 args.context = context;
1331 args.event = event;
1332 args.dnode = dnode;
1333 args.errmsg = errmsg;
1334 args.errmsg_len = errmsg_len;
1335 ret = nb_node->cbs.destroy(&args);
1336
1337 /* Detect and log unexpected errors. */
1338 switch (ret) {
1339 case NB_OK:
1340 case NB_ERR:
1341 break;
1342 case NB_ERR_VALIDATION:
1343 if (event != NB_EV_VALIDATE)
1344 unexpected_error = true;
1345 break;
1346 case NB_ERR_INCONSISTENCY:
1347 if (event == NB_EV_VALIDATE)
1348 unexpected_error = true;
1349 break;
1350 default:
1351 unexpected_error = true;
1352 break;
1353 }
1354 if (unexpected_error)
1355 DEBUGD(&nb_dbg_cbs_config,
1356 "northbound callback: unexpected return value: %s",
1357 nb_err_name(ret));
1358
1359 return ret;
1360 }
1361
1362 static int nb_callback_move(struct nb_context *context,
1363 const struct nb_node *nb_node, enum nb_event event,
1364 const struct lyd_node *dnode, char *errmsg,
1365 size_t errmsg_len)
1366 {
1367 struct nb_cb_move_args args = {};
1368 bool unexpected_error = false;
1369 int ret;
1370
1371 assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
1372
1373 nb_log_config_callback(event, NB_OP_MOVE, dnode);
1374
1375 args.context = context;
1376 args.event = event;
1377 args.dnode = dnode;
1378 args.errmsg = errmsg;
1379 args.errmsg_len = errmsg_len;
1380 ret = nb_node->cbs.move(&args);
1381
1382 /* Detect and log unexpected errors. */
1383 switch (ret) {
1384 case NB_OK:
1385 case NB_ERR:
1386 break;
1387 case NB_ERR_VALIDATION:
1388 if (event != NB_EV_VALIDATE)
1389 unexpected_error = true;
1390 break;
1391 case NB_ERR_INCONSISTENCY:
1392 if (event == NB_EV_VALIDATE)
1393 unexpected_error = true;
1394 break;
1395 default:
1396 unexpected_error = true;
1397 break;
1398 }
1399 if (unexpected_error)
1400 DEBUGD(&nb_dbg_cbs_config,
1401 "northbound callback: unexpected return value: %s",
1402 nb_err_name(ret));
1403
1404 return ret;
1405 }
1406
1407 static int nb_callback_pre_validate(struct nb_context *context,
1408 const struct nb_node *nb_node,
1409 const struct lyd_node *dnode, char *errmsg,
1410 size_t errmsg_len)
1411 {
1412 struct nb_cb_pre_validate_args args = {};
1413 bool unexpected_error = false;
1414 int ret;
1415
1416 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1417 return 0;
1418
1419 nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);
1420
1421 args.dnode = dnode;
1422 args.errmsg = errmsg;
1423 args.errmsg_len = errmsg_len;
1424 ret = nb_node->cbs.pre_validate(&args);
1425
1426 /* Detect and log unexpected errors. */
1427 switch (ret) {
1428 case NB_OK:
1429 case NB_ERR_VALIDATION:
1430 break;
1431 default:
1432 unexpected_error = true;
1433 break;
1434 }
1435 if (unexpected_error)
1436 DEBUGD(&nb_dbg_cbs_config,
1437 "northbound callback: unexpected return value: %s",
1438 nb_err_name(ret));
1439
1440 return ret;
1441 }
1442
1443 static void nb_callback_apply_finish(struct nb_context *context,
1444 const struct nb_node *nb_node,
1445 const struct lyd_node *dnode, char *errmsg,
1446 size_t errmsg_len)
1447 {
1448 struct nb_cb_apply_finish_args args = {};
1449
1450 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1451 return;
1452
1453 nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);
1454
1455 args.context = context;
1456 args.dnode = dnode;
1457 args.errmsg = errmsg;
1458 args.errmsg_len = errmsg_len;
1459 nb_node->cbs.apply_finish(&args);
1460 }
1461
1462 struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
1463 const char *xpath,
1464 const void *list_entry)
1465 {
1466 struct nb_cb_get_elem_args args = {};
1467
1468 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1469 return NULL;
1470
1471 DEBUGD(&nb_dbg_cbs_state,
1472 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1473 xpath, list_entry);
1474
1475 args.xpath = xpath;
1476 args.list_entry = list_entry;
1477 return nb_node->cbs.get_elem(&args);
1478 }
1479
1480 const void *nb_callback_get_next(const struct nb_node *nb_node,
1481 const void *parent_list_entry,
1482 const void *list_entry)
1483 {
1484 struct nb_cb_get_next_args args = {};
1485
1486 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1487 return NULL;
1488
1489 DEBUGD(&nb_dbg_cbs_state,
1490 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1491 nb_node->xpath, parent_list_entry, list_entry);
1492
1493 args.parent_list_entry = parent_list_entry;
1494 args.list_entry = list_entry;
1495 return nb_node->cbs.get_next(&args);
1496 }
1497
1498 int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
1499 struct yang_list_keys *keys)
1500 {
1501 struct nb_cb_get_keys_args args = {};
1502
1503 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1504 return 0;
1505
1506 DEBUGD(&nb_dbg_cbs_state,
1507 "northbound callback (get_keys): node [%s] list_entry [%p]",
1508 nb_node->xpath, list_entry);
1509
1510 args.list_entry = list_entry;
1511 args.keys = keys;
1512 return nb_node->cbs.get_keys(&args);
1513 }
1514
1515 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
1516 const void *parent_list_entry,
1517 const struct yang_list_keys *keys)
1518 {
1519 struct nb_cb_lookup_entry_args args = {};
1520
1521 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1522 return NULL;
1523
1524 DEBUGD(&nb_dbg_cbs_state,
1525 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1526 nb_node->xpath, parent_list_entry);
1527
1528 args.parent_list_entry = parent_list_entry;
1529 args.keys = keys;
1530 return nb_node->cbs.lookup_entry(&args);
1531 }
1532
1533 int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
1534 const struct list *input, struct list *output, char *errmsg,
1535 size_t errmsg_len)
1536 {
1537 struct nb_cb_rpc_args args = {};
1538
1539 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1540 return 0;
1541
1542 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
1543
1544 args.xpath = xpath;
1545 args.input = input;
1546 args.output = output;
1547 args.errmsg = errmsg;
1548 args.errmsg_len = errmsg_len;
1549 return nb_node->cbs.rpc(&args);
1550 }
1551
1552 /*
1553 * Call the northbound configuration callback associated to a given
1554 * configuration change.
1555 */
1556 static int nb_callback_configuration(struct nb_context *context,
1557 const enum nb_event event,
1558 struct nb_config_change *change,
1559 char *errmsg, size_t errmsg_len)
1560 {
1561 enum nb_operation operation = change->cb.operation;
1562 char xpath[XPATH_MAXLEN];
1563 const struct nb_node *nb_node = change->cb.nb_node;
1564 const struct lyd_node *dnode = change->cb.dnode;
1565 union nb_resource *resource;
1566 int ret = NB_ERR;
1567
1568 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
1569 return NB_OK;
1570
1571 if (event == NB_EV_VALIDATE)
1572 resource = NULL;
1573 else
1574 resource = &change->resource;
1575
1576 switch (operation) {
1577 case NB_OP_CREATE:
1578 ret = nb_callback_create(context, nb_node, event, dnode,
1579 resource, errmsg, errmsg_len);
1580 break;
1581 case NB_OP_MODIFY:
1582 ret = nb_callback_modify(context, nb_node, event, dnode,
1583 resource, errmsg, errmsg_len);
1584 break;
1585 case NB_OP_DESTROY:
1586 ret = nb_callback_destroy(context, nb_node, event, dnode,
1587 errmsg, errmsg_len);
1588 break;
1589 case NB_OP_MOVE:
1590 ret = nb_callback_move(context, nb_node, event, dnode, errmsg,
1591 errmsg_len);
1592 break;
1593 case NB_OP_PRE_VALIDATE:
1594 case NB_OP_APPLY_FINISH:
1595 case NB_OP_GET_ELEM:
1596 case NB_OP_GET_NEXT:
1597 case NB_OP_GET_KEYS:
1598 case NB_OP_LOOKUP_ENTRY:
1599 case NB_OP_RPC:
1600 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1601 flog_err(EC_LIB_DEVELOPMENT,
1602 "%s: unknown operation (%u) [xpath %s]", __func__,
1603 operation, xpath);
1604 exit(1);
1605 }
1606
1607 if (ret != NB_OK) {
1608 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1609
1610 switch (event) {
1611 case NB_EV_VALIDATE:
1612 flog_warn(EC_LIB_NB_CB_CONFIG_VALIDATE,
1613 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1614 nb_err_name(ret), nb_event_name(event),
1615 nb_operation_name(operation), xpath,
1616 errmsg[0] ? " message: " : "", errmsg);
1617 break;
1618 case NB_EV_PREPARE:
1619 flog_warn(EC_LIB_NB_CB_CONFIG_PREPARE,
1620 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1621 nb_err_name(ret), nb_event_name(event),
1622 nb_operation_name(operation), xpath,
1623 errmsg[0] ? " message: " : "", errmsg);
1624 break;
1625 case NB_EV_ABORT:
1626 flog_warn(EC_LIB_NB_CB_CONFIG_ABORT,
1627 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1628 nb_err_name(ret), nb_event_name(event),
1629 nb_operation_name(operation), xpath,
1630 errmsg[0] ? " message: " : "", errmsg);
1631 break;
1632 case NB_EV_APPLY:
1633 flog_err(EC_LIB_NB_CB_CONFIG_APPLY,
1634 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]%s%s",
1635 nb_err_name(ret), nb_event_name(event),
1636 nb_operation_name(operation), xpath,
1637 errmsg[0] ? " message: " : "", errmsg);
1638 break;
1639 default:
1640 flog_err(EC_LIB_DEVELOPMENT,
1641 "%s: unknown event (%u) [xpath %s]", __func__,
1642 event, xpath);
1643 exit(1);
1644 }
1645 }
1646
1647 return ret;
1648 }
1649
1650 static struct nb_transaction *
1651 nb_transaction_new(struct nb_context context, struct nb_config *config,
1652 struct nb_config_cbs *changes, const char *comment,
1653 char *errmsg, size_t errmsg_len)
1654 {
1655 struct nb_transaction *transaction;
1656
1657 if (nb_running_lock_check(context.client, context.user)) {
1658 strlcpy(errmsg,
1659 "running configuration is locked by another client",
1660 errmsg_len);
1661 return NULL;
1662 }
1663
1664 if (transaction_in_progress) {
1665 strlcpy(errmsg,
1666 "there's already another transaction in progress",
1667 errmsg_len);
1668 return NULL;
1669 }
1670 transaction_in_progress = true;
1671
1672 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
1673 transaction->context = context;
1674 if (comment)
1675 strlcpy(transaction->comment, comment,
1676 sizeof(transaction->comment));
1677 transaction->config = config;
1678 transaction->changes = *changes;
1679
1680 return transaction;
1681 }
1682
1683 static void nb_transaction_free(struct nb_transaction *transaction)
1684 {
1685 nb_config_diff_del_changes(&transaction->changes);
1686 XFREE(MTYPE_TMP, transaction);
1687 transaction_in_progress = false;
1688 }
1689
1690 /* Process all configuration changes associated to a transaction. */
1691 static int nb_transaction_process(enum nb_event event,
1692 struct nb_transaction *transaction,
1693 char *errmsg, size_t errmsg_len)
1694 {
1695 struct nb_config_cb *cb;
1696
1697 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1698 struct nb_config_change *change = (struct nb_config_change *)cb;
1699 int ret;
1700
1701 /*
1702 * Only try to release resources that were allocated
1703 * successfully.
1704 */
1705 if (event == NB_EV_ABORT && !change->prepare_ok)
1706 break;
1707
1708 /* Call the appropriate callback. */
1709 ret = nb_callback_configuration(&transaction->context, event,
1710 change, errmsg, errmsg_len);
1711 switch (event) {
1712 case NB_EV_PREPARE:
1713 if (ret != NB_OK)
1714 return ret;
1715 change->prepare_ok = true;
1716 break;
1717 case NB_EV_ABORT:
1718 case NB_EV_APPLY:
1719 /*
1720 * At this point it's not possible to reject the
1721 * transaction anymore, so any failure here can lead to
1722 * inconsistencies and should be treated as a bug.
1723 * Operations prone to errors, like validations and
1724 * resource allocations, should be performed during the
1725 * 'prepare' phase.
1726 */
1727 break;
1728 case NB_EV_VALIDATE:
1729 break;
1730 }
1731 }
1732
1733 return NB_OK;
1734 }
1735
1736 static struct nb_config_cb *
1737 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
1738 const struct lyd_node *dnode)
1739 {
1740 struct nb_config_cb *cb;
1741
1742 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1743 cb->nb_node = nb_node;
1744 cb->dnode = dnode;
1745 RB_INSERT(nb_config_cbs, cbs, cb);
1746
1747 return cb;
1748 }
1749
1750 static struct nb_config_cb *
1751 nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
1752 const struct nb_node *nb_node,
1753 const struct lyd_node *dnode)
1754 {
1755 struct nb_config_cb s;
1756
1757 s.seq = 0;
1758 s.nb_node = nb_node;
1759 s.dnode = dnode;
1760 return RB_FIND(nb_config_cbs, cbs, &s);
1761 }
1762
1763 /* Call the 'apply_finish' callbacks. */
1764 static void nb_transaction_apply_finish(struct nb_transaction *transaction,
1765 char *errmsg, size_t errmsg_len)
1766 {
1767 struct nb_config_cbs cbs;
1768 struct nb_config_cb *cb;
1769
1770 /* Initialize tree of 'apply_finish' callbacks. */
1771 RB_INIT(nb_config_cbs, &cbs);
1772
1773 /* Identify the 'apply_finish' callbacks that need to be called. */
1774 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1775 struct nb_config_change *change = (struct nb_config_change *)cb;
1776 const struct lyd_node *dnode = change->cb.dnode;
1777
1778 /*
1779 * Iterate up to the root of the data tree. When a node is being
1780 * deleted, skip its 'apply_finish' callback if one is defined
1781 * (the 'apply_finish' callbacks from the node ancestors should
1782 * be called though).
1783 */
1784 if (change->cb.operation == NB_OP_DESTROY) {
1785 char xpath[XPATH_MAXLEN];
1786
1787 dnode = lyd_parent(dnode);
1788 if (!dnode)
1789 break;
1790
1791 /*
1792 * The dnode from 'delete' callbacks points to elements
1793 * from the running configuration. Use yang_dnode_get()
1794 * to get the corresponding dnode from the candidate
1795 * configuration that is being committed.
1796 */
1797 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1798 dnode = yang_dnode_get(transaction->config->dnode,
1799 xpath);
1800 }
1801 while (dnode) {
1802 struct nb_node *nb_node;
1803
1804 nb_node = dnode->schema->priv;
1805 if (!nb_node || !nb_node->cbs.apply_finish)
1806 goto next;
1807
1808 /*
1809 * Don't call the callback more than once for the same
1810 * data node.
1811 */
1812 if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
1813 goto next;
1814
1815 nb_apply_finish_cb_new(&cbs, nb_node, dnode);
1816
1817 next:
1818 dnode = lyd_parent(dnode);
1819 }
1820 }
1821
1822 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1823 RB_FOREACH (cb, nb_config_cbs, &cbs)
1824 nb_callback_apply_finish(&transaction->context, cb->nb_node,
1825 cb->dnode, errmsg, errmsg_len);
1826
1827 /* Release memory. */
1828 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1829 cb = RB_ROOT(nb_config_cbs, &cbs);
1830 RB_REMOVE(nb_config_cbs, &cbs, cb);
1831 XFREE(MTYPE_TMP, cb);
1832 }
1833 }
1834
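/*
 * Operational-data iteration helpers: walk the schema tree and, for lists
 * and leaf-lists, repeatedly call 'get_next()' to obtain entries,
 * 'get_keys()' to build each list entry's XPath and 'get_elem()' to fetch
 * the actual values, invoking 'cb' for every piece of state data found.
 */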
1835 static int nb_oper_data_iter_children(const struct lysc_node *snode,
1836 const char *xpath, const void *list_entry,
1837 const struct yang_list_keys *list_keys,
1838 struct yang_translator *translator,
1839 bool first, uint32_t flags,
1840 nb_oper_data_cb cb, void *arg)
1841 {
1842 const struct lysc_node *child;
1843
1844 LY_LIST_FOR (lysc_node_child(snode), child) {
1845 int ret;
1846
1847 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1848 list_keys, translator, false,
1849 flags, cb, arg);
1850 if (ret != NB_OK)
1851 return ret;
1852 }
1853
1854 return NB_OK;
1855 }
1856
1857 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1858 const char *xpath, const void *list_entry,
1859 const struct yang_list_keys *list_keys,
1860 struct yang_translator *translator,
1861 uint32_t flags, nb_oper_data_cb cb, void *arg)
1862 {
1863 struct yang_data *data;
1864
1865 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1866 return NB_OK;
1867
1868 /* Ignore list keys. */
1869 if (lysc_is_key(nb_node->snode))
1870 return NB_OK;
1871
1872 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1873 if (data == NULL)
1874 /* Leaf of type "empty" is not present. */
1875 return NB_OK;
1876
1877 return (*cb)(nb_node->snode, translator, data, arg);
1878 }
1879
1880 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1881 const char *xpath,
1882 const void *list_entry,
1883 const struct yang_list_keys *list_keys,
1884 struct yang_translator *translator,
1885 uint32_t flags, nb_oper_data_cb cb,
1886 void *arg)
1887 {
1888 const struct lysc_node *snode = nb_node->snode;
1889
1890 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1891 return NB_OK;
1892
1893 /* Read-only presence containers. */
1894 if (nb_node->cbs.get_elem) {
1895 struct yang_data *data;
1896 int ret;
1897
1898 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1899 if (data == NULL)
1900 /* Presence container is not present. */
1901 return NB_OK;
1902
1903 ret = (*cb)(snode, translator, data, arg);
1904 if (ret != NB_OK)
1905 return ret;
1906 }
1907
1908 /* Read-write presence containers. */
1909 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W)) {
1910 struct lysc_node_container *scontainer;
1911
1912 scontainer = (struct lysc_node_container *)snode;
1913 if (CHECK_FLAG(scontainer->flags, LYS_PRESENCE)
1914 && !yang_dnode_get(running_config->dnode, xpath))
1915 return NB_OK;
1916 }
1917
1918 /* Iterate over the child nodes. */
1919 return nb_oper_data_iter_children(snode, xpath, list_entry, list_keys,
1920 translator, false, flags, cb, arg);
1921 }
1922
1923 static int
1924 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1925 const void *parent_list_entry,
1926 const struct yang_list_keys *parent_list_keys,
1927 struct yang_translator *translator, uint32_t flags,
1928 nb_oper_data_cb cb, void *arg)
1929 {
1930 const void *list_entry = NULL;
1931
1932 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1933 return NB_OK;
1934
1935 do {
1936 struct yang_data *data;
1937 int ret;
1938
1939 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1940 list_entry);
1941 if (!list_entry)
1942 /* End of the list. */
1943 break;
1944
1945 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1946 if (data == NULL)
1947 continue;
1948
1949 ret = (*cb)(nb_node->snode, translator, data, arg);
1950 if (ret != NB_OK)
1951 return ret;
1952 } while (list_entry);
1953
1954 return NB_OK;
1955 }
1956
1957 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1958 const char *xpath_list,
1959 const void *parent_list_entry,
1960 const struct yang_list_keys *parent_list_keys,
1961 struct yang_translator *translator,
1962 uint32_t flags, nb_oper_data_cb cb, void *arg)
1963 {
1964 const struct lysc_node *snode = nb_node->snode;
1965 const void *list_entry = NULL;
1966 uint32_t position = 1;
1967
1968 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1969 return NB_OK;
1970
1971 /* Iterate over all list entries. */
1972 do {
1973 const struct lysc_node_leaf *skey;
1974 struct yang_list_keys list_keys = {};
1975 char xpath[XPATH_MAXLEN * 2];
1976 int ret;
1977
1978 /* Obtain list entry. */
1979 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1980 list_entry);
1981 if (!list_entry)
1982 /* End of the list. */
1983 break;
1984
1985 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1986 /* Obtain the list entry keys. */
1987 if (nb_callback_get_keys(nb_node, list_entry,
1988 &list_keys)
1989 != NB_OK) {
1990 flog_warn(EC_LIB_NB_CB_STATE,
1991 "%s: failed to get list keys",
1992 __func__);
1993 return NB_ERR;
1994 }
1995
1996 /* Build XPath of the list entry. */
1997 strlcpy(xpath, xpath_list, sizeof(xpath));
1998 unsigned int i = 0;
1999 LY_FOR_KEYS (snode, skey) {
2000 assert(i < list_keys.num);
2001 snprintf(xpath + strlen(xpath),
2002 sizeof(xpath) - strlen(xpath),
2003 "[%s='%s']", skey->name,
2004 list_keys.key[i]);
2005 i++;
2006 }
2007 assert(i == list_keys.num);
2008 } else {
2009 /*
2010 * Keyless list - build XPath using a positional index.
2011 */
2012 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
2013 position);
2014 position++;
2015 }
2016
2017 /* Iterate over the child nodes. */
2018 ret = nb_oper_data_iter_children(
2019 nb_node->snode, xpath, list_entry, &list_keys,
2020 translator, false, flags, cb, arg);
2021 if (ret != NB_OK)
2022 return ret;
2023 } while (list_entry);
2024
2025 return NB_OK;
2026 }
2027
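/*
 * Iterate over a single schema node, dispatching on its type. The parent
 * XPath is extended with this node's name, prefixed with the module name
 * when the node comes from an augmenting module.
 */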
2028 static int nb_oper_data_iter_node(const struct lysc_node *snode,
2029 const char *xpath_parent,
2030 const void *list_entry,
2031 const struct yang_list_keys *list_keys,
2032 struct yang_translator *translator,
2033 bool first, uint32_t flags,
2034 nb_oper_data_cb cb, void *arg)
2035 {
2036 struct nb_node *nb_node;
2037 char xpath[XPATH_MAXLEN];
2038 int ret = NB_OK;
2039
2040 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
2041 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
2042 return NB_OK;
2043
2044 /* Update XPath. */
2045 strlcpy(xpath, xpath_parent, sizeof(xpath));
2046 if (!first && snode->nodetype != LYS_USES) {
2047 struct lysc_node *parent;
2048
2049 /* Get the real parent. */
2050 parent = snode->parent;
2051
2052 /*
2053 * When necessary, include the namespace of the augmenting
2054 * module.
2055 */
2056 if (parent && parent->module != snode->module)
2057 snprintf(xpath + strlen(xpath),
2058 sizeof(xpath) - strlen(xpath), "/%s:%s",
2059 snode->module->name, snode->name);
2060 else
2061 snprintf(xpath + strlen(xpath),
2062 sizeof(xpath) - strlen(xpath), "/%s",
2063 snode->name);
2064 }
2065
2066 nb_node = snode->priv;
2067 switch (snode->nodetype) {
2068 case LYS_CONTAINER:
2069 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
2070 list_keys, translator, flags,
2071 cb, arg);
2072 break;
2073 case LYS_LEAF:
2074 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
2075 list_keys, translator, flags, cb,
2076 arg);
2077 break;
2078 case LYS_LEAFLIST:
2079 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
2080 list_keys, translator, flags,
2081 cb, arg);
2082 break;
2083 case LYS_LIST:
2084 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
2085 list_keys, translator, flags, cb,
2086 arg);
2087 break;
2088 case LYS_USES:
2089 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
2090 list_keys, translator, false,
2091 flags, cb, arg);
2092 break;
2093 default:
2094 break;
2095 }
2096
2097 return ret;
2098 }
2099
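/*
 * Iterate over operational data starting at the given XPath, invoking
 * 'cb' for every data node found. The starting node must be a YANG
 * container or list; when the XPath identifies a specific list entry,
 * the lookup_entry callbacks are used to locate it and only that entry
 * is iterated.
 *
 * Illustrative usage only (callback name and XPath are examples, not
 * part of this file):
 *
 *   static int show_cb(const struct lysc_node *snode,
 *                      struct yang_translator *translator,
 *                      struct yang_data *data, void *arg)
 *   {
 *           zlog_debug("%s = %s", data->xpath, data->value);
 *           yang_data_free(data);
 *           return NB_OK;
 *   }
 *
 *   nb_oper_data_iterate("/frr-interface:lib/interface", NULL, 0,
 *                        show_cb, NULL);
 */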
2100 int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
2101 uint32_t flags, nb_oper_data_cb cb, void *arg)
2102 {
2103 struct nb_node *nb_node;
2104 const void *list_entry = NULL;
2105 struct yang_list_keys list_keys;
2106 struct list *list_dnodes;
2107 struct lyd_node *dnode, *dn;
2108 struct listnode *ln;
2109 int ret;
2110
2111 nb_node = nb_node_find(xpath);
2112 if (!nb_node) {
2113 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
2114 "%s: unknown data path: %s", __func__, xpath);
2115 return NB_ERR;
2116 }
2117
2118 /* For now this function works only with containers and lists. */
2119 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
2120 flog_warn(
2121 EC_LIB_NB_OPERATIONAL_DATA,
2122 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
2123 __func__, xpath);
2124 return NB_ERR;
2125 }
2126
2127 /*
2128 * Create a data tree from the XPath so that we can parse the keys of
2129 * all YANG lists (if any).
2130 */
2131
2132 LY_ERR err = lyd_new_path2(NULL, ly_native_ctx, xpath, NULL, 0, 0,
2133 LYD_NEW_PATH_UPDATE, NULL, &dnode);
2134 if (err || !dnode) {
2135 const char *errmsg =
2136 err ? ly_errmsg(ly_native_ctx) : "node not found";
2137 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path2() failed: %s",
2138 __func__, errmsg);
2139 return NB_ERR;
2140 }
2141
2142 /*
2143 * Create a linked list to sort the data nodes starting from the root.
2144 */
2145 list_dnodes = list_new();
2146 for (dn = dnode; dn; dn = lyd_parent(dn)) {
2147 if (dn->schema->nodetype != LYS_LIST || !lyd_child(dn))
2148 continue;
2149 listnode_add_head(list_dnodes, dn);
2150 }
2151 /*
2152 * Use the northbound callbacks to find the list entry pointer
2153 * corresponding to the given XPath.
2154 */
2155 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
2156 struct lyd_node *child;
2157 struct nb_node *nn;
2158 unsigned int n = 0;
2159
2160 /* Obtain the list entry keys. */
2161 memset(&list_keys, 0, sizeof(list_keys));
2162 LY_LIST_FOR (lyd_child(dn), child) {
2163 if (!lysc_is_key(child->schema))
2164 break;
2165 strlcpy(list_keys.key[n],
2166 yang_dnode_get_string(child, NULL),
2167 sizeof(list_keys.key[n]));
2168 n++;
2169 }
2170 list_keys.num = n;
2171 if (list_keys.num != yang_snode_num_keys(dn->schema)) {
2172 list_delete(&list_dnodes);
2173 yang_dnode_free(dnode);
2174 return NB_ERR_NOT_FOUND;
2175 }
2176
2177 /* Find the list entry pointer. */
2178 nn = dn->schema->priv;
2179 if (!nn->cbs.lookup_entry) {
2180 flog_warn(
2181 EC_LIB_NB_OPERATIONAL_DATA,
2182 "%s: data path doesn't support iteration over operational data: %s",
2183 __func__, xpath);
2184 list_delete(&list_dnodes);
2185 yang_dnode_free(dnode);
2186 return NB_ERR;
2187 }
2188
2189 list_entry =
2190 nb_callback_lookup_entry(nn, list_entry, &list_keys);
2191 if (list_entry == NULL) {
2192 list_delete(&list_dnodes);
2193 yang_dnode_free(dnode);
2194 return NB_ERR_NOT_FOUND;
2195 }
2196 }
2197
2198 /* If a list entry was given, iterate over that list entry only. */
2199 if (dnode->schema->nodetype == LYS_LIST && lyd_child(dnode))
2200 ret = nb_oper_data_iter_children(
2201 nb_node->snode, xpath, list_entry, &list_keys,
2202 translator, true, flags, cb, arg);
2203 else
2204 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
2205 &list_keys, translator, true,
2206 flags, cb, arg);
2207
2208 list_delete(&list_dnodes);
2209 yang_dnode_free(dnode);
2210
2211 return ret;
2212 }
2213
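/*
 * Check whether the given northbound operation makes sense for the given
 * schema node, based on its config/state flags and node type (e.g.
 * "create" applies only to writable presence containers, lists,
 * leaf-lists and leafs of type "empty").
 */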
2214 bool nb_operation_is_valid(enum nb_operation operation,
2215 const struct lysc_node *snode)
2216 {
2217 struct nb_node *nb_node = snode->priv;
2218 struct lysc_node_container *scontainer;
2219 struct lysc_node_leaf *sleaf;
2220
2221 switch (operation) {
2222 case NB_OP_CREATE:
2223 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2224 return false;
2225
2226 switch (snode->nodetype) {
2227 case LYS_LEAF:
2228 sleaf = (struct lysc_node_leaf *)snode;
2229 if (sleaf->type->basetype != LY_TYPE_EMPTY)
2230 return false;
2231 break;
2232 case LYS_CONTAINER:
2233 scontainer = (struct lysc_node_container *)snode;
2234 if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
2235 return false;
2236 break;
2237 case LYS_LIST:
2238 case LYS_LEAFLIST:
2239 break;
2240 default:
2241 return false;
2242 }
2243 return true;
2244 case NB_OP_MODIFY:
2245 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2246 return false;
2247
2248 switch (snode->nodetype) {
2249 case LYS_LEAF:
2250 sleaf = (struct lysc_node_leaf *)snode;
2251 if (sleaf->type->basetype == LY_TYPE_EMPTY)
2252 return false;
2253
2254 /* List keys can't be modified. */
2255 if (lysc_is_key(sleaf))
2256 return false;
2257 break;
2258 default:
2259 return false;
2260 }
2261 return true;
2262 case NB_OP_DESTROY:
2263 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2264 return false;
2265
2266 switch (snode->nodetype) {
2267 case LYS_LEAF:
2268 sleaf = (struct lysc_node_leaf *)snode;
2269
2270 /* List keys can't be deleted. */
2271 if (lysc_is_key(sleaf))
2272 return false;
2273
2274 /*
2275 * Only optional leafs can be deleted: leafs under a case or with a
2276 * "when" condition are deletable; mandatory or default-valued leafs are not.
2277 */
2278 if (snode->parent->nodetype == LYS_CASE)
2279 return true;
2280 if (sleaf->when)
2281 return true;
2282 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
2283 || sleaf->dflt)
2284 return false;
2285 break;
2286 case LYS_CONTAINER:
2287 scontainer = (struct lysc_node_container *)snode;
2288 if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
2289 return false;
2290 break;
2291 case LYS_LIST:
2292 case LYS_LEAFLIST:
2293 break;
2294 default:
2295 return false;
2296 }
2297 return true;
2298 case NB_OP_MOVE:
2299 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2300 return false;
2301
2302 switch (snode->nodetype) {
2303 case LYS_LIST:
2304 case LYS_LEAFLIST:
2305 if (!CHECK_FLAG(snode->flags, LYS_ORDBY_USER))
2306 return false;
2307 break;
2308 default:
2309 return false;
2310 }
2311 return true;
2312 case NB_OP_PRE_VALIDATE:
2313 case NB_OP_APPLY_FINISH:
2314 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2315 return false;
2316 return true;
2317 case NB_OP_GET_ELEM:
2318 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
2319 return false;
2320
2321 switch (snode->nodetype) {
2322 case LYS_LEAF:
2323 case LYS_LEAFLIST:
2324 break;
2325 case LYS_CONTAINER:
2326 scontainer = (struct lysc_node_container *)snode;
2327 if (!CHECK_FLAG(scontainer->flags, LYS_PRESENCE))
2328 return false;
2329 break;
2330 default:
2331 return false;
2332 }
2333 return true;
2334 case NB_OP_GET_NEXT:
2335 switch (snode->nodetype) {
2336 case LYS_LIST:
2337 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
2338 return false;
2339 break;
2340 case LYS_LEAFLIST:
2341 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
2342 return false;
2343 break;
2344 default:
2345 return false;
2346 }
2347 return true;
2348 case NB_OP_GET_KEYS:
2349 case NB_OP_LOOKUP_ENTRY:
2350 switch (snode->nodetype) {
2351 case LYS_LIST:
2352 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
2353 return false;
2354 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
2355 return false;
2356 break;
2357 default:
2358 return false;
2359 }
2360 return true;
2361 case NB_OP_RPC:
2362 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
2363 return false;
2364
2365 switch (snode->nodetype) {
2366 case LYS_RPC:
2367 case LYS_ACTION:
2368 break;
2369 default:
2370 return false;
2371 }
2372 return true;
2373 default:
2374 return false;
2375 }
2376 }
2377
2378 DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
2379 (xpath, arguments));
2380
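/*
 * Send a YANG notification: the registered nb_notification_send hooks
 * (northbound plugins) deliver it, and the 'arguments' list is freed
 * here regardless of the result.
 */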
2381 int nb_notification_send(const char *xpath, struct list *arguments)
2382 {
2383 int ret;
2384
2385 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
2386
2387 ret = hook_call(nb_notification_send, xpath, arguments);
2388 if (arguments)
2389 list_delete(&arguments);
2390
2391 return ret;
2392 }
2393
2394 /* Management of user pointers associated with running configuration nodes. */
2395 struct nb_config_entry {
2396 char xpath[XPATH_MAXLEN];
2397 void *entry;
2398 };
2399
2400 static bool running_config_entry_cmp(const void *value1, const void *value2)
2401 {
2402 const struct nb_config_entry *c1 = value1;
2403 const struct nb_config_entry *c2 = value2;
2404
2405 return strmatch(c1->xpath, c2->xpath);
2406 }
2407
2408 static unsigned int running_config_entry_key_make(const void *value)
2409 {
2410 return string_hash_make(value);
2411 }
2412
2413 static void *running_config_entry_alloc(void *p)
2414 {
2415 struct nb_config_entry *new, *key = p;
2416
2417 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
2418 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
2419
2420 return new;
2421 }
2422
2423 static void running_config_entry_free(void *arg)
2424 {
2425 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
2426 }
2427
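/*
 * Associate an arbitrary user pointer with the given node of the running
 * configuration, keyed by the node's XPath. Daemons typically call this
 * from their "create" callbacks; a hypothetical example:
 *
 *   nb_running_set_entry(args->dnode, my_object);
 */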
2428 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
2429 {
2430 struct nb_config_entry *config, s;
2431
2432 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2433 config = hash_get(running_config_entries, &s,
2434 running_config_entry_alloc);
2435 config->entry = entry;
2436 }
2437
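/*
 * Rewrite the stored XPath of every user-pointer entry under 'xpath_from'
 * so that it lives under 'xpath_to' instead (used when a configuration
 * subtree is moved to a new path).
 */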
2438 void nb_running_move_tree(const char *xpath_from, const char *xpath_to)
2439 {
2440 struct nb_config_entry *entry;
2441 struct list *entries = hash_to_list(running_config_entries);
2442 struct listnode *ln;
2443
2444 for (ALL_LIST_ELEMENTS_RO(entries, ln, entry)) {
2445 if (!frrstr_startswith(entry->xpath, xpath_from))
2446 continue;
2447
2448 hash_release(running_config_entries, entry);
2449
2450 char *newpath =
2451 frrstr_replace(entry->xpath, xpath_from, xpath_to);
2452 strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
2453 XFREE(MTYPE_TMP, newpath);
2454
2455 (void)hash_get(running_config_entries, entry,
2456 hash_alloc_intern);
2457 }
2458
2459 list_delete(&entries);
2460 }
2461
2462 static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
2463 {
2464 struct nb_config_entry *config, s;
2465 struct lyd_node *child;
2466 void *entry = NULL;
2467
2468 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2469 config = hash_release(running_config_entries, &s);
2470 if (config) {
2471 entry = config->entry;
2472 running_config_entry_free(config);
2473 }
2474
2475 /* Unset user pointers from the child nodes. */
2476 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
2477 LY_LIST_FOR (lyd_child(dnode), child) {
2478 (void)nb_running_unset_entry_helper(child);
2479 }
2480 }
2481
2482 return entry;
2483 }
2484
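/*
 * Remove and return the user pointer associated with 'dnode'. User
 * pointers stored for descendant nodes are cleared as well. An entry
 * must exist for 'dnode' itself, otherwise this asserts.
 */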
2485 void *nb_running_unset_entry(const struct lyd_node *dnode)
2486 {
2487 void *entry;
2488
2489 entry = nb_running_unset_entry_helper(dnode);
2490 assert(entry);
2491
2492 return entry;
2493 }
2494
2495 static void *nb_running_get_entry_worker(const struct lyd_node *dnode,
2496 const char *xpath,
2497 bool abort_if_not_found,
2498 bool rec_search)
2499 {
2500 const struct lyd_node *orig_dnode = dnode;
2501 char xpath_buf[XPATH_MAXLEN];
2502 bool rec_flag = true;
2503
2504 assert(dnode || xpath);
2505
2506 if (!dnode)
2507 dnode = yang_dnode_get(running_config->dnode, xpath);
2508
2509 while (rec_flag && dnode) {
2510 struct nb_config_entry *config, s;
2511
2512 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2513 config = hash_lookup(running_config_entries, &s);
2514 if (config)
2515 return config->entry;
2516
2517 rec_flag = rec_search;
2518
2519 dnode = lyd_parent(dnode);
2520 }
2521
2522 if (!abort_if_not_found)
2523 return NULL;
2524
2525 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
2526 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
2527 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
2528 zlog_backtrace(LOG_ERR);
2529 abort();
2530 }
2531
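/*
 * Return the user pointer associated with 'dnode' (or with 'xpath' when
 * no node is given), walking up through the parent nodes until an entry
 * is found. Aborts when nothing is found and 'abort_if_not_found' is
 * set; nb_running_get_entry_non_rec() is the non-recursive variant.
 */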
2532 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
2533 bool abort_if_not_found)
2534 {
2535 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2536 true);
2537 }
2538
2539 void *nb_running_get_entry_non_rec(const struct lyd_node *dnode,
2540 const char *xpath, bool abort_if_not_found)
2541 {
2542 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2543 false);
2544 }
2545
2546 /* Helpers returning human-readable names, used for logging. */
2547 const char *nb_event_name(enum nb_event event)
2548 {
2549 switch (event) {
2550 case NB_EV_VALIDATE:
2551 return "validate";
2552 case NB_EV_PREPARE:
2553 return "prepare";
2554 case NB_EV_ABORT:
2555 return "abort";
2556 case NB_EV_APPLY:
2557 return "apply";
2558 }
2559
2560 assert(!"Reached end of function we should never hit");
2561 }
2562
2563 const char *nb_operation_name(enum nb_operation operation)
2564 {
2565 switch (operation) {
2566 case NB_OP_CREATE:
2567 return "create";
2568 case NB_OP_MODIFY:
2569 return "modify";
2570 case NB_OP_DESTROY:
2571 return "destroy";
2572 case NB_OP_MOVE:
2573 return "move";
2574 case NB_OP_PRE_VALIDATE:
2575 return "pre_validate";
2576 case NB_OP_APPLY_FINISH:
2577 return "apply_finish";
2578 case NB_OP_GET_ELEM:
2579 return "get_elem";
2580 case NB_OP_GET_NEXT:
2581 return "get_next";
2582 case NB_OP_GET_KEYS:
2583 return "get_keys";
2584 case NB_OP_LOOKUP_ENTRY:
2585 return "lookup_entry";
2586 case NB_OP_RPC:
2587 return "rpc";
2588 }
2589
2590 assert(!"Reached end of function we should never hit");
2591 }
2592
2593 const char *nb_err_name(enum nb_error error)
2594 {
2595 switch (error) {
2596 case NB_OK:
2597 return "ok";
2598 case NB_ERR:
2599 return "generic error";
2600 case NB_ERR_NO_CHANGES:
2601 return "no changes";
2602 case NB_ERR_NOT_FOUND:
2603 return "element not found";
2604 case NB_ERR_LOCKED:
2605 return "resource is locked";
2606 case NB_ERR_VALIDATION:
2607 return "validation";
2608 case NB_ERR_RESOURCE:
2609 return "failed to allocate resource";
2610 case NB_ERR_INCONSISTENCY:
2611 return "internal inconsistency";
2612 }
2613
2614 assert(!"Reached end of function we should never hit");
2615 }
2616
2617 const char *nb_client_name(enum nb_client client)
2618 {
2619 switch (client) {
2620 case NB_CLIENT_CLI:
2621 return "CLI";
2622 case NB_CLIENT_CONFD:
2623 return "ConfD";
2624 case NB_CLIENT_SYSREPO:
2625 return "Sysrepo";
2626 case NB_CLIENT_GRPC:
2627 return "gRPC";
2628 case NB_CLIENT_PCEP:
2629 return "Pcep";
2630 case NB_CLIENT_MGMTD_SERVER:
2631 return "MGMTD Server";
2632 case NB_CLIENT_MGMTD_BE:
2633 return "MGMT Backend";
2634 case NB_CLIENT_NONE:
2635 return "None";
2636 }
2637
2638 assert(!"Reached end of function we should never hit");
2639 }
2640
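/*
 * Copy the callbacks (and optional priorities) declared in the module's
 * node table into the corresponding nb_node structures, warning about
 * unknown data paths and skipping modules flagged with 'ignore_cbs'.
 */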
2641 static void nb_load_callbacks(const struct frr_yang_module_info *module)
2642 {
2643
2644 if (module->ignore_cbs)
2645 return;
2646
2647 for (size_t i = 0; module->nodes[i].xpath; i++) {
2648 struct nb_node *nb_node;
2649 uint32_t priority;
2650
2651 if (i > YANG_MODULE_MAX_NODES) {
2652 zlog_err(
2653 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2654 __func__, module->name, YANG_MODULE_MAX_NODES);
2655 exit(1);
2656 }
2657
2658 nb_node = nb_node_find(module->nodes[i].xpath);
2659 if (!nb_node) {
2660 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
2661 "%s: unknown data path: %s", __func__,
2662 module->nodes[i].xpath);
2663 continue;
2664 }
2665
2666 nb_node->cbs = module->nodes[i].cbs;
2667 priority = module->nodes[i].priority;
2668 if (priority != 0)
2669 nb_node->priority = priority;
2670 }
2671 }
2672
2673 void nb_validate_callbacks(void)
2674 {
2675 unsigned int errors = 0;
2676
2677 yang_snodes_iterate(NULL, nb_node_validate, 0, &errors);
2678 if (errors > 0) {
2679 flog_err(
2680 EC_LIB_NB_CBS_VALIDATION,
2681 "%s: failed to validate northbound callbacks: %u error(s)",
2682 __func__, errors);
2683 exit(1);
2684 }
2685 }
2686
2687
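/*
 * Initialize the northbound layer: load the given YANG modules, attach
 * and validate their northbound callbacks, create the empty running
 * configuration and its user-pointer hash, and set up the northbound
 * CLI.
 */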
2688 void nb_init(struct event_loop *tm,
2689 const struct frr_yang_module_info *const modules[],
2690 size_t nmodules, bool db_enabled)
2691 {
2692 struct yang_module *loaded[nmodules], **loadedp = loaded;
2693 bool explicit_compile;
2694
2695 /*
2696 * The explicit-compile feature of libyang2 defers compiling modules
2697 * until they have all been loaded into the context, which avoids
2698 * recompiling the same modules repeatedly as they are imported or
2699 * augmented. Using it currently leads to incorrect behavior in FRR,
2700 * so it is kept disabled here.
2701 */
2702 explicit_compile = false;
2703
2704 nb_db_enabled = db_enabled;
2705
2706 yang_init(true, explicit_compile);
2707
2708 /* Load YANG modules and their corresponding northbound callbacks. */
2709 for (size_t i = 0; i < nmodules; i++) {
2710 DEBUGD(&nb_dbg_events, "northbound: loading %s.yang",
2711 modules[i]->name);
2712 *loadedp++ = yang_module_load(modules[i]->name);
2713 }
2714
2715 if (explicit_compile)
2716 yang_init_loading_complete();
2717
2718 /* Initialize the compiled nodes with northbound data. */
2719 for (size_t i = 0; i < nmodules; i++) {
2720 yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0,
2721 (void *)modules[i]);
2722 nb_load_callbacks(modules[i]);
2723 }
2724
2725 /* Validate northbound callbacks. */
2726 nb_validate_callbacks();
2727
2728 /* Create an empty running configuration. */
2729 running_config = nb_config_new(NULL);
2730 running_config_entries = hash_create(running_config_entry_key_make,
2731 running_config_entry_cmp,
2732 "Running Configuration Entries");
2733 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
2734
2735 /* Initialize the northbound CLI. */
2736 nb_cli_init(tm);
2737 }
2738
2739 void nb_terminate(void)
2740 {
2741 /* Terminate the northbound CLI. */
2742 nb_cli_terminate();
2743
2744 /* Delete all nb_node structures from all YANG modules. */
2745 nb_nodes_delete();
2746
2747 /* Delete the running configuration. */
2748 hash_clean_and_free(&running_config_entries, running_config_entry_free);
2749 nb_config_free(running_config);
2750 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
2751 }