lib/northbound.c
1/*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20#include <zebra.h>
21
22#include "libfrr.h"
23#include "log.h"
24#include "lib_errors.h"
25#include "hash.h"
26#include "command.h"
27#include "debug.h"
28#include "db.h"
29#include "frr_pthread.h"
30#include "northbound.h"
31#include "northbound_cli.h"
32#include "northbound_db.h"
33
34DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
35DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
36DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
37
38/* Running configuration - shouldn't be modified directly. */
39struct nb_config *running_config;
40
41/* Hash table of user pointers associated with configuration entries. */
42static struct hash *running_config_entries;
43
44/* Management lock for the running configuration. */
45static struct {
46 /* Mutex protecting this structure. */
47 pthread_mutex_t mtx;
48
49 /* Actual lock. */
50 bool locked;
51
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client;
54
55 /* Northbound user who owns this lock. */
56 const void *owner_user;
57} running_config_mgmt_lock;
58
59/*
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
62 */
63static bool transaction_in_progress;
64
65static int nb_callback_configuration(const enum nb_event event,
66 struct nb_config_change *change);
67static void nb_log_callback(const enum nb_event event,
68 enum nb_operation operation, const char *xpath,
69 const char *value);
70static struct nb_transaction *nb_transaction_new(struct nb_config *config,
71 struct nb_config_cbs *changes,
72 enum nb_client client,
73 const void *user,
74 const char *comment);
75static void nb_transaction_free(struct nb_transaction *transaction);
76static int nb_transaction_process(enum nb_event event,
77 struct nb_transaction *transaction);
78static void nb_transaction_apply_finish(struct nb_transaction *transaction);
79static int nb_oper_data_iter_node(const struct lys_node *snode,
80 const char *xpath, const void *list_entry,
81 const struct yang_list_keys *list_keys,
82 struct yang_translator *translator,
83 bool first, uint32_t flags,
84 nb_oper_data_cb cb, void *arg);
85
86static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
87{
88 bool *config_only = arg;
89
90 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
91 *config_only = false;
92 return YANG_ITER_STOP;
93 }
94
95 return YANG_ITER_CONTINUE;
96}
97
98static int nb_node_new_cb(const struct lys_node *snode, void *arg)
99{
100 struct nb_node *nb_node;
101 struct lys_node *sparent, *sparent_list;
102
103 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
104 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
105 sizeof(nb_node->xpath));
106 nb_node->priority = NB_DFLT_PRIORITY;
107 sparent = yang_snode_real_parent(snode);
108 if (sparent)
109 nb_node->parent = sparent->priv;
110 sparent_list = yang_snode_parent_list(snode);
111 if (sparent_list)
112 nb_node->parent_list = sparent_list->priv;
113
114 /* Set flags. */
115 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
116 bool config_only = true;
117
118 yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
119 YANG_ITER_ALLOW_AUGMENTATIONS,
120 &config_only);
121 if (config_only)
122 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
123 }
124 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
125 struct lys_node_list *slist;
126
127 slist = (struct lys_node_list *)snode;
128 if (slist->keys_size == 0)
129 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
130 }
131
132 /*
133 * Link the northbound node and the libyang schema node with one
134 * another.
135 */
136 nb_node->snode = snode;
137 lys_set_private(snode, nb_node);
138
139 return YANG_ITER_CONTINUE;
140}
141
142static int nb_node_del_cb(const struct lys_node *snode, void *arg)
143{
144 struct nb_node *nb_node;
145
146 nb_node = snode->priv;
147 lys_set_private(snode, NULL);
148 XFREE(MTYPE_NB_NODE, nb_node);
149
150 return YANG_ITER_CONTINUE;
151}
152
153void nb_nodes_create(void)
154{
155 yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
156}
157
158void nb_nodes_delete(void)
159{
160 yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
161}
162
163struct nb_node *nb_node_find(const char *xpath)
164{
165 const struct lys_node *snode;
166
167 /*
168 * Use libyang to find the schema node associated to the xpath and get
169 * the northbound node from there (snode private pointer).
170 */
171 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
172 if (!snode)
173 return NULL;
174
175 return snode->priv;
176}
177
178static int nb_node_validate_cb(const struct nb_node *nb_node,
179 enum nb_operation operation,
180 int callback_implemented, bool optional)
181{
182 bool valid;
183
184 valid = nb_operation_is_valid(operation, nb_node->snode);
185
186 if (!valid && callback_implemented)
187 flog_warn(EC_LIB_NB_CB_UNNEEDED,
188 "unneeded '%s' callback for '%s'",
189 nb_operation_name(operation), nb_node->xpath);
190
191 if (!optional && valid && !callback_implemented) {
192 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
193 nb_operation_name(operation), nb_node->xpath);
194 return 1;
195 }
196
197 return 0;
198}
199
200/*
201 * Check if the required callbacks were implemented for the given northbound
202 * node.
203 */
204static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
205
206{
207 unsigned int error = 0;
208
209 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
210 !!nb_node->cbs.create, false);
211 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
212 !!nb_node->cbs.modify, false);
213 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
214 !!nb_node->cbs.destroy, false);
215 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
216 false);
217 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
218 !!nb_node->cbs.pre_validate, true);
219 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
220 !!nb_node->cbs.apply_finish, true);
221 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
222 !!nb_node->cbs.get_elem, false);
223 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
224 !!nb_node->cbs.get_next, false);
225 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
226 !!nb_node->cbs.get_keys, false);
227 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
228 !!nb_node->cbs.lookup_entry, false);
229 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
230 false);
231
232 return error;
233}
234
235static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
236{
237 /* Top-level nodes can have any priority. */
238 if (!nb_node->parent)
239 return 0;
240
241 if (nb_node->priority < nb_node->parent->priority) {
242 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
243 "node has higher priority than its parent [xpath %s]",
244 nb_node->xpath);
245 return 1;
246 }
247
248 return 0;
249}
250
251static int nb_node_validate(const struct lys_node *snode, void *arg)
252{
253 struct nb_node *nb_node = snode->priv;
254 unsigned int *errors = arg;
255
256 /* Validate callbacks and priority. */
257 *errors += nb_node_validate_cbs(nb_node);
258 *errors += nb_node_validate_priority(nb_node);
259
260 return YANG_ITER_CONTINUE;
261}
262
263struct nb_config *nb_config_new(struct lyd_node *dnode)
264{
265 struct nb_config *config;
266
267 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
268 if (dnode)
269 config->dnode = dnode;
270 else
271 config->dnode = yang_dnode_new(ly_native_ctx, true);
272 config->version = 0;
273
274 return config;
275}
276
277void nb_config_free(struct nb_config *config)
278{
279 if (config->dnode)
280 yang_dnode_free(config->dnode);
281 XFREE(MTYPE_NB_CONFIG, config);
282}
283
284struct nb_config *nb_config_dup(const struct nb_config *config)
285{
286 struct nb_config *dup;
287
288 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
289 dup->dnode = yang_dnode_dup(config->dnode);
290 dup->version = config->version;
291
292 return dup;
293}
294
295int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
296 bool preserve_source)
297{
298 int ret;
299
300 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
301 if (ret != 0)
302 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
303
304 if (!preserve_source)
305 nb_config_free(config_src);
306
307 return (ret == 0) ? NB_OK : NB_ERR;
308}
309
310void nb_config_replace(struct nb_config *config_dst,
311 struct nb_config *config_src, bool preserve_source)
312{
313 /* Update version. */
314 if (config_src->version != 0)
315 config_dst->version = config_src->version;
316
317 /* Update dnode. */
318 if (config_dst->dnode)
319 yang_dnode_free(config_dst->dnode);
320 if (preserve_source) {
321 config_dst->dnode = yang_dnode_dup(config_src->dnode);
322 } else {
323 config_dst->dnode = config_src->dnode;
324 config_src->dnode = NULL;
325 nb_config_free(config_src);
326 }
327}
328
329/* Generate the nb_config_cbs tree. */
330static inline int nb_config_cb_compare(const struct nb_config_cb *a,
331 const struct nb_config_cb *b)
332{
333 /* Sort by priority first. */
334 if (a->nb_node->priority < b->nb_node->priority)
335 return -1;
336 if (a->nb_node->priority > b->nb_node->priority)
337 return 1;
338
339 /*
340 * Use XPath as a tie-breaker. This will naturally sort parent nodes
341 * before their children.
342 */
343 return strcmp(a->xpath, b->xpath);
344}
345RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
346
347static void nb_config_diff_add_change(struct nb_config_cbs *changes,
348 enum nb_operation operation,
349 const struct lyd_node *dnode)
350{
351 struct nb_config_change *change;
352
353 change = XCALLOC(MTYPE_TMP, sizeof(*change));
354 change->cb.operation = operation;
355 change->cb.nb_node = dnode->schema->priv;
356 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
357 change->cb.dnode = dnode;
358
359 RB_INSERT(nb_config_cbs, changes, &change->cb);
360}
361
362static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
363{
364 while (!RB_EMPTY(nb_config_cbs, changes)) {
365 struct nb_config_change *change;
366
367 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
368 changes);
369 RB_REMOVE(nb_config_cbs, changes, &change->cb);
370 XFREE(MTYPE_TMP, change);
371 }
372}
373
374/*
375 * Helper function used when calculating the delta between two different
376 * configurations. Given a new subtree, calculate all new YANG data nodes,
377 * excluding default leafs and leaf-lists. This is a recursive function.
378 */
379static void nb_config_diff_created(const struct lyd_node *dnode,
380 struct nb_config_cbs *changes)
381{
382 enum nb_operation operation;
383 struct lyd_node *child;
384
385 switch (dnode->schema->nodetype) {
386 case LYS_LEAF:
387 case LYS_LEAFLIST:
388 if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
389 break;
390
391 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
392 operation = NB_OP_CREATE;
393 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
394 operation = NB_OP_MODIFY;
395 else
396 return;
397
398 nb_config_diff_add_change(changes, operation, dnode);
399 break;
400 case LYS_CONTAINER:
401 case LYS_LIST:
402 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
403 nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);
404
405 /* Process child nodes recursively. */
406 LY_TREE_FOR (dnode->child, child) {
407 nb_config_diff_created(child, changes);
408 }
409 break;
410 default:
411 break;
412 }
413}
414
415static void nb_config_diff_deleted(const struct lyd_node *dnode,
416 struct nb_config_cbs *changes)
417{
418 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
419 nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
420 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
421 struct lyd_node *child;
422
423 /*
424 * Non-presence containers need special handling since they
425 * don't have "destroy" callbacks. In this case, what we need to
426 * do is to call the "destroy" callbacks of their child nodes
427 * when applicable (i.e. optional nodes).
428 */
429 LY_TREE_FOR (dnode->child, child) {
430 nb_config_diff_deleted(child, changes);
431 }
432 }
433}
434
435/* Calculate the delta between two different configurations. */
436static void nb_config_diff(const struct nb_config *config1,
437 const struct nb_config *config2,
438 struct nb_config_cbs *changes)
439{
440 struct lyd_difflist *diff;
441
442 diff = lyd_diff(config1->dnode, config2->dnode,
443 LYD_DIFFOPT_WITHDEFAULTS);
444 assert(diff);
445
446 for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
447 LYD_DIFFTYPE type;
448 struct lyd_node *dnode;
449
450 type = diff->type[i];
451
452 switch (type) {
453 case LYD_DIFF_CREATED:
454 dnode = diff->second[i];
455 nb_config_diff_created(dnode, changes);
456 break;
457 case LYD_DIFF_DELETED:
458 dnode = diff->first[i];
459 nb_config_diff_deleted(dnode, changes);
460 break;
461 case LYD_DIFF_CHANGED:
462 dnode = diff->second[i];
463 nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
464 break;
465 case LYD_DIFF_MOVEDAFTER1:
466 case LYD_DIFF_MOVEDAFTER2:
467 default:
468 continue;
469 }
470 }
471
472 lyd_free_diff(diff);
473}
474
475int nb_candidate_edit(struct nb_config *candidate,
476 const struct nb_node *nb_node,
477 enum nb_operation operation, const char *xpath,
478 const struct yang_data *previous,
479 const struct yang_data *data)
480{
481 struct lyd_node *dnode;
482 char xpath_edit[XPATH_MAXLEN];
483
484 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
485 if (nb_node->snode->nodetype == LYS_LEAFLIST)
486 snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
487 data->value);
488 else
489 strlcpy(xpath_edit, xpath, sizeof(xpath_edit));
490
491 switch (operation) {
492 case NB_OP_CREATE:
493 case NB_OP_MODIFY:
494 ly_errno = 0;
495 dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
496 xpath_edit, (void *)data->value, 0,
497 LYD_PATH_OPT_UPDATE);
498 if (!dnode && ly_errno) {
499 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
500 __func__);
501 return NB_ERR;
502 }
503
504 /*
505 * If a new node was created, call lyd_validate() only to create
506 * default child nodes.
507 */
508 if (dnode) {
509 lyd_schema_sort(dnode, 0);
510 lyd_validate(&dnode,
511 LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
512 ly_native_ctx);
513 }
514 break;
515 case NB_OP_DESTROY:
516 dnode = yang_dnode_get(candidate->dnode, xpath_edit);
517 if (!dnode)
518 /*
519 * Return a special error code so the caller can choose
520 * whether to ignore it or not.
521 */
522 return NB_ERR_NOT_FOUND;
523 lyd_free(dnode);
524 break;
525 case NB_OP_MOVE:
526 /* TODO: update configuration. */
527 break;
528 default:
529 flog_warn(EC_LIB_DEVELOPMENT,
530 "%s: unknown operation (%u) [xpath %s]", __func__,
531 operation, xpath_edit);
532 return NB_ERR;
533 }
534
535 return NB_OK;
536}
537
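/*
 * Illustrative sketch (hypothetical client code): how a northbound client
 * edits a candidate configuration with nb_candidate_edit(). The XPath and
 * value are invented; yang_data_new()/yang_data_free() come from lib/yang.h.
 */
static int example_set_hostname(struct nb_config *candidate)
{
	const char *xpath = "/frr-example:example/hostname"; /* hypothetical */
	struct nb_node *nb_node;
	struct yang_data *data;
	int ret;

	nb_node = nb_node_find(xpath);
	if (!nb_node)
		return NB_ERR;

	data = yang_data_new(xpath, "router1");
	ret = nb_candidate_edit(candidate, nb_node, NB_OP_MODIFY, xpath, NULL,
				data);
	yang_data_free(data);

	return ret;
}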
538bool nb_candidate_needs_update(const struct nb_config *candidate)
539{
540 if (candidate->version < running_config->version)
541 return true;
542
543 return false;
544}
545
546int nb_candidate_update(struct nb_config *candidate)
547{
548 struct nb_config *updated_config;
549
550 updated_config = nb_config_dup(running_config);
551 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
552 return NB_ERR;
553
554 nb_config_replace(candidate, updated_config, false);
555
556 return NB_OK;
557}
558
559/*
560 * Perform YANG syntactic and semantic validation.
561 *
562 * WARNING: lyd_validate() can change the configuration as part of the
563 * validation process.
564 */
565static int nb_candidate_validate_yang(struct nb_config *candidate)
566{
567 if (lyd_validate(&candidate->dnode,
568 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
569 ly_native_ctx)
570 != 0)
571 return NB_ERR_VALIDATION;
572
573 return NB_OK;
574}
575
576/* Perform code-level validation using the northbound callbacks. */
577static int nb_candidate_validate_code(struct nb_config *candidate,
578 struct nb_config_cbs *changes)
579{
580 struct nb_config_cb *cb;
581 struct lyd_node *root, *next, *child;
582 int ret;
583
584 /* First validate the candidate as a whole. */
585 LY_TREE_FOR (candidate->dnode, root) {
586 LY_TREE_DFS_BEGIN (root, next, child) {
587 struct nb_node *nb_node;
588
589 nb_node = child->schema->priv;
590 if (!nb_node->cbs.pre_validate)
591 goto next;
592
593 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config,
594 DEBUG_MODE_ALL)) {
595 char xpath[XPATH_MAXLEN];
596
597 yang_dnode_get_path(child, xpath,
598 sizeof(xpath));
599 nb_log_callback(NB_EV_VALIDATE,
600 NB_OP_PRE_VALIDATE, xpath,
601 NULL);
602 }
603
604 ret = (*nb_node->cbs.pre_validate)(child);
605 if (ret != NB_OK)
606 return NB_ERR_VALIDATION;
607
608 next:
609 LY_TREE_DFS_END(root, next, child);
610 }
611 }
612
613 /* Now validate the configuration changes. */
614 RB_FOREACH (cb, nb_config_cbs, changes) {
615 struct nb_config_change *change = (struct nb_config_change *)cb;
616
617 ret = nb_callback_configuration(NB_EV_VALIDATE, change);
618 if (ret != NB_OK)
619 return NB_ERR_VALIDATION;
620 }
621
622 return NB_OK;
623}
624
625int nb_candidate_validate(struct nb_config *candidate)
626{
627 struct nb_config_cbs changes;
628 int ret;
629
630 if (nb_candidate_validate_yang(candidate) != NB_OK)
631 return NB_ERR_VALIDATION;
632
633 RB_INIT(nb_config_cbs, &changes);
634 nb_config_diff(running_config, candidate, &changes);
635 ret = nb_candidate_validate_code(candidate, &changes);
636 nb_config_diff_del_changes(&changes);
637
638 return ret;
639}
640
641int nb_candidate_commit_prepare(struct nb_config *candidate,
642 enum nb_client client, const void *user,
643 const char *comment,
644 struct nb_transaction **transaction)
645{
646 struct nb_config_cbs changes;
647
648 if (nb_candidate_validate_yang(candidate) != NB_OK) {
649 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
650 "%s: failed to validate candidate configuration",
651 __func__);
652 return NB_ERR_VALIDATION;
653 }
654
655 RB_INIT(nb_config_cbs, &changes);
656 nb_config_diff(running_config, candidate, &changes);
657 if (RB_EMPTY(nb_config_cbs, &changes))
658 return NB_ERR_NO_CHANGES;
659
660 if (nb_candidate_validate_code(candidate, &changes) != NB_OK) {
661 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
662 "%s: failed to validate candidate configuration",
663 __func__);
664 nb_config_diff_del_changes(&changes);
665 return NB_ERR_VALIDATION;
666 }
667
668 *transaction =
669 nb_transaction_new(candidate, &changes, client, user, comment);
670 if (*transaction == NULL) {
671 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
672 "%s: failed to create transaction", __func__);
673 nb_config_diff_del_changes(&changes);
674 return NB_ERR_LOCKED;
675 }
676
677 return nb_transaction_process(NB_EV_PREPARE, *transaction);
678}
679
680void nb_candidate_commit_abort(struct nb_transaction *transaction)
681{
682 (void)nb_transaction_process(NB_EV_ABORT, transaction);
683 nb_transaction_free(transaction);
684}
685
686void nb_candidate_commit_apply(struct nb_transaction *transaction,
687 bool save_transaction, uint32_t *transaction_id)
688{
689 (void)nb_transaction_process(NB_EV_APPLY, transaction);
690 nb_transaction_apply_finish(transaction);
691
692 /* Replace running by candidate. */
693 transaction->config->version++;
694 nb_config_replace(running_config, transaction->config, true);
695
696 /* Record transaction. */
697 if (save_transaction
698 && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
699 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
700 "%s: failed to record transaction", __func__);
701
702 nb_transaction_free(transaction);
703}
704
705int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
706 const void *user, bool save_transaction,
707 const char *comment, uint32_t *transaction_id)
708{
709 struct nb_transaction *transaction = NULL;
710 int ret;
711
712 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
713 &transaction);
714 /*
715 * Apply the changes if the preparation phase succeeded. Otherwise abort
716 * the transaction.
717 */
718 if (ret == NB_OK)
719 nb_candidate_commit_apply(transaction, save_transaction,
720 transaction_id);
721 else if (transaction != NULL)
722 nb_candidate_commit_abort(transaction);
723
724 return ret;
725}
726
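/*
 * Illustrative sketch (hypothetical client code): the usual change cycle on
 * top of nb_candidate_commit(): duplicate the running configuration, edit
 * the copy, then commit it. Error handling is abbreviated and the commit
 * comment is arbitrary.
 */
static int example_change_cycle(void)
{
	struct nb_config *candidate;
	int ret;

	candidate = nb_config_dup(running_config);

	/* ... one or more nb_candidate_edit() calls on 'candidate' ... */

	ret = nb_candidate_commit(candidate, NB_CLIENT_CLI, NULL, true,
				  "example change", NULL);
	nb_config_free(candidate);

	return ret;
}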
727int nb_running_lock(enum nb_client client, const void *user)
728{
729 int ret = -1;
730
731 frr_with_mutex(&running_config_mgmt_lock.mtx) {
732 if (!running_config_mgmt_lock.locked) {
733 running_config_mgmt_lock.locked = true;
734 running_config_mgmt_lock.owner_client = client;
735 running_config_mgmt_lock.owner_user = user;
736 ret = 0;
737 }
738 }
739
740 return ret;
741}
742
743int nb_running_unlock(enum nb_client client, const void *user)
744{
745 int ret = -1;
746
747 frr_with_mutex(&running_config_mgmt_lock.mtx) {
748 if (running_config_mgmt_lock.locked
749 && running_config_mgmt_lock.owner_client == client
750 && running_config_mgmt_lock.owner_user == user) {
751 running_config_mgmt_lock.locked = false;
752 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
753 running_config_mgmt_lock.owner_user = NULL;
754 ret = 0;
755 }
756 }
757
758 return ret;
759}
760
761int nb_running_lock_check(enum nb_client client, const void *user)
762{
763 int ret = -1;
764
765 frr_with_mutex(&running_config_mgmt_lock.mtx) {
766 if (!running_config_mgmt_lock.locked
767 || (running_config_mgmt_lock.owner_client == client
768 && running_config_mgmt_lock.owner_user == user))
769 ret = 0;
770 }
771
772 return ret;
773}
774
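/*
 * Illustrative sketch (hypothetical management-client code): taking the
 * running configuration lock around a commit so that other clients cannot
 * change the configuration in between. The session pointer is whatever the
 * client uses to identify itself.
 */
static int example_locked_commit(struct nb_config *candidate, void *session)
{
	int ret;

	if (nb_running_lock(NB_CLIENT_CONFD, session) != 0)
		return NB_ERR_LOCKED;

	ret = nb_candidate_commit(candidate, NB_CLIENT_CONFD, session, true,
				  "locked example commit", NULL);

	nb_running_unlock(NB_CLIENT_CONFD, session);

	return ret;
}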
775static void nb_log_callback(const enum nb_event event,
776 enum nb_operation operation, const char *xpath,
777 const char *value)
778{
779 zlog_debug(
780 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
781 nb_event_name(event), nb_operation_name(operation), xpath,
782 value ? value : "(NULL)");
783}
784
785/*
786 * Call the northbound configuration callback associated to a given
787 * configuration change.
788 */
789static int nb_callback_configuration(const enum nb_event event,
790 struct nb_config_change *change)
791{
792 enum nb_operation operation = change->cb.operation;
793 const char *xpath = change->cb.xpath;
794 const struct nb_node *nb_node = change->cb.nb_node;
795 const struct lyd_node *dnode = change->cb.dnode;
796 union nb_resource *resource;
797 int ret = NB_ERR;
798
799 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
800 const char *value = "(none)";
801
802 if (dnode && !yang_snode_is_typeless_data(dnode->schema))
803 value = yang_dnode_get_string(dnode, NULL);
804
805 nb_log_callback(event, operation, xpath, value);
806 }
807
808 if (event == NB_EV_VALIDATE)
809 resource = NULL;
810 else
811 resource = &change->resource;
812
813 switch (operation) {
814 case NB_OP_CREATE:
815 ret = (*nb_node->cbs.create)(event, dnode, resource);
816 break;
817 case NB_OP_MODIFY:
818 ret = (*nb_node->cbs.modify)(event, dnode, resource);
819 break;
820 case NB_OP_DESTROY:
821 ret = (*nb_node->cbs.destroy)(event, dnode);
822 break;
823 case NB_OP_MOVE:
824 ret = (*nb_node->cbs.move)(event, dnode);
825 break;
826 default:
827 flog_err(EC_LIB_DEVELOPMENT,
828 "%s: unknown operation (%u) [xpath %s]", __func__,
829 operation, xpath);
830 exit(1);
831 }
832
833 if (ret != NB_OK) {
834 int priority;
835 enum lib_log_refs ref;
836
837 switch (event) {
838 case NB_EV_VALIDATE:
839 priority = LOG_WARNING;
840 ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
841 break;
842 case NB_EV_PREPARE:
843 priority = LOG_WARNING;
844 ref = EC_LIB_NB_CB_CONFIG_PREPARE;
845 break;
846 case NB_EV_ABORT:
847 priority = LOG_WARNING;
848 ref = EC_LIB_NB_CB_CONFIG_ABORT;
849 break;
850 case NB_EV_APPLY:
851 priority = LOG_ERR;
852 ref = EC_LIB_NB_CB_CONFIG_APPLY;
853 break;
854 default:
855 flog_err(EC_LIB_DEVELOPMENT,
856 "%s: unknown event (%u) [xpath %s]",
857 __func__, event, xpath);
858 exit(1);
859 }
860
861 flog(priority, ref,
862 "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
863 __func__, nb_err_name(ret), nb_event_name(event),
864 nb_operation_name(operation), xpath);
865 }
866
867 return ret;
868}
869
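/*
 * Illustrative sketch (hypothetical daemon code): a "modify" callback in the
 * three-phase model driven by nb_callback_configuration() above. Only the
 * NB_EV_APPLY phase touches daemon state; validations and resource
 * allocations belong in NB_EV_VALIDATE/NB_EV_PREPARE. The interface
 * structure is invented for the example.
 */
struct example_interface {
	uint16_t mtu;
};

static int example_interface_mtu_modify(enum nb_event event,
					 const struct lyd_node *dnode,
					 union nb_resource *resource)
{
	struct example_interface *ifp;

	if (event != NB_EV_APPLY)
		return NB_OK;

	ifp = nb_running_get_entry(dnode, NULL, true);
	ifp->mtu = yang_dnode_get_uint16(dnode, NULL);

	return NB_OK;
}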
870struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
871 const char *xpath,
872 const void *list_entry)
873{
874 DEBUGD(&nb_dbg_cbs_state,
875 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
876 xpath, list_entry);
877
878 return nb_node->cbs.get_elem(xpath, list_entry);
879}
880
881const void *nb_callback_get_next(const struct nb_node *nb_node,
882 const void *parent_list_entry,
883 const void *list_entry)
884{
885 DEBUGD(&nb_dbg_cbs_state,
886 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
887 nb_node->xpath, parent_list_entry, list_entry);
888
889 return nb_node->cbs.get_next(parent_list_entry, list_entry);
890}
891
892int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
893 struct yang_list_keys *keys)
894{
895 DEBUGD(&nb_dbg_cbs_state,
896 "northbound callback (get_keys): node [%s] list_entry [%p]",
897 nb_node->xpath, list_entry);
898
899 return nb_node->cbs.get_keys(list_entry, keys);
900}
901
902const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
903 const void *parent_list_entry,
904 const struct yang_list_keys *keys)
905{
906 DEBUGD(&nb_dbg_cbs_state,
907 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
908 nb_node->xpath, parent_list_entry);
909
910 return nb_node->cbs.lookup_entry(parent_list_entry, keys);
911}
912
913int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
914 const struct list *input, struct list *output)
915{
916 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
917
918 return nb_node->cbs.rpc(xpath, input, output);
919}
920
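/*
 * Illustrative sketch (hypothetical daemon code): operational-state
 * callbacks matching the wrappers above. The "neighbor" list, its storage
 * and the leaf are invented; yang_data_new_string() comes from the yang
 * wrappers in lib/.
 */
struct example_neighbor {
	struct example_neighbor *next;
	char address[64];
};

static struct example_neighbor *example_neighbor_list;

static const void *example_neighbor_get_next(const void *parent_list_entry,
					     const void *list_entry)
{
	const struct example_neighbor *nbr = list_entry;

	/* First call returns the list head, later calls walk the list. */
	if (!nbr)
		return example_neighbor_list;

	return nbr->next;
}

static struct yang_data *
example_neighbor_address_get_elem(const char *xpath, const void *list_entry)
{
	const struct example_neighbor *nbr = list_entry;

	return yang_data_new_string(xpath, nbr->address);
}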
921static struct nb_transaction *
922nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
923 enum nb_client client, const void *user, const char *comment)
924{
925 struct nb_transaction *transaction;
926
927 if (nb_running_lock_check(client, user)) {
928 flog_warn(
929 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
930 "%s: running configuration is locked by another client",
931 __func__);
932 return NULL;
933 }
934
935 if (transaction_in_progress) {
936 flog_warn(
937 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
938 "%s: error - there's already another transaction in progress",
939 __func__);
940 return NULL;
941 }
942 transaction_in_progress = true;
943
944 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
945 transaction->client = client;
946 if (comment)
947 strlcpy(transaction->comment, comment,
948 sizeof(transaction->comment));
949 transaction->config = config;
950 transaction->changes = *changes;
951
952 return transaction;
953}
954
955static void nb_transaction_free(struct nb_transaction *transaction)
956{
957 nb_config_diff_del_changes(&transaction->changes);
958 XFREE(MTYPE_TMP, transaction);
959 transaction_in_progress = false;
960}
961
962/* Process all configuration changes associated to a transaction. */
963static int nb_transaction_process(enum nb_event event,
964 struct nb_transaction *transaction)
965{
966 struct nb_config_cb *cb;
967
968 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
969 struct nb_config_change *change = (struct nb_config_change *)cb;
970 int ret;
971
972 /*
973 * Only try to release resources that were allocated
974 * successfully.
975 */
976 if (event == NB_EV_ABORT && change->prepare_ok == false)
977 break;
978
979 /* Call the appropriate callback. */
980 ret = nb_callback_configuration(event, change);
981 switch (event) {
982 case NB_EV_PREPARE:
983 if (ret != NB_OK)
984 return ret;
985 change->prepare_ok = true;
986 break;
987 case NB_EV_ABORT:
988 case NB_EV_APPLY:
989 /*
990 * At this point it's not possible to reject the
991 * transaction anymore, so any failure here can lead to
992 * inconsistencies and should be treated as a bug.
993 * Operations prone to errors, like validations and
994 * resource allocations, should be performed during the
995 * 'prepare' phase.
1c2facd1 996 */
8685be73
RW
997 break;
998 default:
999 break;
1000 }
1001 }
1002
1003 return NB_OK;
1004}
1005
1006static struct nb_config_cb *
1007nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1008 const struct nb_node *nb_node,
1009 const struct lyd_node *dnode)
1010{
1011 struct nb_config_cb *cb;
1012
1013 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1014 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1015 cb->nb_node = nb_node;
1016 cb->dnode = dnode;
1017 RB_INSERT(nb_config_cbs, cbs, cb);
1018
1019 return cb;
1020}
1021
1022static struct nb_config_cb *
1023nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1024 const struct nb_node *nb_node)
1025{
1026 struct nb_config_cb s;
1027
1028 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1029 s.nb_node = nb_node;
1030 return RB_FIND(nb_config_cbs, cbs, &s);
1031}
1032
1033/* Call the 'apply_finish' callbacks. */
1034static void nb_transaction_apply_finish(struct nb_transaction *transaction)
1035{
1036 struct nb_config_cbs cbs;
1037 struct nb_config_cb *cb;
1038
1039 /* Initialize tree of 'apply_finish' callbacks. */
1040 RB_INIT(nb_config_cbs, &cbs);
1041
1042 /* Identify the 'apply_finish' callbacks that need to be called. */
1043 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1044 struct nb_config_change *change = (struct nb_config_change *)cb;
1045 const struct lyd_node *dnode = change->cb.dnode;
1046
1047 /*
1048 * Iterate up to the root of the data tree. When a node is being
1049 * deleted, skip its 'apply_finish' callback if one is defined
1050 * (the 'apply_finish' callbacks from the node ancestors should
1051 * be called though).
1052 */
1053 if (change->cb.operation == NB_OP_DESTROY) {
1054 char xpath[XPATH_MAXLEN];
1055
1056 dnode = dnode->parent;
1057 if (!dnode)
1058 break;
1059
1060 /*
1061 * The dnode from 'destroy' callbacks points to elements
1062 * from the running configuration. Use yang_dnode_get()
1063 * to get the corresponding dnode from the candidate
1064 * configuration that is being committed.
1065 */
1066 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1067 dnode = yang_dnode_get(transaction->config->dnode,
1068 xpath);
1069 }
1070 while (dnode) {
1071 char xpath[XPATH_MAXLEN];
1072 struct nb_node *nb_node;
1073
1074 nb_node = dnode->schema->priv;
1075 if (!nb_node->cbs.apply_finish)
1076 goto next;
1077
1078 /*
1079 * Don't call the callback more than once for the same
1080 * data node.
1081 */
1082 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1083 if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
1084 goto next;
1085
1086 nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);
1087
1088 next:
1089 dnode = dnode->parent;
1090 }
1091 }
1092
1093 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1094 RB_FOREACH (cb, nb_config_cbs, &cbs) {
1095 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
1096 nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
1097 cb->xpath, NULL);
1098
1099 (*cb->nb_node->cbs.apply_finish)(cb->dnode);
1100 }
1101
1102 /* Release memory. */
1103 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1104 cb = RB_ROOT(nb_config_cbs, &cbs);
1105 RB_REMOVE(nb_config_cbs, &cbs, cb);
1106 XFREE(MTYPE_TMP, cb);
1107 }
1108}
1109
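/*
 * Illustrative sketch (hypothetical daemon code): an 'apply_finish' callback
 * as described above. It runs once per changed subtree after all 'apply'
 * callbacks, which makes it a convenient place to recompute derived state a
 * single time instead of once per individual change.
 */
static void example_apply_finish(const struct lyd_node *dnode)
{
	char xpath[XPATH_MAXLEN];

	yang_dnode_get_path(dnode, xpath, sizeof(xpath));
	zlog_debug("%s: done applying changes under %s", __func__, xpath);
}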
1110static int nb_oper_data_iter_children(const struct lys_node *snode,
1111 const char *xpath, const void *list_entry,
1112 const struct yang_list_keys *list_keys,
1113 struct yang_translator *translator,
1114 bool first, uint32_t flags,
1115 nb_oper_data_cb cb, void *arg)
1116{
1117 struct lys_node *child;
1118
1119 LY_TREE_FOR (snode->child, child) {
1120 int ret;
1121
1122 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1123 list_keys, translator, false,
1124 flags, cb, arg);
1125 if (ret != NB_OK)
1126 return ret;
1127 }
1128
1129 return NB_OK;
1130}
1131
1132static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1133 const char *xpath, const void *list_entry,
1134 const struct yang_list_keys *list_keys,
1135 struct yang_translator *translator,
1136 uint32_t flags, nb_oper_data_cb cb, void *arg)
1137{
1138 struct yang_data *data;
1139
1140 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1141 return NB_OK;
1142
1143 /* Ignore list keys. */
1144 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1145 return NB_OK;
1146
1147 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1148 if (data == NULL)
1149 /* Leaf of type "empty" is not present. */
1150 return NB_OK;
1151
1152 return (*cb)(nb_node->snode, translator, data, arg);
1153}
1154
1155static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1156 const char *xpath,
1157 const void *list_entry,
1158 const struct yang_list_keys *list_keys,
1159 struct yang_translator *translator,
1160 uint32_t flags, nb_oper_data_cb cb,
1161 void *arg)
1162{
1163 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1164 return NB_OK;
1165
1166 /* Presence containers. */
1167 if (nb_node->cbs.get_elem) {
1168 struct yang_data *data;
1169 int ret;
1170
1171 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1172 if (data == NULL)
1173 /* Presence container is not present. */
1174 return NB_OK;
1175
1176 ret = (*cb)(nb_node->snode, translator, data, arg);
1177 if (ret != NB_OK)
1178 return ret;
1179 }
1180
1181 /* Iterate over the child nodes. */
1182 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1183 list_keys, translator, false, flags,
1184 cb, arg);
1185}
1186
1187static int
1188nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1189 const void *parent_list_entry,
1190 const struct yang_list_keys *parent_list_keys,
1191 struct yang_translator *translator, uint32_t flags,
1192 nb_oper_data_cb cb, void *arg)
1193{
1194 const void *list_entry = NULL;
1195
1196 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1197 return NB_OK;
1198
1199 do {
1200 struct yang_data *data;
1201 int ret;
1202
1203 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1204 list_entry);
1205 if (!list_entry)
1206 /* End of the list. */
1207 break;
1208
1209 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1210 if (data == NULL)
1211 continue;
1212
1213 ret = (*cb)(nb_node->snode, translator, data, arg);
1214 if (ret != NB_OK)
1215 return ret;
1216 } while (list_entry);
1217
1218 return NB_OK;
1219}
1220
1221static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1222 const char *xpath_list,
1223 const void *parent_list_entry,
1224 const struct yang_list_keys *parent_list_keys,
1225 struct yang_translator *translator,
1226 uint32_t flags, nb_oper_data_cb cb, void *arg)
1227{
1228 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1229 const void *list_entry = NULL;
1230 uint32_t position = 1;
1231
1232 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1233 return NB_OK;
1234
1235 /* Iterate over all list entries. */
1236 do {
1237 struct yang_list_keys list_keys;
1238 char xpath[XPATH_MAXLEN * 2];
1239 int ret;
1240
1241 /* Obtain list entry. */
1242 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1243 list_entry);
1244 if (!list_entry)
1245 /* End of the list. */
1246 break;
1247
1248 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1249 /* Obtain the list entry keys. */
1250 if (nb_callback_get_keys(nb_node, list_entry,
1251 &list_keys)
1252 != NB_OK) {
1253 flog_warn(EC_LIB_NB_CB_STATE,
1254 "%s: failed to get list keys",
1255 __func__);
1256 return NB_ERR;
1257 }
1258
1259 /* Build XPath of the list entry. */
1260 strlcpy(xpath, xpath_list, sizeof(xpath));
1261 for (unsigned int i = 0; i < list_keys.num; i++) {
1262 snprintf(xpath + strlen(xpath),
1263 sizeof(xpath) - strlen(xpath),
1264 "[%s='%s']", slist->keys[i]->name,
1265 list_keys.key[i]);
1266 }
1267 } else {
1268 /*
1269 * Keyless list - build XPath using a positional index.
1270 */
1271 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1272 position);
1273 position++;
1274 }
1275
1276 /* Iterate over the child nodes. */
1277 ret = nb_oper_data_iter_children(
1278 nb_node->snode, xpath, list_entry, &list_keys,
1279 translator, false, flags, cb, arg);
1280 if (ret != NB_OK)
1281 return ret;
1282 } while (list_entry);
1283
1284 return NB_OK;
1285}
1286
1287static int nb_oper_data_iter_node(const struct lys_node *snode,
1288 const char *xpath_parent,
1289 const void *list_entry,
1290 const struct yang_list_keys *list_keys,
1291 struct yang_translator *translator,
1292 bool first, uint32_t flags,
1293 nb_oper_data_cb cb, void *arg)
1294{
1295 struct nb_node *nb_node;
1296 char xpath[XPATH_MAXLEN];
1297 int ret = NB_OK;
1298
1299 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1300 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1301 return NB_OK;
1302
1303 /* Update XPath. */
1304 strlcpy(xpath, xpath_parent, sizeof(xpath));
1305 if (!first && snode->nodetype != LYS_USES) {
1306 struct lys_node *parent;
1307
1308 /* Get the real parent. */
1309 parent = snode->parent;
1310 while (parent && parent->nodetype == LYS_USES)
1311 parent = parent->parent;
1312
1313 /*
1314 * When necessary, include the namespace of the augmenting
1315 * module.
1316 */
1317 if (parent && parent->nodetype == LYS_AUGMENT)
1318 snprintf(xpath + strlen(xpath),
1319 sizeof(xpath) - strlen(xpath), "/%s:%s",
1320 snode->module->name, snode->name);
1321 else
1322 snprintf(xpath + strlen(xpath),
1323 sizeof(xpath) - strlen(xpath), "/%s",
1324 snode->name);
1325 }
1326
1327 nb_node = snode->priv;
1328 switch (snode->nodetype) {
1329 case LYS_CONTAINER:
1330 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1331 list_keys, translator, flags,
1332 cb, arg);
1333 break;
1334 case LYS_LEAF:
1335 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1336 list_keys, translator, flags, cb,
1337 arg);
1338 break;
1339 case LYS_LEAFLIST:
1340 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1341 list_keys, translator, flags,
1342 cb, arg);
1343 break;
1344 case LYS_LIST:
1345 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1346 list_keys, translator, flags, cb,
1347 arg);
1348 break;
1349 case LYS_USES:
1350 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1351 list_keys, translator, false,
1352 flags, cb, arg);
1353 break;
1354 default:
1355 break;
1356 }
1357
1358 return ret;
1359}
1360
1361int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1362 uint32_t flags, nb_oper_data_cb cb, void *arg)
1363{
1364 struct nb_node *nb_node;
1365 const void *list_entry = NULL;
1366 struct yang_list_keys list_keys;
1367 struct list *list_dnodes;
1368 struct lyd_node *dnode, *dn;
1369 struct listnode *ln;
1370 int ret;
1371
1372 nb_node = nb_node_find(xpath);
1373 if (!nb_node) {
1374 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1375 "%s: unknown data path: %s", __func__, xpath);
1376 return NB_ERR;
1377 }
1378
1379 /* For now this function works only with containers and lists. */
1380 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1381 flog_warn(
1382 EC_LIB_NB_OPERATIONAL_DATA,
1383 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1384 __func__, xpath);
1385 return NB_ERR;
1386 }
1387
1388 /*
1389 * Create a data tree from the XPath so that we can parse the keys of
1390 * all YANG lists (if any).
1391 */
1392 ly_errno = 0;
1393 dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
1394 LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
1395 if (!dnode) {
1396 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
1397 __func__);
1398 return NB_ERR;
1399 }
1400
1401 /*
1402 * Create a linked list to sort the data nodes starting from the root.
1403 */
1404 list_dnodes = list_new();
1405 for (dn = dnode; dn; dn = dn->parent) {
1406 if (dn->schema->nodetype != LYS_LIST || !dn->child)
1407 continue;
1408 listnode_add_head(list_dnodes, dn);
1409 }
1410 /*
1411 * Use the northbound callbacks to find the list entry pointer corresponding
1412 * to the given XPath.
1413 */
1414 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1415 struct lyd_node *child;
1416 struct nb_node *nn;
1417 unsigned int n = 0;
1418
1419 /* Obtain the list entry keys. */
1420 memset(&list_keys, 0, sizeof(list_keys));
1421 LY_TREE_FOR (dn->child, child) {
1422 if (!lys_is_key((struct lys_node_leaf *)child->schema,
1423 NULL))
1424 continue;
1425 strlcpy(list_keys.key[n],
1426 yang_dnode_get_string(child, NULL),
1427 sizeof(list_keys.key[n]));
1428 n++;
1429 }
1430 list_keys.num = n;
1431 if (list_keys.num
1432 != ((struct lys_node_list *)dn->schema)->keys_size) {
1433 list_delete(&list_dnodes);
1434 yang_dnode_free(dnode);
1435 return NB_ERR_NOT_FOUND;
1436 }
1437
1438 /* Find the list entry pointer. */
1439 nn = dn->schema->priv;
1440 list_entry =
1441 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1442 if (list_entry == NULL) {
1443 list_delete(&list_dnodes);
1444 yang_dnode_free(dnode);
1445 return NB_ERR_NOT_FOUND;
1446 }
1447 }
1448
1449 /* If a list entry was given, iterate over that list entry only. */
1450 if (dnode->schema->nodetype == LYS_LIST && dnode->child)
1451 ret = nb_oper_data_iter_children(
1452 nb_node->snode, xpath, list_entry, &list_keys,
1453 translator, true, flags, cb, arg);
1454 else
1455 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1456 &list_keys, translator, true,
1457 flags, cb, arg);
1458
1459 list_delete(&list_dnodes);
1460 yang_dnode_free(dnode);
1461
1462 return ret;
1463}
1464
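/*
 * Illustrative sketch (hypothetical code): walking operational data with
 * nb_oper_data_iterate(). The callback receives one struct yang_data per
 * element and is responsible for releasing it; the XPath in the example
 * call is invented.
 */
static int example_oper_data_cb(const struct lys_node *snode,
				struct yang_translator *translator,
				struct yang_data *data, void *arg)
{
	zlog_debug("%s: %s = %s", __func__, data->xpath,
		   data->value ? data->value : "");
	yang_data_free(data);

	return NB_OK;
}

/*
 * Example call (hypothetical path):
 *	nb_oper_data_iterate("/frr-example:example/neighbor", NULL, 0,
 *			     example_oper_data_cb, NULL);
 */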
1465bool nb_operation_is_valid(enum nb_operation operation,
1466 const struct lys_node *snode)
1467{
1468 struct nb_node *nb_node = snode->priv;
1469 struct lys_node_container *scontainer;
1470 struct lys_node_leaf *sleaf;
1471
1472 switch (operation) {
1473 case NB_OP_CREATE:
1474 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1475 return false;
1476
1477 switch (snode->nodetype) {
1478 case LYS_LEAF:
1479 sleaf = (struct lys_node_leaf *)snode;
1480 if (sleaf->type.base != LY_TYPE_EMPTY)
1481 return false;
1482 break;
1483 case LYS_CONTAINER:
1484 scontainer = (struct lys_node_container *)snode;
1485 if (!scontainer->presence)
1486 return false;
1487 break;
1488 case LYS_LIST:
1489 case LYS_LEAFLIST:
1490 break;
1491 default:
1492 return false;
1493 }
1494 return true;
1495 case NB_OP_MODIFY:
1496 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1497 return false;
1498
1499 switch (snode->nodetype) {
1500 case LYS_LEAF:
1501 sleaf = (struct lys_node_leaf *)snode;
1502 if (sleaf->type.base == LY_TYPE_EMPTY)
1503 return false;
1504
1505 /* List keys can't be modified. */
1506 if (lys_is_key(sleaf, NULL))
1507 return false;
1508 break;
1509 default:
1510 return false;
1511 }
1512 return true;
1513 case NB_OP_DESTROY:
1514 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1515 return false;
1516
1517 switch (snode->nodetype) {
1518 case LYS_LEAF:
1519 sleaf = (struct lys_node_leaf *)snode;
1520
1521 /* List keys can't be deleted. */
1522 if (lys_is_key(sleaf, NULL))
1523 return false;
1524
1525 /*
1526 * Only optional leafs can be deleted, or leafs whose
1527 * parent is a case statement.
1528 */
1529 if (snode->parent->nodetype == LYS_CASE)
1530 return true;
1531 if (sleaf->when)
1532 return true;
1533 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
1534 || sleaf->dflt)
1535 return false;
1536 break;
1537 case LYS_CONTAINER:
1538 scontainer = (struct lys_node_container *)snode;
1539 if (!scontainer->presence)
1540 return false;
1541 break;
1542 case LYS_LIST:
1543 case LYS_LEAFLIST:
1544 break;
1545 default:
1546 return false;
1547 }
1548 return true;
1549 case NB_OP_MOVE:
1550 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1551 return false;
1552
1553 switch (snode->nodetype) {
1554 case LYS_LIST:
1555 case LYS_LEAFLIST:
1556 if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
1557 return false;
1558 break;
1559 default:
1560 return false;
1561 }
1562 return true;
1563 case NB_OP_PRE_VALIDATE:
1564 case NB_OP_APPLY_FINISH:
1565 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1566 return false;
1567 return true;
1568 case NB_OP_GET_ELEM:
1569 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1570 return false;
1571
1572 switch (snode->nodetype) {
1573 case LYS_LEAF:
1574 case LYS_LEAFLIST:
1575 break;
1576 case LYS_CONTAINER:
1577 scontainer = (struct lys_node_container *)snode;
1578 if (!scontainer->presence)
1579 return false;
1580 break;
1581 default:
1582 return false;
1583 }
1584 return true;
1585 case NB_OP_GET_NEXT:
1586 switch (snode->nodetype) {
1587 case LYS_LIST:
1588 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1589 return false;
1590 break;
1591 case LYS_LEAFLIST:
1592 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1593 return false;
1594 break;
1595 default:
1596 return false;
1597 }
1598 return true;
1599 case NB_OP_GET_KEYS:
1600 case NB_OP_LOOKUP_ENTRY:
1601 switch (snode->nodetype) {
1602 case LYS_LIST:
1603 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1604 return false;
1605 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
1606 return false;
1607 break;
1608 default:
1609 return false;
1610 }
1611 return true;
1612 case NB_OP_RPC:
1613 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1614 return false;
1615
1616 switch (snode->nodetype) {
1617 case LYS_RPC:
1618 case LYS_ACTION:
1619 break;
1620 default:
1621 return false;
1622 }
1623 return true;
1624 default:
1625 return false;
1626 }
1627}
1628
1629DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1630 (xpath, arguments));
1631
1632int nb_notification_send(const char *xpath, struct list *arguments)
1633{
1634 int ret;
1635
1636 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1637
1638 ret = hook_call(nb_notification_send, xpath, arguments);
1639 if (arguments)
1640 list_delete(&arguments);
1641
1642 return ret;
1643}
1644
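/*
 * Illustrative sketch (hypothetical daemon code): emitting a YANG
 * notification through nb_notification_send(). The notification path and
 * its argument leaf are invented; yang_data_list_new() comes from lib/yang.c.
 * Note that nb_notification_send() frees the argument list.
 */
static void example_send_notification(const char *reason)
{
	const char *xpath = "/frr-example:example-event"; /* hypothetical */
	struct list *arguments = yang_data_list_new();
	char xpath_arg[XPATH_MAXLEN];

	snprintf(xpath_arg, sizeof(xpath_arg), "%s/reason", xpath);
	listnode_add(arguments, yang_data_new_string(xpath_arg, reason));

	nb_notification_send(xpath, arguments);
}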
1645/* Running configuration user pointers management. */
1646struct nb_config_entry {
1647 char xpath[XPATH_MAXLEN];
1648 void *entry;
1649};
1650
1651static bool running_config_entry_cmp(const void *value1, const void *value2)
1652{
1653 const struct nb_config_entry *c1 = value1;
1654 const struct nb_config_entry *c2 = value2;
1655
1656 return strmatch(c1->xpath, c2->xpath);
1657}
1658
1659static unsigned int running_config_entry_key_make(const void *value)
1660{
1661 return string_hash_make(value);
1662}
1663
1664static void *running_config_entry_alloc(void *p)
1665{
1666 struct nb_config_entry *new, *key = p;
1667
1668 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1669 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1670
1671 return new;
1672}
1673
1674static void running_config_entry_free(void *arg)
1675{
1676 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
1677}
1678
1679void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1680{
1681 struct nb_config_entry *config, s;
1682
1683 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1684 config = hash_get(running_config_entries, &s,
1685 running_config_entry_alloc);
1686 config->entry = entry;
1687}
1688
1689static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
1690{
1691 struct nb_config_entry *config, s;
1692 struct lyd_node *child;
1693 void *entry = NULL;
1694
1695 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1696 config = hash_release(running_config_entries, &s);
1697 if (config) {
1698 entry = config->entry;
1699 running_config_entry_free(config);
1700 }
1701
1702 /* Unset user pointers from the child nodes. */
1703 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
1704 LY_TREE_FOR (dnode->child, child) {
1705 (void)nb_running_unset_entry_helper(child);
1706 }
1707 }
1708
1709 return entry;
1710}
1711
1712void *nb_running_unset_entry(const struct lyd_node *dnode)
1713{
1714 void *entry;
1715
1716 entry = nb_running_unset_entry_helper(dnode);
1717 assert(entry);
1718
1719 return entry;
1720}
1721
1722void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1723 bool abort_if_not_found)
1724{
1725 const struct lyd_node *orig_dnode = dnode;
1726 char xpath_buf[XPATH_MAXLEN];
1727
1728 assert(dnode || xpath);
1729
1730 if (!dnode)
1731 dnode = yang_dnode_get(running_config->dnode, xpath);
1732
1733 while (dnode) {
1734 struct nb_config_entry *config, s;
1735
1736 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1737 config = hash_lookup(running_config_entries, &s);
1738 if (config)
1739 return config->entry;
1740
1741 dnode = dnode->parent;
1742 }
1743
1744 if (!abort_if_not_found)
1745 return NULL;
1746
1747 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1748 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1749 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1750 zlog_backtrace(LOG_ERR);
1751 abort();
1752}
1753
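/*
 * Illustrative sketch (hypothetical daemon code): how the user-pointer
 * helpers above are typically paired inside create/destroy callbacks. The
 * instance structure is invented; a real daemon would use its own memory
 * type instead of MTYPE_TMP.
 */
struct example_instance {
	int placeholder;
};

static int example_instance_create(enum nb_event event,
				   const struct lyd_node *dnode,
				   union nb_resource *resource)
{
	struct example_instance *inst;

	if (event != NB_EV_APPLY)
		return NB_OK;

	inst = XCALLOC(MTYPE_TMP, sizeof(*inst));
	nb_running_set_entry(dnode, inst);

	return NB_OK;
}

static int example_instance_destroy(enum nb_event event,
				    const struct lyd_node *dnode)
{
	struct example_instance *inst;

	if (event != NB_EV_APPLY)
		return NB_OK;

	inst = nb_running_unset_entry(dnode);
	XFREE(MTYPE_TMP, inst);

	return NB_OK;
}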
1754/* Logging functions. */
1755const char *nb_event_name(enum nb_event event)
1756{
1757 switch (event) {
1758 case NB_EV_VALIDATE:
1759 return "validate";
1760 case NB_EV_PREPARE:
1761 return "prepare";
1762 case NB_EV_ABORT:
1763 return "abort";
1764 case NB_EV_APPLY:
1765 return "apply";
1766 default:
1767 return "unknown";
1768 }
1769}
1770
1771const char *nb_operation_name(enum nb_operation operation)
1772{
1773 switch (operation) {
1774 case NB_OP_CREATE:
1775 return "create";
1776 case NB_OP_MODIFY:
1777 return "modify";
1778 case NB_OP_DESTROY:
1779 return "destroy";
1780 case NB_OP_MOVE:
1781 return "move";
1782 case NB_OP_PRE_VALIDATE:
1783 return "pre_validate";
1784 case NB_OP_APPLY_FINISH:
1785 return "apply_finish";
1786 case NB_OP_GET_ELEM:
1787 return "get_elem";
1788 case NB_OP_GET_NEXT:
1789 return "get_next";
1790 case NB_OP_GET_KEYS:
1791 return "get_keys";
1792 case NB_OP_LOOKUP_ENTRY:
1793 return "lookup_entry";
1794 case NB_OP_RPC:
1795 return "rpc";
1796 default:
1797 return "unknown";
1798 }
1799}
1800
1801const char *nb_err_name(enum nb_error error)
1802{
1803 switch (error) {
1804 case NB_OK:
1805 return "ok";
1806 case NB_ERR:
1807 return "generic error";
1808 case NB_ERR_NO_CHANGES:
1809 return "no changes";
1810 case NB_ERR_NOT_FOUND:
1811 return "element not found";
1812 case NB_ERR_LOCKED:
1813 return "resource is locked";
1814 case NB_ERR_VALIDATION:
1815 return "validation error";
1816 case NB_ERR_RESOURCE:
1817 return "failed to allocate resource";
1818 case NB_ERR_INCONSISTENCY:
1819 return "internal inconsistency";
1820 default:
1821 return "unknown";
1822 }
1823}
1824
1825const char *nb_client_name(enum nb_client client)
1826{
1827 switch (client) {
1828 case NB_CLIENT_CLI:
1829 return "CLI";
1830 case NB_CLIENT_CONFD:
1831 return "ConfD";
1832 case NB_CLIENT_SYSREPO:
1833 return "Sysrepo";
1834 case NB_CLIENT_GRPC:
1835 return "gRPC";
1836 default:
1837 return "unknown";
1838 }
1839}
1840
1841static void nb_load_callbacks(const struct frr_yang_module_info *module)
1842{
1843 for (size_t i = 0; module->nodes[i].xpath; i++) {
1844 struct nb_node *nb_node;
1845 uint32_t priority;
1846
1847 nb_node = nb_node_find(module->nodes[i].xpath);
1848 if (!nb_node) {
1849 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1850 "%s: unknown data path: %s", __func__,
1851 module->nodes[i].xpath);
1852 continue;
1853 }
1854
1855 nb_node->cbs = module->nodes[i].cbs;
1856 priority = module->nodes[i].priority;
1857 if (priority != 0)
1858 nb_node->priority = priority;
1859 }
1860}
1861
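/*
 * Illustrative sketch (hypothetical daemon code): the frr_yang_module_info
 * table that nb_load_callbacks() consumes and that daemons pass to nb_init().
 * The module name, XPath and callbacks are invented (the callbacks refer to
 * the sketches earlier in this listing); the array is terminated by a NULL
 * xpath.
 */
static const struct frr_yang_module_info frr_example_info = {
	.name = "frr-example",
	.nodes = {
		{
			.xpath = "/frr-example:example/instance",
			.cbs = {
				.create = example_instance_create,
				.destroy = example_instance_destroy,
			},
		},
		{
			.xpath = NULL,
		},
	}
};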
1862void nb_init(struct thread_master *tm,
1863 const struct frr_yang_module_info *modules[], size_t nmodules)
1864{
1865 unsigned int errors = 0;
1866
1867 /* Load YANG modules. */
1868 for (size_t i = 0; i < nmodules; i++)
1869 yang_module_load(modules[i]->name);
1870
1871 /* Create a nb_node for all YANG schema nodes. */
1872 nb_nodes_create();
1873
1874 /* Load northbound callbacks. */
1875 for (size_t i = 0; i < nmodules; i++)
1876 nb_load_callbacks(modules[i]);
1877
1878 /* Validate northbound callbacks. */
1879 yang_snodes_iterate_all(nb_node_validate, 0, &errors);
1880 if (errors > 0) {
1881 flog_err(
1882 EC_LIB_NB_CBS_VALIDATION,
1883 "%s: failed to validate northbound callbacks: %u error(s)",
1884 __func__, errors);
1885 exit(1);
1886 }
1887
1888 /* Create an empty running configuration. */
1889 running_config = nb_config_new(NULL);
1890 running_config_entries = hash_create(running_config_entry_key_make,
1891 running_config_entry_cmp,
1892 "Running Configuration Entries");
1893 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
1894
1895 /* Initialize the northbound CLI. */
1896 nb_cli_init(tm);
1897}
1898
1899void nb_terminate(void)
1900{
1901 /* Terminate the northbound CLI. */
1902 nb_cli_terminate();
1903
1904 /* Delete all nb_node's from all YANG modules. */
1905 nb_nodes_delete();
1906
1907 /* Delete the running configuration. */
1908 hash_clean(running_config_entries, running_config_entry_free);
1909 hash_free(running_config_entries);
1910 nb_config_free(running_config);
1911 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
1912}