1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
33
34 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
35 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
36 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
37
38 /* Running configuration - shouldn't be modified directly. */
39 struct nb_config *running_config;
40
41 /* Hash table of user pointers associated with configuration entries. */
42 static struct hash *running_config_entries;
43
44 /* Management lock for the running configuration. */
45 static struct {
46 /* Mutex protecting this structure. */
47 pthread_mutex_t mtx;
48
49 /* Actual lock. */
50 bool locked;
51
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client;
54
55 /* Northbound user who owns this lock. */
56 const void *owner_user;
57 } running_config_mgmt_lock;
58
59 /*
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
62 */
63 static bool transaction_in_progress;
64
65 static int nb_callback_configuration(const enum nb_event event,
66 struct nb_config_change *change);
67 static void nb_log_callback(const enum nb_event event,
68 enum nb_operation operation, const char *xpath,
69 const char *value);
70 static struct nb_transaction *nb_transaction_new(struct nb_config *config,
71 struct nb_config_cbs *changes,
72 enum nb_client client,
73 const void *user,
74 const char *comment);
75 static void nb_transaction_free(struct nb_transaction *transaction);
76 static int nb_transaction_process(enum nb_event event,
77 struct nb_transaction *transaction);
78 static void nb_transaction_apply_finish(struct nb_transaction *transaction);
79 static int nb_oper_data_iter_node(const struct lys_node *snode,
80 const char *xpath, const void *list_entry,
81 const struct yang_list_keys *list_keys,
82 struct yang_translator *translator,
83 bool first, uint32_t flags,
84 nb_oper_data_cb cb, void *arg);
85
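/*
 * yang_snodes_iterate_subtree() callback: if the given schema node is
 * read-only (operational data), mark the subtree being inspected as not
 * config-only and stop the iteration.
 */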
86 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
87 {
88 bool *config_only = arg;
89
90 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
91 *config_only = false;
92 return YANG_ITER_STOP;
93 }
94
95 return YANG_ITER_CONTINUE;
96 }
97
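/*
 * Schema node iteration callback: allocate and initialize a northbound node
 * for the given libyang schema node (xpath, default priority, parent
 * pointers and flags).
 */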
98 static int nb_node_new_cb(const struct lys_node *snode, void *arg)
99 {
100 struct nb_node *nb_node;
101 struct lys_node *sparent, *sparent_list;
102
103 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
104 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
105 sizeof(nb_node->xpath));
106 nb_node->priority = NB_DFLT_PRIORITY;
107 sparent = yang_snode_real_parent(snode);
108 if (sparent)
109 nb_node->parent = sparent->priv;
110 sparent_list = yang_snode_parent_list(snode);
111 if (sparent_list)
112 nb_node->parent_list = sparent_list->priv;
113
114 /* Set flags. */
115 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
116 bool config_only = true;
117
118 yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
119 YANG_ITER_ALLOW_AUGMENTATIONS,
120 &config_only);
121 if (config_only)
122 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
123 }
124 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
125 struct lys_node_list *slist;
126
127 slist = (struct lys_node_list *)snode;
128 if (slist->keys_size == 0)
129 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
130 }
131
132 /*
133 * Link the northbound node and the libyang schema node with one
134 * another.
135 */
136 nb_node->snode = snode;
137 lys_set_private(snode, nb_node);
138
139 return YANG_ITER_CONTINUE;
140 }
141
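/* Schema node iteration callback: free the associated northbound node. */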
142 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
143 {
144 struct nb_node *nb_node;
145
146 nb_node = snode->priv;
147 lys_set_private(snode, NULL);
148 XFREE(MTYPE_NB_NODE, nb_node);
149
150 return YANG_ITER_CONTINUE;
151 }
152
153 void nb_nodes_create(void)
154 {
155 yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
156 }
157
158 void nb_nodes_delete(void)
159 {
160 yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
161 }
162
163 struct nb_node *nb_node_find(const char *xpath)
164 {
165 const struct lys_node *snode;
166
167 /*
168 * Use libyang to find the schema node associated with the xpath and get
169 * the northbound node from there (stored in the snode's private pointer).
170 */
171 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
172 if (!snode)
173 return NULL;
174
175 return snode->priv;
176 }
177
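/*
 * Validate a single callback: warn when a callback is implemented but not
 * needed, and return an error when a mandatory callback is missing.
 */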
178 static int nb_node_validate_cb(const struct nb_node *nb_node,
179 enum nb_operation operation,
180 int callback_implemented, bool optional)
181 {
182 bool valid;
183
184 valid = nb_operation_is_valid(operation, nb_node->snode);
185
186 if (!valid && callback_implemented)
187 flog_warn(EC_LIB_NB_CB_UNNEEDED,
188 "unneeded '%s' callback for '%s'",
189 nb_operation_name(operation), nb_node->xpath);
190
191 if (!optional && valid && !callback_implemented) {
192 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
193 nb_operation_name(operation), nb_node->xpath);
194 return 1;
195 }
196
197 return 0;
198 }
199
200 /*
201 * Check if the required callbacks were implemented for the given northbound
202 * node.
203 */
204 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
206 {
207 unsigned int error = 0;
208
209 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
210 !!nb_node->cbs.create, false);
211 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
212 !!nb_node->cbs.modify, false);
213 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
214 !!nb_node->cbs.destroy, false);
215 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
216 false);
217 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
218 !!nb_node->cbs.pre_validate, true);
219 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
220 !!nb_node->cbs.apply_finish, true);
221 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
222 !!nb_node->cbs.get_elem, false);
223 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
224 !!nb_node->cbs.get_next, false);
225 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
226 !!nb_node->cbs.get_keys, false);
227 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
228 !!nb_node->cbs.lookup_entry, false);
229 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
230 false);
231
232 return error;
233 }
234
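/* Ensure that a node doesn't have a higher priority than its parent. */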
235 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
236 {
237 /* Top-level nodes can have any priority. */
238 if (!nb_node->parent)
239 return 0;
240
241 if (nb_node->priority < nb_node->parent->priority) {
242 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
243 "node has higher priority than its parent [xpath %s]",
244 nb_node->xpath);
245 return 1;
246 }
247
248 return 0;
249 }
250
251 static int nb_node_validate(const struct lys_node *snode, void *arg)
252 {
253 struct nb_node *nb_node = snode->priv;
254 unsigned int *errors = arg;
255
256 /* Validate callbacks and priority. */
257 *errors += nb_node_validate_cbs(nb_node);
258 *errors += nb_node_validate_priority(nb_node);
259
260 return YANG_ITER_CONTINUE;
261 }
262
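/*
 * Create a new northbound configuration. When 'dnode' is NULL, an empty
 * libyang data tree is allocated.
 */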
263 struct nb_config *nb_config_new(struct lyd_node *dnode)
264 {
265 struct nb_config *config;
266
267 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
268 if (dnode)
269 config->dnode = dnode;
270 else
271 config->dnode = yang_dnode_new(ly_native_ctx, true);
272 config->version = 0;
273
274 return config;
275 }
276
277 void nb_config_free(struct nb_config *config)
278 {
279 if (config->dnode)
280 yang_dnode_free(config->dnode);
281 XFREE(MTYPE_NB_CONFIG, config);
282 }
283
284 struct nb_config *nb_config_dup(const struct nb_config *config)
285 {
286 struct nb_config *dup;
287
288 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
289 dup->dnode = yang_dnode_dup(config->dnode);
290 dup->version = config->version;
291
292 return dup;
293 }
294
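/*
 * Merge 'config_src' into 'config_dst'. The source configuration is freed
 * unless 'preserve_source' is true.
 */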
295 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
296 bool preserve_source)
297 {
298 int ret;
299
300 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
301 if (ret != 0)
302 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
303
304 if (!preserve_source)
305 nb_config_free(config_src);
306
307 return (ret == 0) ? NB_OK : NB_ERR;
308 }
309
310 void nb_config_replace(struct nb_config *config_dst,
311 struct nb_config *config_src, bool preserve_source)
312 {
313 /* Update version. */
314 if (config_src->version != 0)
315 config_dst->version = config_src->version;
316
317 /* Update dnode. */
318 if (config_dst->dnode)
319 yang_dnode_free(config_dst->dnode);
320 if (preserve_source) {
321 config_dst->dnode = yang_dnode_dup(config_src->dnode);
322 } else {
323 config_dst->dnode = config_src->dnode;
324 config_src->dnode = NULL;
325 nb_config_free(config_src);
326 }
327 }
328
329 /* Generate the nb_config_cbs tree. */
330 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
331 const struct nb_config_cb *b)
332 {
333 /* Sort by priority first. */
334 if (a->nb_node->priority < b->nb_node->priority)
335 return -1;
336 if (a->nb_node->priority > b->nb_node->priority)
337 return 1;
338
339 /*
340 * Use XPath as a tie-breaker. This will naturally sort parent nodes
341 * before their children.
342 */
343 return strcmp(a->xpath, b->xpath);
344 }
345 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
346
347 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
348 enum nb_operation operation,
349 const struct lyd_node *dnode)
350 {
351 struct nb_config_change *change;
352
353 change = XCALLOC(MTYPE_TMP, sizeof(*change));
354 change->cb.operation = operation;
355 change->cb.nb_node = dnode->schema->priv;
356 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
357 change->cb.dnode = dnode;
358
359 RB_INSERT(nb_config_cbs, changes, &change->cb);
360 }
361
362 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
363 {
364 while (!RB_EMPTY(nb_config_cbs, changes)) {
365 struct nb_config_change *change;
366
367 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
368 changes);
369 RB_REMOVE(nb_config_cbs, changes, &change->cb);
370 XFREE(MTYPE_TMP, change);
371 }
372 }
373
374 /*
375 * Helper function used when calculating the delta between two different
376 * configurations. Given a new subtree, calculate all new YANG data nodes,
377 * excluding default leafs and leaf-lists. This is a recursive function.
378 */
379 static void nb_config_diff_created(const struct lyd_node *dnode,
380 struct nb_config_cbs *changes)
381 {
382 enum nb_operation operation;
383 struct lyd_node *child;
384
385 switch (dnode->schema->nodetype) {
386 case LYS_LEAF:
387 case LYS_LEAFLIST:
388 if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
389 break;
390
391 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
392 operation = NB_OP_CREATE;
393 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
394 operation = NB_OP_MODIFY;
395 else
396 return;
397
398 nb_config_diff_add_change(changes, operation, dnode);
399 break;
400 case LYS_CONTAINER:
401 case LYS_LIST:
402 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
403 nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);
404
405 /* Process child nodes recursively. */
406 LY_TREE_FOR (dnode->child, child) {
407 nb_config_diff_created(child, changes);
408 }
409 break;
410 default:
411 break;
412 }
413 }
414
415 static void nb_config_diff_deleted(const struct lyd_node *dnode,
416 struct nb_config_cbs *changes)
417 {
418 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
419 nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
420 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
421 struct lyd_node *child;
422
423 /*
424 * Non-presence containers need special handling since they
425 * don't have "destroy" callbacks of their own. In this case,
426 * call the "destroy" callbacks of their child nodes where
427 * applicable (i.e. optional nodes).
428 */
429 LY_TREE_FOR (dnode->child, child) {
430 nb_config_diff_deleted(child, changes);
431 }
432 }
433 }
434
435 /* Calculate the delta between two different configurations. */
436 static void nb_config_diff(const struct nb_config *config1,
437 const struct nb_config *config2,
438 struct nb_config_cbs *changes)
439 {
440 struct lyd_difflist *diff;
441
442 diff = lyd_diff(config1->dnode, config2->dnode,
443 LYD_DIFFOPT_WITHDEFAULTS);
444 assert(diff);
445
446 for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
447 LYD_DIFFTYPE type;
448 struct lyd_node *dnode;
449
450 type = diff->type[i];
451
452 switch (type) {
453 case LYD_DIFF_CREATED:
454 dnode = diff->second[i];
455 nb_config_diff_created(dnode, changes);
456 break;
457 case LYD_DIFF_DELETED:
458 dnode = diff->first[i];
459 nb_config_diff_deleted(dnode, changes);
460 break;
461 case LYD_DIFF_CHANGED:
462 dnode = diff->second[i];
463 nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
464 break;
465 case LYD_DIFF_MOVEDAFTER1:
466 case LYD_DIFF_MOVEDAFTER2:
467 default:
468 continue;
469 }
470 }
471
472 lyd_free_diff(diff);
473 }
474
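/*
 * Edit a candidate configuration: perform a single create, modify, destroy
 * or move operation on the data node identified by 'xpath'.
 */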
475 int nb_candidate_edit(struct nb_config *candidate,
476 const struct nb_node *nb_node,
477 enum nb_operation operation, const char *xpath,
478 const struct yang_data *previous,
479 const struct yang_data *data)
480 {
481 struct lyd_node *dnode;
482 char xpath_edit[XPATH_MAXLEN];
483
484 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
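/*
 * Illustrative example: if 'xpath' is "/example:neighbors/neighbor"
 * (a hypothetical leaf-list) and the value is "192.0.2.1", the edit
 * path becomes "/example:neighbors/neighbor[.='192.0.2.1']".
 */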
485 if (nb_node->snode->nodetype == LYS_LEAFLIST)
486 snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
487 data->value);
488 else
489 strlcpy(xpath_edit, xpath, sizeof(xpath_edit));
490
491 switch (operation) {
492 case NB_OP_CREATE:
493 case NB_OP_MODIFY:
494 ly_errno = 0;
495 dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
496 xpath_edit, (void *)data->value, 0,
497 LYD_PATH_OPT_UPDATE);
498 if (!dnode && ly_errno) {
499 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
500 __func__);
501 return NB_ERR;
502 }
503
504 /*
505 * If a new node was created, call lyd_validate() only to create
506 * default child nodes.
507 */
508 if (dnode) {
509 lyd_schema_sort(dnode, 0);
510 lyd_validate(&dnode,
511 LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
512 ly_native_ctx);
513 }
514 break;
515 case NB_OP_DESTROY:
516 dnode = yang_dnode_get(candidate->dnode, xpath_edit);
517 if (!dnode)
518 /*
519 * Return a special error code so the caller can choose
520 * whether to ignore it or not.
521 */
522 return NB_ERR_NOT_FOUND;
523 lyd_free(dnode);
524 break;
525 case NB_OP_MOVE:
526 /* TODO: update configuration. */
527 break;
528 default:
529 flog_warn(EC_LIB_DEVELOPMENT,
530 "%s: unknown operation (%u) [xpath %s]", __func__,
531 operation, xpath_edit);
532 return NB_ERR;
533 }
534
535 return NB_OK;
536 }
537
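/*
 * Check whether the candidate was derived from an older version of the
 * running configuration and hence needs to be updated before committing.
 */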
538 bool nb_candidate_needs_update(const struct nb_config *candidate)
539 {
540 if (candidate->version < running_config->version)
541 return true;
542
543 return false;
544 }
545
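/*
 * Update a candidate configuration by merging its changes on top of the
 * latest running configuration.
 */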
546 int nb_candidate_update(struct nb_config *candidate)
547 {
548 struct nb_config *updated_config;
549
550 updated_config = nb_config_dup(running_config);
551 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
552 return NB_ERR;
553
554 nb_config_replace(candidate, updated_config, false);
555
556 return NB_OK;
557 }
558
559 /*
560 * Perform YANG syntactic and semantic validation.
561 *
562 * WARNING: lyd_validate() can change the configuration as part of the
563 * validation process.
564 */
565 static int nb_candidate_validate_yang(struct nb_config *candidate)
566 {
567 if (lyd_validate(&candidate->dnode,
568 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
569 ly_native_ctx)
570 != 0)
571 return NB_ERR_VALIDATION;
572
573 return NB_OK;
574 }
575
576 /* Perform code-level validation using the northbound callbacks. */
577 static int nb_candidate_validate_code(struct nb_config *candidate,
578 struct nb_config_cbs *changes)
579 {
580 struct nb_config_cb *cb;
581 struct lyd_node *root, *next, *child;
582 int ret;
583
584 /* First validate the candidate as a whole. */
585 LY_TREE_FOR (candidate->dnode, root) {
586 LY_TREE_DFS_BEGIN (root, next, child) {
587 struct nb_node *nb_node;
588
589 nb_node = child->schema->priv;
590 if (!nb_node->cbs.pre_validate)
591 goto next;
592
593 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config,
594 DEBUG_MODE_ALL)) {
595 char xpath[XPATH_MAXLEN];
596
597 yang_dnode_get_path(child, xpath,
598 sizeof(xpath));
599 nb_log_callback(NB_EV_VALIDATE,
600 NB_OP_PRE_VALIDATE, xpath,
601 NULL);
602 }
603
604 ret = (*nb_node->cbs.pre_validate)(child);
605 if (ret != NB_OK)
606 return NB_ERR_VALIDATION;
607
608 next:
609 LY_TREE_DFS_END(root, next, child);
610 }
611 }
612
613 /* Now validate the configuration changes. */
614 RB_FOREACH (cb, nb_config_cbs, changes) {
615 struct nb_config_change *change = (struct nb_config_change *)cb;
616
617 ret = nb_callback_configuration(NB_EV_VALIDATE, change);
618 if (ret != NB_OK)
619 return NB_ERR_VALIDATION;
620 }
621
622 return NB_OK;
623 }
624
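/* Perform both YANG-level and code-level validation of a candidate. */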
625 int nb_candidate_validate(struct nb_config *candidate)
626 {
627 struct nb_config_cbs changes;
628 int ret;
629
630 if (nb_candidate_validate_yang(candidate) != NB_OK)
631 return NB_ERR_VALIDATION;
632
633 RB_INIT(nb_config_cbs, &changes);
634 nb_config_diff(running_config, candidate, &changes);
635 ret = nb_candidate_validate_code(candidate, &changes);
636 nb_config_diff_del_changes(&changes);
637
638 return ret;
639 }
640
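/*
 * First phase of a commit: validate the candidate, compute the changes
 * relative to the running configuration and run the 'prepare' callbacks.
 * On success, '*transaction' points to the newly created transaction.
 */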
641 int nb_candidate_commit_prepare(struct nb_config *candidate,
642 enum nb_client client, const void *user,
643 const char *comment,
644 struct nb_transaction **transaction)
645 {
646 struct nb_config_cbs changes;
647
648 if (nb_candidate_validate_yang(candidate) != NB_OK) {
649 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
650 "%s: failed to validate candidate configuration",
651 __func__);
652 return NB_ERR_VALIDATION;
653 }
654
655 RB_INIT(nb_config_cbs, &changes);
656 nb_config_diff(running_config, candidate, &changes);
657 if (RB_EMPTY(nb_config_cbs, &changes))
658 return NB_ERR_NO_CHANGES;
659
660 if (nb_candidate_validate_code(candidate, &changes) != NB_OK) {
661 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
662 "%s: failed to validate candidate configuration",
663 __func__);
664 nb_config_diff_del_changes(&changes);
665 return NB_ERR_VALIDATION;
666 }
667
668 *transaction =
669 nb_transaction_new(candidate, &changes, client, user, comment);
670 if (*transaction == NULL) {
671 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
672 "%s: failed to create transaction", __func__);
673 nb_config_diff_del_changes(&changes);
674 return NB_ERR_LOCKED;
675 }
676
677 return nb_transaction_process(NB_EV_PREPARE, *transaction);
678 }
679
680 void nb_candidate_commit_abort(struct nb_transaction *transaction)
681 {
682 (void)nb_transaction_process(NB_EV_ABORT, transaction);
683 nb_transaction_free(transaction);
684 }
685
686 void nb_candidate_commit_apply(struct nb_transaction *transaction,
687 bool save_transaction, uint32_t *transaction_id)
688 {
689 (void)nb_transaction_process(NB_EV_APPLY, transaction);
690 nb_transaction_apply_finish(transaction);
691
692 /* Replace running by candidate. */
693 transaction->config->version++;
694 nb_config_replace(running_config, transaction->config, true);
695
696 /* Record transaction. */
697 if (save_transaction
698 && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
699 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
700 "%s: failed to record transaction", __func__);
701
702 nb_transaction_free(transaction);
703 }
704
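/*
 * Commit a candidate configuration in a single step: run the 'prepare'
 * phase and then either apply or abort the transaction depending on the
 * result.
 *
 * Typical usage (illustrative sketch only, error handling omitted):
 *
 *     struct nb_config *candidate = nb_config_dup(running_config);
 *
 *     nb_candidate_edit(candidate, nb_node, NB_OP_MODIFY, xpath, NULL, data);
 *     nb_candidate_commit(candidate, NB_CLIENT_CLI, NULL, true,
 *                         "example commit", NULL);
 *     nb_config_free(candidate);
 */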
705 int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
706 const void *user, bool save_transaction,
707 const char *comment, uint32_t *transaction_id)
708 {
709 struct nb_transaction *transaction = NULL;
710 int ret;
711
712 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
713 &transaction);
714 /*
715 * Apply the changes if the preparation phase succeeded. Otherwise abort
716 * the transaction.
717 */
718 if (ret == NB_OK)
719 nb_candidate_commit_apply(transaction, save_transaction,
720 transaction_id);
721 else if (transaction != NULL)
722 nb_candidate_commit_abort(transaction);
723
724 return ret;
725 }
726
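/*
 * Try to acquire the management lock of the running configuration on behalf
 * of the given client/user. Returns 0 on success and -1 if the lock is
 * already held.
 */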
727 int nb_running_lock(enum nb_client client, const void *user)
728 {
729 int ret = -1;
730
731 frr_with_mutex(&running_config_mgmt_lock.mtx) {
732 if (!running_config_mgmt_lock.locked) {
733 running_config_mgmt_lock.locked = true;
734 running_config_mgmt_lock.owner_client = client;
735 running_config_mgmt_lock.owner_user = user;
736 ret = 0;
737 }
738 }
739
740 return ret;
741 }
742
743 int nb_running_unlock(enum nb_client client, const void *user)
744 {
745 int ret = -1;
746
747 frr_with_mutex(&running_config_mgmt_lock.mtx) {
748 if (running_config_mgmt_lock.locked
749 && running_config_mgmt_lock.owner_client == client
750 && running_config_mgmt_lock.owner_user == user) {
751 running_config_mgmt_lock.locked = false;
752 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
753 running_config_mgmt_lock.owner_user = NULL;
754 ret = 0;
755 }
756 }
757
758 return ret;
759 }
760
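/*
 * Check whether the given client/user is allowed to modify the running
 * configuration (i.e. the lock is free or owned by this client/user).
 */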
761 int nb_running_lock_check(enum nb_client client, const void *user)
762 {
763 int ret = -1;
764
765 frr_with_mutex(&running_config_mgmt_lock.mtx) {
766 if (!running_config_mgmt_lock.locked
767 || (running_config_mgmt_lock.owner_client == client
768 && running_config_mgmt_lock.owner_user == user))
769 ret = 0;
770 }
771
772 return ret;
773 }
774
775 static void nb_log_callback(const enum nb_event event,
776 enum nb_operation operation, const char *xpath,
777 const char *value)
778 {
779 zlog_debug(
780 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
781 nb_event_name(event), nb_operation_name(operation), xpath,
782 value ? value : "(NULL)");
783 }
784
785 /*
786 * Call the northbound configuration callback associated with a given
787 * configuration change.
788 */
789 static int nb_callback_configuration(const enum nb_event event,
790 struct nb_config_change *change)
791 {
792 enum nb_operation operation = change->cb.operation;
793 const char *xpath = change->cb.xpath;
794 const struct nb_node *nb_node = change->cb.nb_node;
795 const struct lyd_node *dnode = change->cb.dnode;
796 union nb_resource *resource;
797 int ret = NB_ERR;
798
799 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
800 const char *value = "(none)";
801
802 if (dnode && !yang_snode_is_typeless_data(dnode->schema))
803 value = yang_dnode_get_string(dnode, NULL);
804
805 nb_log_callback(event, operation, xpath, value);
806 }
807
808 if (event == NB_EV_VALIDATE)
809 resource = NULL;
810 else
811 resource = &change->resource;
812
813 switch (operation) {
814 case NB_OP_CREATE:
815 ret = (*nb_node->cbs.create)(event, dnode, resource);
816 break;
817 case NB_OP_MODIFY:
818 ret = (*nb_node->cbs.modify)(event, dnode, resource);
819 break;
820 case NB_OP_DESTROY:
821 ret = (*nb_node->cbs.destroy)(event, dnode);
822 break;
823 case NB_OP_MOVE:
824 ret = (*nb_node->cbs.move)(event, dnode);
825 break;
826 default:
827 flog_err(EC_LIB_DEVELOPMENT,
828 "%s: unknown operation (%u) [xpath %s]", __func__,
829 operation, xpath);
830 exit(1);
831 }
832
833 if (ret != NB_OK) {
834 int priority;
835 enum lib_log_refs ref;
836
837 switch (event) {
838 case NB_EV_VALIDATE:
839 priority = LOG_WARNING;
840 ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
841 break;
842 case NB_EV_PREPARE:
843 priority = LOG_WARNING;
844 ref = EC_LIB_NB_CB_CONFIG_PREPARE;
845 break;
846 case NB_EV_ABORT:
847 priority = LOG_WARNING;
848 ref = EC_LIB_NB_CB_CONFIG_ABORT;
849 break;
850 case NB_EV_APPLY:
851 priority = LOG_ERR;
852 ref = EC_LIB_NB_CB_CONFIG_APPLY;
853 break;
854 default:
855 flog_err(EC_LIB_DEVELOPMENT,
856 "%s: unknown event (%u) [xpath %s]",
857 __func__, event, xpath);
858 exit(1);
859 }
860
861 flog(priority, ref,
862 "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
863 __func__, nb_err_name(ret), nb_event_name(event),
864 nb_operation_name(operation), xpath);
865 }
866
867 return ret;
868 }
869
870 struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
871 const char *xpath,
872 const void *list_entry)
873 {
874 DEBUGD(&nb_dbg_cbs_state,
875 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
876 xpath, list_entry);
877
878 return nb_node->cbs.get_elem(xpath, list_entry);
879 }
880
881 const void *nb_callback_get_next(const struct nb_node *nb_node,
882 const void *parent_list_entry,
883 const void *list_entry)
884 {
885 DEBUGD(&nb_dbg_cbs_state,
886 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
887 nb_node->xpath, parent_list_entry, list_entry);
888
889 return nb_node->cbs.get_next(parent_list_entry, list_entry);
890 }
891
892 int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
893 struct yang_list_keys *keys)
894 {
895 DEBUGD(&nb_dbg_cbs_state,
896 "northbound callback (get_keys): node [%s] list_entry [%p]",
897 nb_node->xpath, list_entry);
898
899 return nb_node->cbs.get_keys(list_entry, keys);
900 }
901
902 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
903 const void *parent_list_entry,
904 const struct yang_list_keys *keys)
905 {
906 DEBUGD(&nb_dbg_cbs_state,
907 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
908 nb_node->xpath, parent_list_entry);
909
910 return nb_node->cbs.lookup_entry(parent_list_entry, keys);
911 }
912
913 int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
914 const struct list *input, struct list *output)
915 {
916 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
917
918 return nb_node->cbs.rpc(xpath, input, output);
919 }
920
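/*
 * Create a new configuration transaction. This fails when the running
 * configuration is locked by another client or when another transaction is
 * already in progress.
 */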
921 static struct nb_transaction *
922 nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
923 enum nb_client client, const void *user, const char *comment)
924 {
925 struct nb_transaction *transaction;
926
927 if (nb_running_lock_check(client, user)) {
928 flog_warn(
929 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
930 "%s: running configuration is locked by another client",
931 __func__);
932 return NULL;
933 }
934
935 if (transaction_in_progress) {
936 flog_warn(
937 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
938 "%s: error - there's already another transaction in progress",
939 __func__);
940 return NULL;
941 }
942 transaction_in_progress = true;
943
944 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
945 transaction->client = client;
946 if (comment)
947 strlcpy(transaction->comment, comment,
948 sizeof(transaction->comment));
949 transaction->config = config;
950 transaction->changes = *changes;
951
952 return transaction;
953 }
954
955 static void nb_transaction_free(struct nb_transaction *transaction)
956 {
957 nb_config_diff_del_changes(&transaction->changes);
958 XFREE(MTYPE_TMP, transaction);
959 transaction_in_progress = false;
960 }
961
962 /* Process all configuration changes associated with a transaction. */
963 static int nb_transaction_process(enum nb_event event,
964 struct nb_transaction *transaction)
965 {
966 struct nb_config_cb *cb;
967
968 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
969 struct nb_config_change *change = (struct nb_config_change *)cb;
970 int ret;
971
972 /*
973 * Only try to release resources that were allocated
974 * successfully.
975 */
976 if (event == NB_EV_ABORT && change->prepare_ok == false)
977 break;
978
979 /* Call the appropriate callback. */
980 ret = nb_callback_configuration(event, change);
981 switch (event) {
982 case NB_EV_PREPARE:
983 if (ret != NB_OK)
984 return ret;
985 change->prepare_ok = true;
986 break;
987 case NB_EV_ABORT:
988 case NB_EV_APPLY:
989 /*
990 * At this point it's not possible to reject the
991 * transaction anymore, so any failure here can lead to
992 * inconsistencies and should be treated as a bug.
993 * Operations prone to errors, like validations and
994 * resource allocations, should be performed during the
995 * 'prepare' phase.
996 */
997 break;
998 default:
999 break;
1000 }
1001 }
1002
1003 return NB_OK;
1004 }
1005
1006 static struct nb_config_cb *
1007 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1008 const struct nb_node *nb_node,
1009 const struct lyd_node *dnode)
1010 {
1011 struct nb_config_cb *cb;
1012
1013 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1014 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1015 cb->nb_node = nb_node;
1016 cb->dnode = dnode;
1017 RB_INSERT(nb_config_cbs, cbs, cb);
1018
1019 return cb;
1020 }
1021
1022 static struct nb_config_cb *
1023 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1024 const struct nb_node *nb_node)
1025 {
1026 struct nb_config_cb s;
1027
1028 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1029 s.nb_node = nb_node;
1030 return RB_FIND(nb_config_cbs, cbs, &s);
1031 }
1032
1033 /* Call the 'apply_finish' callbacks. */
1034 static void nb_transaction_apply_finish(struct nb_transaction *transaction)
1035 {
1036 struct nb_config_cbs cbs;
1037 struct nb_config_cb *cb;
1038
1039 /* Initialize tree of 'apply_finish' callbacks. */
1040 RB_INIT(nb_config_cbs, &cbs);
1041
1042 /* Identify the 'apply_finish' callbacks that need to be called. */
1043 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1044 struct nb_config_change *change = (struct nb_config_change *)cb;
1045 const struct lyd_node *dnode = change->cb.dnode;
1046
1047 /*
1048 * Iterate up to the root of the data tree. When a node is being
1049 * deleted, skip its 'apply_finish' callback if one is defined
1050 * (the 'apply_finish' callbacks from the node ancestors should
1051 * be called though).
1052 */
1053 if (change->cb.operation == NB_OP_DESTROY) {
1054 char xpath[XPATH_MAXLEN];
1055
1056 dnode = dnode->parent;
1057 if (!dnode)
1058 break;
1059
1060 /*
1061 * The dnode from 'destroy' callbacks points to an element
1062 * of the running configuration. Use yang_dnode_get()
1063 * to get the corresponding dnode from the candidate
1064 * configuration that is being committed.
1065 */
1066 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1067 dnode = yang_dnode_get(transaction->config->dnode,
1068 xpath);
1069 }
1070 while (dnode) {
1071 char xpath[XPATH_MAXLEN];
1072 struct nb_node *nb_node;
1073
1074 nb_node = dnode->schema->priv;
1075 if (!nb_node->cbs.apply_finish)
1076 goto next;
1077
1078 /*
1079 * Don't call the callback more than once for the same
1080 * data node.
1081 */
1082 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1083 if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
1084 goto next;
1085
1086 nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);
1087
1088 next:
1089 dnode = dnode->parent;
1090 }
1091 }
1092
1093 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1094 RB_FOREACH (cb, nb_config_cbs, &cbs) {
1095 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
1096 nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
1097 cb->xpath, NULL);
1098
1099 (*cb->nb_node->cbs.apply_finish)(cb->dnode);
1100 }
1101
1102 /* Release memory. */
1103 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1104 cb = RB_ROOT(nb_config_cbs, &cbs);
1105 RB_REMOVE(nb_config_cbs, &cbs, cb);
1106 XFREE(MTYPE_TMP, cb);
1107 }
1108 }
1109
1110 static int nb_oper_data_iter_children(const struct lys_node *snode,
1111 const char *xpath, const void *list_entry,
1112 const struct yang_list_keys *list_keys,
1113 struct yang_translator *translator,
1114 bool first, uint32_t flags,
1115 nb_oper_data_cb cb, void *arg)
1116 {
1117 struct lys_node *child;
1118
1119 LY_TREE_FOR (snode->child, child) {
1120 int ret;
1121
1122 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1123 list_keys, translator, false,
1124 flags, cb, arg);
1125 if (ret != NB_OK)
1126 return ret;
1127 }
1128
1129 return NB_OK;
1130 }
1131
1132 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1133 const char *xpath, const void *list_entry,
1134 const struct yang_list_keys *list_keys,
1135 struct yang_translator *translator,
1136 uint32_t flags, nb_oper_data_cb cb, void *arg)
1137 {
1138 struct yang_data *data;
1139
1140 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1141 return NB_OK;
1142
1143 /* Ignore list keys. */
1144 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1145 return NB_OK;
1146
1147 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1148 if (data == NULL)
1149 /* Leaf of type "empty" is not present. */
1150 return NB_OK;
1151
1152 return (*cb)(nb_node->snode, translator, data, arg);
1153 }
1154
1155 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1156 const char *xpath,
1157 const void *list_entry,
1158 const struct yang_list_keys *list_keys,
1159 struct yang_translator *translator,
1160 uint32_t flags, nb_oper_data_cb cb,
1161 void *arg)
1162 {
1163 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1164 return NB_OK;
1165
1166 /* Presence containers. */
1167 if (nb_node->cbs.get_elem) {
1168 struct yang_data *data;
1169 int ret;
1170
1171 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1172 if (data == NULL)
1173 /* Presence container is not present. */
1174 return NB_OK;
1175
1176 ret = (*cb)(nb_node->snode, translator, data, arg);
1177 if (ret != NB_OK)
1178 return ret;
1179 }
1180
1181 /* Iterate over the child nodes. */
1182 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1183 list_keys, translator, false, flags,
1184 cb, arg);
1185 }
1186
1187 static int
1188 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1189 const void *parent_list_entry,
1190 const struct yang_list_keys *parent_list_keys,
1191 struct yang_translator *translator, uint32_t flags,
1192 nb_oper_data_cb cb, void *arg)
1193 {
1194 const void *list_entry = NULL;
1195
1196 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1197 return NB_OK;
1198
1199 do {
1200 struct yang_data *data;
1201 int ret;
1202
1203 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1204 list_entry);
1205 if (!list_entry)
1206 /* End of the list. */
1207 break;
1208
1209 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1210 if (data == NULL)
1211 continue;
1212
1213 ret = (*cb)(nb_node->snode, translator, data, arg);
1214 if (ret != NB_OK)
1215 return ret;
1216 } while (list_entry);
1217
1218 return NB_OK;
1219 }
1220
1221 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1222 const char *xpath_list,
1223 const void *parent_list_entry,
1224 const struct yang_list_keys *parent_list_keys,
1225 struct yang_translator *translator,
1226 uint32_t flags, nb_oper_data_cb cb, void *arg)
1227 {
1228 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1229 const void *list_entry = NULL;
1230 uint32_t position = 1;
1231
1232 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1233 return NB_OK;
1234
1235 /* Iterate over all list entries. */
1236 do {
1237 struct yang_list_keys list_keys;
1238 char xpath[XPATH_MAXLEN * 2];
1239 int ret;
1240
1241 /* Obtain list entry. */
1242 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1243 list_entry);
1244 if (!list_entry)
1245 /* End of the list. */
1246 break;
1247
1248 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1249 /* Obtain the list entry keys. */
1250 if (nb_callback_get_keys(nb_node, list_entry,
1251 &list_keys)
1252 != NB_OK) {
1253 flog_warn(EC_LIB_NB_CB_STATE,
1254 "%s: failed to get list keys",
1255 __func__);
1256 return NB_ERR;
1257 }
1258
1259 /* Build XPath of the list entry. */
1260 strlcpy(xpath, xpath_list, sizeof(xpath));
1261 for (unsigned int i = 0; i < list_keys.num; i++) {
1262 snprintf(xpath + strlen(xpath),
1263 sizeof(xpath) - strlen(xpath),
1264 "[%s='%s']", slist->keys[i]->name,
1265 list_keys.key[i]);
1266 }
1267 } else {
1268 /*
1269 * Keyless list - build XPath using a positional index.
1270 */
1271 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1272 position);
1273 position++;
1274 }
1275
1276 /* Iterate over the child nodes. */
1277 ret = nb_oper_data_iter_children(
1278 nb_node->snode, xpath, list_entry, &list_keys,
1279 translator, false, flags, cb, arg);
1280 if (ret != NB_OK)
1281 return ret;
1282 } while (list_entry);
1283
1284 return NB_OK;
1285 }
1286
1287 static int nb_oper_data_iter_node(const struct lys_node *snode,
1288 const char *xpath_parent,
1289 const void *list_entry,
1290 const struct yang_list_keys *list_keys,
1291 struct yang_translator *translator,
1292 bool first, uint32_t flags,
1293 nb_oper_data_cb cb, void *arg)
1294 {
1295 struct nb_node *nb_node;
1296 char xpath[XPATH_MAXLEN];
1297 int ret = NB_OK;
1298
1299 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1300 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1301 return NB_OK;
1302
1303 /* Update XPath. */
1304 strlcpy(xpath, xpath_parent, sizeof(xpath));
1305 if (!first && snode->nodetype != LYS_USES)
1306 snprintf(xpath + strlen(xpath), sizeof(xpath) - strlen(xpath),
1307 "/%s", snode->name);
1308
1309 nb_node = snode->priv;
1310 switch (snode->nodetype) {
1311 case LYS_CONTAINER:
1312 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1313 list_keys, translator, flags,
1314 cb, arg);
1315 break;
1316 case LYS_LEAF:
1317 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1318 list_keys, translator, flags, cb,
1319 arg);
1320 break;
1321 case LYS_LEAFLIST:
1322 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1323 list_keys, translator, flags,
1324 cb, arg);
1325 break;
1326 case LYS_LIST:
1327 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1328 list_keys, translator, flags, cb,
1329 arg);
1330 break;
1331 case LYS_USES:
1332 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1333 list_keys, translator, false,
1334 flags, cb, arg);
1335 break;
1336 default:
1337 break;
1338 }
1339
1340 return ret;
1341 }
1342
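/*
 * Iterate over operational data starting from 'xpath', calling 'cb' for each
 * data element found. A hypothetical call such as
 * nb_oper_data_iterate("/example:system/state", NULL, 0, cb, arg) would walk
 * all state nodes below that container (illustrative example only).
 */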
1343 int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1344 uint32_t flags, nb_oper_data_cb cb, void *arg)
1345 {
1346 struct nb_node *nb_node;
1347 const void *list_entry = NULL;
1348 struct yang_list_keys list_keys;
1349 struct list *list_dnodes;
1350 struct lyd_node *dnode, *dn;
1351 struct listnode *ln;
1352 int ret;
1353
1354 nb_node = nb_node_find(xpath);
1355 if (!nb_node) {
1356 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1357 "%s: unknown data path: %s", __func__, xpath);
1358 return NB_ERR;
1359 }
1360
1361 /* For now this function works only with containers and lists. */
1362 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1363 flog_warn(
1364 EC_LIB_NB_OPERATIONAL_DATA,
1365 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1366 __func__, xpath);
1367 return NB_ERR;
1368 }
1369
1370 /*
1371 * Create a data tree from the XPath so that we can parse the keys of
1372 * all YANG lists (if any).
1373 */
1374 ly_errno = 0;
1375 dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
1376 LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
1377 if (!dnode) {
1378 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
1379 __func__);
1380 return NB_ERR;
1381 }
1382
1383 /*
1384 * Create a linked list to sort the data nodes starting from the root.
1385 */
1386 list_dnodes = list_new();
1387 for (dn = dnode; dn; dn = dn->parent) {
1388 if (dn->schema->nodetype != LYS_LIST || !dn->child)
1389 continue;
1390 listnode_add_head(list_dnodes, dn);
1391 }
1392 /*
1393 * Use the northbound callbacks to find the list entry pointer corresponding
1394 * to the given XPath.
1395 */
1396 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1397 struct lyd_node *child;
1398 struct nb_node *nn;
1399 unsigned int n = 0;
1400
1401 /* Obtain the list entry keys. */
1402 memset(&list_keys, 0, sizeof(list_keys));
1403 LY_TREE_FOR (dn->child, child) {
1404 if (!lys_is_key((struct lys_node_leaf *)child->schema,
1405 NULL))
1406 continue;
1407 strlcpy(list_keys.key[n],
1408 yang_dnode_get_string(child, NULL),
1409 sizeof(list_keys.key[n]));
1410 n++;
1411 }
1412 list_keys.num = n;
1413 if (list_keys.num
1414 != ((struct lys_node_list *)dn->schema)->keys_size) {
1415 list_delete(&list_dnodes);
1416 yang_dnode_free(dnode);
1417 return NB_ERR_NOT_FOUND;
1418 }
1419
1420 /* Find the list entry pointer. */
1421 nn = dn->schema->priv;
1422 list_entry =
1423 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1424 if (list_entry == NULL) {
1425 list_delete(&list_dnodes);
1426 yang_dnode_free(dnode);
1427 return NB_ERR_NOT_FOUND;
1428 }
1429 }
1430
1431 /* If a list entry was given, iterate over that list entry only. */
1432 if (dnode->schema->nodetype == LYS_LIST && dnode->child)
1433 ret = nb_oper_data_iter_children(
1434 nb_node->snode, xpath, list_entry, &list_keys,
1435 translator, true, flags, cb, arg);
1436 else
1437 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1438 &list_keys, translator, true,
1439 flags, cb, arg);
1440
1441 list_delete(&list_dnodes);
1442 yang_dnode_free(dnode);
1443
1444 return ret;
1445 }
1446
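/*
 * Check whether the given operation is valid for the given schema node
 * (e.g. only presence containers and optional leafs can be destroyed).
 */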
1447 bool nb_operation_is_valid(enum nb_operation operation,
1448 const struct lys_node *snode)
1449 {
1450 struct nb_node *nb_node = snode->priv;
1451 struct lys_node_container *scontainer;
1452 struct lys_node_leaf *sleaf;
1453
1454 switch (operation) {
1455 case NB_OP_CREATE:
1456 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1457 return false;
1458
1459 switch (snode->nodetype) {
1460 case LYS_LEAF:
1461 sleaf = (struct lys_node_leaf *)snode;
1462 if (sleaf->type.base != LY_TYPE_EMPTY)
1463 return false;
1464 break;
1465 case LYS_CONTAINER:
1466 scontainer = (struct lys_node_container *)snode;
1467 if (!scontainer->presence)
1468 return false;
1469 break;
1470 case LYS_LIST:
1471 case LYS_LEAFLIST:
1472 break;
1473 default:
1474 return false;
1475 }
1476 return true;
1477 case NB_OP_MODIFY:
1478 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1479 return false;
1480
1481 switch (snode->nodetype) {
1482 case LYS_LEAF:
1483 sleaf = (struct lys_node_leaf *)snode;
1484 if (sleaf->type.base == LY_TYPE_EMPTY)
1485 return false;
1486
1487 /* List keys can't be modified. */
1488 if (lys_is_key(sleaf, NULL))
1489 return false;
1490 break;
1491 default:
1492 return false;
1493 }
1494 return true;
1495 case NB_OP_DESTROY:
1496 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1497 return false;
1498
1499 switch (snode->nodetype) {
1500 case LYS_LEAF:
1501 sleaf = (struct lys_node_leaf *)snode;
1502
1503 /* List keys can't be deleted. */
1504 if (lys_is_key(sleaf, NULL))
1505 return false;
1506
1507 /*
1508 * Only optional leafs can be deleted, or leafs whose
1509 * parent is a case statement.
1510 */
1511 if (snode->parent->nodetype == LYS_CASE)
1512 return true;
1513 if (sleaf->when)
1514 return true;
1515 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
1516 || sleaf->dflt)
1517 return false;
1518 break;
1519 case LYS_CONTAINER:
1520 scontainer = (struct lys_node_container *)snode;
1521 if (!scontainer->presence)
1522 return false;
1523 break;
1524 case LYS_LIST:
1525 case LYS_LEAFLIST:
1526 break;
1527 default:
1528 return false;
1529 }
1530 return true;
1531 case NB_OP_MOVE:
1532 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1533 return false;
1534
1535 switch (snode->nodetype) {
1536 case LYS_LIST:
1537 case LYS_LEAFLIST:
1538 if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
1539 return false;
1540 break;
1541 default:
1542 return false;
1543 }
1544 return true;
1545 case NB_OP_PRE_VALIDATE:
1546 case NB_OP_APPLY_FINISH:
1547 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1548 return false;
1549 return true;
1550 case NB_OP_GET_ELEM:
1551 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1552 return false;
1553
1554 switch (snode->nodetype) {
1555 case LYS_LEAF:
1556 case LYS_LEAFLIST:
1557 break;
1558 case LYS_CONTAINER:
1559 scontainer = (struct lys_node_container *)snode;
1560 if (!scontainer->presence)
1561 return false;
1562 break;
1563 default:
1564 return false;
1565 }
1566 return true;
1567 case NB_OP_GET_NEXT:
1568 switch (snode->nodetype) {
1569 case LYS_LIST:
1570 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1571 return false;
1572 break;
1573 case LYS_LEAFLIST:
1574 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1575 return false;
1576 break;
1577 default:
1578 return false;
1579 }
1580 return true;
1581 case NB_OP_GET_KEYS:
1582 case NB_OP_LOOKUP_ENTRY:
1583 switch (snode->nodetype) {
1584 case LYS_LIST:
1585 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1586 return false;
1587 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
1588 return false;
1589 break;
1590 default:
1591 return false;
1592 }
1593 return true;
1594 case NB_OP_RPC:
1595 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1596 return false;
1597
1598 switch (snode->nodetype) {
1599 case LYS_RPC:
1600 case LYS_ACTION:
1601 break;
1602 default:
1603 return false;
1604 }
1605 return true;
1606 default:
1607 return false;
1608 }
1609 }
1610
1611 DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1612 (xpath, arguments));
1613
1614 int nb_notification_send(const char *xpath, struct list *arguments)
1615 {
1616 int ret;
1617
1618 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1619
1620 ret = hook_call(nb_notification_send, xpath, arguments);
1621 if (arguments)
1622 list_delete(&arguments);
1623
1624 return ret;
1625 }
1626
1627 /* Running configuration user pointers management. */
1628 struct nb_config_entry {
1629 char xpath[XPATH_MAXLEN];
1630 void *entry;
1631 };
1632
1633 static bool running_config_entry_cmp(const void *value1, const void *value2)
1634 {
1635 const struct nb_config_entry *c1 = value1;
1636 const struct nb_config_entry *c2 = value2;
1637
1638 return strmatch(c1->xpath, c2->xpath);
1639 }
1640
1641 static unsigned int running_config_entry_key_make(const void *value)
1642 {
1643 return string_hash_make(value);
1644 }
1645
1646 static void *running_config_entry_alloc(void *p)
1647 {
1648 struct nb_config_entry *new, *key = p;
1649
1650 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1651 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1652
1653 return new;
1654 }
1655
1656 static void running_config_entry_free(void *arg)
1657 {
1658 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
1659 }
1660
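/* Associate a user pointer with a data node from the running configuration. */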
1661 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1662 {
1663 struct nb_config_entry *config, s;
1664
1665 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1666 config = hash_get(running_config_entries, &s,
1667 running_config_entry_alloc);
1668 config->entry = entry;
1669 }
1670
1671 static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
1672 {
1673 struct nb_config_entry *config, s;
1674 struct lyd_node *child;
1675 void *entry = NULL;
1676
1677 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1678 config = hash_release(running_config_entries, &s);
1679 if (config) {
1680 entry = config->entry;
1681 running_config_entry_free(config);
1682 }
1683
1684 /* Unset user pointers from the child nodes. */
1685 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
1686 LY_TREE_FOR (dnode->child, child) {
1687 (void)nb_running_unset_entry_helper(child);
1688 }
1689 }
1690
1691 return entry;
1692 }
1693
1694 void *nb_running_unset_entry(const struct lyd_node *dnode)
1695 {
1696 void *entry;
1697
1698 entry = nb_running_unset_entry_helper(dnode);
1699 assert(entry);
1700
1701 return entry;
1702 }
1703
1704 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1705 bool abort_if_not_found)
1706 {
1707 const struct lyd_node *orig_dnode = dnode;
1708 char xpath_buf[XPATH_MAXLEN];
1709
1710 assert(dnode || xpath);
1711
1712 if (!dnode)
1713 dnode = yang_dnode_get(running_config->dnode, xpath);
1714
1715 while (dnode) {
1716 struct nb_config_entry *config, s;
1717
1718 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1719 config = hash_lookup(running_config_entries, &s);
1720 if (config)
1721 return config->entry;
1722
1723 dnode = dnode->parent;
1724 }
1725
1726 if (!abort_if_not_found)
1727 return NULL;
1728
1729 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1730 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1731 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1732 zlog_backtrace(LOG_ERR);
1733 abort();
1734 }
1735
1736 /* Logging functions. */
1737 const char *nb_event_name(enum nb_event event)
1738 {
1739 switch (event) {
1740 case NB_EV_VALIDATE:
1741 return "validate";
1742 case NB_EV_PREPARE:
1743 return "prepare";
1744 case NB_EV_ABORT:
1745 return "abort";
1746 case NB_EV_APPLY:
1747 return "apply";
1748 default:
1749 return "unknown";
1750 }
1751 }
1752
1753 const char *nb_operation_name(enum nb_operation operation)
1754 {
1755 switch (operation) {
1756 case NB_OP_CREATE:
1757 return "create";
1758 case NB_OP_MODIFY:
1759 return "modify";
1760 case NB_OP_DESTROY:
1761 return "destroy";
1762 case NB_OP_MOVE:
1763 return "move";
1764 case NB_OP_PRE_VALIDATE:
1765 return "pre_validate";
1766 case NB_OP_APPLY_FINISH:
1767 return "apply_finish";
1768 case NB_OP_GET_ELEM:
1769 return "get_elem";
1770 case NB_OP_GET_NEXT:
1771 return "get_next";
1772 case NB_OP_GET_KEYS:
1773 return "get_keys";
1774 case NB_OP_LOOKUP_ENTRY:
1775 return "lookup_entry";
1776 case NB_OP_RPC:
1777 return "rpc";
1778 default:
1779 return "unknown";
1780 }
1781 }
1782
1783 const char *nb_err_name(enum nb_error error)
1784 {
1785 switch (error) {
1786 case NB_OK:
1787 return "ok";
1788 case NB_ERR:
1789 return "generic error";
1790 case NB_ERR_NO_CHANGES:
1791 return "no changes";
1792 case NB_ERR_NOT_FOUND:
1793 return "element not found";
1794 case NB_ERR_LOCKED:
1795 return "resource is locked";
1796 case NB_ERR_VALIDATION:
1797 return "validation error";
1798 case NB_ERR_RESOURCE:
1799 return "failed to allocate resource";
1800 case NB_ERR_INCONSISTENCY:
1801 return "internal inconsistency";
1802 default:
1803 return "unknown";
1804 }
1805 }
1806
1807 const char *nb_client_name(enum nb_client client)
1808 {
1809 switch (client) {
1810 case NB_CLIENT_CLI:
1811 return "CLI";
1812 case NB_CLIENT_CONFD:
1813 return "ConfD";
1814 case NB_CLIENT_SYSREPO:
1815 return "Sysrepo";
1816 case NB_CLIENT_GRPC:
1817 return "gRPC";
1818 default:
1819 return "unknown";
1820 }
1821 }
1822
1823 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1824 {
1825 for (size_t i = 0; module->nodes[i].xpath; i++) {
1826 struct nb_node *nb_node;
1827 uint32_t priority;
1828
1829 nb_node = nb_node_find(module->nodes[i].xpath);
1830 if (!nb_node) {
1831 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1832 "%s: unknown data path: %s", __func__,
1833 module->nodes[i].xpath);
1834 continue;
1835 }
1836
1837 nb_node->cbs = module->nodes[i].cbs;
1838 priority = module->nodes[i].priority;
1839 if (priority != 0)
1840 nb_node->priority = priority;
1841 }
1842 }
1843
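/*
 * Initialize the northbound layer: load the given YANG modules, register
 * and validate their callbacks, and create an empty running configuration.
 */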
1844 void nb_init(struct thread_master *tm,
1845 const struct frr_yang_module_info *modules[], size_t nmodules)
1846 {
1847 unsigned int errors = 0;
1848
1849 /* Load YANG modules. */
1850 for (size_t i = 0; i < nmodules; i++)
1851 yang_module_load(modules[i]->name);
1852
1853 /* Create a nb_node for all YANG schema nodes. */
1854 nb_nodes_create();
1855
1856 /* Load northbound callbacks. */
1857 for (size_t i = 0; i < nmodules; i++)
1858 nb_load_callbacks(modules[i]);
1859
1860 /* Validate northbound callbacks. */
1861 yang_snodes_iterate_all(nb_node_validate, 0, &errors);
1862 if (errors > 0) {
1863 flog_err(
1864 EC_LIB_NB_CBS_VALIDATION,
1865 "%s: failed to validate northbound callbacks: %u error(s)",
1866 __func__, errors);
1867 exit(1);
1868 }
1869
1870 /* Create an empty running configuration. */
1871 running_config = nb_config_new(NULL);
1872 running_config_entries = hash_create(running_config_entry_key_make,
1873 running_config_entry_cmp,
1874 "Running Configuration Entries");
1875 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
1876
1877 /* Initialize the northbound CLI. */
1878 nb_cli_init(tm);
1879 }
1880
1881 void nb_terminate(void)
1882 {
1883 /* Terminate the northbound CLI. */
1884 nb_cli_terminate();
1885
1886 /* Delete all nb_node's from all YANG modules. */
1887 nb_nodes_delete();
1888
1889 /* Delete the running configuration. */
1890 hash_clean(running_config_entries, running_config_entry_free);
1891 hash_free(running_config_entries);
1892 nb_config_free(running_config);
1893 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
1894 }