]> git.proxmox.com Git - mirror_frr.git/blob - lib/northbound.c
fe7c8bfbc6de56295e0d59358fa5acd318f60195
[mirror_frr.git] / lib / northbound.c
1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
33
34 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
35 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
36 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
37
38 /* Running configuration - shouldn't be modified directly. */
39 struct nb_config *running_config;
40
41 /* Hash table of user pointers associated with configuration entries. */
42 static struct hash *running_config_entries;
43
44 /* Management lock for the running configuration. */
45 static struct {
46 /* Mutex protecting this structure. */
47 pthread_mutex_t mtx;
48
49 /* Actual lock. */
50 bool locked;
51
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client;
54
55 /* Northbound user who owns this lock. */
56 const void *owner_user;
57 } running_config_mgmt_lock;
58
59 /*
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
62 */
63 static bool transaction_in_progress;
64
65 static int nb_callback_configuration(const enum nb_event event,
66 struct nb_config_change *change);
67 static void nb_log_callback(const enum nb_event event,
68 enum nb_operation operation, const char *xpath,
69 const char *value);
70 static struct nb_transaction *nb_transaction_new(struct nb_config *config,
71 struct nb_config_cbs *changes,
72 enum nb_client client,
73 const void *user,
74 const char *comment);
75 static void nb_transaction_free(struct nb_transaction *transaction);
76 static int nb_transaction_process(enum nb_event event,
77 struct nb_transaction *transaction);
78 static void nb_transaction_apply_finish(struct nb_transaction *transaction);
79 static int nb_oper_data_iter_node(const struct lys_node *snode,
80 const char *xpath, const void *list_entry,
81 const struct yang_list_keys *list_keys,
82 struct yang_translator *translator,
83 bool first, uint32_t flags,
84 nb_oper_data_cb cb, void *arg);
85
86 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
87 {
88 bool *config_only = arg;
89
90 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
91 *config_only = false;
92 return YANG_ITER_STOP;
93 }
94
95 return YANG_ITER_CONTINUE;
96 }
97
98 static int nb_node_new_cb(const struct lys_node *snode, void *arg)
99 {
100 struct nb_node *nb_node;
101 struct lys_node *sparent, *sparent_list;
102
103 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
104 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
105 sizeof(nb_node->xpath));
106 nb_node->priority = NB_DFLT_PRIORITY;
107 sparent = yang_snode_real_parent(snode);
108 if (sparent)
109 nb_node->parent = sparent->priv;
110 sparent_list = yang_snode_parent_list(snode);
111 if (sparent_list)
112 nb_node->parent_list = sparent_list->priv;
113
114 /* Set flags. */
115 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
116 bool config_only = true;
117
118 yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
119 YANG_ITER_ALLOW_AUGMENTATIONS,
120 &config_only);
121 if (config_only)
122 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
123 }
124 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
125 struct lys_node_list *slist;
126
127 slist = (struct lys_node_list *)snode;
128 if (slist->keys_size == 0)
129 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
130 }
131
132 /*
133 * Link the northbound node and the libyang schema node with one
134 * another.
135 */
136 nb_node->snode = snode;
137 lys_set_private(snode, nb_node);
138
139 return YANG_ITER_CONTINUE;
140 }
141
142 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
143 {
144 struct nb_node *nb_node;
145
146 nb_node = snode->priv;
147 lys_set_private(snode, NULL);
148 XFREE(MTYPE_NB_NODE, nb_node);
149
150 return YANG_ITER_CONTINUE;
151 }
152
/* Create a northbound node for every schema node of the loaded YANG modules. */
void nb_nodes_create(void)
{
	yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
}
157
/* Free the northbound nodes previously created by nb_nodes_create(). */
void nb_nodes_delete(void)
{
	yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
}
162
/*
 * Find the northbound node corresponding to the given XPath expression.
 * Returns NULL when libyang can't resolve the path to a schema node.
 */
struct nb_node *nb_node_find(const char *xpath)
{
	const struct lys_node *snode;

	/*
	 * Use libyang to find the schema node associated to the xpath and get
	 * the northbound node from there (snode private pointer).
	 */
	snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
	if (!snode)
		return NULL;

	return snode->priv;
}
177
178 static int nb_node_validate_cb(const struct nb_node *nb_node,
179 enum nb_operation operation,
180 int callback_implemented, bool optional)
181 {
182 bool valid;
183
184 valid = nb_operation_is_valid(operation, nb_node->snode);
185
186 if (!valid && callback_implemented)
187 flog_warn(EC_LIB_NB_CB_UNNEEDED,
188 "unneeded '%s' callback for '%s'",
189 nb_operation_name(operation), nb_node->xpath);
190
191 if (!optional && valid && !callback_implemented) {
192 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
193 nb_operation_name(operation), nb_node->xpath);
194 return 1;
195 }
196
197 return 0;
198 }
199
/*
 * Check if the required callbacks were implemented for the given northbound
 * node.
 */
static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)

{
	/* Number of missing mandatory callbacks. */
	unsigned int error = 0;

	/*
	 * The last argument tells whether the callback is optional: only
	 * pre_validate and apply_finish may be legitimately absent.
	 */
	error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
				     !!nb_node->cbs.create, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
				     !!nb_node->cbs.modify, false);
	error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
				     !!nb_node->cbs.destroy, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
				     false);
	error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
				     !!nb_node->cbs.pre_validate, true);
	error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
				     !!nb_node->cbs.apply_finish, true);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
				     !!nb_node->cbs.get_elem, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
				     !!nb_node->cbs.get_next, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
				     !!nb_node->cbs.get_keys, false);
	error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
				     !!nb_node->cbs.lookup_entry, false);
	error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
				     false);

	return error;
}
234
235 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
236 {
237 /* Top-level nodes can have any priority. */
238 if (!nb_node->parent)
239 return 0;
240
241 if (nb_node->priority < nb_node->parent->priority) {
242 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
243 "node has higher priority than its parent [xpath %s]",
244 nb_node->xpath);
245 return 1;
246 }
247
248 return 0;
249 }
250
251 static int nb_node_validate(const struct lys_node *snode, void *arg)
252 {
253 struct nb_node *nb_node = snode->priv;
254 unsigned int *errors = arg;
255
256 /* Validate callbacks and priority. */
257 *errors += nb_node_validate_cbs(nb_node);
258 *errors += nb_node_validate_priority(nb_node);
259
260 return YANG_ITER_CONTINUE;
261 }
262
263 struct nb_config *nb_config_new(struct lyd_node *dnode)
264 {
265 struct nb_config *config;
266
267 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
268 if (dnode)
269 config->dnode = dnode;
270 else
271 config->dnode = yang_dnode_new(ly_native_ctx, true);
272 config->version = 0;
273 pthread_rwlock_init(&config->lock, NULL);
274
275 return config;
276 }
277
/* Free a configuration: its data tree (if any), its lock and the wrapper. */
void nb_config_free(struct nb_config *config)
{
	if (config->dnode)
		yang_dnode_free(config->dnode);
	pthread_rwlock_destroy(&config->lock);
	XFREE(MTYPE_NB_CONFIG, config);
}
285
286 struct nb_config *nb_config_dup(const struct nb_config *config)
287 {
288 struct nb_config *dup;
289
290 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
291 dup->dnode = yang_dnode_dup(config->dnode);
292 dup->version = config->version;
293 pthread_rwlock_init(&dup->lock, NULL);
294
295 return dup;
296 }
297
298 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
299 bool preserve_source)
300 {
301 int ret;
302
303 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
304 if (ret != 0)
305 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
306
307 if (!preserve_source)
308 nb_config_free(config_src);
309
310 return (ret == 0) ? NB_OK : NB_ERR;
311 }
312
/*
 * Replace the contents of 'config_dst' with the contents of 'config_src'.
 * When 'preserve_source' is false the source configuration is consumed: its
 * data tree is moved (not copied) into the destination and the source wrapper
 * is freed.
 */
void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		/*
		 * Transfer ownership of the data tree; NULL out the source
		 * pointer first so nb_config_free() doesn't free it too.
		 */
		config_dst->dnode = config_src->dnode;
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}
331
332 /* Generate the nb_config_cbs tree. */
333 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
334 const struct nb_config_cb *b)
335 {
336 /* Sort by priority first. */
337 if (a->nb_node->priority < b->nb_node->priority)
338 return -1;
339 if (a->nb_node->priority > b->nb_node->priority)
340 return 1;
341
342 /*
343 * Use XPath as a tie-breaker. This will naturally sort parent nodes
344 * before their children.
345 */
346 return strcmp(a->xpath, b->xpath);
347 }
348 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
349
350 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
351 enum nb_operation operation,
352 const struct lyd_node *dnode)
353 {
354 struct nb_config_change *change;
355
356 change = XCALLOC(MTYPE_TMP, sizeof(*change));
357 change->cb.operation = operation;
358 change->cb.nb_node = dnode->schema->priv;
359 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
360 change->cb.dnode = dnode;
361
362 RB_INSERT(nb_config_cbs, changes, &change->cb);
363 }
364
365 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
366 {
367 while (!RB_EMPTY(nb_config_cbs, changes)) {
368 struct nb_config_change *change;
369
370 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
371 changes);
372 RB_REMOVE(nb_config_cbs, changes, &change->cb);
373 XFREE(MTYPE_TMP, change);
374 }
375 }
376
/*
 * Helper function used when calculating the delta between two different
 * configurations. Given a new subtree, calculate all new YANG data nodes,
 * excluding default leafs and leaf-lists. This is a recursive function.
 */
static void nb_config_diff_created(const struct lyd_node *dnode,
				   struct nb_config_cbs *changes)
{
	enum nb_operation operation;
	struct lyd_node *child;

	switch (dnode->schema->nodetype) {
	case LYS_LEAF:
	case LYS_LEAFLIST:
		/* Skip nodes that only carry their default value. */
		if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
			break;

		/*
		 * Map the new node to whichever operation its schema node
		 * supports; skip it when it supports neither.
		 */
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			operation = NB_OP_CREATE;
		else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
			operation = NB_OP_MODIFY;
		else
			return;

		nb_config_diff_add_change(changes, operation, dnode);
		break;
	case LYS_CONTAINER:
	case LYS_LIST:
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);

		/* Process child nodes recursively. */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_created(child, changes);
		}
		break;
	default:
		break;
	}
}
417
/*
 * Helper of nb_config_diff(): generate "destroy" changes for a deleted
 * subtree, recursing into non-presence containers.
 */
static void nb_config_diff_deleted(const struct lyd_node *dnode,
				   struct nb_config_cbs *changes)
{
	if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
		nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
	else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
		struct lyd_node *child;

		/*
		 * Non-presence containers need special handling since they
		 * don't have "destroy" callbacks. In this case, what we need to
		 * do is to call the "destroy" callbacks of their child nodes
		 * when applicable (i.e. optional nodes).
		 */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_deleted(child, changes);
		}
	}
}
437
/* Calculate the delta between two different configurations. */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_difflist *diff;

	/* Let libyang compute the raw data-tree diff (config1 -> config2). */
	diff = lyd_diff(config1->dnode, config2->dnode,
			LYD_DIFFOPT_WITHDEFAULTS);
	assert(diff);

	/* The difflist is terminated by a LYD_DIFF_END entry. */
	for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
		LYD_DIFFTYPE type;
		struct lyd_node *dnode;

		type = diff->type[i];

		switch (type) {
		case LYD_DIFF_CREATED:
			/* New nodes live in the second (new) data tree. */
			dnode = diff->second[i];
			nb_config_diff_created(dnode, changes);
			break;
		case LYD_DIFF_DELETED:
			/* Deleted nodes live in the first (old) data tree. */
			dnode = diff->first[i];
			nb_config_diff_deleted(dnode, changes);
			break;
		case LYD_DIFF_CHANGED:
			dnode = diff->second[i];
			nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
			break;
		case LYD_DIFF_MOVEDAFTER1:
		case LYD_DIFF_MOVEDAFTER2:
		default:
			/* Moves aren't translated into changes (yet). */
			continue;
		}
	}

	lyd_free_diff(diff);
}
477
/*
 * Edit a candidate configuration: create/modify/destroy the data node
 * identified by 'xpath'. Returns NB_OK on success, NB_ERR_NOT_FOUND when
 * trying to destroy a node that doesn't exist, or NB_ERR on other failures.
 * NOTE(review): 'data' must be non-NULL for create/modify and for leaf-lists
 * — callers are presumed to guarantee this.
 */
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode;
	char xpath_edit[XPATH_MAXLEN];

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		/*
		 * lyd_new_path() returns NULL both on error and when the node
		 * already exists with the same value; check ly_errno to tell
		 * the two cases apart.
		 */
		ly_errno = 0;
		dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
				     xpath_edit, (void *)data->value, 0,
				     LYD_PATH_OPT_UPDATE);
		if (!dnode && ly_errno) {
			flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
				  __func__);
			return NB_ERR;
		}

		/*
		 * If a new node was created, call lyd_validate() only to create
		 * default child nodes.
		 */
		if (dnode) {
			lyd_schema_sort(dnode, 0);
			lyd_validate(&dnode,
				     LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
				     ly_native_ctx);
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		lyd_free(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	default:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}
540
541 bool nb_candidate_needs_update(const struct nb_config *candidate)
542 {
543 bool ret = false;
544
545 pthread_rwlock_rdlock(&running_config->lock);
546 {
547 if (candidate->version < running_config->version)
548 ret = true;
549 }
550 pthread_rwlock_unlock(&running_config->lock);
551
552 return ret;
553 }
554
555 int nb_candidate_update(struct nb_config *candidate)
556 {
557 struct nb_config *updated_config;
558
559 pthread_rwlock_rdlock(&running_config->lock);
560 {
561 updated_config = nb_config_dup(running_config);
562 }
563 pthread_rwlock_unlock(&running_config->lock);
564
565 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
566 return NB_ERR;
567
568 nb_config_replace(candidate, updated_config, false);
569
570 return NB_OK;
571 }
572
573 /*
574 * Perform YANG syntactic and semantic validation.
575 *
576 * WARNING: lyd_validate() can change the configuration as part of the
577 * validation process.
578 */
579 static int nb_candidate_validate_yang(struct nb_config *candidate)
580 {
581 if (lyd_validate(&candidate->dnode,
582 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
583 ly_native_ctx)
584 != 0)
585 return NB_ERR_VALIDATION;
586
587 return NB_OK;
588 }
589
/* Perform code-level validation using the northbound callbacks. */
static int nb_candidate_validate_code(struct nb_config *candidate,
				      struct nb_config_cbs *changes)
{
	struct nb_config_cb *cb;
	struct lyd_node *root, *next, *child;
	int ret;

	/*
	 * First validate the candidate as a whole: run the (optional)
	 * 'pre_validate' callback of every node in the data tree.
	 */
	LY_TREE_FOR (candidate->dnode, root) {
		LY_TREE_DFS_BEGIN (root, next, child) {
			struct nb_node *nb_node;

			nb_node = child->schema->priv;
			if (!nb_node->cbs.pre_validate)
				goto next;

			if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config,
					     DEBUG_MODE_ALL)) {
				char xpath[XPATH_MAXLEN];

				yang_dnode_get_path(child, xpath,
						    sizeof(xpath));
				nb_log_callback(NB_EV_VALIDATE,
						NB_OP_PRE_VALIDATE, xpath,
						NULL);
			}

			/* A single failing callback rejects the candidate. */
			ret = (*nb_node->cbs.pre_validate)(child);
			if (ret != NB_OK)
				return NB_ERR_VALIDATION;

		next:
			LY_TREE_DFS_END(root, next, child);
		}
	}

	/* Now validate the configuration changes. */
	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;

		ret = nb_callback_configuration(NB_EV_VALIDATE, change);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}
638
639 int nb_candidate_validate(struct nb_config *candidate)
640 {
641 struct nb_config_cbs changes;
642 int ret;
643
644 if (nb_candidate_validate_yang(candidate) != NB_OK)
645 return NB_ERR_VALIDATION;
646
647 RB_INIT(nb_config_cbs, &changes);
648 pthread_rwlock_rdlock(&running_config->lock);
649 {
650 nb_config_diff(running_config, candidate, &changes);
651 ret = nb_candidate_validate_code(candidate, &changes);
652 nb_config_diff_del_changes(&changes);
653 }
654 pthread_rwlock_unlock(&running_config->lock);
655
656 return ret;
657 }
658
/*
 * First phase of a two-phase commit: validate the candidate, compute the diff
 * against the running configuration, create a transaction and run the
 * 'prepare' callbacks. On success, *transaction is set and must later be
 * passed to either nb_candidate_commit_apply() or nb_candidate_commit_abort().
 */
int nb_candidate_commit_prepare(struct nb_config *candidate,
				enum nb_client client, const void *user,
				const char *comment,
				struct nb_transaction **transaction)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate) != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	/*
	 * The running configuration stays read-locked for the whole diff +
	 * validate + transaction-creation sequence; every early-return path
	 * below must unlock it first.
	 */
	RB_INIT(nb_config_cbs, &changes);
	pthread_rwlock_rdlock(&running_config->lock);
	{
		nb_config_diff(running_config, candidate, &changes);
		if (RB_EMPTY(nb_config_cbs, &changes)) {
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_NO_CHANGES;
		}

		if (nb_candidate_validate_code(candidate, &changes) != NB_OK) {
			flog_warn(
				EC_LIB_NB_CANDIDATE_INVALID,
				"%s: failed to validate candidate configuration",
				__func__);
			nb_config_diff_del_changes(&changes);
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_VALIDATION;
		}

		/* The transaction takes ownership of 'changes' on success. */
		*transaction = nb_transaction_new(candidate, &changes, client,
						  user, comment);
		if (*transaction == NULL) {
			flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
				  "%s: failed to create transaction", __func__);
			nb_config_diff_del_changes(&changes);
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_LOCKED;
		}
	}
	pthread_rwlock_unlock(&running_config->lock);

	return nb_transaction_process(NB_EV_PREPARE, *transaction);
}
706
/* Abort a prepared transaction: run the 'abort' callbacks and free it. */
void nb_candidate_commit_abort(struct nb_transaction *transaction)
{
	(void)nb_transaction_process(NB_EV_ABORT, transaction);
	nb_transaction_free(transaction);
}
712
/*
 * Second phase of a two-phase commit: run the 'apply' and 'apply_finish'
 * callbacks, promote the candidate to running, optionally record the
 * transaction in the database, and free the transaction.
 */
void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id)
{
	(void)nb_transaction_process(NB_EV_APPLY, transaction);
	nb_transaction_apply_finish(transaction);

	/* Replace running by candidate. */
	transaction->config->version++;
	pthread_rwlock_wrlock(&running_config->lock);
	{
		/* preserve_source: the caller still owns the candidate. */
		nb_config_replace(running_config, transaction->config, true);
	}
	pthread_rwlock_unlock(&running_config->lock);

	/* Record transaction. */
	if (save_transaction
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}
735
736 int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
737 const void *user, bool save_transaction,
738 const char *comment, uint32_t *transaction_id)
739 {
740 struct nb_transaction *transaction = NULL;
741 int ret;
742
743 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
744 &transaction);
745 /*
746 * Apply the changes if the preparation phase succeeded. Otherwise abort
747 * the transaction.
748 */
749 if (ret == NB_OK)
750 nb_candidate_commit_apply(transaction, save_transaction,
751 transaction_id);
752 else if (transaction != NULL)
753 nb_candidate_commit_abort(transaction);
754
755 return ret;
756 }
757
758 int nb_running_lock(enum nb_client client, const void *user)
759 {
760 int ret = -1;
761
762 frr_with_mutex(&running_config_mgmt_lock.mtx) {
763 if (!running_config_mgmt_lock.locked) {
764 running_config_mgmt_lock.locked = true;
765 running_config_mgmt_lock.owner_client = client;
766 running_config_mgmt_lock.owner_user = user;
767 ret = 0;
768 }
769 }
770
771 return ret;
772 }
773
774 int nb_running_unlock(enum nb_client client, const void *user)
775 {
776 int ret = -1;
777
778 frr_with_mutex(&running_config_mgmt_lock.mtx) {
779 if (running_config_mgmt_lock.locked
780 && running_config_mgmt_lock.owner_client == client
781 && running_config_mgmt_lock.owner_user == user) {
782 running_config_mgmt_lock.locked = false;
783 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
784 running_config_mgmt_lock.owner_user = NULL;
785 ret = 0;
786 }
787 }
788
789 return ret;
790 }
791
792 int nb_running_lock_check(enum nb_client client, const void *user)
793 {
794 int ret = -1;
795
796 frr_with_mutex(&running_config_mgmt_lock.mtx) {
797 if (!running_config_mgmt_lock.locked
798 || (running_config_mgmt_lock.owner_client == client
799 && running_config_mgmt_lock.owner_user == user))
800 ret = 0;
801 }
802
803 return ret;
804 }
805
/* Debug helper: log a northbound callback invocation (value may be NULL). */
static void nb_log_callback(const enum nb_event event,
			    enum nb_operation operation, const char *xpath,
			    const char *value)
{
	zlog_debug(
		"northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
		nb_event_name(event), nb_operation_name(operation), xpath,
		value ? value : "(NULL)");
}
815
/*
 * Call the northbound configuration callback associated to a given
 * configuration change.
 */
static int nb_callback_configuration(const enum nb_event event,
				     struct nb_config_change *change)
{
	enum nb_operation operation = change->cb.operation;
	const char *xpath = change->cb.xpath;
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		const char *value = "(none)";

		if (dnode && !yang_snode_is_typeless_data(dnode->schema))
			value = yang_dnode_get_string(dnode, NULL);

		nb_log_callback(event, operation, xpath, value);
	}

	/*
	 * Resources are only allocated/released across prepare/abort/apply;
	 * the validate phase must not touch them.
	 */
	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	/* Dispatch to the callback matching the operation. */
	switch (operation) {
	case NB_OP_CREATE:
		ret = (*nb_node->cbs.create)(event, dnode, resource);
		break;
	case NB_OP_MODIFY:
		ret = (*nb_node->cbs.modify)(event, dnode, resource);
		break;
	case NB_OP_DESTROY:
		ret = (*nb_node->cbs.destroy)(event, dnode);
		break;
	case NB_OP_MOVE:
		ret = (*nb_node->cbs.move)(event, dnode);
		break;
	default:
		/* Can only happen on a programming error. */
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		int priority;
		enum lib_log_refs ref;

		/*
		 * Map the event to a log priority and error reference:
		 * failures during 'apply' are real errors (the transaction
		 * can no longer be rejected), the rest are warnings.
		 */
		switch (event) {
		case NB_EV_VALIDATE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
			break;
		case NB_EV_PREPARE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_PREPARE;
			break;
		case NB_EV_ABORT:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_ABORT;
			break;
		case NB_EV_APPLY:
			priority = LOG_ERR;
			ref = EC_LIB_NB_CB_CONFIG_APPLY;
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]",
				 __func__, event, xpath);
			exit(1);
		}

		flog(priority, ref,
		     "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
		     __func__, nb_err_name(ret), nb_event_name(event),
		     nb_operation_name(operation), xpath);
	}

	return ret;
}
900
/* Wrapper for the 'get_elem' operational-data callback (with debug logging). */
struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_elem): xpath [%s] list_entry [%p]",
	       xpath, list_entry);

	return nb_node->cbs.get_elem(xpath, list_entry);
}
911
/* Wrapper for the 'get_next' list-iteration callback (with debug logging). */
const void *nb_callback_get_next(const struct nb_node *nb_node,
				 const void *parent_list_entry,
				 const void *list_entry)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
	       nb_node->xpath, parent_list_entry, list_entry);

	return nb_node->cbs.get_next(parent_list_entry, list_entry);
}
922
/* Wrapper for the 'get_keys' list-keys callback (with debug logging). */
int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
			 struct yang_list_keys *keys)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_keys): node [%s] list_entry [%p]",
	       nb_node->xpath, list_entry);

	return nb_node->cbs.get_keys(list_entry, keys);
}
932
/* Wrapper for the 'lookup_entry' list-lookup callback (with debug logging). */
const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
				     const void *parent_list_entry,
				     const struct yang_list_keys *keys)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
	       nb_node->xpath, parent_list_entry);

	return nb_node->cbs.lookup_entry(parent_list_entry, keys);
}
943
/* Wrapper for the 'rpc' callback (with debug logging). */
int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
		    const struct list *input, struct list *output)
{
	DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);

	return nb_node->cbs.rpc(xpath, input, output);
}
951
952 static struct nb_transaction *
953 nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
954 enum nb_client client, const void *user, const char *comment)
955 {
956 struct nb_transaction *transaction;
957
958 if (nb_running_lock_check(client, user)) {
959 flog_warn(
960 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
961 "%s: running configuration is locked by another client",
962 __func__);
963 return NULL;
964 }
965
966 if (transaction_in_progress) {
967 flog_warn(
968 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
969 "%s: error - there's already another transaction in progress",
970 __func__);
971 return NULL;
972 }
973 transaction_in_progress = true;
974
975 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
976 transaction->client = client;
977 if (comment)
978 strlcpy(transaction->comment, comment,
979 sizeof(transaction->comment));
980 transaction->config = config;
981 transaction->changes = *changes;
982
983 return transaction;
984 }
985
/* Free a transaction, its changes, and clear the global in-progress flag. */
static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	transaction_in_progress = false;
}
992
/* Process all configuration changes associated to a transaction. */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction)
{
	struct nb_config_cb *cb;

	/*
	 * Need to lock the running configuration since transaction->changes
	 * can contain pointers to data nodes from the running configuration.
	 */
	pthread_rwlock_rdlock(&running_config->lock);
	{
		RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
			struct nb_config_change *change =
				(struct nb_config_change *)cb;
			int ret;

			/*
			 * Only try to release resources that were allocated
			 * successfully.
			 */
			if (event == NB_EV_ABORT && change->prepare_ok == false)
				break;

			/* Call the appropriate callback. */
			ret = nb_callback_configuration(event, change);
			switch (event) {
			case NB_EV_PREPARE:
				/* A failed 'prepare' rejects the whole
				 * transaction. */
				if (ret != NB_OK) {
					pthread_rwlock_unlock(
						&running_config->lock);
					return ret;
				}
				change->prepare_ok = true;
				break;
			case NB_EV_ABORT:
			case NB_EV_APPLY:
				/*
				 * At this point it's not possible to reject the
				 * transaction anymore, so any failure here can
				 * lead to inconsistencies and should be treated
				 * as a bug. Operations prone to errors, like
				 * validations and resource allocations, should
				 * be performed during the 'prepare' phase.
				 */
				break;
			default:
				break;
			}
		}
	}
	pthread_rwlock_unlock(&running_config->lock);

	return NB_OK;
}
1048
1049 static struct nb_config_cb *
1050 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1051 const struct nb_node *nb_node,
1052 const struct lyd_node *dnode)
1053 {
1054 struct nb_config_cb *cb;
1055
1056 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1057 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1058 cb->nb_node = nb_node;
1059 cb->dnode = dnode;
1060 RB_INSERT(nb_config_cbs, cbs, cb);
1061
1062 return cb;
1063 }
1064
1065 static struct nb_config_cb *
1066 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1067 const struct nb_node *nb_node)
1068 {
1069 struct nb_config_cb s;
1070
1071 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1072 s.nb_node = nb_node;
1073 return RB_FIND(nb_config_cbs, cbs, &s);
1074 }
1075
1076 /* Call the 'apply_finish' callbacks. */
1077 static void nb_transaction_apply_finish(struct nb_transaction *transaction)
1078 {
1079 struct nb_config_cbs cbs;
1080 struct nb_config_cb *cb;
1081
1082 /* Initialize tree of 'apply_finish' callbacks. */
1083 RB_INIT(nb_config_cbs, &cbs);
1084
1085 /* Identify the 'apply_finish' callbacks that need to be called. */
1086 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1087 struct nb_config_change *change = (struct nb_config_change *)cb;
1088 const struct lyd_node *dnode = change->cb.dnode;
1089
1090 /*
1091 * Iterate up to the root of the data tree. When a node is being
1092 * deleted, skip its 'apply_finish' callback if one is defined
1093 * (the 'apply_finish' callbacks from the node ancestors should
1094 * be called though).
1095 */
1096 if (change->cb.operation == NB_OP_DESTROY) {
1097 char xpath[XPATH_MAXLEN];
1098
1099 dnode = dnode->parent;
1100 if (!dnode)
1101 break;
1102
1103 /*
1104 * The dnode from 'delete' callbacks point to elements
1105 * from the running configuration. Use yang_dnode_get()
1106 * to get the corresponding dnode from the candidate
1107 * configuration that is being committed.
1108 */
1109 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1110 dnode = yang_dnode_get(transaction->config->dnode,
1111 xpath);
1112 }
1113 while (dnode) {
1114 char xpath[XPATH_MAXLEN];
1115 struct nb_node *nb_node;
1116
1117 nb_node = dnode->schema->priv;
1118 if (!nb_node->cbs.apply_finish)
1119 goto next;
1120
1121 /*
1122 * Don't call the callback more than once for the same
1123 * data node.
1124 */
1125 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1126 if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
1127 goto next;
1128
1129 nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);
1130
1131 next:
1132 dnode = dnode->parent;
1133 }
1134 }
1135
1136 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1137 RB_FOREACH (cb, nb_config_cbs, &cbs) {
1138 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
1139 nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
1140 cb->xpath, NULL);
1141
1142 (*cb->nb_node->cbs.apply_finish)(cb->dnode);
1143 }
1144
1145 /* Release memory. */
1146 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1147 cb = RB_ROOT(nb_config_cbs, &cbs);
1148 RB_REMOVE(nb_config_cbs, &cbs, cb);
1149 XFREE(MTYPE_TMP, cb);
1150 }
1151 }
1152
1153 static int nb_oper_data_iter_children(const struct lys_node *snode,
1154 const char *xpath, const void *list_entry,
1155 const struct yang_list_keys *list_keys,
1156 struct yang_translator *translator,
1157 bool first, uint32_t flags,
1158 nb_oper_data_cb cb, void *arg)
1159 {
1160 struct lys_node *child;
1161
1162 LY_TREE_FOR (snode->child, child) {
1163 int ret;
1164
1165 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1166 list_keys, translator, false,
1167 flags, cb, arg);
1168 if (ret != NB_OK)
1169 return ret;
1170 }
1171
1172 return NB_OK;
1173 }
1174
1175 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1176 const char *xpath, const void *list_entry,
1177 const struct yang_list_keys *list_keys,
1178 struct yang_translator *translator,
1179 uint32_t flags, nb_oper_data_cb cb, void *arg)
1180 {
1181 struct yang_data *data;
1182
1183 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1184 return NB_OK;
1185
1186 /* Ignore list keys. */
1187 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1188 return NB_OK;
1189
1190 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1191 if (data == NULL)
1192 /* Leaf of type "empty" is not present. */
1193 return NB_OK;
1194
1195 return (*cb)(nb_node->snode, translator, data, arg);
1196 }
1197
1198 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1199 const char *xpath,
1200 const void *list_entry,
1201 const struct yang_list_keys *list_keys,
1202 struct yang_translator *translator,
1203 uint32_t flags, nb_oper_data_cb cb,
1204 void *arg)
1205 {
1206 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1207 return NB_OK;
1208
1209 /* Presence containers. */
1210 if (nb_node->cbs.get_elem) {
1211 struct yang_data *data;
1212 int ret;
1213
1214 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1215 if (data == NULL)
1216 /* Presence container is not present. */
1217 return NB_OK;
1218
1219 ret = (*cb)(nb_node->snode, translator, data, arg);
1220 if (ret != NB_OK)
1221 return ret;
1222 }
1223
1224 /* Iterate over the child nodes. */
1225 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1226 list_keys, translator, false, flags,
1227 cb, arg);
1228 }
1229
1230 static int
1231 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1232 const void *parent_list_entry,
1233 const struct yang_list_keys *parent_list_keys,
1234 struct yang_translator *translator, uint32_t flags,
1235 nb_oper_data_cb cb, void *arg)
1236 {
1237 const void *list_entry = NULL;
1238
1239 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1240 return NB_OK;
1241
1242 do {
1243 struct yang_data *data;
1244 int ret;
1245
1246 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1247 list_entry);
1248 if (!list_entry)
1249 /* End of the list. */
1250 break;
1251
1252 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1253 if (data == NULL)
1254 continue;
1255
1256 ret = (*cb)(nb_node->snode, translator, data, arg);
1257 if (ret != NB_OK)
1258 return ret;
1259 } while (list_entry);
1260
1261 return NB_OK;
1262 }
1263
1264 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1265 const char *xpath_list,
1266 const void *parent_list_entry,
1267 const struct yang_list_keys *parent_list_keys,
1268 struct yang_translator *translator,
1269 uint32_t flags, nb_oper_data_cb cb, void *arg)
1270 {
1271 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1272 const void *list_entry = NULL;
1273 uint32_t position = 1;
1274
1275 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1276 return NB_OK;
1277
1278 /* Iterate over all list entries. */
1279 do {
1280 struct yang_list_keys list_keys;
1281 char xpath[XPATH_MAXLEN * 2];
1282 int ret;
1283
1284 /* Obtain list entry. */
1285 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1286 list_entry);
1287 if (!list_entry)
1288 /* End of the list. */
1289 break;
1290
1291 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1292 /* Obtain the list entry keys. */
1293 if (nb_callback_get_keys(nb_node, list_entry,
1294 &list_keys)
1295 != NB_OK) {
1296 flog_warn(EC_LIB_NB_CB_STATE,
1297 "%s: failed to get list keys",
1298 __func__);
1299 return NB_ERR;
1300 }
1301
1302 /* Build XPath of the list entry. */
1303 strlcpy(xpath, xpath_list, sizeof(xpath));
1304 for (unsigned int i = 0; i < list_keys.num; i++) {
1305 snprintf(xpath + strlen(xpath),
1306 sizeof(xpath) - strlen(xpath),
1307 "[%s='%s']", slist->keys[i]->name,
1308 list_keys.key[i]);
1309 }
1310 } else {
1311 /*
1312 * Keyless list - build XPath using a positional index.
1313 */
1314 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1315 position);
1316 position++;
1317 }
1318
1319 /* Iterate over the child nodes. */
1320 ret = nb_oper_data_iter_children(
1321 nb_node->snode, xpath, list_entry, &list_keys,
1322 translator, false, flags, cb, arg);
1323 if (ret != NB_OK)
1324 return ret;
1325 } while (list_entry);
1326
1327 return NB_OK;
1328 }
1329
/*
 * Dispatch the operational-data iteration according to the type of the given
 * schema node.
 *
 * 'first' indicates that 'snode' is the starting point of the iteration: in
 * that case the XPath is not extended with the node's name and the NORECURSE
 * flag check doesn't apply.
 */
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath_parent,
				  const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	char xpath[XPATH_MAXLEN];
	int ret = NB_OK;

	/* Honor the NORECURSE flag for nested containers and lists. */
	if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
	    && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
		return NB_OK;

	/* Update XPath (LYS_USES nodes don't add a path component). */
	strlcpy(xpath, xpath_parent, sizeof(xpath));
	if (!first && snode->nodetype != LYS_USES)
		snprintf(xpath + strlen(xpath), sizeof(xpath) - strlen(xpath),
			 "/%s", snode->name);

	nb_node = snode->priv;
	switch (snode->nodetype) {
	case LYS_CONTAINER:
		ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
						  list_keys, translator, flags,
						  cb, arg);
		break;
	case LYS_LEAF:
		ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_LEAFLIST:
		ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
						 list_keys, translator, flags,
						 cb, arg);
		break;
	case LYS_LIST:
		ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_USES:
		/* Groupings are transparent: iterate straight into children. */
		ret = nb_oper_data_iter_children(snode, xpath, list_entry,
						 list_keys, translator, false,
						 flags, cb, arg);
		break;
	default:
		break;
	}

	return ret;
}
1385
1386 int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1387 uint32_t flags, nb_oper_data_cb cb, void *arg)
1388 {
1389 struct nb_node *nb_node;
1390 const void *list_entry = NULL;
1391 struct yang_list_keys list_keys;
1392 struct list *list_dnodes;
1393 struct lyd_node *dnode, *dn;
1394 struct listnode *ln;
1395 int ret;
1396
1397 nb_node = nb_node_find(xpath);
1398 if (!nb_node) {
1399 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1400 "%s: unknown data path: %s", __func__, xpath);
1401 return NB_ERR;
1402 }
1403
1404 /* For now this function works only with containers and lists. */
1405 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1406 flog_warn(
1407 EC_LIB_NB_OPERATIONAL_DATA,
1408 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1409 __func__, xpath);
1410 return NB_ERR;
1411 }
1412
1413 /*
1414 * Create a data tree from the XPath so that we can parse the keys of
1415 * all YANG lists (if any).
1416 */
1417 ly_errno = 0;
1418 dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
1419 LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
1420 if (!dnode) {
1421 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
1422 __func__);
1423 return NB_ERR;
1424 }
1425
1426 /*
1427 * Create a linked list to sort the data nodes starting from the root.
1428 */
1429 list_dnodes = list_new();
1430 for (dn = dnode; dn; dn = dn->parent) {
1431 if (dn->schema->nodetype != LYS_LIST || !dn->child)
1432 continue;
1433 listnode_add_head(list_dnodes, dn);
1434 }
1435 /*
1436 * Use the northbound callbacks to find list entry pointer corresponding
1437 * to the given XPath.
1438 */
1439 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1440 struct lyd_node *child;
1441 struct nb_node *nn;
1442 unsigned int n = 0;
1443
1444 /* Obtain the list entry keys. */
1445 memset(&list_keys, 0, sizeof(list_keys));
1446 LY_TREE_FOR (dn->child, child) {
1447 if (!lys_is_key((struct lys_node_leaf *)child->schema,
1448 NULL))
1449 continue;
1450 strlcpy(list_keys.key[n],
1451 yang_dnode_get_string(child, NULL),
1452 sizeof(list_keys.key[n]));
1453 n++;
1454 }
1455 list_keys.num = n;
1456 if (list_keys.num
1457 != ((struct lys_node_list *)dn->schema)->keys_size) {
1458 list_delete(&list_dnodes);
1459 yang_dnode_free(dnode);
1460 return NB_ERR_NOT_FOUND;
1461 }
1462
1463 /* Find the list entry pointer. */
1464 nn = dn->schema->priv;
1465 list_entry =
1466 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1467 if (list_entry == NULL) {
1468 list_delete(&list_dnodes);
1469 yang_dnode_free(dnode);
1470 return NB_ERR_NOT_FOUND;
1471 }
1472 }
1473
1474 /* If a list entry was given, iterate over that list entry only. */
1475 if (dnode->schema->nodetype == LYS_LIST && dnode->child)
1476 ret = nb_oper_data_iter_children(
1477 nb_node->snode, xpath, list_entry, &list_keys,
1478 translator, true, flags, cb, arg);
1479 else
1480 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1481 &list_keys, translator, true,
1482 flags, cb, arg);
1483
1484 list_delete(&list_dnodes);
1485 yang_dnode_free(dnode);
1486
1487 return ret;
1488 }
1489
/*
 * Check whether the given northbound operation makes sense for the given YANG
 * schema node (e.g. only presence containers and leafs of type "empty" can be
 * "created", only user-ordered lists/leaf-lists can be "moved", etc).
 * Returns true when the combination is valid, false otherwise.
 */
bool nb_operation_is_valid(enum nb_operation operation,
			   const struct lys_node *snode)
{
	struct nb_node *nb_node = snode->priv;
	struct lys_node_container *scontainer;
	struct lys_node_leaf *sleaf;

	switch (operation) {
	case NB_OP_CREATE:
		/* Only configuration nodes can be created. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Only leafs of type "empty" are "created". */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base != LY_TYPE_EMPTY)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers are "created". */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MODIFY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Leafs of type "empty" are created/destroyed, never
			 * modified. */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base == LY_TYPE_EMPTY)
				return false;

			/* List keys can't be modified. */
			if (lys_is_key(sleaf, NULL))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_DESTROY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lys_node_leaf *)snode;

			/* List keys can't be deleted. */
			if (lys_is_key(sleaf, NULL))
				return false;

			/*
			 * Only optional leafs can be deleted, or leafs whose
			 * parent is a case statement.
			 */
			if (snode->parent->nodetype == LYS_CASE)
				return true;
			if (sleaf->when)
				return true;
			if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
			    || sleaf->dflt)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be destroyed. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MOVE:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LIST:
		case LYS_LEAFLIST:
			/* Only user-ordered lists can be moved. */
			if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
		/* Valid for any configuration node. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;
		return true;
	case NB_OP_GET_ELEM:
		/* Only operational (read-only) data can be fetched. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
		case LYS_LEAFLIST:
			break;
		case LYS_CONTAINER:
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_NEXT:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			break;
		case LYS_LEAFLIST:
			if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
		switch (snode->nodetype) {
		case LYS_LIST:
			/* Keyless lists have no keys to get or look up. */
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_RPC:
		/* RPCs and actions are neither configuration nor state. */
		if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_RPC:
		case LYS_ACTION:
			break;
		default:
			return false;
		}
		return true;
	default:
		return false;
	}
}
1653
1654 DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1655 (xpath, arguments));
1656
1657 int nb_notification_send(const char *xpath, struct list *arguments)
1658 {
1659 int ret;
1660
1661 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1662
1663 ret = hook_call(nb_notification_send, xpath, arguments);
1664 if (arguments)
1665 list_delete(&arguments);
1666
1667 return ret;
1668 }
1669
/* Running configuration user pointers management. */
struct nb_config_entry {
	/* XPath of the data node this entry is associated with.
	 * NOTE: must remain the first member (see
	 * running_config_entry_key_make, which hashes the struct pointer
	 * directly as a string). */
	char xpath[XPATH_MAXLEN];

	/* Opaque user pointer stored via nb_running_set_entry(). */
	void *entry;
};
1675
1676 static bool running_config_entry_cmp(const void *value1, const void *value2)
1677 {
1678 const struct nb_config_entry *c1 = value1;
1679 const struct nb_config_entry *c2 = value2;
1680
1681 return strmatch(c1->xpath, c2->xpath);
1682 }
1683
1684 static unsigned int running_config_entry_key_make(const void *value)
1685 {
1686 return string_hash_make(value);
1687 }
1688
1689 static void *running_config_entry_alloc(void *p)
1690 {
1691 struct nb_config_entry *new, *key = p;
1692
1693 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1694 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1695
1696 return new;
1697 }
1698
/* Free a running configuration entry (also used as hash_clean callback). */
static void running_config_entry_free(void *arg)
{
	XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
}
1703
1704 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1705 {
1706 struct nb_config_entry *config, s;
1707
1708 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1709 config = hash_get(running_config_entries, &s,
1710 running_config_entry_alloc);
1711 config->entry = entry;
1712 }
1713
1714 static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
1715 {
1716 struct nb_config_entry *config, s;
1717 struct lyd_node *child;
1718 void *entry = NULL;
1719
1720 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1721 config = hash_release(running_config_entries, &s);
1722 if (config) {
1723 entry = config->entry;
1724 running_config_entry_free(config);
1725 }
1726
1727 /* Unset user pointers from the child nodes. */
1728 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
1729 LY_TREE_FOR (dnode->child, child) {
1730 (void)nb_running_unset_entry_helper(child);
1731 }
1732 }
1733
1734 return entry;
1735 }
1736
/*
 * Remove and return the user pointer associated with 'dnode'. Aborts if no
 * user pointer was set (callers are expected to unset only what they set).
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);

	return entry;
}
1746
1747 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1748 bool abort_if_not_found)
1749 {
1750 const struct lyd_node *orig_dnode = dnode;
1751 char xpath_buf[XPATH_MAXLEN];
1752
1753 assert(dnode || xpath);
1754
1755 if (!dnode)
1756 dnode = yang_dnode_get(running_config->dnode, xpath);
1757
1758 while (dnode) {
1759 struct nb_config_entry *config, s;
1760
1761 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1762 config = hash_lookup(running_config_entries, &s);
1763 if (config)
1764 return config->entry;
1765
1766 dnode = dnode->parent;
1767 }
1768
1769 if (!abort_if_not_found)
1770 return NULL;
1771
1772 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1773 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1774 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1775 zlog_backtrace(LOG_ERR);
1776 abort();
1777 }
1778
1779 /* Logging functions. */
1780 const char *nb_event_name(enum nb_event event)
1781 {
1782 switch (event) {
1783 case NB_EV_VALIDATE:
1784 return "validate";
1785 case NB_EV_PREPARE:
1786 return "prepare";
1787 case NB_EV_ABORT:
1788 return "abort";
1789 case NB_EV_APPLY:
1790 return "apply";
1791 default:
1792 return "unknown";
1793 }
1794 }
1795
1796 const char *nb_operation_name(enum nb_operation operation)
1797 {
1798 switch (operation) {
1799 case NB_OP_CREATE:
1800 return "create";
1801 case NB_OP_MODIFY:
1802 return "modify";
1803 case NB_OP_DESTROY:
1804 return "destroy";
1805 case NB_OP_MOVE:
1806 return "move";
1807 case NB_OP_PRE_VALIDATE:
1808 return "pre_validate";
1809 case NB_OP_APPLY_FINISH:
1810 return "apply_finish";
1811 case NB_OP_GET_ELEM:
1812 return "get_elem";
1813 case NB_OP_GET_NEXT:
1814 return "get_next";
1815 case NB_OP_GET_KEYS:
1816 return "get_keys";
1817 case NB_OP_LOOKUP_ENTRY:
1818 return "lookup_entry";
1819 case NB_OP_RPC:
1820 return "rpc";
1821 default:
1822 return "unknown";
1823 }
1824 }
1825
1826 const char *nb_err_name(enum nb_error error)
1827 {
1828 switch (error) {
1829 case NB_OK:
1830 return "ok";
1831 case NB_ERR:
1832 return "generic error";
1833 case NB_ERR_NO_CHANGES:
1834 return "no changes";
1835 case NB_ERR_NOT_FOUND:
1836 return "element not found";
1837 case NB_ERR_LOCKED:
1838 return "resource is locked";
1839 case NB_ERR_VALIDATION:
1840 return "validation error";
1841 case NB_ERR_RESOURCE:
1842 return "failed to allocate resource";
1843 case NB_ERR_INCONSISTENCY:
1844 return "internal inconsistency";
1845 default:
1846 return "unknown";
1847 }
1848 }
1849
1850 const char *nb_client_name(enum nb_client client)
1851 {
1852 switch (client) {
1853 case NB_CLIENT_CLI:
1854 return "CLI";
1855 case NB_CLIENT_CONFD:
1856 return "ConfD";
1857 case NB_CLIENT_SYSREPO:
1858 return "Sysrepo";
1859 case NB_CLIENT_GRPC:
1860 return "gRPC";
1861 default:
1862 return "unknown";
1863 }
1864 }
1865
1866 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1867 {
1868 for (size_t i = 0; module->nodes[i].xpath; i++) {
1869 struct nb_node *nb_node;
1870 uint32_t priority;
1871
1872 nb_node = nb_node_find(module->nodes[i].xpath);
1873 if (!nb_node) {
1874 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1875 "%s: unknown data path: %s", __func__,
1876 module->nodes[i].xpath);
1877 continue;
1878 }
1879
1880 nb_node->cbs = module->nodes[i].cbs;
1881 priority = module->nodes[i].priority;
1882 if (priority != 0)
1883 nb_node->priority = priority;
1884 }
1885 }
1886
/*
 * Initialize the northbound layer: load the given YANG modules, create the
 * nb_node for every schema node, install and validate the module callbacks,
 * create an empty running configuration and initialize the northbound CLI.
 * The process exits if any callback fails validation. The order of the steps
 * below matters: callbacks can only be loaded after the nodes exist, and
 * validation requires the callbacks to be in place.
 */
void nb_init(struct thread_master *tm,
	     const struct frr_yang_module_info *modules[], size_t nmodules)
{
	unsigned int errors = 0;

	/* Load YANG modules. */
	for (size_t i = 0; i < nmodules; i++)
		yang_module_load(modules[i]->name);

	/* Create a nb_node for all YANG schema nodes. */
	nb_nodes_create();

	/* Load northbound callbacks. */
	for (size_t i = 0; i < nmodules; i++)
		nb_load_callbacks(modules[i]);

	/* Validate northbound callbacks. */
	yang_snodes_iterate_all(nb_node_validate, 0, &errors);
	if (errors > 0) {
		/* Misconfigured callbacks are a programming error - bail. */
		flog_err(
			EC_LIB_NB_CBS_VALIDATION,
			"%s: failed to validate northbound callbacks: %u error(s)",
			__func__, errors);
		exit(1);
	}

	/* Create an empty running configuration. */
	running_config = nb_config_new(NULL);
	running_config_entries = hash_create(running_config_entry_key_make,
					     running_config_entry_cmp,
					     "Running Configuration Entries");
	pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);

	/* Initialize the northbound CLI. */
	nb_cli_init(tm);
}
1923
/*
 * Tear down the northbound layer, releasing everything created by nb_init():
 * the CLI, all nb_nodes, the running configuration (including its user
 * pointer hash table) and the management lock mutex.
 */
void nb_terminate(void)
{
	/* Terminate the northbound CLI. */
	nb_cli_terminate();

	/* Delete all nb_node's from all YANG modules. */
	nb_nodes_delete();

	/* Delete the running configuration. */
	hash_clean(running_config_entries, running_config_entry_free);
	hash_free(running_config_entries);
	nb_config_free(running_config);
	pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}