1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
33
34 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
35 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
36 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
37
38 /* Running configuration - shouldn't be modified directly. */
39 struct nb_config *running_config;
40
41 /* Hash table of user pointers associated with configuration entries. */
42 static struct hash *running_config_entries;
43
44 /* Management lock for the running configuration. */
45 static struct {
46 /* Mutex protecting this structure. */
47 pthread_mutex_t mtx;
48
49 /* Actual lock. */
50 bool locked;
51
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client;
54
55 /* Northbound user who owns this lock. */
56 const void *owner_user;
57 } running_config_mgmt_lock;
58
59 /*
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
62 */
63 static bool transaction_in_progress;
64
65 static int nb_callback_configuration(const enum nb_event event,
66 struct nb_config_change *change);
67 static struct nb_transaction *nb_transaction_new(struct nb_config *config,
68 struct nb_config_cbs *changes,
69 enum nb_client client,
70 const void *user,
71 const char *comment);
72 static void nb_transaction_free(struct nb_transaction *transaction);
73 static int nb_transaction_process(enum nb_event event,
74 struct nb_transaction *transaction);
75 static void nb_transaction_apply_finish(struct nb_transaction *transaction);
76 static int nb_oper_data_iter_node(const struct lys_node *snode,
77 const char *xpath, const void *list_entry,
78 const struct yang_list_keys *list_keys,
79 struct yang_translator *translator,
80 bool first, uint32_t flags,
81 nb_oper_data_cb cb, void *arg);
82
83 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
84 {
85 bool *config_only = arg;
86
87 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
88 *config_only = false;
89 return YANG_ITER_STOP;
90 }
91
92 return YANG_ITER_CONTINUE;
93 }
94
95 static int nb_node_new_cb(const struct lys_node *snode, void *arg)
96 {
97 struct nb_node *nb_node;
98 struct lys_node *sparent, *sparent_list;
99
100 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
101 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
102 sizeof(nb_node->xpath));
103 nb_node->priority = NB_DFLT_PRIORITY;
104 sparent = yang_snode_real_parent(snode);
105 if (sparent)
106 nb_node->parent = sparent->priv;
107 sparent_list = yang_snode_parent_list(snode);
108 if (sparent_list)
109 nb_node->parent_list = sparent_list->priv;
110
111 /* Set flags. */
112 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
113 bool config_only = true;
114
115 yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
116 YANG_ITER_ALLOW_AUGMENTATIONS,
117 &config_only);
118 if (config_only)
119 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
120 }
121 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
122 struct lys_node_list *slist;
123
124 slist = (struct lys_node_list *)snode;
125 if (slist->keys_size == 0)
126 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
127 }
128
129 /*
130 * Link the northbound node and the libyang schema node with one
131 * another.
132 */
133 nb_node->snode = snode;
134 lys_set_private(snode, nb_node);
135
136 return YANG_ITER_CONTINUE;
137 }
138
139 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
140 {
141 struct nb_node *nb_node;
142
143 nb_node = snode->priv;
144 lys_set_private(snode, NULL);
145 XFREE(MTYPE_NB_NODE, nb_node);
146
147 return YANG_ITER_CONTINUE;
148 }
149
150 void nb_nodes_create(void)
151 {
152 yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
153 }
154
155 void nb_nodes_delete(void)
156 {
157 yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
158 }
159
160 struct nb_node *nb_node_find(const char *xpath)
161 {
162 const struct lys_node *snode;
163
164 /*
165 	 * Use libyang to find the schema node associated with the XPath and get
166 * the northbound node from there (snode private pointer).
167 */
168 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
169 if (!snode)
170 return NULL;
171
172 return snode->priv;
173 }
174
175 static int nb_node_validate_cb(const struct nb_node *nb_node,
176 enum nb_operation operation,
177 int callback_implemented, bool optional)
178 {
179 bool valid;
180
181 valid = nb_operation_is_valid(operation, nb_node->snode);
182
183 if (!valid && callback_implemented)
184 flog_warn(EC_LIB_NB_CB_UNNEEDED,
185 "unneeded '%s' callback for '%s'",
186 nb_operation_name(operation), nb_node->xpath);
187
188 if (!optional && valid && !callback_implemented) {
189 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
190 nb_operation_name(operation), nb_node->xpath);
191 return 1;
192 }
193
194 return 0;
195 }
196
197 /*
198 * Check if the required callbacks were implemented for the given northbound
199 * node.
200 */
201 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
202
203 {
204 unsigned int error = 0;
205
206 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
207 !!nb_node->cbs.create, false);
208 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
209 !!nb_node->cbs.modify, false);
210 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
211 !!nb_node->cbs.destroy, false);
212 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
213 false);
214 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
215 !!nb_node->cbs.apply_finish, true);
216 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
217 !!nb_node->cbs.get_elem, false);
218 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
219 !!nb_node->cbs.get_next, false);
220 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
221 !!nb_node->cbs.get_keys, false);
222 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
223 !!nb_node->cbs.lookup_entry, false);
224 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
225 false);
226
227 return error;
228 }
229
230 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
231 {
232 /* Top-level nodes can have any priority. */
233 if (!nb_node->parent)
234 return 0;
235
236 if (nb_node->priority < nb_node->parent->priority) {
237 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
238 "node has higher priority than its parent [xpath %s]",
239 nb_node->xpath);
240 return 1;
241 }
242
243 return 0;
244 }
245
246 static int nb_node_validate(const struct lys_node *snode, void *arg)
247 {
248 struct nb_node *nb_node = snode->priv;
249 unsigned int *errors = arg;
250
251 /* Validate callbacks and priority. */
252 *errors += nb_node_validate_cbs(nb_node);
253 *errors += nb_node_validate_priority(nb_node);
254
255 return YANG_ITER_CONTINUE;
256 }
257
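/*
 * Create a new northbound configuration, wrapping the given libyang data
 * tree or allocating an empty one when 'dnode' is NULL.
 */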
258 struct nb_config *nb_config_new(struct lyd_node *dnode)
259 {
260 struct nb_config *config;
261
262 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
263 if (dnode)
264 config->dnode = dnode;
265 else
266 config->dnode = yang_dnode_new(ly_native_ctx, true);
267 config->version = 0;
268 pthread_rwlock_init(&config->lock, NULL);
269
270 return config;
271 }
272
273 void nb_config_free(struct nb_config *config)
274 {
275 if (config->dnode)
276 yang_dnode_free(config->dnode);
277 pthread_rwlock_destroy(&config->lock);
278 XFREE(MTYPE_NB_CONFIG, config);
279 }
280
281 struct nb_config *nb_config_dup(const struct nb_config *config)
282 {
283 struct nb_config *dup;
284
285 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
286 dup->dnode = yang_dnode_dup(config->dnode);
287 dup->version = config->version;
288 pthread_rwlock_init(&dup->lock, NULL);
289
290 return dup;
291 }
292
293 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
294 bool preserve_source)
295 {
296 int ret;
297
298 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
299 if (ret != 0)
300 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
301
302 if (!preserve_source)
303 nb_config_free(config_src);
304
305 return (ret == 0) ? NB_OK : NB_ERR;
306 }
307
308 void nb_config_replace(struct nb_config *config_dst,
309 struct nb_config *config_src, bool preserve_source)
310 {
311 /* Update version. */
312 if (config_src->version != 0)
313 config_dst->version = config_src->version;
314
315 /* Update dnode. */
316 if (config_dst->dnode)
317 yang_dnode_free(config_dst->dnode);
318 if (preserve_source) {
319 config_dst->dnode = yang_dnode_dup(config_src->dnode);
320 } else {
321 config_dst->dnode = config_src->dnode;
322 config_src->dnode = NULL;
323 nb_config_free(config_src);
324 }
325 }
326
327 /* Generate the nb_config_cbs tree. */
328 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
329 const struct nb_config_cb *b)
330 {
331 /* Sort by priority first. */
332 if (a->nb_node->priority < b->nb_node->priority)
333 return -1;
334 if (a->nb_node->priority > b->nb_node->priority)
335 return 1;
336
337 /*
338 * Use XPath as a tie-breaker. This will naturally sort parent nodes
339 * before their children.
340 */
341 return strcmp(a->xpath, b->xpath);
342 }
343 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
344
345 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
346 enum nb_operation operation,
347 const struct lyd_node *dnode)
348 {
349 struct nb_config_change *change;
350
351 change = XCALLOC(MTYPE_TMP, sizeof(*change));
352 change->cb.operation = operation;
353 change->cb.nb_node = dnode->schema->priv;
354 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
355 change->cb.dnode = dnode;
356
357 RB_INSERT(nb_config_cbs, changes, &change->cb);
358 }
359
360 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
361 {
362 while (!RB_EMPTY(nb_config_cbs, changes)) {
363 struct nb_config_change *change;
364
365 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
366 changes);
367 RB_REMOVE(nb_config_cbs, changes, &change->cb);
368 XFREE(MTYPE_TMP, change);
369 }
370 }
371
372 /*
373 * Helper function used when calculating the delta between two different
374 * configurations. Given a new subtree, calculate all new YANG data nodes,
375 * excluding default leafs and leaf-lists. This is a recursive function.
376 */
377 static void nb_config_diff_created(const struct lyd_node *dnode,
378 struct nb_config_cbs *changes)
379 {
380 enum nb_operation operation;
381 struct lyd_node *child;
382
383 switch (dnode->schema->nodetype) {
384 case LYS_LEAF:
385 case LYS_LEAFLIST:
386 if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
387 break;
388
389 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
390 operation = NB_OP_CREATE;
391 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
392 operation = NB_OP_MODIFY;
393 else
394 return;
395
396 nb_config_diff_add_change(changes, operation, dnode);
397 break;
398 case LYS_CONTAINER:
399 case LYS_LIST:
400 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
401 nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);
402
403 /* Process child nodes recursively. */
404 LY_TREE_FOR (dnode->child, child) {
405 nb_config_diff_created(child, changes);
406 }
407 break;
408 default:
409 break;
410 }
411 }
412
413 static void nb_config_diff_deleted(const struct lyd_node *dnode,
414 struct nb_config_cbs *changes)
415 {
416 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
417 nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
418 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
419 struct lyd_node *child;
420
421 /*
422 * Non-presence containers need special handling since they
423 			 * don't have "destroy" callbacks. In this case, we need to call
424 			 * the "destroy" callbacks of their child nodes when applicable
425 			 * (i.e. optional nodes).
426 */
427 LY_TREE_FOR (dnode->child, child) {
428 nb_config_diff_deleted(child, changes);
429 }
430 }
431 }
432
433 /* Calculate the delta between two different configurations. */
434 static void nb_config_diff(const struct nb_config *config1,
435 const struct nb_config *config2,
436 struct nb_config_cbs *changes)
437 {
438 struct lyd_difflist *diff;
439
440 diff = lyd_diff(config1->dnode, config2->dnode,
441 LYD_DIFFOPT_WITHDEFAULTS);
442 assert(diff);
443
444 for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
445 LYD_DIFFTYPE type;
446 struct lyd_node *dnode;
447
448 type = diff->type[i];
449
450 switch (type) {
451 case LYD_DIFF_CREATED:
452 dnode = diff->second[i];
453 nb_config_diff_created(dnode, changes);
454 break;
455 case LYD_DIFF_DELETED:
456 dnode = diff->first[i];
457 nb_config_diff_deleted(dnode, changes);
458 break;
459 case LYD_DIFF_CHANGED:
460 dnode = diff->second[i];
461 nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
462 break;
463 case LYD_DIFF_MOVEDAFTER1:
464 case LYD_DIFF_MOVEDAFTER2:
465 default:
466 continue;
467 }
468 }
469
470 lyd_free_diff(diff);
471 }
472
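/*
 * Apply a single edit (create, modify, destroy or move) to the candidate
 * configuration, at the data node identified by the given XPath.
 */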
473 int nb_candidate_edit(struct nb_config *candidate,
474 const struct nb_node *nb_node,
475 enum nb_operation operation, const char *xpath,
476 const struct yang_data *previous,
477 const struct yang_data *data)
478 {
479 struct lyd_node *dnode;
480 char xpath_edit[XPATH_MAXLEN];
481
482 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
483 if (nb_node->snode->nodetype == LYS_LEAFLIST)
484 snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
485 data->value);
486 else
487 strlcpy(xpath_edit, xpath, sizeof(xpath_edit));
488
489 switch (operation) {
490 case NB_OP_CREATE:
491 case NB_OP_MODIFY:
492 ly_errno = 0;
493 dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
494 xpath_edit, (void *)data->value, 0,
495 LYD_PATH_OPT_UPDATE);
496 if (!dnode && ly_errno) {
497 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
498 __func__);
499 return NB_ERR;
500 }
501
502 /*
503 * If a new node was created, call lyd_validate() only to create
504 * default child nodes.
505 */
506 if (dnode) {
507 lyd_schema_sort(dnode, 0);
508 lyd_validate(&dnode,
509 LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
510 ly_native_ctx);
511 }
512 break;
513 case NB_OP_DESTROY:
514 dnode = yang_dnode_get(candidate->dnode, xpath_edit);
515 if (!dnode)
516 /*
517 * Return a special error code so the caller can choose
518 * whether to ignore it or not.
519 */
520 return NB_ERR_NOT_FOUND;
521 lyd_free(dnode);
522 break;
523 case NB_OP_MOVE:
524 /* TODO: update configuration. */
525 break;
526 default:
527 flog_warn(EC_LIB_DEVELOPMENT,
528 "%s: unknown operation (%u) [xpath %s]", __func__,
529 operation, xpath_edit);
530 return NB_ERR;
531 }
532
533 return NB_OK;
534 }
535
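/*
 * Check whether the candidate was created from an older version of the
 * running configuration and hence needs to be updated (see
 * nb_candidate_update() below).
 */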
536 bool nb_candidate_needs_update(const struct nb_config *candidate)
537 {
538 bool ret = false;
539
540 pthread_rwlock_rdlock(&running_config->lock);
541 {
542 if (candidate->version < running_config->version)
543 ret = true;
544 }
545 pthread_rwlock_unlock(&running_config->lock);
546
547 return ret;
548 }
549
550 int nb_candidate_update(struct nb_config *candidate)
551 {
552 struct nb_config *updated_config;
553
554 pthread_rwlock_rdlock(&running_config->lock);
555 {
556 updated_config = nb_config_dup(running_config);
557 }
558 pthread_rwlock_unlock(&running_config->lock);
559
560 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
561 return NB_ERR;
562
563 nb_config_replace(candidate, updated_config, false);
564
565 return NB_OK;
566 }
567
568 /*
569 * Perform YANG syntactic and semantic validation.
570 *
571 * WARNING: lyd_validate() can change the configuration as part of the
572 * validation process.
573 */
574 static int nb_candidate_validate_yang(struct nb_config *candidate)
575 {
576 if (lyd_validate(&candidate->dnode,
577 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
578 ly_native_ctx)
579 != 0)
580 return NB_ERR_VALIDATION;
581
582 return NB_OK;
583 }
584
585 /* Perform code-level validation using the northbound callbacks. */
586 static int nb_candidate_validate_changes(struct nb_config *candidate,
587 struct nb_config_cbs *changes)
588 {
589 struct nb_config_cb *cb;
590
591 RB_FOREACH (cb, nb_config_cbs, changes) {
592 struct nb_config_change *change = (struct nb_config_change *)cb;
593 int ret;
594
595 ret = nb_callback_configuration(NB_EV_VALIDATE, change);
596 if (ret != NB_OK)
597 return NB_ERR_VALIDATION;
598 }
599
600 return NB_OK;
601 }
602
603 int nb_candidate_validate(struct nb_config *candidate)
604 {
605 struct nb_config_cbs changes;
606 int ret;
607
608 if (nb_candidate_validate_yang(candidate) != NB_OK)
609 return NB_ERR_VALIDATION;
610
611 RB_INIT(nb_config_cbs, &changes);
612 pthread_rwlock_rdlock(&running_config->lock);
613 {
614 nb_config_diff(running_config, candidate, &changes);
615 ret = nb_candidate_validate_changes(candidate, &changes);
616 nb_config_diff_del_changes(&changes);
617 }
618 pthread_rwlock_unlock(&running_config->lock);
619
620 return ret;
621 }
622
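/*
 * First phase of a commit: validate the candidate, compute the changes
 * relative to the running configuration, create a transaction and run the
 * 'prepare' callbacks. On success, the caller is expected to invoke either
 * nb_candidate_commit_apply() or nb_candidate_commit_abort() (see
 * nb_candidate_commit() below).
 */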
623 int nb_candidate_commit_prepare(struct nb_config *candidate,
624 enum nb_client client, const void *user,
625 const char *comment,
626 struct nb_transaction **transaction)
627 {
628 struct nb_config_cbs changes;
629
630 if (nb_candidate_validate_yang(candidate) != NB_OK) {
631 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
632 "%s: failed to validate candidate configuration",
633 __func__);
634 return NB_ERR_VALIDATION;
635 }
636
637 RB_INIT(nb_config_cbs, &changes);
638 pthread_rwlock_rdlock(&running_config->lock);
639 {
640 nb_config_diff(running_config, candidate, &changes);
641 if (RB_EMPTY(nb_config_cbs, &changes)) {
642 pthread_rwlock_unlock(&running_config->lock);
643 return NB_ERR_NO_CHANGES;
644 }
645
646 if (nb_candidate_validate_changes(candidate, &changes)
647 != NB_OK) {
648 flog_warn(
649 EC_LIB_NB_CANDIDATE_INVALID,
650 "%s: failed to validate candidate configuration",
651 __func__);
652 nb_config_diff_del_changes(&changes);
653 pthread_rwlock_unlock(&running_config->lock);
654 return NB_ERR_VALIDATION;
655 }
656
657 *transaction = nb_transaction_new(candidate, &changes, client,
658 user, comment);
659 if (*transaction == NULL) {
660 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
661 "%s: failed to create transaction", __func__);
662 nb_config_diff_del_changes(&changes);
663 pthread_rwlock_unlock(&running_config->lock);
664 return NB_ERR_LOCKED;
665 }
666 }
667 pthread_rwlock_unlock(&running_config->lock);
668
669 return nb_transaction_process(NB_EV_PREPARE, *transaction);
670 }
671
672 void nb_candidate_commit_abort(struct nb_transaction *transaction)
673 {
674 (void)nb_transaction_process(NB_EV_ABORT, transaction);
675 nb_transaction_free(transaction);
676 }
677
678 void nb_candidate_commit_apply(struct nb_transaction *transaction,
679 bool save_transaction, uint32_t *transaction_id)
680 {
681 (void)nb_transaction_process(NB_EV_APPLY, transaction);
682 nb_transaction_apply_finish(transaction);
683
684 /* Replace running by candidate. */
685 transaction->config->version++;
686 pthread_rwlock_wrlock(&running_config->lock);
687 {
688 nb_config_replace(running_config, transaction->config, true);
689 }
690 pthread_rwlock_unlock(&running_config->lock);
691
692 /* Record transaction. */
693 if (save_transaction
694 && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
695 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
696 "%s: failed to record transaction", __func__);
697
698 nb_transaction_free(transaction);
699 }
700
701 int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
702 const void *user, bool save_transaction,
703 const char *comment, uint32_t *transaction_id)
704 {
705 struct nb_transaction *transaction = NULL;
706 int ret;
707
708 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
709 &transaction);
710 /*
711 * Apply the changes if the preparation phase succeeded. Otherwise abort
712 * the transaction.
713 */
714 if (ret == NB_OK)
715 nb_candidate_commit_apply(transaction, save_transaction,
716 transaction_id);
717 else if (transaction != NULL)
718 nb_candidate_commit_abort(transaction);
719
720 return ret;
721 }
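
/*
 * Illustrative sketch only (not code from this file): how a northbound client
 * could build and commit a candidate configuration using the helpers above.
 * The XPath and the yang_data_new() helper are assumptions used purely for
 * illustration.
 *
 *	struct nb_config *candidate = nb_config_dup(running_config);
 *	const struct nb_node *nb_node = nb_node_find("/frr-example:foo/bar");
 *	struct yang_data *data = yang_data_new("/frr-example:foo/bar", "baz");
 *
 *	nb_candidate_edit(candidate, nb_node, NB_OP_MODIFY,
 *			  "/frr-example:foo/bar", NULL, data);
 *	nb_candidate_commit(candidate, NB_CLIENT_CLI, NULL, false, NULL, NULL);
 *	nb_config_free(candidate);
 */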
722
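/*
 * Try to acquire the management lock on the running configuration on behalf
 * of the given client/user pair. Returns 0 on success, -1 if the lock is
 * already held.
 */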
723 int nb_running_lock(enum nb_client client, const void *user)
724 {
725 int ret = -1;
726
727 frr_with_mutex(&running_config_mgmt_lock.mtx) {
728 if (!running_config_mgmt_lock.locked) {
729 running_config_mgmt_lock.locked = true;
730 running_config_mgmt_lock.owner_client = client;
731 running_config_mgmt_lock.owner_user = user;
732 ret = 0;
733 }
734 }
735
736 return ret;
737 }
738
739 int nb_running_unlock(enum nb_client client, const void *user)
740 {
741 int ret = -1;
742
743 frr_with_mutex(&running_config_mgmt_lock.mtx) {
744 if (running_config_mgmt_lock.locked
745 && running_config_mgmt_lock.owner_client == client
746 && running_config_mgmt_lock.owner_user == user) {
747 running_config_mgmt_lock.locked = false;
748 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
749 running_config_mgmt_lock.owner_user = NULL;
750 ret = 0;
751 }
752 }
753
754 return ret;
755 }
756
757 int nb_running_lock_check(enum nb_client client, const void *user)
758 {
759 int ret = -1;
760
761 frr_with_mutex(&running_config_mgmt_lock.mtx) {
762 if (!running_config_mgmt_lock.locked
763 || (running_config_mgmt_lock.owner_client == client
764 && running_config_mgmt_lock.owner_user == user))
765 ret = 0;
766 }
767
768 return ret;
769 }
770
771 static void nb_log_callback(const enum nb_event event,
772 enum nb_operation operation, const char *xpath,
773 const char *value)
774 {
775 zlog_debug(
776 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
777 nb_event_name(event), nb_operation_name(operation), xpath,
778 value ? value : "(NULL)");
779 }
780
781 /*
782  * Call the northbound configuration callback associated with a given
783 * configuration change.
784 */
785 static int nb_callback_configuration(const enum nb_event event,
786 struct nb_config_change *change)
787 {
788 enum nb_operation operation = change->cb.operation;
789 const char *xpath = change->cb.xpath;
790 const struct nb_node *nb_node = change->cb.nb_node;
791 const struct lyd_node *dnode = change->cb.dnode;
792 union nb_resource *resource;
793 int ret = NB_ERR;
794
795 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
796 const char *value = "(none)";
797
798 if (dnode && !yang_snode_is_typeless_data(dnode->schema))
799 value = yang_dnode_get_string(dnode, NULL);
800
801 nb_log_callback(event, operation, xpath, value);
802 }
803
804 if (event == NB_EV_VALIDATE)
805 resource = NULL;
806 else
807 resource = &change->resource;
808
809 switch (operation) {
810 case NB_OP_CREATE:
811 ret = (*nb_node->cbs.create)(event, dnode, resource);
812 break;
813 case NB_OP_MODIFY:
814 ret = (*nb_node->cbs.modify)(event, dnode, resource);
815 break;
816 case NB_OP_DESTROY:
817 ret = (*nb_node->cbs.destroy)(event, dnode);
818 break;
819 case NB_OP_MOVE:
820 ret = (*nb_node->cbs.move)(event, dnode);
821 break;
822 default:
823 flog_err(EC_LIB_DEVELOPMENT,
824 "%s: unknown operation (%u) [xpath %s]", __func__,
825 operation, xpath);
826 exit(1);
827 }
828
829 if (ret != NB_OK) {
830 int priority;
831 enum lib_log_refs ref;
832
833 switch (event) {
834 case NB_EV_VALIDATE:
835 priority = LOG_WARNING;
836 ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
837 break;
838 case NB_EV_PREPARE:
839 priority = LOG_WARNING;
840 ref = EC_LIB_NB_CB_CONFIG_PREPARE;
841 break;
842 case NB_EV_ABORT:
843 priority = LOG_WARNING;
844 ref = EC_LIB_NB_CB_CONFIG_ABORT;
845 break;
846 case NB_EV_APPLY:
847 priority = LOG_ERR;
848 ref = EC_LIB_NB_CB_CONFIG_APPLY;
849 break;
850 default:
851 flog_err(EC_LIB_DEVELOPMENT,
852 "%s: unknown event (%u) [xpath %s]",
853 __func__, event, xpath);
854 exit(1);
855 }
856
857 flog(priority, ref,
858 "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
859 __func__, nb_err_name(ret), nb_event_name(event),
860 nb_operation_name(operation), xpath);
861 }
862
863 return ret;
864 }
865
866 struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
867 const char *xpath,
868 const void *list_entry)
869 {
870 DEBUGD(&nb_dbg_cbs_state,
871 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
872 xpath, list_entry);
873
874 return nb_node->cbs.get_elem(xpath, list_entry);
875 }
876
877 const void *nb_callback_get_next(const struct nb_node *nb_node,
878 const void *parent_list_entry,
879 const void *list_entry)
880 {
881 DEBUGD(&nb_dbg_cbs_state,
882 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
883 nb_node->xpath, parent_list_entry, list_entry);
884
885 return nb_node->cbs.get_next(parent_list_entry, list_entry);
886 }
887
888 int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
889 struct yang_list_keys *keys)
890 {
891 DEBUGD(&nb_dbg_cbs_state,
892 "northbound callback (get_keys): node [%s] list_entry [%p]",
893 nb_node->xpath, list_entry);
894
895 return nb_node->cbs.get_keys(list_entry, keys);
896 }
897
898 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
899 const void *parent_list_entry,
900 const struct yang_list_keys *keys)
901 {
902 DEBUGD(&nb_dbg_cbs_state,
903 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
904 nb_node->xpath, parent_list_entry);
905
906 return nb_node->cbs.lookup_entry(parent_list_entry, keys);
907 }
908
909 int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
910 const struct list *input, struct list *output)
911 {
912 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
913
914 return nb_node->cbs.rpc(xpath, input, output);
915 }
916
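/*
 * Create a new configuration transaction. Fails if the running configuration
 * is locked by another client or if another transaction is already in
 * progress.
 */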
917 static struct nb_transaction *
918 nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
919 enum nb_client client, const void *user, const char *comment)
920 {
921 struct nb_transaction *transaction;
922
923 if (nb_running_lock_check(client, user)) {
924 flog_warn(
925 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
926 "%s: running configuration is locked by another client",
927 __func__);
928 return NULL;
929 }
930
931 if (transaction_in_progress) {
932 flog_warn(
933 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
934 "%s: error - there's already another transaction in progress",
935 __func__);
936 return NULL;
937 }
938 transaction_in_progress = true;
939
940 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
941 transaction->client = client;
942 if (comment)
943 strlcpy(transaction->comment, comment,
944 sizeof(transaction->comment));
945 transaction->config = config;
946 transaction->changes = *changes;
947
948 return transaction;
949 }
950
951 static void nb_transaction_free(struct nb_transaction *transaction)
952 {
953 nb_config_diff_del_changes(&transaction->changes);
954 XFREE(MTYPE_TMP, transaction);
955 transaction_in_progress = false;
956 }
957
958 /* Process all configuration changes associated with a transaction. */
959 static int nb_transaction_process(enum nb_event event,
960 struct nb_transaction *transaction)
961 {
962 struct nb_config_cb *cb;
963
964 /*
965 * Need to lock the running configuration since transaction->changes
966 * can contain pointers to data nodes from the running configuration.
967 */
968 pthread_rwlock_rdlock(&running_config->lock);
969 {
970 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
971 struct nb_config_change *change =
972 (struct nb_config_change *)cb;
973 int ret;
974
975 /*
976 * Only try to release resources that were allocated
977 * successfully.
978 */
979 if (event == NB_EV_ABORT && change->prepare_ok == false)
980 break;
981
982 /* Call the appropriate callback. */
983 ret = nb_callback_configuration(event, change);
984 switch (event) {
985 case NB_EV_PREPARE:
986 if (ret != NB_OK) {
987 pthread_rwlock_unlock(
988 &running_config->lock);
989 return ret;
990 }
991 change->prepare_ok = true;
992 break;
993 case NB_EV_ABORT:
994 case NB_EV_APPLY:
995 /*
996 * At this point it's not possible to reject the
997 * transaction anymore, so any failure here can
998 * lead to inconsistencies and should be treated
999 * as a bug. Operations prone to errors, like
1000 * validations and resource allocations, should
1001 * be performed during the 'prepare' phase.
1002 */
1003 break;
1004 default:
1005 break;
1006 }
1007 }
1008 }
1009 pthread_rwlock_unlock(&running_config->lock);
1010
1011 return NB_OK;
1012 }
1013
1014 static struct nb_config_cb *
1015 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1016 const struct nb_node *nb_node,
1017 const struct lyd_node *dnode)
1018 {
1019 struct nb_config_cb *cb;
1020
1021 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1022 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1023 cb->nb_node = nb_node;
1024 cb->dnode = dnode;
1025 RB_INSERT(nb_config_cbs, cbs, cb);
1026
1027 return cb;
1028 }
1029
1030 static struct nb_config_cb *
1031 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1032 const struct nb_node *nb_node)
1033 {
1034 struct nb_config_cb s;
1035
1036 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1037 s.nb_node = nb_node;
1038 return RB_FIND(nb_config_cbs, cbs, &s);
1039 }
1040
1041 /* Call the 'apply_finish' callbacks. */
1042 static void nb_transaction_apply_finish(struct nb_transaction *transaction)
1043 {
1044 struct nb_config_cbs cbs;
1045 struct nb_config_cb *cb;
1046
1047 /* Initialize tree of 'apply_finish' callbacks. */
1048 RB_INIT(nb_config_cbs, &cbs);
1049
1050 /* Identify the 'apply_finish' callbacks that need to be called. */
1051 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1052 struct nb_config_change *change = (struct nb_config_change *)cb;
1053 const struct lyd_node *dnode = change->cb.dnode;
1054
1055 /*
1056 * Iterate up to the root of the data tree. When a node is being
1057 * deleted, skip its 'apply_finish' callback if one is defined
1058 * (the 'apply_finish' callbacks from the node ancestors should
1059 * be called though).
1060 */
1061 if (change->cb.operation == NB_OP_DESTROY) {
1062 char xpath[XPATH_MAXLEN];
1063
1064 dnode = dnode->parent;
1065 if (!dnode)
1066 break;
1067
1068 /*
1069 			 * The dnode from 'destroy' callbacks points to elements
1070 * from the running configuration. Use yang_dnode_get()
1071 * to get the corresponding dnode from the candidate
1072 * configuration that is being committed.
1073 */
1074 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1075 dnode = yang_dnode_get(transaction->config->dnode,
1076 xpath);
1077 }
1078 while (dnode) {
1079 char xpath[XPATH_MAXLEN];
1080 struct nb_node *nb_node;
1081
1082 nb_node = dnode->schema->priv;
1083 if (!nb_node->cbs.apply_finish)
1084 goto next;
1085
1086 /*
1087 * Don't call the callback more than once for the same
1088 * data node.
1089 */
1090 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1091 if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
1092 goto next;
1093
1094 nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);
1095
1096 next:
1097 dnode = dnode->parent;
1098 }
1099 }
1100
1101 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1102 RB_FOREACH (cb, nb_config_cbs, &cbs) {
1103 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
1104 nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
1105 cb->xpath, NULL);
1106
1107 (*cb->nb_node->cbs.apply_finish)(cb->dnode);
1108 }
1109
1110 /* Release memory. */
1111 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1112 cb = RB_ROOT(nb_config_cbs, &cbs);
1113 RB_REMOVE(nb_config_cbs, &cbs, cb);
1114 XFREE(MTYPE_TMP, cb);
1115 }
1116 }
1117
1118 static int nb_oper_data_iter_children(const struct lys_node *snode,
1119 const char *xpath, const void *list_entry,
1120 const struct yang_list_keys *list_keys,
1121 struct yang_translator *translator,
1122 bool first, uint32_t flags,
1123 nb_oper_data_cb cb, void *arg)
1124 {
1125 struct lys_node *child;
1126
1127 LY_TREE_FOR (snode->child, child) {
1128 int ret;
1129
1130 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1131 list_keys, translator, false,
1132 flags, cb, arg);
1133 if (ret != NB_OK)
1134 return ret;
1135 }
1136
1137 return NB_OK;
1138 }
1139
1140 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1141 const char *xpath, const void *list_entry,
1142 const struct yang_list_keys *list_keys,
1143 struct yang_translator *translator,
1144 uint32_t flags, nb_oper_data_cb cb, void *arg)
1145 {
1146 struct yang_data *data;
1147
1148 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1149 return NB_OK;
1150
1151 /* Ignore list keys. */
1152 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1153 return NB_OK;
1154
1155 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1156 if (data == NULL)
1157 /* Leaf of type "empty" is not present. */
1158 return NB_OK;
1159
1160 return (*cb)(nb_node->snode, translator, data, arg);
1161 }
1162
1163 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1164 const char *xpath,
1165 const void *list_entry,
1166 const struct yang_list_keys *list_keys,
1167 struct yang_translator *translator,
1168 uint32_t flags, nb_oper_data_cb cb,
1169 void *arg)
1170 {
1171 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1172 return NB_OK;
1173
1174 /* Presence containers. */
1175 if (nb_node->cbs.get_elem) {
1176 struct yang_data *data;
1177 int ret;
1178
1179 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1180 if (data == NULL)
1181 /* Presence container is not present. */
1182 return NB_OK;
1183
1184 ret = (*cb)(nb_node->snode, translator, data, arg);
1185 if (ret != NB_OK)
1186 return ret;
1187 }
1188
1189 /* Iterate over the child nodes. */
1190 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1191 list_keys, translator, false, flags,
1192 cb, arg);
1193 }
1194
1195 static int
1196 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1197 const void *parent_list_entry,
1198 const struct yang_list_keys *parent_list_keys,
1199 struct yang_translator *translator, uint32_t flags,
1200 nb_oper_data_cb cb, void *arg)
1201 {
1202 const void *list_entry = NULL;
1203
1204 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1205 return NB_OK;
1206
1207 do {
1208 struct yang_data *data;
1209 int ret;
1210
1211 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1212 list_entry);
1213 if (!list_entry)
1214 /* End of the list. */
1215 break;
1216
1217 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1218 if (data == NULL)
1219 continue;
1220
1221 ret = (*cb)(nb_node->snode, translator, data, arg);
1222 if (ret != NB_OK)
1223 return ret;
1224 } while (list_entry);
1225
1226 return NB_OK;
1227 }
1228
1229 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1230 const char *xpath_list,
1231 const void *parent_list_entry,
1232 const struct yang_list_keys *parent_list_keys,
1233 struct yang_translator *translator,
1234 uint32_t flags, nb_oper_data_cb cb, void *arg)
1235 {
1236 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1237 const void *list_entry = NULL;
1238 uint32_t position = 1;
1239
1240 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1241 return NB_OK;
1242
1243 /* Iterate over all list entries. */
1244 do {
1245 struct yang_list_keys list_keys;
1246 char xpath[XPATH_MAXLEN * 2];
1247 int ret;
1248
1249 /* Obtain list entry. */
1250 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1251 list_entry);
1252 if (!list_entry)
1253 /* End of the list. */
1254 break;
1255
1256 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1257 /* Obtain the list entry keys. */
1258 if (nb_callback_get_keys(nb_node, list_entry,
1259 &list_keys)
1260 != NB_OK) {
1261 flog_warn(EC_LIB_NB_CB_STATE,
1262 "%s: failed to get list keys",
1263 __func__);
1264 return NB_ERR;
1265 }
1266
1267 /* Build XPath of the list entry. */
1268 strlcpy(xpath, xpath_list, sizeof(xpath));
1269 for (unsigned int i = 0; i < list_keys.num; i++) {
1270 snprintf(xpath + strlen(xpath),
1271 sizeof(xpath) - strlen(xpath),
1272 "[%s='%s']", slist->keys[i]->name,
1273 list_keys.key[i]);
1274 }
1275 } else {
1276 /*
1277 * Keyless list - build XPath using a positional index.
1278 */
1279 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1280 position);
1281 position++;
1282 }
1283
1284 /* Iterate over the child nodes. */
1285 ret = nb_oper_data_iter_children(
1286 nb_node->snode, xpath, list_entry, &list_keys,
1287 translator, false, flags, cb, arg);
1288 if (ret != NB_OK)
1289 return ret;
1290 } while (list_entry);
1291
1292 return NB_OK;
1293 }
1294
1295 static int nb_oper_data_iter_node(const struct lys_node *snode,
1296 const char *xpath_parent,
1297 const void *list_entry,
1298 const struct yang_list_keys *list_keys,
1299 struct yang_translator *translator,
1300 bool first, uint32_t flags,
1301 nb_oper_data_cb cb, void *arg)
1302 {
1303 struct nb_node *nb_node;
1304 char xpath[XPATH_MAXLEN];
1305 int ret = NB_OK;
1306
1307 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1308 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1309 return NB_OK;
1310
1311 /* Update XPath. */
1312 strlcpy(xpath, xpath_parent, sizeof(xpath));
1313 if (!first && snode->nodetype != LYS_USES)
1314 snprintf(xpath + strlen(xpath), sizeof(xpath) - strlen(xpath),
1315 "/%s", snode->name);
1316
1317 nb_node = snode->priv;
1318 switch (snode->nodetype) {
1319 case LYS_CONTAINER:
1320 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1321 list_keys, translator, flags,
1322 cb, arg);
1323 break;
1324 case LYS_LEAF:
1325 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1326 list_keys, translator, flags, cb,
1327 arg);
1328 break;
1329 case LYS_LEAFLIST:
1330 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1331 list_keys, translator, flags,
1332 cb, arg);
1333 break;
1334 case LYS_LIST:
1335 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1336 list_keys, translator, flags, cb,
1337 arg);
1338 break;
1339 case LYS_USES:
1340 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1341 list_keys, translator, false,
1342 flags, cb, arg);
1343 break;
1344 default:
1345 break;
1346 }
1347
1348 return ret;
1349 }
1350
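/*
 * Iterate over operational data starting from the given XPath, invoking 'cb'
 * for every data element found. Only containers and lists can be used as the
 * starting point. Illustrative callback shape, inferred from how 'cb' is
 * invoked by the helpers above (a sketch, not an actual FRR callback):
 *
 *	static int dump_cb(const struct lys_node *snode,
 *			   struct yang_translator *translator,
 *			   struct yang_data *data, void *arg)
 *	{
 *		return NB_OK;
 *	}
 */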
1351 int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1352 uint32_t flags, nb_oper_data_cb cb, void *arg)
1353 {
1354 struct nb_node *nb_node;
1355 const void *list_entry = NULL;
1356 struct yang_list_keys list_keys;
1357 struct list *list_dnodes;
1358 struct lyd_node *dnode, *dn;
1359 struct listnode *ln;
1360 int ret;
1361
1362 nb_node = nb_node_find(xpath);
1363 if (!nb_node) {
1364 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1365 "%s: unknown data path: %s", __func__, xpath);
1366 return NB_ERR;
1367 }
1368
1369 /* For now this function works only with containers and lists. */
1370 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1371 flog_warn(
1372 EC_LIB_NB_OPERATIONAL_DATA,
1373 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1374 __func__, xpath);
1375 return NB_ERR;
1376 }
1377
1378 /*
1379 * Create a data tree from the XPath so that we can parse the keys of
1380 * all YANG lists (if any).
1381 */
1382 ly_errno = 0;
1383 dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
1384 LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
1385 if (!dnode) {
1386 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
1387 __func__);
1388 return NB_ERR;
1389 }
1390
1391 /*
1392 * Create a linked list to sort the data nodes starting from the root.
1393 */
1394 list_dnodes = list_new();
1395 for (dn = dnode; dn; dn = dn->parent) {
1396 if (dn->schema->nodetype != LYS_LIST || !dn->child)
1397 continue;
1398 listnode_add_head(list_dnodes, dn);
1399 }
1400 /*
1401 	 * Use the northbound callbacks to find the list entry pointer corresponding
1402 * to the given XPath.
1403 */
1404 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1405 struct lyd_node *child;
1406 struct nb_node *nn;
1407 unsigned int n = 0;
1408
1409 /* Obtain the list entry keys. */
1410 memset(&list_keys, 0, sizeof(list_keys));
1411 LY_TREE_FOR (dn->child, child) {
1412 if (!lys_is_key((struct lys_node_leaf *)child->schema,
1413 NULL))
1414 continue;
1415 strlcpy(list_keys.key[n],
1416 yang_dnode_get_string(child, NULL),
1417 sizeof(list_keys.key[n]));
1418 n++;
1419 }
1420 list_keys.num = n;
1421 if (list_keys.num
1422 != ((struct lys_node_list *)dn->schema)->keys_size) {
1423 list_delete(&list_dnodes);
1424 yang_dnode_free(dnode);
1425 return NB_ERR_NOT_FOUND;
1426 }
1427
1428 /* Find the list entry pointer. */
1429 nn = dn->schema->priv;
1430 list_entry =
1431 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1432 if (list_entry == NULL) {
1433 list_delete(&list_dnodes);
1434 yang_dnode_free(dnode);
1435 return NB_ERR_NOT_FOUND;
1436 }
1437 }
1438
1439 /* If a list entry was given, iterate over that list entry only. */
1440 if (dnode->schema->nodetype == LYS_LIST && dnode->child)
1441 ret = nb_oper_data_iter_children(
1442 nb_node->snode, xpath, list_entry, &list_keys,
1443 translator, true, flags, cb, arg);
1444 else
1445 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1446 &list_keys, translator, true,
1447 flags, cb, arg);
1448
1449 list_delete(&list_dnodes);
1450 yang_dnode_free(dnode);
1451
1452 return ret;
1453 }
1454
1455 bool nb_operation_is_valid(enum nb_operation operation,
1456 const struct lys_node *snode)
1457 {
1458 struct nb_node *nb_node = snode->priv;
1459 struct lys_node_container *scontainer;
1460 struct lys_node_leaf *sleaf;
1461
1462 switch (operation) {
1463 case NB_OP_CREATE:
1464 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1465 return false;
1466
1467 switch (snode->nodetype) {
1468 case LYS_LEAF:
1469 sleaf = (struct lys_node_leaf *)snode;
1470 if (sleaf->type.base != LY_TYPE_EMPTY)
1471 return false;
1472 break;
1473 case LYS_CONTAINER:
1474 scontainer = (struct lys_node_container *)snode;
1475 if (!scontainer->presence)
1476 return false;
1477 break;
1478 case LYS_LIST:
1479 case LYS_LEAFLIST:
1480 break;
1481 default:
1482 return false;
1483 }
1484 return true;
1485 case NB_OP_MODIFY:
1486 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1487 return false;
1488
1489 switch (snode->nodetype) {
1490 case LYS_LEAF:
1491 sleaf = (struct lys_node_leaf *)snode;
1492 if (sleaf->type.base == LY_TYPE_EMPTY)
1493 return false;
1494
1495 /* List keys can't be modified. */
1496 if (lys_is_key(sleaf, NULL))
1497 return false;
1498 break;
1499 default:
1500 return false;
1501 }
1502 return true;
1503 case NB_OP_DESTROY:
1504 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1505 return false;
1506
1507 switch (snode->nodetype) {
1508 case LYS_LEAF:
1509 sleaf = (struct lys_node_leaf *)snode;
1510
1511 /* List keys can't be deleted. */
1512 if (lys_is_key(sleaf, NULL))
1513 return false;
1514
1515 /*
1516 			 * Only optional leafs can be deleted, as can leafs whose
1517 			 * parent is a case statement.
1518 */
1519 if (snode->parent->nodetype == LYS_CASE)
1520 return true;
1521 if (sleaf->when)
1522 return true;
1523 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
1524 || sleaf->dflt)
1525 return false;
1526 break;
1527 case LYS_CONTAINER:
1528 scontainer = (struct lys_node_container *)snode;
1529 if (!scontainer->presence)
1530 return false;
1531 break;
1532 case LYS_LIST:
1533 case LYS_LEAFLIST:
1534 break;
1535 default:
1536 return false;
1537 }
1538 return true;
1539 case NB_OP_MOVE:
1540 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1541 return false;
1542
1543 switch (snode->nodetype) {
1544 case LYS_LIST:
1545 case LYS_LEAFLIST:
1546 if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
1547 return false;
1548 break;
1549 default:
1550 return false;
1551 }
1552 return true;
1553 case NB_OP_APPLY_FINISH:
1554 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1555 return false;
1556 return true;
1557 case NB_OP_GET_ELEM:
1558 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1559 return false;
1560
1561 switch (snode->nodetype) {
1562 case LYS_LEAF:
1563 case LYS_LEAFLIST:
1564 break;
1565 case LYS_CONTAINER:
1566 scontainer = (struct lys_node_container *)snode;
1567 if (!scontainer->presence)
1568 return false;
1569 break;
1570 default:
1571 return false;
1572 }
1573 return true;
1574 case NB_OP_GET_NEXT:
1575 switch (snode->nodetype) {
1576 case LYS_LIST:
1577 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1578 return false;
1579 break;
1580 case LYS_LEAFLIST:
1581 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1582 return false;
1583 break;
1584 default:
1585 return false;
1586 }
1587 return true;
1588 case NB_OP_GET_KEYS:
1589 case NB_OP_LOOKUP_ENTRY:
1590 switch (snode->nodetype) {
1591 case LYS_LIST:
1592 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1593 return false;
1594 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
1595 return false;
1596 break;
1597 default:
1598 return false;
1599 }
1600 return true;
1601 case NB_OP_RPC:
1602 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1603 return false;
1604
1605 switch (snode->nodetype) {
1606 case LYS_RPC:
1607 case LYS_ACTION:
1608 break;
1609 default:
1610 return false;
1611 }
1612 return true;
1613 default:
1614 return false;
1615 }
1616 }
1617
1618 DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1619 (xpath, arguments));
1620
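/*
 * Send a YANG notification by invoking the 'nb_notification_send' hook (i.e.
 * whichever northbound plugins registered for it). The 'arguments' list is
 * consumed and freed by this function.
 */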
1621 int nb_notification_send(const char *xpath, struct list *arguments)
1622 {
1623 int ret;
1624
1625 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1626
1627 ret = hook_call(nb_notification_send, xpath, arguments);
1628 if (arguments)
1629 list_delete(&arguments);
1630
1631 return ret;
1632 }
1633
1634 /* Management of the user pointers associated with the running configuration. */
1635 struct nb_config_entry {
1636 char xpath[XPATH_MAXLEN];
1637 void *entry;
1638 };
1639
1640 static bool running_config_entry_cmp(const void *value1, const void *value2)
1641 {
1642 const struct nb_config_entry *c1 = value1;
1643 const struct nb_config_entry *c2 = value2;
1644
1645 return strmatch(c1->xpath, c2->xpath);
1646 }
1647
1648 static unsigned int running_config_entry_key_make(const void *value)
1649 {
1650 return string_hash_make(value);
1651 }
1652
1653 static void *running_config_entry_alloc(void *p)
1654 {
1655 struct nb_config_entry *new, *key = p;
1656
1657 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1658 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1659
1660 return new;
1661 }
1662
1663 static void running_config_entry_free(void *arg)
1664 {
1665 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
1666 }
1667
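/*
 * Associate a user pointer with the given configuration node, keyed by its
 * XPath.
 */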
1668 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1669 {
1670 struct nb_config_entry *config, s;
1671
1672 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1673 config = hash_get(running_config_entries, &s,
1674 running_config_entry_alloc);
1675 config->entry = entry;
1676 }
1677
1678 static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
1679 {
1680 struct nb_config_entry *config, s;
1681 struct lyd_node *child;
1682 void *entry = NULL;
1683
1684 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1685 config = hash_release(running_config_entries, &s);
1686 if (config) {
1687 entry = config->entry;
1688 running_config_entry_free(config);
1689 }
1690
1691 /* Unset user pointers from the child nodes. */
1692 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
1693 LY_TREE_FOR (dnode->child, child) {
1694 (void)nb_running_unset_entry_helper(child);
1695 }
1696 }
1697
1698 return entry;
1699 }
1700
1701 void *nb_running_unset_entry(const struct lyd_node *dnode)
1702 {
1703 void *entry;
1704
1705 entry = nb_running_unset_entry_helper(dnode);
1706 assert(entry);
1707
1708 return entry;
1709 }
1710
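/*
 * Look up the user pointer associated with the given data node (or XPath),
 * walking up the data tree until an entry is found. Aborts if nothing is
 * found and 'abort_if_not_found' is true.
 */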
1711 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1712 bool abort_if_not_found)
1713 {
1714 const struct lyd_node *orig_dnode = dnode;
1715 char xpath_buf[XPATH_MAXLEN];
1716
1717 assert(dnode || xpath);
1718
1719 if (!dnode)
1720 dnode = yang_dnode_get(running_config->dnode, xpath);
1721
1722 while (dnode) {
1723 struct nb_config_entry *config, s;
1724
1725 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1726 config = hash_lookup(running_config_entries, &s);
1727 if (config)
1728 return config->entry;
1729
1730 dnode = dnode->parent;
1731 }
1732
1733 if (!abort_if_not_found)
1734 return NULL;
1735
1736 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1737 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1738 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1739 zlog_backtrace(LOG_ERR);
1740 abort();
1741 }
1742
1743 /* Logging functions. */
1744 const char *nb_event_name(enum nb_event event)
1745 {
1746 switch (event) {
1747 case NB_EV_VALIDATE:
1748 return "validate";
1749 case NB_EV_PREPARE:
1750 return "prepare";
1751 case NB_EV_ABORT:
1752 return "abort";
1753 case NB_EV_APPLY:
1754 return "apply";
1755 default:
1756 return "unknown";
1757 }
1758 }
1759
1760 const char *nb_operation_name(enum nb_operation operation)
1761 {
1762 switch (operation) {
1763 case NB_OP_CREATE:
1764 return "create";
1765 case NB_OP_MODIFY:
1766 return "modify";
1767 case NB_OP_DESTROY:
1768 return "destroy";
1769 case NB_OP_MOVE:
1770 return "move";
1771 case NB_OP_APPLY_FINISH:
1772 return "apply_finish";
1773 case NB_OP_GET_ELEM:
1774 return "get_elem";
1775 case NB_OP_GET_NEXT:
1776 return "get_next";
1777 case NB_OP_GET_KEYS:
1778 return "get_keys";
1779 case NB_OP_LOOKUP_ENTRY:
1780 return "lookup_entry";
1781 case NB_OP_RPC:
1782 return "rpc";
1783 default:
1784 return "unknown";
1785 }
1786 }
1787
1788 const char *nb_err_name(enum nb_error error)
1789 {
1790 switch (error) {
1791 case NB_OK:
1792 return "ok";
1793 case NB_ERR:
1794 return "generic error";
1795 case NB_ERR_NO_CHANGES:
1796 return "no changes";
1797 case NB_ERR_NOT_FOUND:
1798 return "element not found";
1799 case NB_ERR_LOCKED:
1800 return "resource is locked";
1801 case NB_ERR_VALIDATION:
1802 return "validation error";
1803 case NB_ERR_RESOURCE:
1804 return "failed to allocate resource";
1805 case NB_ERR_INCONSISTENCY:
1806 return "internal inconsistency";
1807 default:
1808 return "unknown";
1809 }
1810 }
1811
1812 const char *nb_client_name(enum nb_client client)
1813 {
1814 switch (client) {
1815 case NB_CLIENT_CLI:
1816 return "CLI";
1817 case NB_CLIENT_CONFD:
1818 return "ConfD";
1819 case NB_CLIENT_SYSREPO:
1820 return "Sysrepo";
1821 case NB_CLIENT_GRPC:
1822 return "gRPC";
1823 default:
1824 return "unknown";
1825 }
1826 }
1827
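/*
 * Install the callbacks (and optional priorities) provided by a daemon's
 * frr_yang_module_info structure into the corresponding northbound nodes.
 */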
1828 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1829 {
1830 for (size_t i = 0; module->nodes[i].xpath; i++) {
1831 struct nb_node *nb_node;
1832 uint32_t priority;
1833
1834 nb_node = nb_node_find(module->nodes[i].xpath);
1835 if (!nb_node) {
1836 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1837 "%s: unknown data path: %s", __func__,
1838 module->nodes[i].xpath);
1839 continue;
1840 }
1841
1842 nb_node->cbs = module->nodes[i].cbs;
1843 priority = module->nodes[i].priority;
1844 if (priority != 0)
1845 nb_node->priority = priority;
1846 }
1847 }
1848
1849 void nb_init(struct thread_master *tm,
1850 const struct frr_yang_module_info *modules[], size_t nmodules)
1851 {
1852 unsigned int errors = 0;
1853
1854 /* Load YANG modules. */
1855 for (size_t i = 0; i < nmodules; i++)
1856 yang_module_load(modules[i]->name);
1857
1858 /* Create a nb_node for all YANG schema nodes. */
1859 nb_nodes_create();
1860
1861 /* Load northbound callbacks. */
1862 for (size_t i = 0; i < nmodules; i++)
1863 nb_load_callbacks(modules[i]);
1864
1865 /* Validate northbound callbacks. */
1866 yang_snodes_iterate_all(nb_node_validate, 0, &errors);
1867 if (errors > 0) {
1868 flog_err(
1869 EC_LIB_NB_CBS_VALIDATION,
1870 "%s: failed to validate northbound callbacks: %u error(s)",
1871 __func__, errors);
1872 exit(1);
1873 }
1874
1875 /* Create an empty running configuration. */
1876 running_config = nb_config_new(NULL);
1877 running_config_entries = hash_create(running_config_entry_key_make,
1878 running_config_entry_cmp,
1879 "Running Configuration Entries");
1880 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
1881
1882 /* Initialize the northbound CLI. */
1883 nb_cli_init(tm);
1884 }
1885
1886 void nb_terminate(void)
1887 {
1888 /* Terminate the northbound CLI. */
1889 nb_cli_terminate();
1890
1891 /* Delete all nb_node's from all YANG modules. */
1892 nb_nodes_delete();
1893
1894 /* Delete the running configuration. */
1895 hash_clean(running_config_entries, running_config_entry_free);
1896 hash_free(running_config_entries);
1897 nb_config_free(running_config);
1898 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
1899 }