]> git.proxmox.com Git - mirror_frr.git/blob - lib/northbound.c
Merge pull request #5005 from Frankkkkk/dockerfile
[mirror_frr.git] / lib / northbound.c
1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
33
34 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
35 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
36 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
37
38 /* Running configuration - shouldn't be modified directly. */
39 struct nb_config *running_config;
40
41 /* Hash table of user pointers associated with configuration entries. */
42 static struct hash *running_config_entries;
43
44 /* Management lock for the running configuration. */
45 static struct {
46 /* Mutex protecting this structure. */
47 pthread_mutex_t mtx;
48
49 /* Actual lock. */
50 bool locked;
51
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client;
54
55 /* Northbound user who owns this lock. */
56 const void *owner_user;
57 } running_config_mgmt_lock;
58
59 /*
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
62 */
63 static bool transaction_in_progress;
64
65 static int nb_callback_configuration(const enum nb_event event,
66 struct nb_config_change *change);
67 static void nb_log_callback(const enum nb_event event,
68 enum nb_operation operation, const char *xpath,
69 const char *value);
70 static struct nb_transaction *nb_transaction_new(struct nb_config *config,
71 struct nb_config_cbs *changes,
72 enum nb_client client,
73 const void *user,
74 const char *comment);
75 static void nb_transaction_free(struct nb_transaction *transaction);
76 static int nb_transaction_process(enum nb_event event,
77 struct nb_transaction *transaction);
78 static void nb_transaction_apply_finish(struct nb_transaction *transaction);
79 static int nb_oper_data_iter_node(const struct lys_node *snode,
80 const char *xpath, const void *list_entry,
81 const struct yang_list_keys *list_keys,
82 struct yang_translator *translator,
83 bool first, uint32_t flags,
84 nb_oper_data_cb cb, void *arg);
85
86 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
87 {
88 bool *config_only = arg;
89
90 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
91 *config_only = false;
92 return YANG_ITER_STOP;
93 }
94
95 return YANG_ITER_CONTINUE;
96 }
97
/*
 * Iteration callback: allocate and initialize the northbound node associated
 * to the given libyang schema node, and link the two together.
 */
static int nb_node_new_cb(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node;
	struct lys_node *sparent, *sparent_list;

	nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
	yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
			    sizeof(nb_node->xpath));
	nb_node->priority = NB_DFLT_PRIORITY;
	/* Resolve parent pointers through the schema nodes' private data. */
	sparent = yang_snode_real_parent(snode);
	if (sparent)
		nb_node->parent = sparent->priv;
	sparent_list = yang_snode_parent_list(snode);
	if (sparent_list)
		nb_node->parent_list = sparent_list->priv;

	/* Set flags. */
	if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		bool config_only = true;

		/* Mark containers/lists whose entire subtree (including
		 * augmentations) holds configuration data only. */
		yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
					    YANG_ITER_ALLOW_AUGMENTATIONS,
					    &config_only);
		if (config_only)
			SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
	}
	if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
		struct lys_node_list *slist;

		slist = (struct lys_node_list *)snode;
		if (slist->keys_size == 0)
			SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
	}

	/*
	 * Link the northbound node and the libyang schema node with one
	 * another.
	 */
	nb_node->snode = snode;
	lys_set_private(snode, nb_node);

	return YANG_ITER_CONTINUE;
}
141
142 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
143 {
144 struct nb_node *nb_node;
145
146 nb_node = snode->priv;
147 lys_set_private(snode, NULL);
148 XFREE(MTYPE_NB_NODE, nb_node);
149
150 return YANG_ITER_CONTINUE;
151 }
152
/* Create a northbound node for every schema node of all loaded YANG modules. */
void nb_nodes_create(void)
{
	yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
}
157
/* Delete the northbound node of every schema node of all loaded YANG modules. */
void nb_nodes_delete(void)
{
	yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
}
162
163 struct nb_node *nb_node_find(const char *xpath)
164 {
165 const struct lys_node *snode;
166
167 /*
168 * Use libyang to find the schema node associated to the xpath and get
169 * the northbound node from there (snode private pointer).
170 */
171 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
172 if (!snode)
173 return NULL;
174
175 return snode->priv;
176 }
177
/*
 * Validate the implementation status of a single callback: warn when an
 * unneeded callback is implemented and error out when a mandatory one is
 * missing. Returns 1 on a missing mandatory callback, 0 otherwise.
 */
static int nb_node_validate_cb(const struct nb_node *nb_node,
			       enum nb_operation operation,
			       int callback_implemented, bool optional)
{
	bool valid;

	valid = nb_operation_is_valid(operation, nb_node->snode);

	/*
	 * Add an exception for operational data callbacks. A rw list usually
	 * doesn't need any associated operational data callbacks. But if this
	 * rw list is augmented by another module which adds state nodes under
	 * it, then this list will need to have the 'get_next()', 'get_keys()'
	 * and 'lookup_entry()' callbacks. As such, never log a warning when
	 * these callbacks are implemented when they are not needed, since this
	 * depends on context (e.g. some daemons might augment "frr-interface"
	 * while others don't).
	 */
	if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
	    && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
		flog_warn(EC_LIB_NB_CB_UNNEEDED,
			  "unneeded '%s' callback for '%s'",
			  nb_operation_name(operation), nb_node->xpath);

	if (!optional && valid && !callback_implemented) {
		flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
			 nb_operation_name(operation), nb_node->xpath);
		return 1;
	}

	return 0;
}
210
/*
 * Check if the required callbacks were implemented for the given northbound
 * node. Returns the number of missing mandatory callbacks.
 */
static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)

{
	unsigned int error = 0;

	/* Validate each operation; pre_validate and apply_finish are the only
	 * optional ones. */
	error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
				     !!nb_node->cbs.create, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
				     !!nb_node->cbs.modify, false);
	error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
				     !!nb_node->cbs.destroy, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
				     false);
	error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
				     !!nb_node->cbs.pre_validate, true);
	error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
				     !!nb_node->cbs.apply_finish, true);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
				     !!nb_node->cbs.get_elem, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
				     !!nb_node->cbs.get_next, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
				     !!nb_node->cbs.get_keys, false);
	error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
				     !!nb_node->cbs.lookup_entry, false);
	error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
				     false);

	return error;
}
245
246 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
247 {
248 /* Top-level nodes can have any priority. */
249 if (!nb_node->parent)
250 return 0;
251
252 if (nb_node->priority < nb_node->parent->priority) {
253 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
254 "node has higher priority than its parent [xpath %s]",
255 nb_node->xpath);
256 return 1;
257 }
258
259 return 0;
260 }
261
262 static int nb_node_validate(const struct lys_node *snode, void *arg)
263 {
264 struct nb_node *nb_node = snode->priv;
265 unsigned int *errors = arg;
266
267 /* Validate callbacks and priority. */
268 *errors += nb_node_validate_cbs(nb_node);
269 *errors += nb_node_validate_priority(nb_node);
270
271 return YANG_ITER_CONTINUE;
272 }
273
274 struct nb_config *nb_config_new(struct lyd_node *dnode)
275 {
276 struct nb_config *config;
277
278 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
279 if (dnode)
280 config->dnode = dnode;
281 else
282 config->dnode = yang_dnode_new(ly_native_ctx, true);
283 config->version = 0;
284
285 return config;
286 }
287
/* Free a northbound configuration, including its data tree (if any). */
void nb_config_free(struct nb_config *config)
{
	if (config->dnode)
		yang_dnode_free(config->dnode);
	XFREE(MTYPE_NB_CONFIG, config);
}
294
295 struct nb_config *nb_config_dup(const struct nb_config *config)
296 {
297 struct nb_config *dup;
298
299 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
300 dup->dnode = yang_dnode_dup(config->dnode);
301 dup->version = config->version;
302
303 return dup;
304 }
305
306 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
307 bool preserve_source)
308 {
309 int ret;
310
311 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
312 if (ret != 0)
313 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
314
315 if (!preserve_source)
316 nb_config_free(config_src);
317
318 return (ret == 0) ? NB_OK : NB_ERR;
319 }
320
/*
 * Replace the contents of config_dst with config_src. config_src is consumed
 * unless preserve_source is true (in which case its data tree is duplicated).
 */
void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		/* Steal the source's data tree, then free the shell (clear
		 * the pointer first so nb_config_free() doesn't free it). */
		config_dst->dnode = config_src->dnode;
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}
339
340 /* Generate the nb_config_cbs tree. */
341 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
342 const struct nb_config_cb *b)
343 {
344 /* Sort by priority first. */
345 if (a->nb_node->priority < b->nb_node->priority)
346 return -1;
347 if (a->nb_node->priority > b->nb_node->priority)
348 return 1;
349
350 /*
351 * Preserve the order of the configuration changes as told by libyang.
352 */
353 return a->seq - b->seq;
354 }
355 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
356
357 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
358 enum nb_operation operation,
359 uint32_t *seq,
360 const struct lyd_node *dnode)
361 {
362 struct nb_config_change *change;
363
364 change = XCALLOC(MTYPE_TMP, sizeof(*change));
365 change->cb.operation = operation;
366 change->cb.seq = *seq;
367 *seq = *seq + 1;
368 change->cb.nb_node = dnode->schema->priv;
369 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
370 change->cb.dnode = dnode;
371
372 RB_INSERT(nb_config_cbs, changes, &change->cb);
373 }
374
375 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
376 {
377 while (!RB_EMPTY(nb_config_cbs, changes)) {
378 struct nb_config_change *change;
379
380 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
381 changes);
382 RB_REMOVE(nb_config_cbs, changes, &change->cb);
383 XFREE(MTYPE_TMP, change);
384 }
385 }
386
/*
 * Helper function used when calculating the delta between two different
 * configurations. Given a new subtree, calculate all new YANG data nodes,
 * excluding default leafs and leaf-lists. This is a recursive function.
 */
static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	enum nb_operation operation;
	struct lyd_node *child;

	switch (dnode->schema->nodetype) {
	case LYS_LEAF:
	case LYS_LEAFLIST:
		/* Skip leafs carrying only their default value. */
		if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
			break;

		/* Pick the operation this schema node actually supports. */
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			operation = NB_OP_CREATE;
		else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
			operation = NB_OP_MODIFY;
		else
			return;

		nb_config_diff_add_change(changes, operation, seq, dnode);
		break;
	case LYS_CONTAINER:
	case LYS_LIST:
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
						  dnode);

		/* Process child nodes recursively. */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_created(child, seq, changes);
		}
		break;
	default:
		break;
	}
}
428
/*
 * Helper function used when calculating the delta between two different
 * configurations. Record a "destroy" change for the given deleted subtree,
 * recursing into non-presence containers. This is a recursive function.
 */
static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
		nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
	else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
		struct lyd_node *child;

		/*
		 * Non-presence containers need special handling since they
		 * don't have "destroy" callbacks. In this case, what we need to
		 * do is to call the "destroy" callbacks of their child nodes
		 * when applicable (i.e. optional nodes).
		 */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_deleted(child, seq, changes);
		}
	}
}
448
/*
 * Calculate the delta between two different configurations. The resulting
 * change records are inserted into 'changes', ordered by priority and by the
 * sequence in which libyang reported them.
 */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_difflist *diff;
	uint32_t seq = 0;

	diff = lyd_diff(config1->dnode, config2->dnode,
			LYD_DIFFOPT_WITHDEFAULTS);
	assert(diff);

	for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
		LYD_DIFFTYPE type;
		struct lyd_node *dnode;

		type = diff->type[i];

		switch (type) {
		case LYD_DIFF_CREATED:
			/* New nodes live in the second (new) configuration. */
			dnode = diff->second[i];
			nb_config_diff_created(dnode, &seq, changes);
			break;
		case LYD_DIFF_DELETED:
			/* Deleted nodes live in the first (old) configuration. */
			dnode = diff->first[i];
			nb_config_diff_deleted(dnode, &seq, changes);
			break;
		case LYD_DIFF_CHANGED:
			dnode = diff->second[i];
			nb_config_diff_add_change(changes, NB_OP_MODIFY, &seq,
						  dnode);
			break;
		case LYD_DIFF_MOVEDAFTER1:
		case LYD_DIFF_MOVEDAFTER2:
		default:
			/* Move operations are not handled here. */
			continue;
		}
	}

	lyd_free_diff(diff);
}
490
/*
 * Edit a candidate configuration: create, modify, destroy or move the data
 * node identified by 'xpath'. Returns NB_OK on success, NB_ERR_NOT_FOUND when
 * destroying a node that doesn't exist, or NB_ERR on failure.
 *
 * NOTE(review): for leaf-lists 'data->value' is dereferenced unconditionally,
 * so callers are assumed to always provide 'data' for leaf-list xpaths —
 * confirm against callers.
 */
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode;
	char xpath_edit[XPATH_MAXLEN];

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		/* lyd_new_path() returns NULL both on error and when the node
		 * already exists (update); check ly_errno to tell them apart. */
		ly_errno = 0;
		dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
				     xpath_edit, (void *)data->value, 0,
				     LYD_PATH_OPT_UPDATE);
		if (!dnode && ly_errno) {
			flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
				  __func__);
			return NB_ERR;
		}

		/*
		 * If a new node was created, call lyd_validate() only to create
		 * default child nodes.
		 */
		if (dnode) {
			lyd_schema_sort(dnode, 0);
			lyd_validate(&dnode,
				     LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
				     ly_native_ctx);
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		lyd_free(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	default:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}
553
554 bool nb_candidate_needs_update(const struct nb_config *candidate)
555 {
556 if (candidate->version < running_config->version)
557 return true;
558
559 return false;
560 }
561
562 int nb_candidate_update(struct nb_config *candidate)
563 {
564 struct nb_config *updated_config;
565
566 updated_config = nb_config_dup(running_config);
567 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
568 return NB_ERR;
569
570 nb_config_replace(candidate, updated_config, false);
571
572 return NB_OK;
573 }
574
575 /*
576 * Perform YANG syntactic and semantic validation.
577 *
578 * WARNING: lyd_validate() can change the configuration as part of the
579 * validation process.
580 */
581 static int nb_candidate_validate_yang(struct nb_config *candidate)
582 {
583 if (lyd_validate(&candidate->dnode,
584 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
585 ly_native_ctx)
586 != 0)
587 return NB_ERR_VALIDATION;
588
589 return NB_OK;
590 }
591
/*
 * Perform code-level validation using the northbound callbacks: first run the
 * 'pre_validate' callbacks over the whole candidate, then the 'validate'
 * phase of the configuration callbacks over the computed changes. Returns
 * NB_OK or NB_ERR_VALIDATION.
 */
static int nb_candidate_validate_code(struct nb_config *candidate,
				      struct nb_config_cbs *changes)
{
	struct nb_config_cb *cb;
	struct lyd_node *root, *next, *child;
	int ret;

	/* First validate the candidate as a whole. */
	LY_TREE_FOR (candidate->dnode, root) {
		LY_TREE_DFS_BEGIN (root, next, child) {
			struct nb_node *nb_node;

			nb_node = child->schema->priv;
			if (!nb_node->cbs.pre_validate)
				goto next;

			if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config,
					     DEBUG_MODE_ALL)) {
				char xpath[XPATH_MAXLEN];

				yang_dnode_get_path(child, xpath,
						    sizeof(xpath));
				nb_log_callback(NB_EV_VALIDATE,
						NB_OP_PRE_VALIDATE, xpath,
						NULL);
			}

			ret = (*nb_node->cbs.pre_validate)(child);
			if (ret != NB_OK)
				return NB_ERR_VALIDATION;

		next:
			LY_TREE_DFS_END(root, next, child);
		}
	}

	/* Now validate the configuration changes. */
	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;

		ret = nb_callback_configuration(NB_EV_VALIDATE, change);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}
640
641 int nb_candidate_validate(struct nb_config *candidate)
642 {
643 struct nb_config_cbs changes;
644 int ret;
645
646 if (nb_candidate_validate_yang(candidate) != NB_OK)
647 return NB_ERR_VALIDATION;
648
649 RB_INIT(nb_config_cbs, &changes);
650 nb_config_diff(running_config, candidate, &changes);
651 ret = nb_candidate_validate_code(candidate, &changes);
652 nb_config_diff_del_changes(&changes);
653
654 return ret;
655 }
656
/*
 * Validate a candidate and run the 'prepare' phase of a commit, creating a
 * new transaction in *transaction. On success the caller must later call
 * nb_candidate_commit_apply() or nb_candidate_commit_abort(); if the prepare
 * phase itself fails, the transaction is still returned so the caller can
 * abort it.
 */
int nb_candidate_commit_prepare(struct nb_config *candidate,
				enum nb_client client, const void *user,
				const char *comment,
				struct nb_transaction **transaction)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate) != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	if (RB_EMPTY(nb_config_cbs, &changes))
		return NB_ERR_NO_CHANGES;

	if (nb_candidate_validate_code(candidate, &changes) != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_VALIDATION;
	}

	/* The transaction takes ownership of 'changes' on success. */
	*transaction =
		nb_transaction_new(candidate, &changes, client, user, comment);
	if (*transaction == NULL) {
		flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			  "%s: failed to create transaction", __func__);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_LOCKED;
	}

	return nb_transaction_process(NB_EV_PREPARE, *transaction);
}
695
/* Abort a prepared transaction, releasing resources allocated by 'prepare'. */
void nb_candidate_commit_abort(struct nb_transaction *transaction)
{
	(void)nb_transaction_process(NB_EV_ABORT, transaction);
	nb_transaction_free(transaction);
}
701
/*
 * Apply a prepared transaction: run the 'apply' phase callbacks and the
 * 'apply_finish' callbacks, promote the candidate to running, and optionally
 * record the transaction (writing its id to *transaction_id).
 */
void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id)
{
	(void)nb_transaction_process(NB_EV_APPLY, transaction);
	nb_transaction_apply_finish(transaction);

	/* Replace running by candidate. */
	transaction->config->version++;
	nb_config_replace(running_config, transaction->config, true);

	/* Record transaction. */
	if (save_transaction
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}
720
721 int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
722 const void *user, bool save_transaction,
723 const char *comment, uint32_t *transaction_id)
724 {
725 struct nb_transaction *transaction = NULL;
726 int ret;
727
728 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
729 &transaction);
730 /*
731 * Apply the changes if the preparation phase succeeded. Otherwise abort
732 * the transaction.
733 */
734 if (ret == NB_OK)
735 nb_candidate_commit_apply(transaction, save_transaction,
736 transaction_id);
737 else if (transaction != NULL)
738 nb_candidate_commit_abort(transaction);
739
740 return ret;
741 }
742
/*
 * Lock the running configuration on behalf of the given client/user.
 * Returns 0 on success, -1 if the lock is already held.
 */
int nb_running_lock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex(&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked) {
			running_config_mgmt_lock.locked = true;
			running_config_mgmt_lock.owner_client = client;
			running_config_mgmt_lock.owner_user = user;
			ret = 0;
		}
	}

	return ret;
}
758
/*
 * Unlock the running configuration. Only the client/user that holds the lock
 * may release it. Returns 0 on success, -1 otherwise.
 */
int nb_running_unlock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex(&running_config_mgmt_lock.mtx) {
		if (running_config_mgmt_lock.locked
		    && running_config_mgmt_lock.owner_client == client
		    && running_config_mgmt_lock.owner_user == user) {
			running_config_mgmt_lock.locked = false;
			running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
			running_config_mgmt_lock.owner_user = NULL;
			ret = 0;
		}
	}

	return ret;
}
776
/*
 * Check whether the given client/user may modify the running configuration:
 * allowed when it's unlocked or locked by this same client/user. Returns 0
 * when allowed, -1 otherwise.
 */
int nb_running_lock_check(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex(&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked
		    || (running_config_mgmt_lock.owner_client == client
			&& running_config_mgmt_lock.owner_user == user))
			ret = 0;
	}

	return ret;
}
790
791 static void nb_log_callback(const enum nb_event event,
792 enum nb_operation operation, const char *xpath,
793 const char *value)
794 {
795 zlog_debug(
796 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
797 nb_event_name(event), nb_operation_name(operation), xpath,
798 value ? value : "(NULL)");
799 }
800
/*
 * Call the northbound configuration callback associated to a given
 * configuration change. Returns the callback's result; on failure, logs with
 * a severity that depends on the phase (failures during 'apply' are bugs).
 */
static int nb_callback_configuration(const enum nb_event event,
				     struct nb_config_change *change)
{
	enum nb_operation operation = change->cb.operation;
	const char *xpath = change->cb.xpath;
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		const char *value = "(none)";

		if (dnode && !yang_snode_is_typeless_data(dnode->schema))
			value = yang_dnode_get_string(dnode, NULL);

		nb_log_callback(event, operation, xpath, value);
	}

	/* No resource may be allocated during the validation phase. */
	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	switch (operation) {
	case NB_OP_CREATE:
		ret = (*nb_node->cbs.create)(event, dnode, resource);
		break;
	case NB_OP_MODIFY:
		ret = (*nb_node->cbs.modify)(event, dnode, resource);
		break;
	case NB_OP_DESTROY:
		ret = (*nb_node->cbs.destroy)(event, dnode);
		break;
	case NB_OP_MOVE:
		ret = (*nb_node->cbs.move)(event, dnode);
		break;
	default:
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		int priority;
		enum lib_log_refs ref;

		/* Map the event to a log severity and error reference. */
		switch (event) {
		case NB_EV_VALIDATE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
			break;
		case NB_EV_PREPARE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_PREPARE;
			break;
		case NB_EV_ABORT:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_ABORT;
			break;
		case NB_EV_APPLY:
			/* Errors during 'apply' can't be rejected anymore. */
			priority = LOG_ERR;
			ref = EC_LIB_NB_CB_CONFIG_APPLY;
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]",
				 __func__, event, xpath);
			exit(1);
		}

		flog(priority, ref,
		     "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
		     __func__, nb_err_name(ret), nb_event_name(event),
		     nb_operation_name(operation), xpath);
	}

	return ret;
}
885
/* Invoke a node's 'get_elem' operational-data callback (with debug logging). */
struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_elem): xpath [%s] list_entry [%p]",
	       xpath, list_entry);

	return nb_node->cbs.get_elem(xpath, list_entry);
}
896
/* Invoke a node's 'get_next' operational-data callback (with debug logging). */
const void *nb_callback_get_next(const struct nb_node *nb_node,
				 const void *parent_list_entry,
				 const void *list_entry)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
	       nb_node->xpath, parent_list_entry, list_entry);

	return nb_node->cbs.get_next(parent_list_entry, list_entry);
}
907
/* Invoke a node's 'get_keys' operational-data callback (with debug logging). */
int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
			 struct yang_list_keys *keys)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_keys): node [%s] list_entry [%p]",
	       nb_node->xpath, list_entry);

	return nb_node->cbs.get_keys(list_entry, keys);
}
917
/* Invoke a node's 'lookup_entry' callback (with debug logging). */
const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
				     const void *parent_list_entry,
				     const struct yang_list_keys *keys)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
	       nb_node->xpath, parent_list_entry);

	return nb_node->cbs.lookup_entry(parent_list_entry, keys);
}
928
/* Invoke a node's 'rpc' callback (with debug logging). */
int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
		    const struct list *input, struct list *output)
{
	DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);

	return nb_node->cbs.rpc(xpath, input, output);
}
936
/*
 * Create a new configuration transaction for the given candidate and set of
 * changes. Fails (returning NULL) when the running configuration is locked by
 * another client or when another transaction is already in progress. On
 * success the transaction takes ownership of the changes tree.
 */
static struct nb_transaction *
nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
		   enum nb_client client, const void *user, const char *comment)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(client, user)) {
		flog_warn(
			EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			"%s: running configuration is locked by another client",
			__func__);
		return NULL;
	}

	if (transaction_in_progress) {
		flog_warn(
			EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			"%s: error - there's already another transaction in progress",
			__func__);
		return NULL;
	}
	/* Only one transaction may exist at a time; cleared by
	 * nb_transaction_free(). */
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->client = client;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	/* Shallow copy of the RB-tree head: the records now belong to the
	 * transaction. */
	transaction->changes = *changes;

	return transaction;
}
970
/* Free a transaction and its changes, and allow a new transaction to start. */
static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	transaction_in_progress = false;
}
977
/*
 * Process all configuration changes associated to a transaction, invoking the
 * configuration callbacks for the given event. During 'prepare', the first
 * failure aborts processing and is returned; during 'abort'/'apply', errors
 * are not propagated (see comment below).
 */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction)
{
	struct nb_config_cb *cb;

	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		int ret;

		/*
		 * Only try to release resources that were allocated
		 * successfully.
		 */
		if (event == NB_EV_ABORT && change->prepare_ok == false)
			break;

		/* Call the appropriate callback. */
		ret = nb_callback_configuration(event, change);
		switch (event) {
		case NB_EV_PREPARE:
			if (ret != NB_OK)
				return ret;
			/* Remember which changes were prepared so that a
			 * subsequent abort only rolls back those. */
			change->prepare_ok = true;
			break;
		case NB_EV_ABORT:
		case NB_EV_APPLY:
			/*
			 * At this point it's not possible to reject the
			 * transaction anymore, so any failure here can lead to
			 * inconsistencies and should be treated as a bug.
			 * Operations prone to errors, like validations and
			 * resource allocations, should be performed during the
			 * 'prepare' phase.
			 */
			break;
		default:
			break;
		}
	}

	return NB_OK;
}
1021
1022 static struct nb_config_cb *
1023 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1024 const struct nb_node *nb_node,
1025 const struct lyd_node *dnode)
1026 {
1027 struct nb_config_cb *cb;
1028
1029 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1030 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1031 cb->nb_node = nb_node;
1032 cb->dnode = dnode;
1033 RB_INSERT(nb_config_cbs, cbs, cb);
1034
1035 return cb;
1036 }
1037
1038 static struct nb_config_cb *
1039 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1040 const struct nb_node *nb_node)
1041 {
1042 struct nb_config_cb s;
1043
1044 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1045 s.nb_node = nb_node;
1046 return RB_FIND(nb_config_cbs, cbs, &s);
1047 }
1048
/*
 * Call the 'apply_finish' callbacks: walk up the data tree from every changed
 * node, collecting each distinct node that implements 'apply_finish', then
 * invoke them sorted by priority.
 */
static void nb_transaction_apply_finish(struct nb_transaction *transaction)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = dnode->parent;
			/* NOTE(review): a deleted top-level node (no parent)
			 * breaks out of the whole loop, skipping the
			 * remaining changes — confirm this is intended rather
			 * than 'continue'. */
			if (!dnode)
				break;

			/*
			 * The dnode from 'delete' callbacks point to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			char xpath[XPATH_MAXLEN];
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
				goto next;

			nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);

		next:
			dnode = dnode->parent;
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs) {
		if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
			nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
					cb->xpath, NULL);

		(*cb->nb_node->cbs.apply_finish)(cb->dnode);
	}

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}
1125
1126 static int nb_oper_data_iter_children(const struct lys_node *snode,
1127 const char *xpath, const void *list_entry,
1128 const struct yang_list_keys *list_keys,
1129 struct yang_translator *translator,
1130 bool first, uint32_t flags,
1131 nb_oper_data_cb cb, void *arg)
1132 {
1133 struct lys_node *child;
1134
1135 LY_TREE_FOR (snode->child, child) {
1136 int ret;
1137
1138 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1139 list_keys, translator, false,
1140 flags, cb, arg);
1141 if (ret != NB_OK)
1142 return ret;
1143 }
1144
1145 return NB_OK;
1146 }
1147
1148 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1149 const char *xpath, const void *list_entry,
1150 const struct yang_list_keys *list_keys,
1151 struct yang_translator *translator,
1152 uint32_t flags, nb_oper_data_cb cb, void *arg)
1153 {
1154 struct yang_data *data;
1155
1156 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1157 return NB_OK;
1158
1159 /* Ignore list keys. */
1160 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1161 return NB_OK;
1162
1163 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1164 if (data == NULL)
1165 /* Leaf of type "empty" is not present. */
1166 return NB_OK;
1167
1168 return (*cb)(nb_node->snode, translator, data, arg);
1169 }
1170
1171 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1172 const char *xpath,
1173 const void *list_entry,
1174 const struct yang_list_keys *list_keys,
1175 struct yang_translator *translator,
1176 uint32_t flags, nb_oper_data_cb cb,
1177 void *arg)
1178 {
1179 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1180 return NB_OK;
1181
1182 /* Presence containers. */
1183 if (nb_node->cbs.get_elem) {
1184 struct yang_data *data;
1185 int ret;
1186
1187 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1188 if (data == NULL)
1189 /* Presence container is not present. */
1190 return NB_OK;
1191
1192 ret = (*cb)(nb_node->snode, translator, data, arg);
1193 if (ret != NB_OK)
1194 return ret;
1195 }
1196
1197 /* Iterate over the child nodes. */
1198 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1199 list_keys, translator, false, flags,
1200 cb, arg);
1201 }
1202
1203 static int
1204 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1205 const void *parent_list_entry,
1206 const struct yang_list_keys *parent_list_keys,
1207 struct yang_translator *translator, uint32_t flags,
1208 nb_oper_data_cb cb, void *arg)
1209 {
1210 const void *list_entry = NULL;
1211
1212 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1213 return NB_OK;
1214
1215 do {
1216 struct yang_data *data;
1217 int ret;
1218
1219 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1220 list_entry);
1221 if (!list_entry)
1222 /* End of the list. */
1223 break;
1224
1225 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1226 if (data == NULL)
1227 continue;
1228
1229 ret = (*cb)(nb_node->snode, translator, data, arg);
1230 if (ret != NB_OK)
1231 return ret;
1232 } while (list_entry);
1233
1234 return NB_OK;
1235 }
1236
1237 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1238 const char *xpath_list,
1239 const void *parent_list_entry,
1240 const struct yang_list_keys *parent_list_keys,
1241 struct yang_translator *translator,
1242 uint32_t flags, nb_oper_data_cb cb, void *arg)
1243 {
1244 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1245 const void *list_entry = NULL;
1246 uint32_t position = 1;
1247
1248 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1249 return NB_OK;
1250
1251 /* Iterate over all list entries. */
1252 do {
1253 struct yang_list_keys list_keys;
1254 char xpath[XPATH_MAXLEN * 2];
1255 int ret;
1256
1257 /* Obtain list entry. */
1258 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1259 list_entry);
1260 if (!list_entry)
1261 /* End of the list. */
1262 break;
1263
1264 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1265 /* Obtain the list entry keys. */
1266 if (nb_callback_get_keys(nb_node, list_entry,
1267 &list_keys)
1268 != NB_OK) {
1269 flog_warn(EC_LIB_NB_CB_STATE,
1270 "%s: failed to get list keys",
1271 __func__);
1272 return NB_ERR;
1273 }
1274
1275 /* Build XPath of the list entry. */
1276 strlcpy(xpath, xpath_list, sizeof(xpath));
1277 for (unsigned int i = 0; i < list_keys.num; i++) {
1278 snprintf(xpath + strlen(xpath),
1279 sizeof(xpath) - strlen(xpath),
1280 "[%s='%s']", slist->keys[i]->name,
1281 list_keys.key[i]);
1282 }
1283 } else {
1284 /*
1285 * Keyless list - build XPath using a positional index.
1286 */
1287 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1288 position);
1289 position++;
1290 }
1291
1292 /* Iterate over the child nodes. */
1293 ret = nb_oper_data_iter_children(
1294 nb_node->snode, xpath, list_entry, &list_keys,
1295 translator, false, flags, cb, arg);
1296 if (ret != NB_OK)
1297 return ret;
1298 } while (list_entry);
1299
1300 return NB_OK;
1301 }
1302
/*
 * Iterate over the operational data of a single schema node, dispatching to
 * the appropriate per-type iterator, and recurse into its children.
 *
 * 'first' is true only for the top-level call: it suppresses both the
 * NORECURSE check and the appending of the node's own name to the XPath
 * (the caller already provides the full path of the starting node).
 */
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath_parent,
				  const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	char xpath[XPATH_MAXLEN];
	int ret = NB_OK;

	/*
	 * Honor NB_OPER_DATA_ITER_NORECURSE: don't descend into nested
	 * containers or lists (the starting node itself is still processed).
	 */
	if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
	    && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
		return NB_OK;

	/* Update XPath. */
	strlcpy(xpath, xpath_parent, sizeof(xpath));
	if (!first && snode->nodetype != LYS_USES) {
		struct lys_node *parent;

		/* Get the real parent (skip schema-only 'uses' nodes). */
		parent = snode->parent;
		while (parent && parent->nodetype == LYS_USES)
			parent = parent->parent;

		/*
		 * When necessary, include the namespace of the augmenting
		 * module.
		 */
		if (parent && parent->nodetype == LYS_AUGMENT)
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s:%s",
				 snode->module->name, snode->name);
		else
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s",
				 snode->name);
	}

	/* Dispatch based on the schema node type. */
	nb_node = snode->priv;
	switch (snode->nodetype) {
	case LYS_CONTAINER:
		ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
						  list_keys, translator, flags,
						  cb, arg);
		break;
	case LYS_LEAF:
		ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_LEAFLIST:
		ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
						 list_keys, translator, flags,
						 cb, arg);
		break;
	case LYS_LIST:
		ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_USES:
		/* 'uses' nodes have no data of their own; iterate children. */
		ret = nb_oper_data_iter_children(snode, xpath, list_entry,
						 list_keys, translator, false,
						 flags, cb, arg);
		break;
	default:
		break;
	}

	return ret;
}
1376
/*
 * Iterate over the operational data subtree identified by 'xpath', calling
 * 'cb' for every data element found.
 *
 * Only containers and lists can be used as starting points. When the XPath
 * identifies a specific list entry, the northbound 'lookup_entry' callbacks
 * are used to resolve the corresponding list entry pointer before iterating
 * over that entry only.
 *
 * Returns NB_OK on success, NB_ERR_NOT_FOUND when a referenced list entry
 * doesn't exist, or NB_ERR on other failures.
 */
int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
			 uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	const void *list_entry = NULL;
	struct yang_list_keys list_keys;
	struct list *list_dnodes;
	struct lyd_node *dnode, *dn;
	struct listnode *ln;
	int ret;

	nb_node = nb_node_find(xpath);
	if (!nb_node) {
		flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
			  "%s: unknown data path: %s", __func__, xpath);
		return NB_ERR;
	}

	/* For now this function works only with containers and lists. */
	if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		flog_warn(
			EC_LIB_NB_OPERATIONAL_DATA,
			"%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
			__func__, xpath);
		return NB_ERR;
	}

	/*
	 * Create a data tree from the XPath so that we can parse the keys of
	 * all YANG lists (if any).
	 */
	ly_errno = 0;
	dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
			     LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
	if (!dnode) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
			  __func__);
		return NB_ERR;
	}

	/*
	 * Create a linked list to sort the data nodes starting from the root.
	 * Only list nodes with children (i.e. with keys present) are relevant.
	 */
	list_dnodes = list_new();
	for (dn = dnode; dn; dn = dn->parent) {
		if (dn->schema->nodetype != LYS_LIST || !dn->child)
			continue;
		listnode_add_head(list_dnodes, dn);
	}
	/*
	 * Use the northbound callbacks to find list entry pointer corresponding
	 * to the given XPath.
	 */
	for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
		struct lyd_node *child;
		struct nb_node *nn;
		unsigned int n = 0;

		/* Obtain the list entry keys. */
		memset(&list_keys, 0, sizeof(list_keys));
		LY_TREE_FOR (dn->child, child) {
			if (!lys_is_key((struct lys_node_leaf *)child->schema,
					NULL))
				continue;
			strlcpy(list_keys.key[n],
				yang_dnode_get_string(child, NULL),
				sizeof(list_keys.key[n]));
			n++;
		}
		list_keys.num = n;
		/* All keys must be specified for the lookup to be possible. */
		if (list_keys.num
		    != ((struct lys_node_list *)dn->schema)->keys_size) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}

		/* Find the list entry pointer. */
		nn = dn->schema->priv;
		list_entry =
			nb_callback_lookup_entry(nn, list_entry, &list_keys);
		if (list_entry == NULL) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}
	}

	/*
	 * If a list entry was given, iterate over that list entry only.
	 * NOTE(review): when the XPath contains no lists, 'list_keys' is
	 * never initialized here and is passed down as-is - confirm no
	 * callee reads it in that case.
	 */
	if (dnode->schema->nodetype == LYS_LIST && dnode->child)
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, true, flags, cb, arg);
	else
		ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
					     &list_keys, translator, true,
					     flags, cb, arg);

	list_delete(&list_dnodes);
	yang_dnode_free(dnode);

	return ret;
}
1480
/*
 * Check whether the given northbound operation makes sense for the given
 * YANG schema node. Used when validating that the callbacks implemented by
 * a daemon match what the YANG model requires (e.g. 'create' only applies
 * to presence containers, lists, leaf-lists and leaves of type "empty").
 */
bool nb_operation_is_valid(enum nb_operation operation,
			   const struct lys_node *snode)
{
	struct nb_node *nb_node = snode->priv;
	struct lys_node_container *scontainer;
	struct lys_node_leaf *sleaf;

	switch (operation) {
	case NB_OP_CREATE:
		/* Only configuration nodes can be created. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Only "empty" leaves are created (others are
			 * modified). */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base != LY_TYPE_EMPTY)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be created. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MODIFY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* "empty" leaves are created/destroyed, not
			 * modified. */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base == LY_TYPE_EMPTY)
				return false;

			/* List keys can't be modified. */
			if (lys_is_key(sleaf, NULL))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_DESTROY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lys_node_leaf *)snode;

			/* List keys can't be deleted. */
			if (lys_is_key(sleaf, NULL))
				return false;

			/*
			 * Only optional leafs can be deleted, or leafs whose
			 * parent is a case statement.
			 */
			if (snode->parent->nodetype == LYS_CASE)
				return true;
			if (sleaf->when)
				return true;
			if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
			    || sleaf->dflt)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be deleted. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MOVE:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LIST:
		case LYS_LEAFLIST:
			/* Only user-ordered lists/leaf-lists can be moved. */
			if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_PRE_VALIDATE:
	case NB_OP_APPLY_FINISH:
		/* Valid for any configuration node. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;
		return true;
	case NB_OP_GET_ELEM:
		/* Only state (read-only) nodes provide 'get_elem'. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
		case LYS_LEAFLIST:
			break;
		case LYS_CONTAINER:
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_NEXT:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			break;
		case LYS_LEAFLIST:
			if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			/* Keyless lists have no keys to get or look up. */
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_RPC:
		/* RPCs/actions are neither configuration nor state. */
		if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_RPC:
		case LYS_ACTION:
			break;
		default:
			return false;
		}
		return true;
	default:
		return false;
	}
}
1644
/* Hook called for each northbound client that handles YANG notifications. */
DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
	    (xpath, arguments));

/*
 * Send a YANG notification identified by 'xpath' to all registered
 * northbound clients via the 'nb_notification_send' hook.
 *
 * This function takes ownership of 'arguments' and frees it before
 * returning. Returns the value produced by the hook call.
 */
int nb_notification_send(const char *xpath, struct list *arguments)
{
	int ret;

	DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);

	ret = hook_call(nb_notification_send, xpath, arguments);
	if (arguments)
		list_delete(&arguments);

	return ret;
}
1660
/* Running configuration user pointers management. */
struct nb_config_entry {
	/* XPath of the data node this entry is associated with. Must be the
	 * first member: running_config_entry_key_make() hashes the structure
	 * pointer as a string. */
	char xpath[XPATH_MAXLEN];

	/* Opaque user pointer associated with the data node. */
	void *entry;
};
1666
1667 static bool running_config_entry_cmp(const void *value1, const void *value2)
1668 {
1669 const struct nb_config_entry *c1 = value1;
1670 const struct nb_config_entry *c2 = value2;
1671
1672 return strmatch(c1->xpath, c2->xpath);
1673 }
1674
/*
 * Hash key function: hashes the entry's XPath. Relies on 'xpath' being the
 * first member of struct nb_config_entry, so the structure pointer can be
 * treated as a string pointer.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
1679
1680 static void *running_config_entry_alloc(void *p)
1681 {
1682 struct nb_config_entry *new, *key = p;
1683
1684 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1685 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1686
1687 return new;
1688 }
1689
/* Free one configuration entry (used as the hash cleanup callback too). */
static void running_config_entry_free(void *arg)
{
	XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
}
1694
1695 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1696 {
1697 struct nb_config_entry *config, s;
1698
1699 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1700 config = hash_get(running_config_entries, &s,
1701 running_config_entry_alloc);
1702 config->entry = entry;
1703 }
1704
/*
 * Release the user pointer associated with 'dnode' (if any) and,
 * recursively, the user pointers of all of its children.
 *
 * Returns the entry that was associated with 'dnode' itself, or NULL if
 * there was none (child entries are discarded).
 */
static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
{
	struct nb_config_entry *config, s;
	struct lyd_node *child;
	void *entry = NULL;

	yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
	config = hash_release(running_config_entries, &s);
	if (config) {
		entry = config->entry;
		running_config_entry_free(config);
	}

	/* Unset user pointers from the child nodes. */
	if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
		LY_TREE_FOR (dnode->child, child) {
			(void)nb_running_unset_entry_helper(child);
		}
	}

	return entry;
}
1727
/*
 * Remove and return the user pointer associated with 'dnode', recursively
 * clearing the user pointers of its children as well. Asserts that an
 * entry existed for 'dnode'.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);

	return entry;
}
1737
1738 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1739 bool abort_if_not_found)
1740 {
1741 const struct lyd_node *orig_dnode = dnode;
1742 char xpath_buf[XPATH_MAXLEN];
1743
1744 assert(dnode || xpath);
1745
1746 if (!dnode)
1747 dnode = yang_dnode_get(running_config->dnode, xpath);
1748
1749 while (dnode) {
1750 struct nb_config_entry *config, s;
1751
1752 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1753 config = hash_lookup(running_config_entries, &s);
1754 if (config)
1755 return config->entry;
1756
1757 dnode = dnode->parent;
1758 }
1759
1760 if (!abort_if_not_found)
1761 return NULL;
1762
1763 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1764 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1765 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1766 zlog_backtrace(LOG_ERR);
1767 abort();
1768 }
1769
1770 /* Logging functions. */
1771 const char *nb_event_name(enum nb_event event)
1772 {
1773 switch (event) {
1774 case NB_EV_VALIDATE:
1775 return "validate";
1776 case NB_EV_PREPARE:
1777 return "prepare";
1778 case NB_EV_ABORT:
1779 return "abort";
1780 case NB_EV_APPLY:
1781 return "apply";
1782 default:
1783 return "unknown";
1784 }
1785 }
1786
1787 const char *nb_operation_name(enum nb_operation operation)
1788 {
1789 switch (operation) {
1790 case NB_OP_CREATE:
1791 return "create";
1792 case NB_OP_MODIFY:
1793 return "modify";
1794 case NB_OP_DESTROY:
1795 return "destroy";
1796 case NB_OP_MOVE:
1797 return "move";
1798 case NB_OP_PRE_VALIDATE:
1799 return "pre_validate";
1800 case NB_OP_APPLY_FINISH:
1801 return "apply_finish";
1802 case NB_OP_GET_ELEM:
1803 return "get_elem";
1804 case NB_OP_GET_NEXT:
1805 return "get_next";
1806 case NB_OP_GET_KEYS:
1807 return "get_keys";
1808 case NB_OP_LOOKUP_ENTRY:
1809 return "lookup_entry";
1810 case NB_OP_RPC:
1811 return "rpc";
1812 default:
1813 return "unknown";
1814 }
1815 }
1816
1817 const char *nb_err_name(enum nb_error error)
1818 {
1819 switch (error) {
1820 case NB_OK:
1821 return "ok";
1822 case NB_ERR:
1823 return "generic error";
1824 case NB_ERR_NO_CHANGES:
1825 return "no changes";
1826 case NB_ERR_NOT_FOUND:
1827 return "element not found";
1828 case NB_ERR_LOCKED:
1829 return "resource is locked";
1830 case NB_ERR_VALIDATION:
1831 return "validation error";
1832 case NB_ERR_RESOURCE:
1833 return "failed to allocate resource";
1834 case NB_ERR_INCONSISTENCY:
1835 return "internal inconsistency";
1836 default:
1837 return "unknown";
1838 }
1839 }
1840
1841 const char *nb_client_name(enum nb_client client)
1842 {
1843 switch (client) {
1844 case NB_CLIENT_CLI:
1845 return "CLI";
1846 case NB_CLIENT_CONFD:
1847 return "ConfD";
1848 case NB_CLIENT_SYSREPO:
1849 return "Sysrepo";
1850 case NB_CLIENT_GRPC:
1851 return "gRPC";
1852 default:
1853 return "unknown";
1854 }
1855 }
1856
1857 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1858 {
1859 for (size_t i = 0; module->nodes[i].xpath; i++) {
1860 struct nb_node *nb_node;
1861 uint32_t priority;
1862
1863 nb_node = nb_node_find(module->nodes[i].xpath);
1864 if (!nb_node) {
1865 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1866 "%s: unknown data path: %s", __func__,
1867 module->nodes[i].xpath);
1868 continue;
1869 }
1870
1871 nb_node->cbs = module->nodes[i].cbs;
1872 priority = module->nodes[i].priority;
1873 if (priority != 0)
1874 nb_node->priority = priority;
1875 }
1876 }
1877
/*
 * Initialize the northbound layer: load the given YANG modules, create the
 * northbound nodes for all schema nodes, install the per-module callbacks,
 * create the (initially empty) running configuration and initialize the
 * northbound CLI.
 *
 * Exits the program when the northbound callbacks fail to validate against
 * the loaded YANG models.
 */
void nb_init(struct thread_master *tm,
	     const struct frr_yang_module_info *modules[], size_t nmodules)
{
	unsigned int errors = 0;

	/* Load YANG modules. */
	for (size_t i = 0; i < nmodules; i++)
		yang_module_load(modules[i]->name);

	/* Create a nb_node for all YANG schema nodes. */
	nb_nodes_create();

	/* Load northbound callbacks. */
	for (size_t i = 0; i < nmodules; i++)
		nb_load_callbacks(modules[i]);

	/* Validate northbound callbacks. */
	yang_snodes_iterate_all(nb_node_validate, 0, &errors);
	if (errors > 0) {
		flog_err(
			EC_LIB_NB_CBS_VALIDATION,
			"%s: failed to validate northbound callbacks: %u error(s)",
			__func__, errors);
		exit(1);
	}

	/* Create an empty running configuration. */
	running_config = nb_config_new(NULL);
	running_config_entries = hash_create(running_config_entry_key_make,
					     running_config_entry_cmp,
					     "Running Configuration Entries");
	pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);

	/* Initialize the northbound CLI. */
	nb_cli_init(tm);
}
1914
/*
 * Tear down the northbound layer: shut down the northbound CLI, delete all
 * northbound nodes and release the running configuration and its associated
 * user-pointer hash table. Counterpart of nb_init().
 */
void nb_terminate(void)
{
	/* Terminate the northbound CLI. */
	nb_cli_terminate();

	/* Delete all nb_node's from all YANG modules. */
	nb_nodes_delete();

	/* Delete the running configuration. */
	hash_clean(running_config_entries, running_config_entry_free);
	hash_free(running_config_entries);
	nb_config_free(running_config);
	pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}