1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "frr_pthread.h"
30 #include "northbound.h"
31 #include "northbound_cli.h"
32 #include "northbound_db.h"
33
34 DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
35 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
36 DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
37
38 /* Running configuration - shouldn't be modified directly. */
39 struct nb_config *running_config;
40
41 /* Hash table of user pointers associated with configuration entries. */
42 static struct hash *running_config_entries;
43
44 /* Management lock for the running configuration. */
45 static struct {
46 /* Mutex protecting this structure. */
47 pthread_mutex_t mtx;
48
49 /* Actual lock. */
50 bool locked;
51
52 /* Northbound client who owns this lock. */
53 enum nb_client owner_client;
54
55 /* Northbound user who owns this lock. */
56 const void *owner_user;
57 } running_config_mgmt_lock;
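/*
 * Illustrative sketch (not part of this file; "session" is a hypothetical
 * client-specific pointer): a client can take this lock to prevent other
 * clients from changing the running configuration while it works on a
 * candidate, e.g.:
 *
 *     if (nb_running_lock(NB_CLIENT_CLI, session) == 0) {
 *             ... edit and commit a candidate configuration ...
 *             nb_running_unlock(NB_CLIENT_CLI, session);
 *     }
 */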
58
59 /*
60 * Global lock used to prevent multiple configuration transactions from
61 * happening concurrently.
62 */
63 static bool transaction_in_progress;
64
65 static int nb_callback_configuration(const enum nb_event event,
66 struct nb_config_change *change);
67 static void nb_log_callback(const enum nb_event event,
68 enum nb_operation operation, const char *xpath,
69 const char *value);
70 static struct nb_transaction *nb_transaction_new(struct nb_config *config,
71 struct nb_config_cbs *changes,
72 enum nb_client client,
73 const void *user,
74 const char *comment);
75 static void nb_transaction_free(struct nb_transaction *transaction);
76 static int nb_transaction_process(enum nb_event event,
77 struct nb_transaction *transaction);
78 static void nb_transaction_apply_finish(struct nb_transaction *transaction);
79 static int nb_oper_data_iter_node(const struct lys_node *snode,
80 const char *xpath, const void *list_entry,
81 const struct yang_list_keys *list_keys,
82 struct yang_translator *translator,
83 bool first, uint32_t flags,
84 nb_oper_data_cb cb, void *arg);
85
86 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
87 {
88 bool *config_only = arg;
89
90 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
91 *config_only = false;
92 return YANG_ITER_STOP;
93 }
94
95 return YANG_ITER_CONTINUE;
96 }
97
98 static int nb_node_new_cb(const struct lys_node *snode, void *arg)
99 {
100 struct nb_node *nb_node;
101 struct lys_node *sparent, *sparent_list;
102
103 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
104 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
105 sizeof(nb_node->xpath));
106 nb_node->priority = NB_DFLT_PRIORITY;
107 sparent = yang_snode_real_parent(snode);
108 if (sparent)
109 nb_node->parent = sparent->priv;
110 sparent_list = yang_snode_parent_list(snode);
111 if (sparent_list)
112 nb_node->parent_list = sparent_list->priv;
113
114 /* Set flags. */
115 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
116 bool config_only = true;
117
118 yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
119 YANG_ITER_ALLOW_AUGMENTATIONS,
120 &config_only);
121 if (config_only)
122 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
123 }
124 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
125 struct lys_node_list *slist;
126
127 slist = (struct lys_node_list *)snode;
128 if (slist->keys_size == 0)
129 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
130 }
131
132 /*
133 * Link the northbound node and the libyang schema node with one
134 * another.
135 */
136 nb_node->snode = snode;
137 lys_set_private(snode, nb_node);
138
139 return YANG_ITER_CONTINUE;
140 }
141
142 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
143 {
144 struct nb_node *nb_node;
145
146 nb_node = snode->priv;
147 lys_set_private(snode, NULL);
148 XFREE(MTYPE_NB_NODE, nb_node);
149
150 return YANG_ITER_CONTINUE;
151 }
152
153 void nb_nodes_create(void)
154 {
155 yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
156 }
157
158 void nb_nodes_delete(void)
159 {
160 yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
161 }
162
163 struct nb_node *nb_node_find(const char *xpath)
164 {
165 const struct lys_node *snode;
166
167 /*
168 * Use libyang to find the schema node associated to the xpath and get
169 * the northbound node from there (snode private pointer).
170 */
171 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
172 if (!snode)
173 return NULL;
174
175 return snode->priv;
176 }
177
178 static int nb_node_validate_cb(const struct nb_node *nb_node,
179 enum nb_operation operation,
180 int callback_implemented, bool optional)
181 {
182 bool valid;
183
184 valid = nb_operation_is_valid(operation, nb_node->snode);
185
186 if (!valid && callback_implemented)
187 flog_warn(EC_LIB_NB_CB_UNNEEDED,
188 "unneeded '%s' callback for '%s'",
189 nb_operation_name(operation), nb_node->xpath);
190
191 if (!optional && valid && !callback_implemented) {
192 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
193 nb_operation_name(operation), nb_node->xpath);
194 return 1;
195 }
196
197 return 0;
198 }
199
200 /*
201 * Check if the required callbacks were implemented for the given northbound
202 * node.
203 */
204 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
206 {
207 unsigned int error = 0;
208
209 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
210 !!nb_node->cbs.create, false);
211 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
212 !!nb_node->cbs.modify, false);
213 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
214 !!nb_node->cbs.destroy, false);
215 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
216 false);
217 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
218 !!nb_node->cbs.pre_validate, true);
219 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
220 !!nb_node->cbs.apply_finish, true);
221 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
222 !!nb_node->cbs.get_elem, false);
223 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
224 !!nb_node->cbs.get_next, false);
225 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
226 !!nb_node->cbs.get_keys, false);
227 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
228 !!nb_node->cbs.lookup_entry, false);
229 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
230 false);
231
232 return error;
233 }
234
235 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
236 {
237 /* Top-level nodes can have any priority. */
238 if (!nb_node->parent)
239 return 0;
240
241 if (nb_node->priority < nb_node->parent->priority) {
242 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
243 "node has higher priority than its parent [xpath %s]",
244 nb_node->xpath);
245 return 1;
246 }
247
248 return 0;
249 }
250
251 static int nb_node_validate(const struct lys_node *snode, void *arg)
252 {
253 struct nb_node *nb_node = snode->priv;
254 unsigned int *errors = arg;
255
256 /* Validate callbacks and priority. */
257 *errors += nb_node_validate_cbs(nb_node);
258 *errors += nb_node_validate_priority(nb_node);
259
260 return YANG_ITER_CONTINUE;
261 }
262
263 struct nb_config *nb_config_new(struct lyd_node *dnode)
264 {
265 struct nb_config *config;
266
267 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
268 if (dnode)
269 config->dnode = dnode;
270 else
271 config->dnode = yang_dnode_new(ly_native_ctx, true);
272 config->version = 0;
273
274 return config;
275 }
276
277 void nb_config_free(struct nb_config *config)
278 {
279 if (config->dnode)
280 yang_dnode_free(config->dnode);
281 XFREE(MTYPE_NB_CONFIG, config);
282 }
283
284 struct nb_config *nb_config_dup(const struct nb_config *config)
285 {
286 struct nb_config *dup;
287
288 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
289 dup->dnode = yang_dnode_dup(config->dnode);
290 dup->version = config->version;
291
292 return dup;
293 }
294
295 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
296 bool preserve_source)
297 {
298 int ret;
299
300 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
301 if (ret != 0)
302 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
303
304 if (!preserve_source)
305 nb_config_free(config_src);
306
307 return (ret == 0) ? NB_OK : NB_ERR;
308 }
309
310 void nb_config_replace(struct nb_config *config_dst,
311 struct nb_config *config_src, bool preserve_source)
312 {
313 /* Update version. */
314 if (config_src->version != 0)
315 config_dst->version = config_src->version;
316
317 /* Update dnode. */
318 if (config_dst->dnode)
319 yang_dnode_free(config_dst->dnode);
320 if (preserve_source) {
321 config_dst->dnode = yang_dnode_dup(config_src->dnode);
322 } else {
323 config_dst->dnode = config_src->dnode;
324 config_src->dnode = NULL;
325 nb_config_free(config_src);
326 }
327 }
328
329 /* Generate the nb_config_cbs tree. */
330 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
331 const struct nb_config_cb *b)
332 {
333 /* Sort by priority first. */
334 if (a->nb_node->priority < b->nb_node->priority)
335 return -1;
336 if (a->nb_node->priority > b->nb_node->priority)
337 return 1;
338
339 /*
340 * Preserve the order of the configuration changes as told by libyang.
341 */
342 return a->seq - b->seq;
343 }
344 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
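/*
 * Ordering example (illustrative): three changes with (priority, seq) of
 * (20, 0), (10, 2) and (10, 1) are visited as (10, 1), (10, 2), (20, 0):
 * lower priority values first and, within the same priority, the order in
 * which libyang reported the changes.
 */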
345
346 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
347 enum nb_operation operation,
348 uint32_t *seq,
349 const struct lyd_node *dnode)
350 {
351 struct nb_config_change *change;
352
353 change = XCALLOC(MTYPE_TMP, sizeof(*change));
354 change->cb.operation = operation;
355 change->cb.seq = *seq;
356 *seq = *seq + 1;
357 change->cb.nb_node = dnode->schema->priv;
358 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
359 change->cb.dnode = dnode;
360
361 RB_INSERT(nb_config_cbs, changes, &change->cb);
362 }
363
364 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
365 {
366 while (!RB_EMPTY(nb_config_cbs, changes)) {
367 struct nb_config_change *change;
368
369 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
370 changes);
371 RB_REMOVE(nb_config_cbs, changes, &change->cb);
372 XFREE(MTYPE_TMP, change);
373 }
374 }
375
376 /*
377 * Helper function used when calculating the delta between two different
378 * configurations. Given a new subtree, calculate all new YANG data nodes,
379 * excluding default leafs and leaf-lists. This is a recursive function.
380 */
381 static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
382 struct nb_config_cbs *changes)
383 {
384 enum nb_operation operation;
385 struct lyd_node *child;
386
387 switch (dnode->schema->nodetype) {
388 case LYS_LEAF:
389 case LYS_LEAFLIST:
390 if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
391 break;
392
393 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
394 operation = NB_OP_CREATE;
395 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
396 operation = NB_OP_MODIFY;
397 else
398 return;
399
400 nb_config_diff_add_change(changes, operation, seq, dnode);
401 break;
402 case LYS_CONTAINER:
403 case LYS_LIST:
404 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
405 nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
406 dnode);
407
408 /* Process child nodes recursively. */
409 LY_TREE_FOR (dnode->child, child) {
410 nb_config_diff_created(child, seq, changes);
411 }
412 break;
413 default:
414 break;
415 }
416 }
417
418 static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
419 struct nb_config_cbs *changes)
420 {
421 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
422 nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
423 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
424 struct lyd_node *child;
425
426 /*
427 * Non-presence containers need special handling since they
428 * don't have "destroy" callbacks. In this case, call the
429 * "destroy" callbacks of their child nodes when applicable
430 * (i.e. optional nodes).
431 */
432 LY_TREE_FOR (dnode->child, child) {
433 nb_config_diff_deleted(child, seq, changes);
434 }
435 }
436 }
437
438 /* Calculate the delta between two different configurations. */
439 static void nb_config_diff(const struct nb_config *config1,
440 const struct nb_config *config2,
441 struct nb_config_cbs *changes)
442 {
443 struct lyd_difflist *diff;
444 uint32_t seq = 0;
445
446 diff = lyd_diff(config1->dnode, config2->dnode,
447 LYD_DIFFOPT_WITHDEFAULTS);
448 assert(diff);
449
450 for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
451 LYD_DIFFTYPE type;
452 struct lyd_node *dnode;
453
454 type = diff->type[i];
455
456 switch (type) {
457 case LYD_DIFF_CREATED:
458 dnode = diff->second[i];
459 nb_config_diff_created(dnode, &seq, changes);
460 break;
461 case LYD_DIFF_DELETED:
462 dnode = diff->first[i];
463 nb_config_diff_deleted(dnode, &seq, changes);
464 break;
465 case LYD_DIFF_CHANGED:
466 dnode = diff->second[i];
467 nb_config_diff_add_change(changes, NB_OP_MODIFY, &seq,
468 dnode);
469 break;
470 case LYD_DIFF_MOVEDAFTER1:
471 case LYD_DIFF_MOVEDAFTER2:
472 default:
473 continue;
474 }
475 }
476
477 lyd_free_diff(diff);
478 }
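/*
 * Example (illustrative, hypothetical data): if config2 sets the leaf
 * "/frr-example:example/enabled" and no longer contains a list entry that
 * exists in config1, the resulting tree holds one NB_OP_MODIFY change for
 * the leaf and one NB_OP_DESTROY change for the list entry, sorted by
 * nb_config_cb_compare().
 */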
479
480 int nb_candidate_edit(struct nb_config *candidate,
481 const struct nb_node *nb_node,
482 enum nb_operation operation, const char *xpath,
483 const struct yang_data *previous,
484 const struct yang_data *data)
485 {
486 struct lyd_node *dnode;
487 char xpath_edit[XPATH_MAXLEN];
488
489 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
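/*
 * Example (hypothetical data): creating the leaf-list entry "10.0.1.0/24"
 * yields an xpath_edit such as
 * "/frr-example:example/prefix-list[.='10.0.1.0/24']".
 */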
490 if (nb_node->snode->nodetype == LYS_LEAFLIST)
491 snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
492 data->value);
493 else
494 strlcpy(xpath_edit, xpath, sizeof(xpath_edit));
495
496 switch (operation) {
497 case NB_OP_CREATE:
498 case NB_OP_MODIFY:
499 ly_errno = 0;
500 dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
501 xpath_edit, (void *)data->value, 0,
502 LYD_PATH_OPT_UPDATE);
503 if (!dnode && ly_errno) {
504 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
505 __func__);
506 return NB_ERR;
507 }
508
509 /*
510 * If a new node was created, call lyd_validate() only to create
511 * default child nodes.
512 */
513 if (dnode) {
514 lyd_schema_sort(dnode, 0);
515 lyd_validate(&dnode,
516 LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
517 ly_native_ctx);
518 }
519 break;
520 case NB_OP_DESTROY:
521 dnode = yang_dnode_get(candidate->dnode, xpath_edit);
522 if (!dnode)
523 /*
524 * Return a special error code so the caller can choose
525 * whether to ignore it or not.
526 */
527 return NB_ERR_NOT_FOUND;
528 lyd_free(dnode);
529 break;
530 case NB_OP_MOVE:
531 /* TODO: update configuration. */
532 break;
533 default:
534 flog_warn(EC_LIB_DEVELOPMENT,
535 "%s: unknown operation (%u) [xpath %s]", __func__,
536 operation, xpath_edit);
537 return NB_ERR;
538 }
539
540 return NB_OK;
541 }
542
543 bool nb_candidate_needs_update(const struct nb_config *candidate)
544 {
545 if (candidate->version < running_config->version)
546 return true;
547
548 return false;
549 }
550
551 int nb_candidate_update(struct nb_config *candidate)
552 {
553 struct nb_config *updated_config;
554
555 updated_config = nb_config_dup(running_config);
556 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
557 return NB_ERR;
558
559 nb_config_replace(candidate, updated_config, false);
560
561 return NB_OK;
562 }
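/*
 * Illustrative sketch (assumption): long-lived candidate configurations can
 * be refreshed against a newer running configuration before being committed:
 *
 *     if (nb_candidate_needs_update(candidate))
 *             nb_candidate_update(candidate);
 */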
563
564 /*
565 * Perform YANG syntactic and semantic validation.
566 *
567 * WARNING: lyd_validate() can change the configuration as part of the
568 * validation process.
569 */
570 static int nb_candidate_validate_yang(struct nb_config *candidate)
571 {
572 if (lyd_validate(&candidate->dnode,
573 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
574 ly_native_ctx)
575 != 0)
576 return NB_ERR_VALIDATION;
577
578 return NB_OK;
579 }
580
581 /* Perform code-level validation using the northbound callbacks. */
582 static int nb_candidate_validate_code(struct nb_config *candidate,
583 struct nb_config_cbs *changes)
584 {
585 struct nb_config_cb *cb;
586 struct lyd_node *root, *next, *child;
587 int ret;
588
589 /* First validate the candidate as a whole. */
590 LY_TREE_FOR (candidate->dnode, root) {
591 LY_TREE_DFS_BEGIN (root, next, child) {
592 struct nb_node *nb_node;
593
594 nb_node = child->schema->priv;
595 if (!nb_node->cbs.pre_validate)
596 goto next;
597
598 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config,
599 DEBUG_MODE_ALL)) {
600 char xpath[XPATH_MAXLEN];
601
602 yang_dnode_get_path(child, xpath,
603 sizeof(xpath));
604 nb_log_callback(NB_EV_VALIDATE,
605 NB_OP_PRE_VALIDATE, xpath,
606 NULL);
607 }
608
609 ret = (*nb_node->cbs.pre_validate)(child);
610 if (ret != NB_OK)
611 return NB_ERR_VALIDATION;
612
613 next:
614 LY_TREE_DFS_END(root, next, child);
615 }
616 }
617
618 /* Now validate the configuration changes. */
619 RB_FOREACH (cb, nb_config_cbs, changes) {
620 struct nb_config_change *change = (struct nb_config_change *)cb;
621
622 ret = nb_callback_configuration(NB_EV_VALIDATE, change);
623 if (ret != NB_OK)
624 return NB_ERR_VALIDATION;
625 }
626
627 return NB_OK;
628 }
629
630 int nb_candidate_validate(struct nb_config *candidate)
631 {
632 struct nb_config_cbs changes;
633 int ret;
634
635 if (nb_candidate_validate_yang(candidate) != NB_OK)
636 return NB_ERR_VALIDATION;
637
638 RB_INIT(nb_config_cbs, &changes);
639 nb_config_diff(running_config, candidate, &changes);
640 ret = nb_candidate_validate_code(candidate, &changes);
641 nb_config_diff_del_changes(&changes);
642
643 return ret;
644 }
645
646 int nb_candidate_commit_prepare(struct nb_config *candidate,
647 enum nb_client client, const void *user,
648 const char *comment,
649 struct nb_transaction **transaction)
650 {
651 struct nb_config_cbs changes;
652
653 if (nb_candidate_validate_yang(candidate) != NB_OK) {
654 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
655 "%s: failed to validate candidate configuration",
656 __func__);
657 return NB_ERR_VALIDATION;
658 }
659
660 RB_INIT(nb_config_cbs, &changes);
661 nb_config_diff(running_config, candidate, &changes);
662 if (RB_EMPTY(nb_config_cbs, &changes))
663 return NB_ERR_NO_CHANGES;
664
665 if (nb_candidate_validate_code(candidate, &changes) != NB_OK) {
666 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
667 "%s: failed to validate candidate configuration",
668 __func__);
669 nb_config_diff_del_changes(&changes);
670 return NB_ERR_VALIDATION;
671 }
672
673 *transaction =
674 nb_transaction_new(candidate, &changes, client, user, comment);
675 if (*transaction == NULL) {
676 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
677 "%s: failed to create transaction", __func__);
678 nb_config_diff_del_changes(&changes);
679 return NB_ERR_LOCKED;
680 }
681
682 return nb_transaction_process(NB_EV_PREPARE, *transaction);
683 }
684
685 void nb_candidate_commit_abort(struct nb_transaction *transaction)
686 {
687 (void)nb_transaction_process(NB_EV_ABORT, transaction);
688 nb_transaction_free(transaction);
689 }
690
691 void nb_candidate_commit_apply(struct nb_transaction *transaction,
692 bool save_transaction, uint32_t *transaction_id)
693 {
694 (void)nb_transaction_process(NB_EV_APPLY, transaction);
695 nb_transaction_apply_finish(transaction);
696
697 /* Replace the running configuration with the candidate. */
698 transaction->config->version++;
699 nb_config_replace(running_config, transaction->config, true);
700
701 /* Record transaction. */
702 if (save_transaction
703 && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
704 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
705 "%s: failed to record transaction", __func__);
706
707 nb_transaction_free(transaction);
708 }
709
710 int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
711 const void *user, bool save_transaction,
712 const char *comment, uint32_t *transaction_id)
713 {
714 struct nb_transaction *transaction = NULL;
715 int ret;
716
717 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
718 &transaction);
719 /*
720 * Apply the changes if the preparation phase succeeded. Otherwise abort
721 * the transaction.
722 */
723 if (ret == NB_OK)
724 nb_candidate_commit_apply(transaction, save_transaction,
725 transaction_id);
726 else if (transaction != NULL)
727 nb_candidate_commit_abort(transaction);
728
729 return ret;
730 }
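/*
 * Usage sketch (illustrative only; nb_node, xpath and data are hypothetical
 * variables): a typical configuration change made by a client looks like:
 *
 *     struct nb_config *candidate = nb_config_dup(running_config);
 *
 *     nb_candidate_edit(candidate, nb_node, NB_OP_MODIFY, xpath, NULL, data);
 *     ret = nb_candidate_commit(candidate, NB_CLIENT_CLI, NULL, true,
 *                               "example change", NULL);
 *     nb_config_free(candidate);
 */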
731
732 int nb_running_lock(enum nb_client client, const void *user)
733 {
734 int ret = -1;
735
736 frr_with_mutex(&running_config_mgmt_lock.mtx) {
737 if (!running_config_mgmt_lock.locked) {
738 running_config_mgmt_lock.locked = true;
739 running_config_mgmt_lock.owner_client = client;
740 running_config_mgmt_lock.owner_user = user;
741 ret = 0;
742 }
743 }
744
745 return ret;
746 }
747
748 int nb_running_unlock(enum nb_client client, const void *user)
749 {
750 int ret = -1;
751
752 frr_with_mutex(&running_config_mgmt_lock.mtx) {
753 if (running_config_mgmt_lock.locked
754 && running_config_mgmt_lock.owner_client == client
755 && running_config_mgmt_lock.owner_user == user) {
756 running_config_mgmt_lock.locked = false;
757 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
758 running_config_mgmt_lock.owner_user = NULL;
759 ret = 0;
760 }
761 }
762
763 return ret;
764 }
765
766 int nb_running_lock_check(enum nb_client client, const void *user)
767 {
768 int ret = -1;
769
770 frr_with_mutex(&running_config_mgmt_lock.mtx) {
771 if (!running_config_mgmt_lock.locked
772 || (running_config_mgmt_lock.owner_client == client
773 && running_config_mgmt_lock.owner_user == user))
774 ret = 0;
775 }
776
777 return ret;
778 }
779
780 static void nb_log_callback(const enum nb_event event,
781 enum nb_operation operation, const char *xpath,
782 const char *value)
783 {
784 zlog_debug(
785 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
786 nb_event_name(event), nb_operation_name(operation), xpath,
787 value ? value : "(NULL)");
788 }
789
790 /*
791 * Call the northbound configuration callback associated with a given
792 * configuration change.
793 */
794 static int nb_callback_configuration(const enum nb_event event,
795 struct nb_config_change *change)
796 {
797 enum nb_operation operation = change->cb.operation;
798 const char *xpath = change->cb.xpath;
799 const struct nb_node *nb_node = change->cb.nb_node;
800 const struct lyd_node *dnode = change->cb.dnode;
801 union nb_resource *resource;
802 int ret = NB_ERR;
803
804 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
805 const char *value = "(none)";
806
807 if (dnode && !yang_snode_is_typeless_data(dnode->schema))
808 value = yang_dnode_get_string(dnode, NULL);
809
810 nb_log_callback(event, operation, xpath, value);
811 }
812
813 if (event == NB_EV_VALIDATE)
814 resource = NULL;
815 else
816 resource = &change->resource;
817
818 switch (operation) {
819 case NB_OP_CREATE:
820 ret = (*nb_node->cbs.create)(event, dnode, resource);
821 break;
822 case NB_OP_MODIFY:
823 ret = (*nb_node->cbs.modify)(event, dnode, resource);
824 break;
825 case NB_OP_DESTROY:
826 ret = (*nb_node->cbs.destroy)(event, dnode);
827 break;
828 case NB_OP_MOVE:
829 ret = (*nb_node->cbs.move)(event, dnode);
830 break;
831 default:
832 flog_err(EC_LIB_DEVELOPMENT,
833 "%s: unknown operation (%u) [xpath %s]", __func__,
834 operation, xpath);
835 exit(1);
836 }
837
838 if (ret != NB_OK) {
839 int priority;
840 enum lib_log_refs ref;
841
842 switch (event) {
843 case NB_EV_VALIDATE:
844 priority = LOG_WARNING;
845 ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
846 break;
847 case NB_EV_PREPARE:
848 priority = LOG_WARNING;
849 ref = EC_LIB_NB_CB_CONFIG_PREPARE;
850 break;
851 case NB_EV_ABORT:
852 priority = LOG_WARNING;
853 ref = EC_LIB_NB_CB_CONFIG_ABORT;
854 break;
855 case NB_EV_APPLY:
856 priority = LOG_ERR;
857 ref = EC_LIB_NB_CB_CONFIG_APPLY;
858 break;
859 default:
860 flog_err(EC_LIB_DEVELOPMENT,
861 "%s: unknown event (%u) [xpath %s]",
862 __func__, event, xpath);
863 exit(1);
864 }
865
866 flog(priority, ref,
867 "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
868 __func__, nb_err_name(ret), nb_event_name(event),
869 nb_operation_name(operation), xpath);
870 }
871
872 return ret;
873 }
874
875 struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
876 const char *xpath,
877 const void *list_entry)
878 {
879 DEBUGD(&nb_dbg_cbs_state,
880 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
881 xpath, list_entry);
882
883 return nb_node->cbs.get_elem(xpath, list_entry);
884 }
885
886 const void *nb_callback_get_next(const struct nb_node *nb_node,
887 const void *parent_list_entry,
888 const void *list_entry)
889 {
890 DEBUGD(&nb_dbg_cbs_state,
891 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
892 nb_node->xpath, parent_list_entry, list_entry);
893
894 return nb_node->cbs.get_next(parent_list_entry, list_entry);
895 }
896
897 int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
898 struct yang_list_keys *keys)
899 {
900 DEBUGD(&nb_dbg_cbs_state,
901 "northbound callback (get_keys): node [%s] list_entry [%p]",
902 nb_node->xpath, list_entry);
903
904 return nb_node->cbs.get_keys(list_entry, keys);
905 }
906
907 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
908 const void *parent_list_entry,
909 const struct yang_list_keys *keys)
910 {
911 DEBUGD(&nb_dbg_cbs_state,
912 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
913 nb_node->xpath, parent_list_entry);
914
915 return nb_node->cbs.lookup_entry(parent_list_entry, keys);
916 }
917
918 int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
919 const struct list *input, struct list *output)
920 {
921 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
922
923 return nb_node->cbs.rpc(xpath, input, output);
924 }
925
926 static struct nb_transaction *
927 nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
928 enum nb_client client, const void *user, const char *comment)
929 {
930 struct nb_transaction *transaction;
931
932 if (nb_running_lock_check(client, user)) {
933 flog_warn(
934 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
935 "%s: running configuration is locked by another client",
936 __func__);
937 return NULL;
938 }
939
940 if (transaction_in_progress) {
941 flog_warn(
942 EC_LIB_NB_TRANSACTION_CREATION_FAILED,
943 "%s: error - there's already another transaction in progress",
944 __func__);
945 return NULL;
946 }
947 transaction_in_progress = true;
948
949 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
950 transaction->client = client;
951 if (comment)
952 strlcpy(transaction->comment, comment,
953 sizeof(transaction->comment));
954 transaction->config = config;
955 transaction->changes = *changes;
956
957 return transaction;
958 }
959
960 static void nb_transaction_free(struct nb_transaction *transaction)
961 {
962 nb_config_diff_del_changes(&transaction->changes);
963 XFREE(MTYPE_TMP, transaction);
964 transaction_in_progress = false;
965 }
966
967 /* Process all configuration changes associated with a transaction. */
968 static int nb_transaction_process(enum nb_event event,
969 struct nb_transaction *transaction)
970 {
971 struct nb_config_cb *cb;
972
973 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
974 struct nb_config_change *change = (struct nb_config_change *)cb;
975 int ret;
976
977 /*
978 * Only try to release resources that were allocated
979 * successfully.
980 */
981 if (event == NB_EV_ABORT && change->prepare_ok == false)
982 break;
983
984 /* Call the appropriate callback. */
985 ret = nb_callback_configuration(event, change);
986 switch (event) {
987 case NB_EV_PREPARE:
988 if (ret != NB_OK)
989 return ret;
990 change->prepare_ok = true;
991 break;
992 case NB_EV_ABORT:
993 case NB_EV_APPLY:
994 /*
995 * At this point it's not possible to reject the
996 * transaction anymore, so any failure here can lead to
997 * inconsistencies and should be treated as a bug.
998 * Operations prone to errors, like validations and
999 * resource allocations, should be performed during the
1000 * 'prepare' phase.
1001 */
1002 break;
1003 default:
1004 break;
1005 }
1006 }
1007
1008 return NB_OK;
1009 }
1010
1011 static struct nb_config_cb *
1012 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1013 const struct nb_node *nb_node,
1014 const struct lyd_node *dnode)
1015 {
1016 struct nb_config_cb *cb;
1017
1018 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1019 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1020 cb->nb_node = nb_node;
1021 cb->dnode = dnode;
1022 RB_INSERT(nb_config_cbs, cbs, cb);
1023
1024 return cb;
1025 }
1026
1027 static struct nb_config_cb *
1028 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1029 const struct nb_node *nb_node)
1030 {
1031 struct nb_config_cb s;
1032
1033 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1034 s.nb_node = nb_node;
1035 return RB_FIND(nb_config_cbs, cbs, &s);
1036 }
1037
1038 /* Call the 'apply_finish' callbacks. */
1039 static void nb_transaction_apply_finish(struct nb_transaction *transaction)
1040 {
1041 struct nb_config_cbs cbs;
1042 struct nb_config_cb *cb;
1043
1044 /* Initialize tree of 'apply_finish' callbacks. */
1045 RB_INIT(nb_config_cbs, &cbs);
1046
1047 /* Identify the 'apply_finish' callbacks that need to be called. */
1048 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1049 struct nb_config_change *change = (struct nb_config_change *)cb;
1050 const struct lyd_node *dnode = change->cb.dnode;
1051
1052 /*
1053 * Iterate up to the root of the data tree. When a node is being
1054 * deleted, skip its 'apply_finish' callback if one is defined
1055 * (the 'apply_finish' callbacks from the node ancestors should
1056 * be called though).
1057 */
1058 if (change->cb.operation == NB_OP_DESTROY) {
1059 char xpath[XPATH_MAXLEN];
1060
1061 dnode = dnode->parent;
1062 if (!dnode)
1063 break;
1064
1065 /*
1066 * The dnode from a 'destroy' change points to an element
1067 * of the running configuration. Use yang_dnode_get()
1068 * to get the corresponding dnode from the candidate
1069 * configuration that is being committed.
1070 */
1071 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1072 dnode = yang_dnode_get(transaction->config->dnode,
1073 xpath);
1074 }
1075 while (dnode) {
1076 char xpath[XPATH_MAXLEN];
1077 struct nb_node *nb_node;
1078
1079 nb_node = dnode->schema->priv;
1080 if (!nb_node->cbs.apply_finish)
1081 goto next;
1082
1083 /*
1084 * Don't call the callback more than once for the same
1085 * data node.
1086 */
1087 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1088 if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
1089 goto next;
1090
1091 nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);
1092
1093 next:
1094 dnode = dnode->parent;
1095 }
1096 }
1097
1098 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
1099 RB_FOREACH (cb, nb_config_cbs, &cbs) {
1100 if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
1101 nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
1102 cb->xpath, NULL);
1103
1104 (*cb->nb_node->cbs.apply_finish)(cb->dnode);
1105 }
1106
1107 /* Release memory. */
1108 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1109 cb = RB_ROOT(nb_config_cbs, &cbs);
1110 RB_REMOVE(nb_config_cbs, &cbs, cb);
1111 XFREE(MTYPE_TMP, cb);
1112 }
1113 }
1114
1115 static int nb_oper_data_iter_children(const struct lys_node *snode,
1116 const char *xpath, const void *list_entry,
1117 const struct yang_list_keys *list_keys,
1118 struct yang_translator *translator,
1119 bool first, uint32_t flags,
1120 nb_oper_data_cb cb, void *arg)
1121 {
1122 struct lys_node *child;
1123
1124 LY_TREE_FOR (snode->child, child) {
1125 int ret;
1126
1127 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1128 list_keys, translator, false,
1129 flags, cb, arg);
1130 if (ret != NB_OK)
1131 return ret;
1132 }
1133
1134 return NB_OK;
1135 }
1136
1137 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1138 const char *xpath, const void *list_entry,
1139 const struct yang_list_keys *list_keys,
1140 struct yang_translator *translator,
1141 uint32_t flags, nb_oper_data_cb cb, void *arg)
1142 {
1143 struct yang_data *data;
1144
1145 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1146 return NB_OK;
1147
1148 /* Ignore list keys. */
1149 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1150 return NB_OK;
1151
1152 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1153 if (data == NULL)
1154 /* Leaf of type "empty" is not present. */
1155 return NB_OK;
1156
1157 return (*cb)(nb_node->snode, translator, data, arg);
1158 }
1159
1160 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1161 const char *xpath,
1162 const void *list_entry,
1163 const struct yang_list_keys *list_keys,
1164 struct yang_translator *translator,
1165 uint32_t flags, nb_oper_data_cb cb,
1166 void *arg)
1167 {
1168 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1169 return NB_OK;
1170
1171 /* Presence containers. */
1172 if (nb_node->cbs.get_elem) {
1173 struct yang_data *data;
1174 int ret;
1175
1176 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1177 if (data == NULL)
1178 /* Presence container is not present. */
1179 return NB_OK;
1180
1181 ret = (*cb)(nb_node->snode, translator, data, arg);
1182 if (ret != NB_OK)
1183 return ret;
1184 }
1185
1186 /* Iterate over the child nodes. */
1187 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1188 list_keys, translator, false, flags,
1189 cb, arg);
1190 }
1191
1192 static int
1193 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1194 const void *parent_list_entry,
1195 const struct yang_list_keys *parent_list_keys,
1196 struct yang_translator *translator, uint32_t flags,
1197 nb_oper_data_cb cb, void *arg)
1198 {
1199 const void *list_entry = NULL;
1200
1201 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1202 return NB_OK;
1203
1204 do {
1205 struct yang_data *data;
1206 int ret;
1207
1208 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1209 list_entry);
1210 if (!list_entry)
1211 /* End of the list. */
1212 break;
1213
1214 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1215 if (data == NULL)
1216 continue;
1217
1218 ret = (*cb)(nb_node->snode, translator, data, arg);
1219 if (ret != NB_OK)
1220 return ret;
1221 } while (list_entry);
1222
1223 return NB_OK;
1224 }
1225
1226 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1227 const char *xpath_list,
1228 const void *parent_list_entry,
1229 const struct yang_list_keys *parent_list_keys,
1230 struct yang_translator *translator,
1231 uint32_t flags, nb_oper_data_cb cb, void *arg)
1232 {
1233 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1234 const void *list_entry = NULL;
1235 uint32_t position = 1;
1236
1237 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1238 return NB_OK;
1239
1240 /* Iterate over all list entries. */
1241 do {
1242 struct yang_list_keys list_keys;
1243 char xpath[XPATH_MAXLEN * 2];
1244 int ret;
1245
1246 /* Obtain list entry. */
1247 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1248 list_entry);
1249 if (!list_entry)
1250 /* End of the list. */
1251 break;
1252
1253 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1254 /* Obtain the list entry keys. */
1255 if (nb_callback_get_keys(nb_node, list_entry,
1256 &list_keys)
1257 != NB_OK) {
1258 flog_warn(EC_LIB_NB_CB_STATE,
1259 "%s: failed to get list keys",
1260 __func__);
1261 return NB_ERR;
1262 }
1263
1264 /* Build XPath of the list entry. */
1265 strlcpy(xpath, xpath_list, sizeof(xpath));
1266 for (unsigned int i = 0; i < list_keys.num; i++) {
1267 snprintf(xpath + strlen(xpath),
1268 sizeof(xpath) - strlen(xpath),
1269 "[%s='%s']", slist->keys[i]->name,
1270 list_keys.key[i]);
1271 }
1272 } else {
1273 /*
1274 * Keyless list - build XPath using a positional index.
1275 */
1276 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1277 position);
1278 position++;
1279 }
1280
1281 /* Iterate over the child nodes. */
1282 ret = nb_oper_data_iter_children(
1283 nb_node->snode, xpath, list_entry, &list_keys,
1284 translator, false, flags, cb, arg);
1285 if (ret != NB_OK)
1286 return ret;
1287 } while (list_entry);
1288
1289 return NB_OK;
1290 }
1291
1292 static int nb_oper_data_iter_node(const struct lys_node *snode,
1293 const char *xpath_parent,
1294 const void *list_entry,
1295 const struct yang_list_keys *list_keys,
1296 struct yang_translator *translator,
1297 bool first, uint32_t flags,
1298 nb_oper_data_cb cb, void *arg)
1299 {
1300 struct nb_node *nb_node;
1301 char xpath[XPATH_MAXLEN];
1302 int ret = NB_OK;
1303
1304 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1305 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1306 return NB_OK;
1307
1308 /* Update XPath. */
1309 strlcpy(xpath, xpath_parent, sizeof(xpath));
1310 if (!first && snode->nodetype != LYS_USES) {
1311 struct lys_node *parent;
1312
1313 /* Get the real parent. */
1314 parent = snode->parent;
1315 while (parent && parent->nodetype == LYS_USES)
1316 parent = parent->parent;
1317
1318 /*
1319 * When necessary, include the namespace of the augmenting
1320 * module.
1321 */
1322 if (parent && parent->nodetype == LYS_AUGMENT)
1323 snprintf(xpath + strlen(xpath),
1324 sizeof(xpath) - strlen(xpath), "/%s:%s",
1325 snode->module->name, snode->name);
1326 else
1327 snprintf(xpath + strlen(xpath),
1328 sizeof(xpath) - strlen(xpath), "/%s",
1329 snode->name);
1330 }
1331
1332 nb_node = snode->priv;
1333 switch (snode->nodetype) {
1334 case LYS_CONTAINER:
1335 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1336 list_keys, translator, flags,
1337 cb, arg);
1338 break;
1339 case LYS_LEAF:
1340 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1341 list_keys, translator, flags, cb,
1342 arg);
1343 break;
1344 case LYS_LEAFLIST:
1345 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1346 list_keys, translator, flags,
1347 cb, arg);
1348 break;
1349 case LYS_LIST:
1350 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1351 list_keys, translator, flags, cb,
1352 arg);
1353 break;
1354 case LYS_USES:
1355 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1356 list_keys, translator, false,
1357 flags, cb, arg);
1358 break;
1359 default:
1360 break;
1361 }
1362
1363 return ret;
1364 }
1365
1366 int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1367 uint32_t flags, nb_oper_data_cb cb, void *arg)
1368 {
1369 struct nb_node *nb_node;
1370 const void *list_entry = NULL;
1371 struct yang_list_keys list_keys;
1372 struct list *list_dnodes;
1373 struct lyd_node *dnode, *dn;
1374 struct listnode *ln;
1375 int ret;
1376
1377 nb_node = nb_node_find(xpath);
1378 if (!nb_node) {
1379 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1380 "%s: unknown data path: %s", __func__, xpath);
1381 return NB_ERR;
1382 }
1383
1384 /* For now this function works only with containers and lists. */
1385 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1386 flog_warn(
1387 EC_LIB_NB_OPERATIONAL_DATA,
1388 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1389 __func__, xpath);
1390 return NB_ERR;
1391 }
1392
1393 /*
1394 * Create a data tree from the XPath so that we can parse the keys of
1395 * all YANG lists (if any).
1396 */
1397 ly_errno = 0;
1398 dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
1399 LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
1400 if (!dnode) {
1401 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
1402 __func__);
1403 return NB_ERR;
1404 }
1405
1406 /*
1407 * Create a linked list of the list data nodes, ordered from the root.
1408 */
1409 list_dnodes = list_new();
1410 for (dn = dnode; dn; dn = dn->parent) {
1411 if (dn->schema->nodetype != LYS_LIST || !dn->child)
1412 continue;
1413 listnode_add_head(list_dnodes, dn);
1414 }
1415 /*
1416 * Use the northbound callbacks to find the list entry pointer
1417 * corresponding to the given XPath.
1418 */
1419 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1420 struct lyd_node *child;
1421 struct nb_node *nn;
1422 unsigned int n = 0;
1423
1424 /* Obtain the list entry keys. */
1425 memset(&list_keys, 0, sizeof(list_keys));
1426 LY_TREE_FOR (dn->child, child) {
1427 if (!lys_is_key((struct lys_node_leaf *)child->schema,
1428 NULL))
1429 continue;
1430 strlcpy(list_keys.key[n],
1431 yang_dnode_get_string(child, NULL),
1432 sizeof(list_keys.key[n]));
1433 n++;
1434 }
1435 list_keys.num = n;
1436 if (list_keys.num
1437 != ((struct lys_node_list *)dn->schema)->keys_size) {
1438 list_delete(&list_dnodes);
1439 yang_dnode_free(dnode);
1440 return NB_ERR_NOT_FOUND;
1441 }
1442
1443 /* Find the list entry pointer. */
1444 nn = dn->schema->priv;
1445 list_entry =
1446 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1447 if (list_entry == NULL) {
1448 list_delete(&list_dnodes);
1449 yang_dnode_free(dnode);
1450 return NB_ERR_NOT_FOUND;
1451 }
1452 }
1453
1454 /* If a list entry was given, iterate over that list entry only. */
1455 if (dnode->schema->nodetype == LYS_LIST && dnode->child)
1456 ret = nb_oper_data_iter_children(
1457 nb_node->snode, xpath, list_entry, &list_keys,
1458 translator, true, flags, cb, arg);
1459 else
1460 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1461 &list_keys, translator, true,
1462 flags, cb, arg);
1463
1464 list_delete(&list_dnodes);
1465 yang_dnode_free(dnode);
1466
1467 return ret;
1468 }
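/*
 * Usage sketch (illustrative; the xpath and callback are hypothetical): a
 * "show" command could walk the operational data of a subtree like this:
 *
 *     static int show_cb(const struct lys_node *snode,
 *                        struct yang_translator *translator,
 *                        struct yang_data *data, void *arg)
 *     {
 *             ... consume data->xpath and data->value ...
 *             return NB_OK;
 *     }
 *
 *     nb_oper_data_iterate("/frr-example:example/state", NULL, 0, show_cb,
 *                          NULL);
 */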
1469
1470 bool nb_operation_is_valid(enum nb_operation operation,
1471 const struct lys_node *snode)
1472 {
1473 struct nb_node *nb_node = snode->priv;
1474 struct lys_node_container *scontainer;
1475 struct lys_node_leaf *sleaf;
1476
1477 switch (operation) {
1478 case NB_OP_CREATE:
1479 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1480 return false;
1481
1482 switch (snode->nodetype) {
1483 case LYS_LEAF:
1484 sleaf = (struct lys_node_leaf *)snode;
1485 if (sleaf->type.base != LY_TYPE_EMPTY)
1486 return false;
1487 break;
1488 case LYS_CONTAINER:
1489 scontainer = (struct lys_node_container *)snode;
1490 if (!scontainer->presence)
1491 return false;
1492 break;
1493 case LYS_LIST:
1494 case LYS_LEAFLIST:
1495 break;
1496 default:
1497 return false;
1498 }
1499 return true;
1500 case NB_OP_MODIFY:
1501 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1502 return false;
1503
1504 switch (snode->nodetype) {
1505 case LYS_LEAF:
1506 sleaf = (struct lys_node_leaf *)snode;
1507 if (sleaf->type.base == LY_TYPE_EMPTY)
1508 return false;
1509
1510 /* List keys can't be modified. */
1511 if (lys_is_key(sleaf, NULL))
1512 return false;
1513 break;
1514 default:
1515 return false;
1516 }
1517 return true;
1518 case NB_OP_DESTROY:
1519 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1520 return false;
1521
1522 switch (snode->nodetype) {
1523 case LYS_LEAF:
1524 sleaf = (struct lys_node_leaf *)snode;
1525
1526 /* List keys can't be deleted. */
1527 if (lys_is_key(sleaf, NULL))
1528 return false;
1529
1530 /*
1531 * A leaf can only be deleted if it's optional or if its
1532 * parent is a case statement.
1533 */
1534 if (snode->parent->nodetype == LYS_CASE)
1535 return true;
1536 if (sleaf->when)
1537 return true;
1538 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
1539 || sleaf->dflt)
1540 return false;
1541 break;
1542 case LYS_CONTAINER:
1543 scontainer = (struct lys_node_container *)snode;
1544 if (!scontainer->presence)
1545 return false;
1546 break;
1547 case LYS_LIST:
1548 case LYS_LEAFLIST:
1549 break;
1550 default:
1551 return false;
1552 }
1553 return true;
1554 case NB_OP_MOVE:
1555 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1556 return false;
1557
1558 switch (snode->nodetype) {
1559 case LYS_LIST:
1560 case LYS_LEAFLIST:
1561 if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
1562 return false;
1563 break;
1564 default:
1565 return false;
1566 }
1567 return true;
1568 case NB_OP_PRE_VALIDATE:
1569 case NB_OP_APPLY_FINISH:
1570 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1571 return false;
1572 return true;
1573 case NB_OP_GET_ELEM:
1574 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1575 return false;
1576
1577 switch (snode->nodetype) {
1578 case LYS_LEAF:
1579 case LYS_LEAFLIST:
1580 break;
1581 case LYS_CONTAINER:
1582 scontainer = (struct lys_node_container *)snode;
1583 if (!scontainer->presence)
1584 return false;
1585 break;
1586 default:
1587 return false;
1588 }
1589 return true;
1590 case NB_OP_GET_NEXT:
1591 switch (snode->nodetype) {
1592 case LYS_LIST:
1593 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1594 return false;
1595 break;
1596 case LYS_LEAFLIST:
1597 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1598 return false;
1599 break;
1600 default:
1601 return false;
1602 }
1603 return true;
1604 case NB_OP_GET_KEYS:
1605 case NB_OP_LOOKUP_ENTRY:
1606 switch (snode->nodetype) {
1607 case LYS_LIST:
1608 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1609 return false;
1610 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
1611 return false;
1612 break;
1613 default:
1614 return false;
1615 }
1616 return true;
1617 case NB_OP_RPC:
1618 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1619 return false;
1620
1621 switch (snode->nodetype) {
1622 case LYS_RPC:
1623 case LYS_ACTION:
1624 break;
1625 default:
1626 return false;
1627 }
1628 return true;
1629 default:
1630 return false;
1631 }
1632 }
1633
1634 DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1635 (xpath, arguments));
1636
1637 int nb_notification_send(const char *xpath, struct list *arguments)
1638 {
1639 int ret;
1640
1641 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1642
1643 ret = hook_call(nb_notification_send, xpath, arguments);
1644 if (arguments)
1645 list_delete(&arguments);
1646
1647 return ret;
1648 }
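/*
 * Illustrative sketch (hypothetical notification and paths): a daemon emits
 * a YANG notification by building a list of yang_data elements, e.g.:
 *
 *     struct list *args = yang_data_list_new();
 *
 *     listnode_add(args, yang_data_new("/frr-example:event/interface",
 *                                      "eth0"));
 *     nb_notification_send("/frr-example:event", args);
 *
 * nb_notification_send() deletes the argument list after the hook runs.
 */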
1649
1650 /* Running configuration user pointers management. */
1651 struct nb_config_entry {
1652 char xpath[XPATH_MAXLEN];
1653 void *entry;
1654 };
1655
1656 static bool running_config_entry_cmp(const void *value1, const void *value2)
1657 {
1658 const struct nb_config_entry *c1 = value1;
1659 const struct nb_config_entry *c2 = value2;
1660
1661 return strmatch(c1->xpath, c2->xpath);
1662 }
1663
1664 static unsigned int running_config_entry_key_make(const void *value)
1665 {
1666 return string_hash_make(value);
1667 }
1668
1669 static void *running_config_entry_alloc(void *p)
1670 {
1671 struct nb_config_entry *new, *key = p;
1672
1673 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1674 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1675
1676 return new;
1677 }
1678
1679 static void running_config_entry_free(void *arg)
1680 {
1681 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
1682 }
1683
1684 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1685 {
1686 struct nb_config_entry *config, s;
1687
1688 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1689 config = hash_get(running_config_entries, &s,
1690 running_config_entry_alloc);
1691 config->entry = entry;
1692 }
1693
1694 static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
1695 {
1696 struct nb_config_entry *config, s;
1697 struct lyd_node *child;
1698 void *entry = NULL;
1699
1700 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1701 config = hash_release(running_config_entries, &s);
1702 if (config) {
1703 entry = config->entry;
1704 running_config_entry_free(config);
1705 }
1706
1707 /* Unset user pointers from the child nodes. */
1708 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
1709 LY_TREE_FOR (dnode->child, child) {
1710 (void)nb_running_unset_entry_helper(child);
1711 }
1712 }
1713
1714 return entry;
1715 }
1716
1717 void *nb_running_unset_entry(const struct lyd_node *dnode)
1718 {
1719 void *entry;
1720
1721 entry = nb_running_unset_entry_helper(dnode);
1722 assert(entry);
1723
1724 return entry;
1725 }
1726
1727 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1728 bool abort_if_not_found)
1729 {
1730 const struct lyd_node *orig_dnode = dnode;
1731 char xpath_buf[XPATH_MAXLEN];
1732
1733 assert(dnode || xpath);
1734
1735 if (!dnode)
1736 dnode = yang_dnode_get(running_config->dnode, xpath);
1737
1738 while (dnode) {
1739 struct nb_config_entry *config, s;
1740
1741 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1742 config = hash_lookup(running_config_entries, &s);
1743 if (config)
1744 return config->entry;
1745
1746 dnode = dnode->parent;
1747 }
1748
1749 if (!abort_if_not_found)
1750 return NULL;
1751
1752 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1753 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1754 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1755 zlog_backtrace(LOG_ERR);
1756 abort();
1757 }
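/*
 * Illustrative sketch (hypothetical daemon code): "create" callbacks usually
 * attach a daemon-specific structure to the data node being created, and
 * other callbacks retrieve it later, e.g.:
 *
 *     nb_running_set_entry(dnode, ifp);
 *     ...
 *     ifp = nb_running_get_entry(dnode, NULL, true);
 */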
1758
1759 /* Logging functions. */
1760 const char *nb_event_name(enum nb_event event)
1761 {
1762 switch (event) {
1763 case NB_EV_VALIDATE:
1764 return "validate";
1765 case NB_EV_PREPARE:
1766 return "prepare";
1767 case NB_EV_ABORT:
1768 return "abort";
1769 case NB_EV_APPLY:
1770 return "apply";
1771 default:
1772 return "unknown";
1773 }
1774 }
1775
1776 const char *nb_operation_name(enum nb_operation operation)
1777 {
1778 switch (operation) {
1779 case NB_OP_CREATE:
1780 return "create";
1781 case NB_OP_MODIFY:
1782 return "modify";
1783 case NB_OP_DESTROY:
1784 return "destroy";
1785 case NB_OP_MOVE:
1786 return "move";
1787 case NB_OP_PRE_VALIDATE:
1788 return "pre_validate";
1789 case NB_OP_APPLY_FINISH:
1790 return "apply_finish";
1791 case NB_OP_GET_ELEM:
1792 return "get_elem";
1793 case NB_OP_GET_NEXT:
1794 return "get_next";
1795 case NB_OP_GET_KEYS:
1796 return "get_keys";
1797 case NB_OP_LOOKUP_ENTRY:
1798 return "lookup_entry";
1799 case NB_OP_RPC:
1800 return "rpc";
1801 default:
1802 return "unknown";
1803 }
1804 }
1805
1806 const char *nb_err_name(enum nb_error error)
1807 {
1808 switch (error) {
1809 case NB_OK:
1810 return "ok";
1811 case NB_ERR:
1812 return "generic error";
1813 case NB_ERR_NO_CHANGES:
1814 return "no changes";
1815 case NB_ERR_NOT_FOUND:
1816 return "element not found";
1817 case NB_ERR_LOCKED:
1818 return "resource is locked";
1819 case NB_ERR_VALIDATION:
1820 return "validation error";
1821 case NB_ERR_RESOURCE:
1822 return "failed to allocate resource";
1823 case NB_ERR_INCONSISTENCY:
1824 return "internal inconsistency";
1825 default:
1826 return "unknown";
1827 }
1828 }
1829
1830 const char *nb_client_name(enum nb_client client)
1831 {
1832 switch (client) {
1833 case NB_CLIENT_CLI:
1834 return "CLI";
1835 case NB_CLIENT_CONFD:
1836 return "ConfD";
1837 case NB_CLIENT_SYSREPO:
1838 return "Sysrepo";
1839 case NB_CLIENT_GRPC:
1840 return "gRPC";
1841 default:
1842 return "unknown";
1843 }
1844 }
1845
1846 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1847 {
1848 for (size_t i = 0; module->nodes[i].xpath; i++) {
1849 struct nb_node *nb_node;
1850 uint32_t priority;
1851
1852 nb_node = nb_node_find(module->nodes[i].xpath);
1853 if (!nb_node) {
1854 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1855 "%s: unknown data path: %s", __func__,
1856 module->nodes[i].xpath);
1857 continue;
1858 }
1859
1860 nb_node->cbs = module->nodes[i].cbs;
1861 priority = module->nodes[i].priority;
1862 if (priority != 0)
1863 nb_node->priority = priority;
1864 }
1865 }
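/*
 * Illustrative sketch (assumption; the module, xpath and callback are
 * hypothetical): daemons describe their callbacks with a frr_yang_module_info
 * table whose entries are matched against the schema by nb_load_callbacks():
 *
 *     const struct frr_yang_module_info frr_example_info = {
 *             .name = "frr-example",
 *             .nodes = {
 *                     {
 *                             .xpath = "/frr-example:example/enabled",
 *                             .cbs = {
 *                                     .modify = example_enabled_modify,
 *                             },
 *                     },
 *                     {
 *                             .xpath = NULL,
 *                     },
 *             }
 *     };
 */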
1866
1867 void nb_init(struct thread_master *tm,
1868 const struct frr_yang_module_info *modules[], size_t nmodules)
1869 {
1870 unsigned int errors = 0;
1871
1872 /* Load YANG modules. */
1873 for (size_t i = 0; i < nmodules; i++)
1874 yang_module_load(modules[i]->name);
1875
1876 /* Create a nb_node for all YANG schema nodes. */
1877 nb_nodes_create();
1878
1879 /* Load northbound callbacks. */
1880 for (size_t i = 0; i < nmodules; i++)
1881 nb_load_callbacks(modules[i]);
1882
1883 /* Validate northbound callbacks. */
1884 yang_snodes_iterate_all(nb_node_validate, 0, &errors);
1885 if (errors > 0) {
1886 flog_err(
1887 EC_LIB_NB_CBS_VALIDATION,
1888 "%s: failed to validate northbound callbacks: %u error(s)",
1889 __func__, errors);
1890 exit(1);
1891 }
1892
1893 /* Create an empty running configuration. */
1894 running_config = nb_config_new(NULL);
1895 running_config_entries = hash_create(running_config_entry_key_make,
1896 running_config_entry_cmp,
1897 "Running Configuration Entries");
1898 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
1899
1900 /* Initialize the northbound CLI. */
1901 nb_cli_init(tm);
1902 }
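/*
 * Illustrative sketch (hypothetical daemon; "master" is the daemon's
 * thread_master): nb_init() receives the array of YANG modules supported by
 * the daemon, e.g.:
 *
 *     static const struct frr_yang_module_info *modules[] = {
 *             &frr_example_info,
 *     };
 *
 *     nb_init(master, modules, array_size(modules));
 */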
1903
1904 void nb_terminate(void)
1905 {
1906 /* Terminate the northbound CLI. */
1907 nb_cli_terminate();
1908
1909 /* Delete all nb_node's from all YANG modules. */
1910 nb_nodes_delete();
1911
1912 /* Delete the running configuration. */
1913 hash_clean(running_config_entries, running_config_entry_free);
1914 hash_free(running_config_entries);
1915 nb_config_free(running_config);
1916 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
1917 }