/* Source: lib/northbound.c — FRR (Free Range Routing), via the
 * git.proxmox.com mirror of mirror_frr.git. */
1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "northbound.h"
30 #include "northbound_cli.h"
31 #include "northbound_db.h"
32
/* Memory types for northbound allocations (freed with matching XFREE). */
DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")

/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;

/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;

/* Management lock for the running configuration. */
static struct {
	/* Mutex protecting this structure. */
	pthread_mutex_t mtx;

	/* Actual lock. */
	bool locked;

	/* Northbound client who owns this lock. */
	enum nb_client owner_client;

	/* Northbound user who owns this lock. */
	const void *owner_user;
} running_config_mgmt_lock;

/*
 * Global lock used to prevent multiple configuration transactions from
 * happening concurrently.  Set by nb_transaction_new() and cleared by
 * nb_transaction_free().
 */
static bool transaction_in_progress;

/* Forward declarations of helpers defined later in this file. */
static int nb_callback_configuration(const enum nb_event event,
				     struct nb_config_change *change);
static struct nb_transaction *nb_transaction_new(struct nb_config *config,
						 struct nb_config_cbs *changes,
						 enum nb_client client,
						 const void *user,
						 const char *comment);
static void nb_transaction_free(struct nb_transaction *transaction);
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction);
static void nb_transaction_apply_finish(struct nb_transaction *transaction);
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg);
81
82 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
83 {
84 bool *config_only = arg;
85
86 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
87 *config_only = false;
88 return YANG_ITER_STOP;
89 }
90
91 return YANG_ITER_CONTINUE;
92 }
93
94 static int nb_node_new_cb(const struct lys_node *snode, void *arg)
95 {
96 struct nb_node *nb_node;
97 struct lys_node *sparent, *sparent_list;
98
99 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
100 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
101 sizeof(nb_node->xpath));
102 nb_node->priority = NB_DFLT_PRIORITY;
103 sparent = yang_snode_real_parent(snode);
104 if (sparent)
105 nb_node->parent = sparent->priv;
106 sparent_list = yang_snode_parent_list(snode);
107 if (sparent_list)
108 nb_node->parent_list = sparent_list->priv;
109
110 /* Set flags. */
111 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
112 bool config_only = true;
113
114 yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
115 YANG_ITER_ALLOW_AUGMENTATIONS,
116 &config_only);
117 if (config_only)
118 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
119 }
120 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
121 struct lys_node_list *slist;
122
123 slist = (struct lys_node_list *)snode;
124 if (slist->keys_size == 0)
125 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
126 }
127
128 /*
129 * Link the northbound node and the libyang schema node with one
130 * another.
131 */
132 nb_node->snode = snode;
133 lys_set_private(snode, nb_node);
134
135 return YANG_ITER_CONTINUE;
136 }
137
138 static int nb_node_del_cb(const struct lys_node *snode, void *arg)
139 {
140 struct nb_node *nb_node;
141
142 nb_node = snode->priv;
143 lys_set_private(snode, NULL);
144 XFREE(MTYPE_NB_NODE, nb_node);
145
146 return YANG_ITER_CONTINUE;
147 }
148
/* Create a northbound node for every node in the loaded YANG schemas. */
void nb_nodes_create(void)
{
	yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
}
153
/* Delete all northbound nodes created by nb_nodes_create(). */
void nb_nodes_delete(void)
{
	yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
}
158
159 struct nb_node *nb_node_find(const char *xpath)
160 {
161 const struct lys_node *snode;
162
163 /*
164 * Use libyang to find the schema node associated to the xpath and get
165 * the northbound node from there (snode private pointer).
166 */
167 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
168 if (!snode)
169 return NULL;
170
171 return snode->priv;
172 }
173
174 static int nb_node_validate_cb(const struct nb_node *nb_node,
175 enum nb_operation operation,
176 int callback_implemented, bool optional)
177 {
178 bool valid;
179
180 valid = nb_operation_is_valid(operation, nb_node->snode);
181
182 if (!valid && callback_implemented)
183 flog_warn(EC_LIB_NB_CB_UNNEEDED,
184 "unneeded '%s' callback for '%s'",
185 nb_operation_name(operation), nb_node->xpath);
186
187 if (!optional && valid && !callback_implemented) {
188 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
189 nb_operation_name(operation), nb_node->xpath);
190 return 1;
191 }
192
193 return 0;
194 }
195
196 /*
197 * Check if the required callbacks were implemented for the given northbound
198 * node.
199 */
200 static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
201
202 {
203 unsigned int error = 0;
204
205 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
206 !!nb_node->cbs.create, false);
207 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
208 !!nb_node->cbs.modify, false);
209 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
210 !!nb_node->cbs.destroy, false);
211 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
212 false);
213 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
214 !!nb_node->cbs.apply_finish, true);
215 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
216 !!nb_node->cbs.get_elem, false);
217 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
218 !!nb_node->cbs.get_next, false);
219 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
220 !!nb_node->cbs.get_keys, false);
221 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
222 !!nb_node->cbs.lookup_entry, false);
223 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
224 false);
225
226 return error;
227 }
228
229 static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
230 {
231 /* Top-level nodes can have any priority. */
232 if (!nb_node->parent)
233 return 0;
234
235 if (nb_node->priority < nb_node->parent->priority) {
236 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
237 "node has higher priority than its parent [xpath %s]",
238 nb_node->xpath);
239 return 1;
240 }
241
242 return 0;
243 }
244
245 static int nb_node_validate(const struct lys_node *snode, void *arg)
246 {
247 struct nb_node *nb_node = snode->priv;
248 unsigned int *errors = arg;
249
250 /* Validate callbacks and priority. */
251 *errors += nb_node_validate_cbs(nb_node);
252 *errors += nb_node_validate_priority(nb_node);
253
254 return YANG_ITER_CONTINUE;
255 }
256
257 struct nb_config *nb_config_new(struct lyd_node *dnode)
258 {
259 struct nb_config *config;
260
261 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
262 if (dnode)
263 config->dnode = dnode;
264 else
265 config->dnode = yang_dnode_new(ly_native_ctx, true);
266 config->version = 0;
267 pthread_rwlock_init(&config->lock, NULL);
268
269 return config;
270 }
271
272 void nb_config_free(struct nb_config *config)
273 {
274 if (config->dnode)
275 yang_dnode_free(config->dnode);
276 pthread_rwlock_destroy(&config->lock);
277 XFREE(MTYPE_NB_CONFIG, config);
278 }
279
280 struct nb_config *nb_config_dup(const struct nb_config *config)
281 {
282 struct nb_config *dup;
283
284 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
285 dup->dnode = yang_dnode_dup(config->dnode);
286 dup->version = config->version;
287 pthread_rwlock_init(&dup->lock, NULL);
288
289 return dup;
290 }
291
292 int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
293 bool preserve_source)
294 {
295 int ret;
296
297 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
298 if (ret != 0)
299 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
300
301 if (!preserve_source)
302 nb_config_free(config_src);
303
304 return (ret == 0) ? NB_OK : NB_ERR;
305 }
306
307 void nb_config_replace(struct nb_config *config_dst,
308 struct nb_config *config_src, bool preserve_source)
309 {
310 /* Update version. */
311 if (config_src->version != 0)
312 config_dst->version = config_src->version;
313
314 /* Update dnode. */
315 if (config_dst->dnode)
316 yang_dnode_free(config_dst->dnode);
317 if (preserve_source) {
318 config_dst->dnode = yang_dnode_dup(config_src->dnode);
319 } else {
320 config_dst->dnode = config_src->dnode;
321 config_src->dnode = NULL;
322 nb_config_free(config_src);
323 }
324 }
325
326 /* Generate the nb_config_cbs tree. */
327 static inline int nb_config_cb_compare(const struct nb_config_cb *a,
328 const struct nb_config_cb *b)
329 {
330 /* Sort by priority first. */
331 if (a->nb_node->priority < b->nb_node->priority)
332 return -1;
333 if (a->nb_node->priority > b->nb_node->priority)
334 return 1;
335
336 /*
337 * Use XPath as a tie-breaker. This will naturally sort parent nodes
338 * before their children.
339 */
340 return strcmp(a->xpath, b->xpath);
341 }
342 RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
343
344 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
345 enum nb_operation operation,
346 const struct lyd_node *dnode)
347 {
348 struct nb_config_change *change;
349
350 change = XCALLOC(MTYPE_TMP, sizeof(*change));
351 change->cb.operation = operation;
352 change->cb.nb_node = dnode->schema->priv;
353 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
354 change->cb.dnode = dnode;
355
356 RB_INSERT(nb_config_cbs, changes, &change->cb);
357 }
358
359 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
360 {
361 while (!RB_EMPTY(nb_config_cbs, changes)) {
362 struct nb_config_change *change;
363
364 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
365 changes);
366 RB_REMOVE(nb_config_cbs, changes, &change->cb);
367 XFREE(MTYPE_TMP, change);
368 }
369 }
370
371 /*
372 * Helper function used when calculating the delta between two different
373 * configurations. Given a new subtree, calculate all new YANG data nodes,
374 * excluding default leafs and leaf-lists. This is a recursive function.
375 */
376 static void nb_config_diff_created(const struct lyd_node *dnode,
377 struct nb_config_cbs *changes)
378 {
379 enum nb_operation operation;
380 struct lyd_node *child;
381
382 switch (dnode->schema->nodetype) {
383 case LYS_LEAF:
384 case LYS_LEAFLIST:
385 if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
386 break;
387
388 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
389 operation = NB_OP_CREATE;
390 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
391 operation = NB_OP_MODIFY;
392 else
393 return;
394
395 nb_config_diff_add_change(changes, operation, dnode);
396 break;
397 case LYS_CONTAINER:
398 case LYS_LIST:
399 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
400 nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);
401
402 /* Process child nodes recursively. */
403 LY_TREE_FOR (dnode->child, child) {
404 nb_config_diff_created(child, changes);
405 }
406 break;
407 default:
408 break;
409 }
410 }
411
412 static void nb_config_diff_deleted(const struct lyd_node *dnode,
413 struct nb_config_cbs *changes)
414 {
415 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
416 nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
417 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
418 struct lyd_node *child;
419
420 /*
421 * Non-presence containers need special handling since they
422 * don't have "destroy" callbacks. In this case, what we need to
423 * do is to call the "destroy" callbacks of their child nodes
424 * when applicable (i.e. optional nodes).
425 */
426 LY_TREE_FOR (dnode->child, child) {
427 nb_config_diff_deleted(child, changes);
428 }
429 }
430 }
431
432 /* Calculate the delta between two different configurations. */
433 static void nb_config_diff(const struct nb_config *config1,
434 const struct nb_config *config2,
435 struct nb_config_cbs *changes)
436 {
437 struct lyd_difflist *diff;
438
439 diff = lyd_diff(config1->dnode, config2->dnode,
440 LYD_DIFFOPT_WITHDEFAULTS);
441 assert(diff);
442
443 for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
444 LYD_DIFFTYPE type;
445 struct lyd_node *dnode;
446
447 type = diff->type[i];
448
449 switch (type) {
450 case LYD_DIFF_CREATED:
451 dnode = diff->second[i];
452 nb_config_diff_created(dnode, changes);
453 break;
454 case LYD_DIFF_DELETED:
455 dnode = diff->first[i];
456 nb_config_diff_deleted(dnode, changes);
457 break;
458 case LYD_DIFF_CHANGED:
459 dnode = diff->second[i];
460 nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
461 break;
462 case LYD_DIFF_MOVEDAFTER1:
463 case LYD_DIFF_MOVEDAFTER2:
464 default:
465 continue;
466 }
467 }
468
469 lyd_free_diff(diff);
470 }
471
/*
 * Apply a single edit (create/modify/destroy/move) to the candidate
 * configuration at the given XPath.
 *
 * Returns NB_OK on success, NB_ERR_NOT_FOUND when destroying a node that
 * doesn't exist (so the caller can choose to ignore it), or NB_ERR on
 * other failures.
 */
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode;
	char xpath_edit[XPATH_MAXLEN];

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		/*
		 * Clear ly_errno first: lyd_new_path() returns NULL both on
		 * error and when the node already existed, so ly_errno is
		 * what disambiguates the two cases below.
		 */
		ly_errno = 0;
		dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
				     xpath_edit, (void *)data->value, 0,
				     LYD_PATH_OPT_UPDATE);
		if (!dnode && ly_errno) {
			flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
				  __func__);
			return NB_ERR;
		}

		/*
		 * If a new node was created, call lyd_validate() only to create
		 * default child nodes.
		 */
		if (dnode) {
			lyd_schema_sort(dnode, 0);
			lyd_validate(&dnode,
				     LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
				     ly_native_ctx);
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		lyd_free(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	default:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}
534
535 bool nb_candidate_needs_update(const struct nb_config *candidate)
536 {
537 bool ret = false;
538
539 pthread_rwlock_rdlock(&running_config->lock);
540 {
541 if (candidate->version < running_config->version)
542 ret = true;
543 }
544 pthread_rwlock_unlock(&running_config->lock);
545
546 return ret;
547 }
548
549 int nb_candidate_update(struct nb_config *candidate)
550 {
551 struct nb_config *updated_config;
552
553 pthread_rwlock_rdlock(&running_config->lock);
554 {
555 updated_config = nb_config_dup(running_config);
556 }
557 pthread_rwlock_unlock(&running_config->lock);
558
559 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
560 return NB_ERR;
561
562 nb_config_replace(candidate, updated_config, false);
563
564 return NB_OK;
565 }
566
567 /*
568 * Perform YANG syntactic and semantic validation.
569 *
570 * WARNING: lyd_validate() can change the configuration as part of the
571 * validation process.
572 */
573 static int nb_candidate_validate_yang(struct nb_config *candidate)
574 {
575 if (lyd_validate(&candidate->dnode,
576 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
577 ly_native_ctx)
578 != 0)
579 return NB_ERR_VALIDATION;
580
581 return NB_OK;
582 }
583
584 /* Perform code-level validation using the northbound callbacks. */
585 static int nb_candidate_validate_changes(struct nb_config *candidate,
586 struct nb_config_cbs *changes)
587 {
588 struct nb_config_cb *cb;
589
590 RB_FOREACH (cb, nb_config_cbs, changes) {
591 struct nb_config_change *change = (struct nb_config_change *)cb;
592 int ret;
593
594 ret = nb_callback_configuration(NB_EV_VALIDATE, change);
595 if (ret != NB_OK)
596 return NB_ERR_VALIDATION;
597 }
598
599 return NB_OK;
600 }
601
602 int nb_candidate_validate(struct nb_config *candidate)
603 {
604 struct nb_config_cbs changes;
605 int ret;
606
607 if (nb_candidate_validate_yang(candidate) != NB_OK)
608 return NB_ERR_VALIDATION;
609
610 RB_INIT(nb_config_cbs, &changes);
611 pthread_rwlock_rdlock(&running_config->lock);
612 {
613 nb_config_diff(running_config, candidate, &changes);
614 ret = nb_candidate_validate_changes(candidate, &changes);
615 nb_config_diff_del_changes(&changes);
616 }
617 pthread_rwlock_unlock(&running_config->lock);
618
619 return ret;
620 }
621
/*
 * Validate the candidate, compute its diff against running and create a
 * transaction, then run the 'prepare' phase of all configuration
 * callbacks.  On success, *transaction is set and must later be passed to
 * nb_candidate_commit_apply() or nb_candidate_commit_abort().
 *
 * Returns NB_OK, NB_ERR_VALIDATION, NB_ERR_NO_CHANGES, NB_ERR_LOCKED, or
 * whatever the 'prepare' phase returns.
 */
int nb_candidate_commit_prepare(struct nb_config *candidate,
				enum nb_client client, const void *user,
				const char *comment,
				struct nb_transaction **transaction)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate) != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	/* Hold the running config read lock: 'changes' contains pointers
	 * into the running data tree.  Note the early returns below must
	 * (and do) release the lock themselves. */
	pthread_rwlock_rdlock(&running_config->lock);
	{
		nb_config_diff(running_config, candidate, &changes);
		if (RB_EMPTY(nb_config_cbs, &changes)) {
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_NO_CHANGES;
		}

		if (nb_candidate_validate_changes(candidate, &changes)
		    != NB_OK) {
			flog_warn(
				EC_LIB_NB_CANDIDATE_INVALID,
				"%s: failed to validate candidate configuration",
				__func__);
			nb_config_diff_del_changes(&changes);
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_VALIDATION;
		}

		/* The transaction takes ownership of 'changes' on success. */
		*transaction = nb_transaction_new(candidate, &changes, client,
						  user, comment);
		if (*transaction == NULL) {
			flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
				  "%s: failed to create transaction", __func__);
			nb_config_diff_del_changes(&changes);
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_LOCKED;
		}
	}
	pthread_rwlock_unlock(&running_config->lock);

	return nb_transaction_process(NB_EV_PREPARE, *transaction);
}
670
/*
 * Abort a prepared transaction: run the 'abort' phase of the callbacks
 * (releasing resources acquired during 'prepare') and free the
 * transaction.
 */
void nb_candidate_commit_abort(struct nb_transaction *transaction)
{
	(void)nb_transaction_process(NB_EV_ABORT, transaction);
	nb_transaction_free(transaction);
}
676
/*
 * Apply a prepared transaction: run the 'apply' phase of the callbacks,
 * call the 'apply_finish' callbacks, promote the candidate to running,
 * optionally record the transaction, and free it.
 */
void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id)
{
	/* Failures in the 'apply' phase can't be rolled back anymore,
	 * hence the ignored return value. */
	(void)nb_transaction_process(NB_EV_APPLY, transaction);
	nb_transaction_apply_finish(transaction);

	/* Replace running by candidate. */
	transaction->config->version++;
	pthread_rwlock_wrlock(&running_config->lock);
	{
		nb_config_replace(running_config, transaction->config, true);
	}
	pthread_rwlock_unlock(&running_config->lock);

	/* Record transaction. */
	if (save_transaction
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}
699
700 int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
701 const void *user, bool save_transaction,
702 const char *comment, uint32_t *transaction_id)
703 {
704 struct nb_transaction *transaction = NULL;
705 int ret;
706
707 ret = nb_candidate_commit_prepare(candidate, client, user, comment,
708 &transaction);
709 /*
710 * Apply the changes if the preparation phase succeeded. Otherwise abort
711 * the transaction.
712 */
713 if (ret == NB_OK)
714 nb_candidate_commit_apply(transaction, save_transaction,
715 transaction_id);
716 else if (transaction != NULL)
717 nb_candidate_commit_abort(transaction);
718
719 return ret;
720 }
721
722 int nb_running_lock(enum nb_client client, const void *user)
723 {
724 int ret = -1;
725
726 pthread_mutex_lock(&running_config_mgmt_lock.mtx);
727 {
728 if (!running_config_mgmt_lock.locked) {
729 running_config_mgmt_lock.locked = true;
730 running_config_mgmt_lock.owner_client = client;
731 running_config_mgmt_lock.owner_user = user;
732 ret = 0;
733 }
734 }
735 pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
736
737 return ret;
738 }
739
740 int nb_running_unlock(enum nb_client client, const void *user)
741 {
742 int ret = -1;
743
744 pthread_mutex_lock(&running_config_mgmt_lock.mtx);
745 {
746 if (running_config_mgmt_lock.locked
747 && running_config_mgmt_lock.owner_client == client
748 && running_config_mgmt_lock.owner_user == user) {
749 running_config_mgmt_lock.locked = false;
750 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
751 running_config_mgmt_lock.owner_user = NULL;
752 ret = 0;
753 }
754 }
755 pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
756
757 return ret;
758 }
759
760 int nb_running_lock_check(enum nb_client client, const void *user)
761 {
762 int ret = -1;
763
764 pthread_mutex_lock(&running_config_mgmt_lock.mtx);
765 {
766 if (!running_config_mgmt_lock.locked
767 || (running_config_mgmt_lock.owner_client == client
768 && running_config_mgmt_lock.owner_user == user))
769 ret = 0;
770 }
771 pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
772
773 return ret;
774 }
775
776 static void nb_log_callback(const enum nb_event event,
777 enum nb_operation operation, const char *xpath,
778 const char *value)
779 {
780 zlog_debug(
781 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
782 nb_event_name(event), nb_operation_name(operation), xpath,
783 value ? value : "(NULL)");
784 }
785
/*
 * Call the northbound configuration callback associated to a given
 * configuration change, for the given transaction phase (event).
 *
 * Returns the callback's result; failures are logged with a severity that
 * depends on the phase (errors during 'apply' can no longer be rolled
 * back and are logged as LOG_ERR).  Unknown operations/events indicate a
 * programming error and terminate the process.
 */
static int nb_callback_configuration(const enum nb_event event,
				     struct nb_config_change *change)
{
	enum nb_operation operation = change->cb.operation;
	const char *xpath = change->cb.xpath;
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		const char *value = "(none)";

		if (dnode && !yang_snode_is_typeless_data(dnode->schema))
			value = yang_dnode_get_string(dnode, NULL);

		nb_log_callback(event, operation, xpath, value);
	}

	/* No resource is allocated during the 'validate' phase. */
	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	switch (operation) {
	case NB_OP_CREATE:
		ret = (*nb_node->cbs.create)(event, dnode, resource);
		break;
	case NB_OP_MODIFY:
		ret = (*nb_node->cbs.modify)(event, dnode, resource);
		break;
	case NB_OP_DESTROY:
		ret = (*nb_node->cbs.destroy)(event, dnode);
		break;
	case NB_OP_MOVE:
		ret = (*nb_node->cbs.move)(event, dnode);
		break;
	default:
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		int priority;
		enum lib_log_refs ref;

		/* Map the phase to a log severity and error reference. */
		switch (event) {
		case NB_EV_VALIDATE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
			break;
		case NB_EV_PREPARE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_PREPARE;
			break;
		case NB_EV_ABORT:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_ABORT;
			break;
		case NB_EV_APPLY:
			priority = LOG_ERR;
			ref = EC_LIB_NB_CB_CONFIG_APPLY;
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]",
				 __func__, event, xpath);
			exit(1);
		}

		flog(priority, ref,
		     "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
		     __func__, nb_err_name(ret), nb_event_name(event),
		     nb_operation_name(operation), xpath);
	}

	return ret;
}
870
871 struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
872 const char *xpath,
873 const void *list_entry)
874 {
875 DEBUGD(&nb_dbg_cbs_state,
876 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
877 xpath, list_entry);
878
879 return nb_node->cbs.get_elem(xpath, list_entry);
880 }
881
882 const void *nb_callback_get_next(const struct nb_node *nb_node,
883 const void *parent_list_entry,
884 const void *list_entry)
885 {
886 DEBUGD(&nb_dbg_cbs_state,
887 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
888 nb_node->xpath, parent_list_entry, list_entry);
889
890 return nb_node->cbs.get_next(parent_list_entry, list_entry);
891 }
892
893 int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
894 struct yang_list_keys *keys)
895 {
896 DEBUGD(&nb_dbg_cbs_state,
897 "northbound callback (get_keys): node [%s] list_entry [%p]",
898 nb_node->xpath, list_entry);
899
900 return nb_node->cbs.get_keys(list_entry, keys);
901 }
902
903 const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
904 const void *parent_list_entry,
905 const struct yang_list_keys *keys)
906 {
907 DEBUGD(&nb_dbg_cbs_state,
908 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
909 nb_node->xpath, parent_list_entry);
910
911 return nb_node->cbs.lookup_entry(parent_list_entry, keys);
912 }
913
914 int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
915 const struct list *input, struct list *output)
916 {
917 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
918
919 return nb_node->cbs.rpc(xpath, input, output);
920 }
921
/*
 * Create a new configuration transaction.  Fails (returns NULL) when the
 * running configuration is locked by another client/user or when another
 * transaction is already in progress.  On success the transaction takes
 * ownership of 'changes' (copied by value) and of the global
 * transaction_in_progress flag, which nb_transaction_free() clears.
 */
static struct nb_transaction *
nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
		   enum nb_client client, const void *user, const char *comment)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(client, user)) {
		flog_warn(
			EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			"%s: running configuration is locked by another client",
			__func__);
		return NULL;
	}

	if (transaction_in_progress) {
		flog_warn(
			EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			"%s: error - there's already another transaction in progress",
			__func__);
		return NULL;
	}
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->client = client;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	/* Shallow copy: the RB tree's nodes are now owned by the
	 * transaction and freed in nb_transaction_free(). */
	transaction->changes = *changes;

	return transaction;
}
955
/*
 * Free a transaction and its recorded changes, and clear the global
 * in-progress flag so a new transaction can be created.
 */
static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	transaction_in_progress = false;
}
962
/*
 * Process all configuration changes associated to a transaction, invoking
 * each change's callback for the given phase (event).
 *
 * During NB_EV_PREPARE the first failing callback aborts the loop and its
 * error is returned; during NB_EV_ABORT/NB_EV_APPLY failures are ignored
 * (see comment below).  Always returns NB_OK for those phases.
 */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction)
{
	struct nb_config_cb *cb;

	/*
	 * Need to lock the running configuration since transaction->changes
	 * can contain pointers to data nodes from the running configuration.
	 */
	pthread_rwlock_rdlock(&running_config->lock);
	{
		RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
			struct nb_config_change *change =
				(struct nb_config_change *)cb;
			int ret;

			/*
			 * Only try to release resources that were allocated
			 * successfully.
			 */
			if (event == NB_EV_ABORT && change->prepare_ok == false)
				break;

			/* Call the appropriate callback. */
			ret = nb_callback_configuration(event, change);
			switch (event) {
			case NB_EV_PREPARE:
				if (ret != NB_OK) {
					/* Early return: release the lock
					 * before propagating the error. */
					pthread_rwlock_unlock(
						&running_config->lock);
					return ret;
				}
				change->prepare_ok = true;
				break;
			case NB_EV_ABORT:
			case NB_EV_APPLY:
				/*
				 * At this point it's not possible to reject the
				 * transaction anymore, so any failure here can
				 * lead to inconsistencies and should be treated
				 * as a bug. Operations prone to errors, like
				 * validations and resource allocations, should
				 * be performed during the 'prepare' phase.
				 */
				break;
			default:
				break;
			}
		}
	}
	pthread_rwlock_unlock(&running_config->lock);

	return NB_OK;
}
1018
1019 static struct nb_config_cb *
1020 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1021 const struct nb_node *nb_node,
1022 const struct lyd_node *dnode)
1023 {
1024 struct nb_config_cb *cb;
1025
1026 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1027 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1028 cb->nb_node = nb_node;
1029 cb->dnode = dnode;
1030 RB_INSERT(nb_config_cbs, cbs, cb);
1031
1032 return cb;
1033 }
1034
1035 static struct nb_config_cb *
1036 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1037 const struct nb_node *nb_node)
1038 {
1039 struct nb_config_cb s;
1040
1041 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1042 s.nb_node = nb_node;
1043 return RB_FIND(nb_config_cbs, cbs, &s);
1044 }
1045
/*
 * Call the 'apply_finish' callbacks for all changes in the transaction,
 * at most once per data node, walking from each changed node up to the
 * root of the data tree.
 */
static void nb_transaction_apply_finish(struct nb_transaction *transaction)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = dnode->parent;
			/* NOTE(review): this 'break' exits the whole
			 * RB_FOREACH when a top-level node is destroyed,
			 * skipping the remaining changes — 'continue'
			 * looks like the intent; confirm upstream. */
			if (!dnode)
				break;

			/*
			 * The dnode from 'delete' callbacks point to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			char xpath[XPATH_MAXLEN];
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
				goto next;

			nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);

		next:
			dnode = dnode->parent;
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs) {
		if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
			nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
					cb->xpath, NULL);

		(*cb->nb_node->cbs.apply_finish)(cb->dnode);
	}

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}
1122
1123 static int nb_oper_data_iter_children(const struct lys_node *snode,
1124 const char *xpath, const void *list_entry,
1125 const struct yang_list_keys *list_keys,
1126 struct yang_translator *translator,
1127 bool first, uint32_t flags,
1128 nb_oper_data_cb cb, void *arg)
1129 {
1130 struct lys_node *child;
1131
1132 LY_TREE_FOR (snode->child, child) {
1133 int ret;
1134
1135 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1136 list_keys, translator, false,
1137 flags, cb, arg);
1138 if (ret != NB_OK)
1139 return ret;
1140 }
1141
1142 return NB_OK;
1143 }
1144
1145 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1146 const char *xpath, const void *list_entry,
1147 const struct yang_list_keys *list_keys,
1148 struct yang_translator *translator,
1149 uint32_t flags, nb_oper_data_cb cb, void *arg)
1150 {
1151 struct yang_data *data;
1152
1153 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1154 return NB_OK;
1155
1156 /* Ignore list keys. */
1157 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1158 return NB_OK;
1159
1160 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1161 if (data == NULL)
1162 /* Leaf of type "empty" is not present. */
1163 return NB_OK;
1164
1165 return (*cb)(nb_node->snode, translator, data, arg);
1166 }
1167
1168 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1169 const char *xpath,
1170 const void *list_entry,
1171 const struct yang_list_keys *list_keys,
1172 struct yang_translator *translator,
1173 uint32_t flags, nb_oper_data_cb cb,
1174 void *arg)
1175 {
1176 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1177 return NB_OK;
1178
1179 /* Presence containers. */
1180 if (nb_node->cbs.get_elem) {
1181 struct yang_data *data;
1182 int ret;
1183
1184 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1185 if (data == NULL)
1186 /* Presence container is not present. */
1187 return NB_OK;
1188
1189 ret = (*cb)(nb_node->snode, translator, data, arg);
1190 if (ret != NB_OK)
1191 return ret;
1192 }
1193
1194 /* Iterate over the child nodes. */
1195 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1196 list_keys, translator, false, flags,
1197 cb, arg);
1198 }
1199
1200 static int
1201 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1202 const void *parent_list_entry,
1203 const struct yang_list_keys *parent_list_keys,
1204 struct yang_translator *translator, uint32_t flags,
1205 nb_oper_data_cb cb, void *arg)
1206 {
1207 const void *list_entry = NULL;
1208
1209 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1210 return NB_OK;
1211
1212 do {
1213 struct yang_data *data;
1214 int ret;
1215
1216 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1217 list_entry);
1218 if (!list_entry)
1219 /* End of the list. */
1220 break;
1221
1222 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1223 if (data == NULL)
1224 continue;
1225
1226 ret = (*cb)(nb_node->snode, translator, data, arg);
1227 if (ret != NB_OK)
1228 return ret;
1229 } while (list_entry);
1230
1231 return NB_OK;
1232 }
1233
1234 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1235 const char *xpath_list,
1236 const void *parent_list_entry,
1237 const struct yang_list_keys *parent_list_keys,
1238 struct yang_translator *translator,
1239 uint32_t flags, nb_oper_data_cb cb, void *arg)
1240 {
1241 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1242 const void *list_entry = NULL;
1243 uint32_t position = 1;
1244
1245 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1246 return NB_OK;
1247
1248 /* Iterate over all list entries. */
1249 do {
1250 struct yang_list_keys list_keys;
1251 char xpath[XPATH_MAXLEN * 2];
1252 int ret;
1253
1254 /* Obtain list entry. */
1255 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1256 list_entry);
1257 if (!list_entry)
1258 /* End of the list. */
1259 break;
1260
1261 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1262 /* Obtain the list entry keys. */
1263 if (nb_callback_get_keys(nb_node, list_entry,
1264 &list_keys)
1265 != NB_OK) {
1266 flog_warn(EC_LIB_NB_CB_STATE,
1267 "%s: failed to get list keys",
1268 __func__);
1269 return NB_ERR;
1270 }
1271
1272 /* Build XPath of the list entry. */
1273 strlcpy(xpath, xpath_list, sizeof(xpath));
1274 for (unsigned int i = 0; i < list_keys.num; i++) {
1275 snprintf(xpath + strlen(xpath),
1276 sizeof(xpath) - strlen(xpath),
1277 "[%s='%s']", slist->keys[i]->name,
1278 list_keys.key[i]);
1279 }
1280 } else {
1281 /*
1282 * Keyless list - build XPath using a positional index.
1283 */
1284 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1285 position);
1286 position++;
1287 }
1288
1289 /* Iterate over the child nodes. */
1290 ret = nb_oper_data_iter_children(
1291 nb_node->snode, xpath, list_entry, &list_keys,
1292 translator, false, flags, cb, arg);
1293 if (ret != NB_OK)
1294 return ret;
1295 } while (list_entry);
1296
1297 return NB_OK;
1298 }
1299
/*
 * Iterate over the operational data of a single schema node, dispatching to
 * the type-specific handler. 'first' is true only for the node where the
 * iteration started (its name is already part of 'xpath_parent').
 */
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath_parent,
				  const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	char xpath[XPATH_MAXLEN];
	int ret = NB_OK;

	/* Honor the NORECURSE flag: don't descend into nested containers
	 * or lists. */
	if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
	    && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
		return NB_OK;

	/* Update XPath (LYS_USES nodes don't appear in the data tree). */
	strlcpy(xpath, xpath_parent, sizeof(xpath));
	if (!first && snode->nodetype != LYS_USES)
		snprintf(xpath + strlen(xpath), sizeof(xpath) - strlen(xpath),
			 "/%s", snode->name);

	nb_node = snode->priv;
	switch (snode->nodetype) {
	case LYS_CONTAINER:
		ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
						  list_keys, translator, flags,
						  cb, arg);
		break;
	case LYS_LEAF:
		ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_LEAFLIST:
		ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
						 list_keys, translator, flags,
						 cb, arg);
		break;
	case LYS_LIST:
		ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_USES:
		ret = nb_oper_data_iter_children(snode, xpath, list_entry,
						 list_keys, translator, false,
						 flags, cb, arg);
		break;
	default:
		break;
	}

	return ret;
}
1355
/*
 * Iterate over operational data starting from 'xpath', invoking 'cb' for
 * every data node found. 'xpath' must point to a container or a list
 * (possibly a specific list entry, in which case only that entry is
 * visited). Returns NB_OK, NB_ERR or NB_ERR_NOT_FOUND.
 */
int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
			 uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	const void *list_entry = NULL;
	struct yang_list_keys list_keys;
	struct list *list_dnodes;
	struct lyd_node *dnode, *dn;
	struct listnode *ln;
	int ret;

	nb_node = nb_node_find(xpath);
	if (!nb_node) {
		flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
			  "%s: unknown data path: %s", __func__, xpath);
		return NB_ERR;
	}

	/* For now this function works only with containers and lists. */
	if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		flog_warn(
			EC_LIB_NB_OPERATIONAL_DATA,
			"%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
			__func__, xpath);
		return NB_ERR;
	}

	/*
	 * Create a data tree from the XPath so that we can parse the keys of
	 * all YANG lists (if any).
	 */
	ly_errno = 0;
	dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
			     LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
	if (!dnode) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
			  __func__);
		return NB_ERR;
	}

	/*
	 * Create a linked list to sort the data nodes starting from the root.
	 */
	list_dnodes = list_new();
	for (dn = dnode; dn; dn = dn->parent) {
		/* Only list nodes that have their keys present are relevant
		 * for the entry lookup below. */
		if (dn->schema->nodetype != LYS_LIST || !dn->child)
			continue;
		listnode_add_head(list_dnodes, dn);
	}
	/*
	 * Use the northbound callbacks to find list entry pointer corresponding
	 * to the given XPath.
	 */
	for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
		struct lyd_node *child;
		struct nb_node *nn;
		unsigned int n = 0;

		/* Obtain the list entry keys. */
		memset(&list_keys, 0, sizeof(list_keys));
		LY_TREE_FOR (dn->child, child) {
			if (!lys_is_key((struct lys_node_leaf *)child->schema,
					NULL))
				continue;
			strlcpy(list_keys.key[n],
				yang_dnode_get_string(child, NULL),
				sizeof(list_keys.key[n]));
			n++;
		}
		list_keys.num = n;
		/* All keys of each list must be present in the XPath. */
		if (list_keys.num
		    != ((struct lys_node_list *)dn->schema)->keys_size) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}

		/* Find the list entry pointer. */
		nn = dn->schema->priv;
		list_entry =
			nb_callback_lookup_entry(nn, list_entry, &list_keys);
		if (list_entry == NULL) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}
	}

	/* If a list entry was given, iterate over that list entry only. */
	if (dnode->schema->nodetype == LYS_LIST && dnode->child)
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, true, flags, cb, arg);
	else
		ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
					     &list_keys, translator, true,
					     flags, cb, arg);

	list_delete(&list_dnodes);
	yang_dnode_free(dnode);

	return ret;
}
1459
/*
 * Check whether the given northbound operation is applicable to the given
 * YANG schema node, based on the node's type and its YANG properties
 * (config vs. state, presence, mandatory, default, user-ordered, etc).
 */
bool nb_operation_is_valid(enum nb_operation operation,
			   const struct lys_node *snode)
{
	struct nb_node *nb_node = snode->priv;
	struct lys_node_container *scontainer;
	struct lys_node_leaf *sleaf;

	switch (operation) {
	case NB_OP_CREATE:
		/* Only configuration nodes can be created. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Of all leafs, only type "empty" is createable. */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base != LY_TYPE_EMPTY)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be created. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MODIFY:
		/* Only configuration leafs (non-empty) can be modified. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base == LY_TYPE_EMPTY)
				return false;

			/* List keys can't be modified. */
			if (lys_is_key(sleaf, NULL))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_DESTROY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lys_node_leaf *)snode;

			/* List keys can't be deleted. */
			if (lys_is_key(sleaf, NULL))
				return false;

			/*
			 * Only optional leafs can be deleted, or leafs whose
			 * parent is a case statement.
			 */
			if (snode->parent->nodetype == LYS_CASE)
				return true;
			if (sleaf->when)
				return true;
			/* Mandatory leafs and leafs with a default value are
			 * always present and hence not destroyable. */
			if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
			    || sleaf->dflt)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be destroyed. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MOVE:
		/* Only user-ordered configuration lists/leaf-lists can be
		 * reordered. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LIST:
		case LYS_LEAFLIST:
			if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_APPLY_FINISH:
		/* Any configuration node may have an apply_finish. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;
		return true;
	case NB_OP_GET_ELEM:
		/* Only state (read-only) nodes provide get_elem. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
		case LYS_LEAFLIST:
			break;
		case LYS_CONTAINER:
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_NEXT:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			break;
		case LYS_LEAFLIST:
			if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
		/* Keyed, non-config-only lists only. */
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_RPC:
		/* RPCs/actions are neither config nor state nodes. */
		if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_RPC:
		case LYS_ACTION:
			break;
		default:
			return false;
		}
		return true;
	default:
		return false;
	}
}
1622
1623 DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1624 (xpath, arguments));
1625
1626 int nb_notification_send(const char *xpath, struct list *arguments)
1627 {
1628 int ret;
1629
1630 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1631
1632 ret = hook_call(nb_notification_send, xpath, arguments);
1633 if (arguments)
1634 list_delete(&arguments);
1635
1636 return ret;
1637 }
1638
/* Running configuration user pointers management. */
struct nb_config_entry {
	/* XPath of the data node this user pointer is associated with. */
	char xpath[XPATH_MAXLEN];

	/* Opaque user pointer set by the daemon via nb_running_set_entry(). */
	void *entry;
};
1644
1645 static bool running_config_entry_cmp(const void *value1, const void *value2)
1646 {
1647 const struct nb_config_entry *c1 = value1;
1648 const struct nb_config_entry *c2 = value2;
1649
1650 return strmatch(c1->xpath, c2->xpath);
1651 }
1652
/*
 * Hash key function. Hashes the entry's xpath; relies on 'xpath' being the
 * first member of struct nb_config_entry so that the struct pointer can be
 * passed directly as a string.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
1657
1658 static void *running_config_entry_alloc(void *p)
1659 {
1660 struct nb_config_entry *new, *key = p;
1661
1662 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1663 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1664
1665 return new;
1666 }
1667
/* Hash destructor: release a single configuration entry. */
static void running_config_entry_free(void *arg)
{
	XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
}
1672
1673 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1674 {
1675 struct nb_config_entry *config, s;
1676
1677 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1678 config = hash_get(running_config_entries, &s,
1679 running_config_entry_alloc);
1680 config->entry = entry;
1681 }
1682
/*
 * Remove the user pointer associated with 'dnode' (if any), and recursively
 * remove the user pointers associated with all of its descendants. Returns
 * the entry that was associated with 'dnode' itself, or NULL if none.
 */
static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
{
	struct nb_config_entry *config, s;
	struct lyd_node *child;
	void *entry = NULL;

	yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
	config = hash_release(running_config_entries, &s);
	if (config) {
		entry = config->entry;
		running_config_entry_free(config);
	}

	/* Unset user pointers from the child nodes. */
	if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
		LY_TREE_FOR (dnode->child, child) {
			(void)nb_running_unset_entry_helper(child);
		}
	}

	return entry;
}
1705
/*
 * Remove and return the user pointer associated with 'dnode', including the
 * pointers of all its descendants. Aborts if no pointer was set for 'dnode'
 * - callers must only unset entries they previously set.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);

	return entry;
}
1715
1716 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1717 bool abort_if_not_found)
1718 {
1719 const struct lyd_node *orig_dnode = dnode;
1720 char xpath_buf[XPATH_MAXLEN];
1721
1722 assert(dnode || xpath);
1723
1724 if (!dnode)
1725 dnode = yang_dnode_get(running_config->dnode, xpath);
1726
1727 while (dnode) {
1728 struct nb_config_entry *config, s;
1729
1730 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1731 config = hash_lookup(running_config_entries, &s);
1732 if (config)
1733 return config->entry;
1734
1735 dnode = dnode->parent;
1736 }
1737
1738 if (!abort_if_not_found)
1739 return NULL;
1740
1741 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1742 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1743 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1744 zlog_backtrace(LOG_ERR);
1745 abort();
1746 }
1747
1748 /* Logging functions. */
1749 const char *nb_event_name(enum nb_event event)
1750 {
1751 switch (event) {
1752 case NB_EV_VALIDATE:
1753 return "validate";
1754 case NB_EV_PREPARE:
1755 return "prepare";
1756 case NB_EV_ABORT:
1757 return "abort";
1758 case NB_EV_APPLY:
1759 return "apply";
1760 default:
1761 return "unknown";
1762 }
1763 }
1764
1765 const char *nb_operation_name(enum nb_operation operation)
1766 {
1767 switch (operation) {
1768 case NB_OP_CREATE:
1769 return "create";
1770 case NB_OP_MODIFY:
1771 return "modify";
1772 case NB_OP_DESTROY:
1773 return "destroy";
1774 case NB_OP_MOVE:
1775 return "move";
1776 case NB_OP_APPLY_FINISH:
1777 return "apply_finish";
1778 case NB_OP_GET_ELEM:
1779 return "get_elem";
1780 case NB_OP_GET_NEXT:
1781 return "get_next";
1782 case NB_OP_GET_KEYS:
1783 return "get_keys";
1784 case NB_OP_LOOKUP_ENTRY:
1785 return "lookup_entry";
1786 case NB_OP_RPC:
1787 return "rpc";
1788 default:
1789 return "unknown";
1790 }
1791 }
1792
1793 const char *nb_err_name(enum nb_error error)
1794 {
1795 switch (error) {
1796 case NB_OK:
1797 return "ok";
1798 case NB_ERR:
1799 return "generic error";
1800 case NB_ERR_NO_CHANGES:
1801 return "no changes";
1802 case NB_ERR_NOT_FOUND:
1803 return "element not found";
1804 case NB_ERR_LOCKED:
1805 return "resource is locked";
1806 case NB_ERR_VALIDATION:
1807 return "validation error";
1808 case NB_ERR_RESOURCE:
1809 return "failed to allocate resource";
1810 case NB_ERR_INCONSISTENCY:
1811 return "internal inconsistency";
1812 default:
1813 return "unknown";
1814 }
1815 }
1816
1817 const char *nb_client_name(enum nb_client client)
1818 {
1819 switch (client) {
1820 case NB_CLIENT_CLI:
1821 return "CLI";
1822 case NB_CLIENT_CONFD:
1823 return "ConfD";
1824 case NB_CLIENT_SYSREPO:
1825 return "Sysrepo";
1826 case NB_CLIENT_GRPC:
1827 return "gRPC";
1828 default:
1829 return "unknown";
1830 }
1831 }
1832
1833 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1834 {
1835 for (size_t i = 0; module->nodes[i].xpath; i++) {
1836 struct nb_node *nb_node;
1837 uint32_t priority;
1838
1839 nb_node = nb_node_find(module->nodes[i].xpath);
1840 if (!nb_node) {
1841 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1842 "%s: unknown data path: %s", __func__,
1843 module->nodes[i].xpath);
1844 continue;
1845 }
1846
1847 nb_node->cbs = module->nodes[i].cbs;
1848 priority = module->nodes[i].priority;
1849 if (priority != 0)
1850 nb_node->priority = priority;
1851 }
1852 }
1853
/*
 * Initialize the northbound layer: load the given YANG modules, create
 * nb_node's for their schema nodes, install the per-module callbacks and
 * set up the (empty) running configuration. Exits the process if any
 * northbound callback fails to validate.
 */
void nb_init(struct thread_master *tm,
	     const struct frr_yang_module_info *modules[], size_t nmodules)
{
	unsigned int errors = 0;

	/* Load YANG modules. */
	for (size_t i = 0; i < nmodules; i++)
		yang_module_load(modules[i]->name);

	/* Create a nb_node for all YANG schema nodes. */
	nb_nodes_create();

	/* Load northbound callbacks. */
	for (size_t i = 0; i < nmodules; i++)
		nb_load_callbacks(modules[i]);

	/* Validate northbound callbacks. */
	yang_snodes_iterate_all(nb_node_validate, 0, &errors);
	if (errors > 0) {
		flog_err(
			EC_LIB_NB_CBS_VALIDATION,
			"%s: failed to validate northbound callbacks: %u error(s)",
			__func__, errors);
		exit(1);
	}

	/* Create an empty running configuration. */
	running_config = nb_config_new(NULL);
	running_config_entries = hash_create(running_config_entry_key_make,
					     running_config_entry_cmp,
					     "Running Configuration Entries");
	pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);

	/* Initialize the northbound CLI. */
	nb_cli_init(tm);
}
1890
/*
 * Tear down the northbound layer, releasing everything allocated by
 * nb_init(): the CLI, the nb_node's, the running configuration and its
 * user-pointer hash, and the management lock mutex.
 */
void nb_terminate(void)
{
	/* Terminate the northbound CLI. */
	nb_cli_terminate();

	/* Delete all nb_node's from all YANG modules. */
	nb_nodes_delete();

	/* Delete the running configuration. */
	hash_clean(running_config_entries, running_config_entry_free);
	hash_free(running_config_entries);
	nb_config_free(running_config);
	pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}