/*
 * lib/northbound.c — FRR northbound layer.
 * (Removed non-code web-viewer header that was accidentally captured.)
 */
1 /*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include <zebra.h>
21
22 #include "libfrr.h"
23 #include "log.h"
24 #include "lib_errors.h"
25 #include "hash.h"
26 #include "command.h"
27 #include "debug.h"
28 #include "db.h"
29 #include "northbound.h"
30 #include "northbound_cli.h"
31 #include "northbound_db.h"
32
/* Memory types for northbound nodes and configuration objects. */
DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")

/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;

/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;

/* Management lock for the running configuration. */
static struct {
	/* Mutex protecting this structure. */
	pthread_mutex_t mtx;

	/* Actual lock. */
	bool locked;

	/* Northbound client who owns this lock. */
	enum nb_client owner_client;

	/* Northbound user who owns this lock. */
	const void *owner_user;
} running_config_mgmt_lock;

/*
 * Global lock used to prevent multiple configuration transactions from
 * happening concurrently.
 */
static bool transaction_in_progress;

/* Forward declarations of helpers defined later in this file. */
static int nb_callback_configuration(const enum nb_event event,
				     struct nb_config_change *change);
static struct nb_transaction *nb_transaction_new(struct nb_config *config,
						 struct nb_config_cbs *changes,
						 enum nb_client client,
						 const void *user,
						 const char *comment);
static void nb_transaction_free(struct nb_transaction *transaction);
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction);
static void nb_transaction_apply_finish(struct nb_transaction *transaction);
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg);
81
82 static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
83 {
84 bool *config_only = arg;
85
86 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
87 *config_only = false;
88 return YANG_ITER_STOP;
89 }
90
91 return YANG_ITER_CONTINUE;
92 }
93
/*
 * Schema-iteration callback: allocate a northbound node for the given
 * libyang schema node, record its data path, inherit parent links, set
 * derived flags and link the two nodes together via the private pointer.
 */
static int nb_node_new_cb(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node;
	struct lys_node *sparent, *sparent_list;

	nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
	yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
			    sizeof(nb_node->xpath));
	nb_node->priority = NB_DFLT_PRIORITY;
	sparent = yang_snode_real_parent(snode);
	if (sparent)
		nb_node->parent = sparent->priv;
	sparent_list = yang_snode_parent_list(snode);
	if (sparent_list)
		nb_node->parent_list = sparent_list->priv;

	/* Set flags. */
	if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		bool config_only = true;

		/*
		 * Flag containers/lists whose entire subtree consists of
		 * configuration data (no operational state underneath).
		 */
		yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
					    YANG_ITER_ALLOW_AUGMENTATIONS,
					    &config_only);
		if (config_only)
			SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
	}
	if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
		struct lys_node_list *slist;

		/* Keyless lists need special handling elsewhere. */
		slist = (struct lys_node_list *)snode;
		if (slist->keys_size == 0)
			SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
	}

	/*
	 * Link the northbound node and the libyang schema node with one
	 * another.
	 */
	nb_node->snode = snode;
	lys_set_private(snode, nb_node);

	return YANG_ITER_CONTINUE;
}
137
/*
 * Schema-iteration callback: unlink and free the northbound node attached
 * to the given schema node.
 */
static int nb_node_del_cb(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node;

	nb_node = snode->priv;
	lys_set_private(snode, NULL);
	XFREE(MTYPE_NB_NODE, nb_node);

	return YANG_ITER_CONTINUE;
}

/* Create a northbound node for every schema node of the loaded modules. */
void nb_nodes_create(void)
{
	yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
}

/* Free all northbound nodes created by nb_nodes_create(). */
void nb_nodes_delete(void)
{
	yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
}
158
159 struct nb_node *nb_node_find(const char *xpath)
160 {
161 const struct lys_node *snode;
162
163 /*
164 * Use libyang to find the schema node associated to the xpath and get
165 * the northbound node from there (snode private pointer).
166 */
167 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
168 if (!snode)
169 return NULL;
170
171 return snode->priv;
172 }
173
174 static int nb_node_validate_cb(const struct nb_node *nb_node,
175 enum nb_operation operation,
176 int callback_implemented, bool optional)
177 {
178 bool valid;
179
180 valid = nb_operation_is_valid(operation, nb_node->snode);
181
182 if (!valid && callback_implemented)
183 flog_warn(EC_LIB_NB_CB_UNNEEDED,
184 "unneeded '%s' callback for '%s'",
185 nb_operation_name(operation), nb_node->xpath);
186
187 if (!optional && valid && !callback_implemented) {
188 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
189 nb_operation_name(operation), nb_node->xpath);
190 return 1;
191 }
192
193 return 0;
194 }
195
/*
 * Check if the required callbacks were implemented for the given northbound
 * node. Returns the number of missing mandatory callbacks.
 */
static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
{
	unsigned int error = 0;

	/* Only 'apply_finish' is optional; all others are mandatory when
	 * applicable to the node. */
	error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
				     !!nb_node->cbs.create, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
				     !!nb_node->cbs.modify, false);
	error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
				     !!nb_node->cbs.destroy, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
				     false);
	error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
				     !!nb_node->cbs.apply_finish, true);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
				     !!nb_node->cbs.get_elem, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
				     !!nb_node->cbs.get_next, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
				     !!nb_node->cbs.get_keys, false);
	error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
				     !!nb_node->cbs.lookup_entry, false);
	error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
				     false);

	return error;
}

/*
 * Ensure a node's priority is not lower than its parent's (children must be
 * processed after their parents). Returns 1 on error, 0 otherwise.
 */
static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
{
	/* Top-level nodes can have any priority. */
	if (!nb_node->parent)
		return 0;

	if (nb_node->priority < nb_node->parent->priority) {
		flog_err(EC_LIB_NB_CB_INVALID_PRIO,
			 "node has higher priority than its parent [xpath %s]",
			 nb_node->xpath);
		return 1;
	}

	return 0;
}

/* Schema-iteration callback: accumulate validation errors for one node. */
static int nb_node_validate(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node = snode->priv;
	unsigned int *errors = arg;

	/* Validate callbacks and priority. */
	*errors += nb_node_validate_cbs(nb_node);
	*errors += nb_node_validate_priority(nb_node);

	return YANG_ITER_CONTINUE;
}
256
257 struct nb_config *nb_config_new(struct lyd_node *dnode)
258 {
259 struct nb_config *config;
260
261 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
262 if (dnode)
263 config->dnode = dnode;
264 else
265 config->dnode = yang_dnode_new(ly_native_ctx, true);
266 config->version = 0;
267 pthread_rwlock_init(&config->lock, NULL);
268
269 return config;
270 }
271
272 void nb_config_free(struct nb_config *config)
273 {
274 if (config->dnode)
275 yang_dnode_free(config->dnode);
276 pthread_rwlock_destroy(&config->lock);
277 XFREE(MTYPE_NB_CONFIG, config);
278 }
279
280 struct nb_config *nb_config_dup(const struct nb_config *config)
281 {
282 struct nb_config *dup;
283
284 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
285 dup->dnode = yang_dnode_dup(config->dnode);
286 dup->version = config->version;
287 pthread_rwlock_init(&dup->lock, NULL);
288
289 return dup;
290 }
291
/*
 * Merge config_src into config_dst. Unless preserve_source is true, the
 * source configuration is consumed (freed) regardless of the merge result.
 *
 * Returns NB_OK on success, NB_ERR when lyd_merge() failed.
 */
int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
		    bool preserve_source)
{
	int ret;

	ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
	if (ret != 0)
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);

	if (!preserve_source)
		nb_config_free(config_src);

	return (ret == 0) ? NB_OK : NB_ERR;
}

/*
 * Replace config_dst's data tree (and version) by config_src's. When
 * preserve_source is false, config_src is consumed and freed.
 */
void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		/* Move the tree; NULL it so nb_config_free() won't free it. */
		config_dst->dnode = config_src->dnode;
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}
325
/* Generate the nb_config_cbs tree. */
static inline int nb_config_cb_compare(const struct nb_config_cb *a,
				       const struct nb_config_cb *b)
{
	/* Sort by priority first. */
	if (a->nb_node->priority < b->nb_node->priority)
		return -1;
	if (a->nb_node->priority > b->nb_node->priority)
		return 1;

	/*
	 * Use XPath as a tie-breaker. This will naturally sort parent nodes
	 * before their children.
	 */
	return strcmp(a->xpath, b->xpath);
}
RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
343
344 static void nb_config_diff_add_change(struct nb_config_cbs *changes,
345 enum nb_operation operation,
346 const struct lyd_node *dnode)
347 {
348 struct nb_config_change *change;
349
350 change = XCALLOC(MTYPE_TMP, sizeof(*change));
351 change->cb.operation = operation;
352 change->cb.nb_node = dnode->schema->priv;
353 yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
354 change->cb.dnode = dnode;
355
356 RB_INSERT(nb_config_cbs, changes, &change->cb);
357 }
358
359 static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
360 {
361 while (!RB_EMPTY(nb_config_cbs, changes)) {
362 struct nb_config_change *change;
363
364 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
365 changes);
366 RB_REMOVE(nb_config_cbs, changes, &change->cb);
367 XFREE(MTYPE_TMP, change);
368 }
369 }
370
/*
 * Helper function used when calculating the delta between two different
 * configurations. Given a new subtree, calculate all new YANG data nodes,
 * excluding default leafs and leaf-lists. This is a recursive function.
 */
static void nb_config_diff_created(const struct lyd_node *dnode,
				   struct nb_config_cbs *changes)
{
	enum nb_operation operation;
	struct lyd_node *child;

	switch (dnode->schema->nodetype) {
	case LYS_LEAF:
	case LYS_LEAFLIST:
		/* Ignore leafs that merely carry their default value. */
		if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
			break;

		/* Use whichever of "create"/"modify" the node supports. */
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			operation = NB_OP_CREATE;
		else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
			operation = NB_OP_MODIFY;
		else
			return;

		nb_config_diff_add_change(changes, operation, dnode);
		break;
	case LYS_CONTAINER:
	case LYS_LIST:
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);

		/* Process child nodes recursively. */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_created(child, changes);
		}
		break;
	default:
		break;
	}
}

/*
 * Counterpart of nb_config_diff_created() for deleted subtrees: schedule a
 * "destroy" callback for the node, or recurse into non-presence containers.
 */
static void nb_config_diff_deleted(const struct lyd_node *dnode,
				   struct nb_config_cbs *changes)
{
	if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
		nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
	else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
		struct lyd_node *child;

		/*
		 * Non-presence containers need special handling since they
		 * don't have "destroy" callbacks. In this case, what we need to
		 * do is to call the "destroy" callbacks of their child nodes
		 * when applicable (i.e. optional nodes).
		 */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_deleted(child, changes);
		}
	}
}
431
/* Calculate the delta between two different configurations. */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_difflist *diff;

	diff = lyd_diff(config1->dnode, config2->dnode,
			LYD_DIFFOPT_WITHDEFAULTS);
	assert(diff);

	for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
		LYD_DIFFTYPE type;
		struct lyd_node *dnode;

		type = diff->type[i];

		switch (type) {
		case LYD_DIFF_CREATED:
			/* Node only exists in config2 (the new config). */
			dnode = diff->second[i];
			nb_config_diff_created(dnode, changes);
			break;
		case LYD_DIFF_DELETED:
			/* Node only exists in config1 (the old config). */
			dnode = diff->first[i];
			nb_config_diff_deleted(dnode, changes);
			break;
		case LYD_DIFF_CHANGED:
			dnode = diff->second[i];
			nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
			break;
		case LYD_DIFF_MOVEDAFTER1:
		case LYD_DIFF_MOVEDAFTER2:
		default:
			/* Move operations are intentionally not handled. */
			continue;
		}
	}

	lyd_free_diff(diff);
}
471
/*
 * Edit the candidate configuration: create, modify or destroy the data
 * node identified by 'xpath'.
 *
 * Returns NB_OK on success, NB_ERR_NOT_FOUND when asked to destroy a node
 * that doesn't exist, NB_ERR otherwise.
 *
 * NOTE(review): 'data' is dereferenced unconditionally for leaf-lists and
 * for create/modify — callers apparently must always pass a valid
 * yang_data here; confirm against the call sites.
 */
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode;
	char xpath_edit[XPATH_MAXLEN];

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		/* A NULL return with ly_errno clear means "already exists". */
		ly_errno = 0;
		dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
				     xpath_edit, (void *)data->value, 0,
				     LYD_PATH_OPT_UPDATE);
		if (!dnode && ly_errno) {
			flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
				  __func__);
			return NB_ERR;
		}

		/*
		 * If a new node was created, call lyd_validate() only to create
		 * default child nodes.
		 */
		if (dnode) {
			lyd_schema_sort(dnode, 0);
			lyd_validate(&dnode, LYD_OPT_CONFIG, ly_native_ctx);
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		lyd_free(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	default:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}
532
533 bool nb_candidate_needs_update(const struct nb_config *candidate)
534 {
535 bool ret = false;
536
537 pthread_rwlock_rdlock(&running_config->lock);
538 {
539 if (candidate->version < running_config->version)
540 ret = true;
541 }
542 pthread_rwlock_unlock(&running_config->lock);
543
544 return ret;
545 }
546
547 int nb_candidate_update(struct nb_config *candidate)
548 {
549 struct nb_config *updated_config;
550
551 pthread_rwlock_rdlock(&running_config->lock);
552 {
553 updated_config = nb_config_dup(running_config);
554 }
555 pthread_rwlock_unlock(&running_config->lock);
556
557 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
558 return NB_ERR;
559
560 nb_config_replace(candidate, updated_config, false);
561
562 return NB_OK;
563 }
564
/*
 * Perform YANG syntactic and semantic validation.
 *
 * WARNING: lyd_validate() can change the configuration as part of the
 * validation process.
 */
static int nb_candidate_validate_yang(struct nb_config *candidate)
{
	if (lyd_validate(&candidate->dnode, LYD_OPT_STRICT | LYD_OPT_CONFIG,
			 ly_native_ctx)
	    != 0)
		return NB_ERR_VALIDATION;

	return NB_OK;
}

/* Perform code-level validation using the northbound callbacks. */
static int nb_candidate_validate_changes(struct nb_config *candidate,
					 struct nb_config_cbs *changes)
{
	struct nb_config_cb *cb;

	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		int ret;

		/* Any single rejected change fails the whole validation. */
		ret = nb_callback_configuration(NB_EV_VALIDATE, change);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}
598
/*
 * Validate a candidate configuration: YANG validation first, then the
 * code-level (callback) validation of its changes relative to the running
 * configuration. Returns NB_OK or NB_ERR_VALIDATION.
 */
int nb_candidate_validate(struct nb_config *candidate)
{
	struct nb_config_cbs changes;
	int ret;

	if (nb_candidate_validate_yang(candidate) != NB_OK)
		return NB_ERR_VALIDATION;

	RB_INIT(nb_config_cbs, &changes);
	/* The diff references running-config nodes: hold the read lock. */
	pthread_rwlock_rdlock(&running_config->lock);
	{
		nb_config_diff(running_config, candidate, &changes);
		ret = nb_candidate_validate_changes(candidate, &changes);
		nb_config_diff_del_changes(&changes);
	}
	pthread_rwlock_unlock(&running_config->lock);

	return ret;
}
618
/*
 * Prepare a configuration transaction out of a candidate: validate the
 * candidate, compute the delta against running, run the callbacks'
 * 'prepare' phase and return the new transaction via *transaction.
 *
 * Returns NB_OK, NB_ERR_VALIDATION, NB_ERR_NO_CHANGES, NB_ERR_LOCKED, or
 * the error returned by the prepare phase. On any error path below, the
 * running-config read lock is released before returning.
 */
int nb_candidate_commit_prepare(struct nb_config *candidate,
				enum nb_client client, const void *user,
				const char *comment,
				struct nb_transaction **transaction)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate) != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	pthread_rwlock_rdlock(&running_config->lock);
	{
		nb_config_diff(running_config, candidate, &changes);
		if (RB_EMPTY(nb_config_cbs, &changes)) {
			/* Nothing to commit. */
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_NO_CHANGES;
		}

		if (nb_candidate_validate_changes(candidate, &changes)
		    != NB_OK) {
			flog_warn(
				EC_LIB_NB_CANDIDATE_INVALID,
				"%s: failed to validate candidate configuration",
				__func__);
			nb_config_diff_del_changes(&changes);
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_VALIDATION;
		}

		/* On success the transaction takes ownership of 'changes'. */
		*transaction = nb_transaction_new(candidate, &changes, client,
						  user, comment);
		if (*transaction == NULL) {
			flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
				  "%s: failed to create transaction", __func__);
			nb_config_diff_del_changes(&changes);
			pthread_rwlock_unlock(&running_config->lock);
			return NB_ERR_LOCKED;
		}
	}
	pthread_rwlock_unlock(&running_config->lock);

	return nb_transaction_process(NB_EV_PREPARE, *transaction);
}
667
/* Abort a prepared transaction, releasing the resources it allocated. */
void nb_candidate_commit_abort(struct nb_transaction *transaction)
{
	(void)nb_transaction_process(NB_EV_ABORT, transaction);
	nb_transaction_free(transaction);
}

/*
 * Apply a prepared transaction: run the 'apply' phase plus the
 * 'apply_finish' callbacks, promote the candidate to running, optionally
 * record the transaction (transaction_id receives its ID) and free it.
 */
void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id)
{
	(void)nb_transaction_process(NB_EV_APPLY, transaction);
	nb_transaction_apply_finish(transaction);

	/* Replace running by candidate. */
	transaction->config->version++;
	pthread_rwlock_wrlock(&running_config->lock);
	{
		nb_config_replace(running_config, transaction->config, true);
	}
	pthread_rwlock_unlock(&running_config->lock);

	/* Record transaction. */
	if (save_transaction
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}

/*
 * Full commit of a candidate configuration: prepare, then apply on
 * success or abort on failure. Returns the result of the prepare phase.
 */
int nb_candidate_commit(struct nb_config *candidate, enum nb_client client,
			const void *user, bool save_transaction,
			const char *comment, uint32_t *transaction_id)
{
	struct nb_transaction *transaction = NULL;
	int ret;

	ret = nb_candidate_commit_prepare(candidate, client, user, comment,
					  &transaction);
	/*
	 * Apply the changes if the preparation phase succeeded. Otherwise abort
	 * the transaction.
	 */
	if (ret == NB_OK)
		nb_candidate_commit_apply(transaction, save_transaction,
					  transaction_id);
	else if (transaction != NULL)
		nb_candidate_commit_abort(transaction);

	return ret;
}
718
719 int nb_running_lock(enum nb_client client, const void *user)
720 {
721 int ret = -1;
722
723 pthread_mutex_lock(&running_config_mgmt_lock.mtx);
724 {
725 if (!running_config_mgmt_lock.locked) {
726 running_config_mgmt_lock.locked = true;
727 running_config_mgmt_lock.owner_client = client;
728 running_config_mgmt_lock.owner_user = user;
729 ret = 0;
730 }
731 }
732 pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
733
734 return ret;
735 }
736
737 int nb_running_unlock(enum nb_client client, const void *user)
738 {
739 int ret = -1;
740
741 pthread_mutex_lock(&running_config_mgmt_lock.mtx);
742 {
743 if (running_config_mgmt_lock.locked
744 && running_config_mgmt_lock.owner_client == client
745 && running_config_mgmt_lock.owner_user == user) {
746 running_config_mgmt_lock.locked = false;
747 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
748 running_config_mgmt_lock.owner_user = NULL;
749 ret = 0;
750 }
751 }
752 pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
753
754 return ret;
755 }
756
757 int nb_running_lock_check(enum nb_client client, const void *user)
758 {
759 int ret = -1;
760
761 pthread_mutex_lock(&running_config_mgmt_lock.mtx);
762 {
763 if (!running_config_mgmt_lock.locked
764 || (running_config_mgmt_lock.owner_client == client
765 && running_config_mgmt_lock.owner_user == user))
766 ret = 0;
767 }
768 pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
769
770 return ret;
771 }
772
773 static void nb_log_callback(const enum nb_event event,
774 enum nb_operation operation, const char *xpath,
775 const char *value)
776 {
777 zlog_debug(
778 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
779 nb_event_name(event), nb_operation_name(operation), xpath,
780 value ? value : "(NULL)");
781 }
782
/*
 * Call the northbound configuration callback associated to a given
 * configuration change.
 */
static int nb_callback_configuration(const enum nb_event event,
				     struct nb_config_change *change)
{
	enum nb_operation operation = change->cb.operation;
	const char *xpath = change->cb.xpath;
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
		const char *value = "(none)";

		if (dnode && !yang_snode_is_typeless_data(dnode->schema))
			value = yang_dnode_get_string(dnode, NULL);

		nb_log_callback(event, operation, xpath, value);
	}

	/* No resource is passed during the validation phase. */
	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	/* Dispatch to the callback matching the change's operation. */
	switch (operation) {
	case NB_OP_CREATE:
		ret = (*nb_node->cbs.create)(event, dnode, resource);
		break;
	case NB_OP_MODIFY:
		ret = (*nb_node->cbs.modify)(event, dnode, resource);
		break;
	case NB_OP_DESTROY:
		ret = (*nb_node->cbs.destroy)(event, dnode);
		break;
	case NB_OP_MOVE:
		ret = (*nb_node->cbs.move)(event, dnode);
		break;
	default:
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		int priority;
		enum lib_log_refs ref;

		/*
		 * Failures before the apply phase are recoverable (the
		 * transaction can still be rejected) and logged as warnings;
		 * a failure during apply is logged as an error.
		 */
		switch (event) {
		case NB_EV_VALIDATE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
			break;
		case NB_EV_PREPARE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_PREPARE;
			break;
		case NB_EV_ABORT:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_ABORT;
			break;
		case NB_EV_APPLY:
			priority = LOG_ERR;
			ref = EC_LIB_NB_CB_CONFIG_APPLY;
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]",
				 __func__, event, xpath);
			exit(1);
		}

		flog(priority, ref,
		     "%s: error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
		     __func__, nb_err_name(ret), nb_event_name(event),
		     nb_operation_name(operation), xpath);
	}

	return ret;
}
867
/* Call a node's 'get_elem' operational-data callback (with debug logging). */
struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_elem): xpath [%s] list_entry [%p]",
	       xpath, list_entry);

	return nb_node->cbs.get_elem(xpath, list_entry);
}

/* Call a node's 'get_next' callback to iterate over list entries. */
const void *nb_callback_get_next(const struct nb_node *nb_node,
				 const void *parent_list_entry,
				 const void *list_entry)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
	       nb_node->xpath, parent_list_entry, list_entry);

	return nb_node->cbs.get_next(parent_list_entry, list_entry);
}

/* Call a node's 'get_keys' callback to obtain a list entry's keys. */
int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
			 struct yang_list_keys *keys)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_keys): node [%s] list_entry [%p]",
	       nb_node->xpath, list_entry);

	return nb_node->cbs.get_keys(list_entry, keys);
}

/* Call a node's 'lookup_entry' callback to find a list entry by its keys. */
const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
				     const void *parent_list_entry,
				     const struct yang_list_keys *keys)
{
	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
	       nb_node->xpath, parent_list_entry);

	return nb_node->cbs.lookup_entry(parent_list_entry, keys);
}

/* Call a node's 'rpc' callback to execute a YANG RPC/action. */
int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
		    const struct list *input, struct list *output)
{
	DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);

	return nb_node->cbs.rpc(xpath, input, output);
}
918
/*
 * Create a new configuration transaction. Fails (returns NULL) when the
 * running configuration is locked by another client/user or when another
 * transaction is already in progress. On success the transaction takes
 * ownership of *changes (by value copy of the RB tree head).
 */
static struct nb_transaction *
nb_transaction_new(struct nb_config *config, struct nb_config_cbs *changes,
		   enum nb_client client, const void *user, const char *comment)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(client, user)) {
		flog_warn(
			EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			"%s: running configuration is locked by another client",
			__func__);
		return NULL;
	}

	if (transaction_in_progress) {
		flog_warn(
			EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			"%s: error - there's already another transaction in progress",
			__func__);
		return NULL;
	}
	/* Cleared again by nb_transaction_free(). */
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->client = client;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	transaction->changes = *changes;

	return transaction;
}

/* Free a transaction and its changes; allow new transactions again. */
static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	transaction_in_progress = false;
}
959
/* Process all configuration changes associated to a transaction. */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction)
{
	struct nb_config_cb *cb;

	/*
	 * Need to lock the running configuration since transaction->changes
	 * can contain pointers to data nodes from the running configuration.
	 */
	pthread_rwlock_rdlock(&running_config->lock);
	{
		RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
			struct nb_config_change *change =
				(struct nb_config_change *)cb;
			int ret;

			/*
			 * Only try to release resources that were allocated
			 * successfully.
			 */
			if (event == NB_EV_ABORT && change->prepare_ok == false)
				break;

			/* Call the appropriate callback. */
			ret = nb_callback_configuration(event, change);
			switch (event) {
			case NB_EV_PREPARE:
				/*
				 * A failed prepare rejects the transaction;
				 * unlock before propagating the error.
				 */
				if (ret != NB_OK) {
					pthread_rwlock_unlock(
						&running_config->lock);
					return ret;
				}
				change->prepare_ok = true;
				break;
			case NB_EV_ABORT:
			case NB_EV_APPLY:
				/*
				 * At this point it's not possible to reject the
				 * transaction anymore, so any failure here can
				 * lead to inconsistencies and should be treated
				 * as a bug. Operations prone to errors, like
				 * validations and resource allocations, should
				 * be performed during the 'prepare' phase.
				 */
				break;
			default:
				break;
			}
		}
	}
	pthread_rwlock_unlock(&running_config->lock);

	return NB_OK;
}
1015
1016 static struct nb_config_cb *
1017 nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
1018 const struct nb_node *nb_node,
1019 const struct lyd_node *dnode)
1020 {
1021 struct nb_config_cb *cb;
1022
1023 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1024 strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
1025 cb->nb_node = nb_node;
1026 cb->dnode = dnode;
1027 RB_INSERT(nb_config_cbs, cbs, cb);
1028
1029 return cb;
1030 }
1031
1032 static struct nb_config_cb *
1033 nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
1034 const struct nb_node *nb_node)
1035 {
1036 struct nb_config_cb s;
1037
1038 strlcpy(s.xpath, xpath, sizeof(s.xpath));
1039 s.nb_node = nb_node;
1040 return RB_FIND(nb_config_cbs, cbs, &s);
1041 }
1042
/* Call the 'apply_finish' callbacks. */
static void nb_transaction_apply_finish(struct nb_transaction *transaction)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = dnode->parent;
			if (!dnode)
				/*
				 * NOTE(review): this 'break' exits the whole
				 * RB_FOREACH, skipping the remaining changes —
				 * presumably intentional for a deleted
				 * top-level node, but worth confirming.
				 */
				break;

			/*
			 * The dnode from 'delete' callbacks point to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			char xpath[XPATH_MAXLEN];
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
				goto next;

			nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);

		next:
			dnode = dnode->parent;
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs) {
		if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
			nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
					cb->xpath, NULL);

		(*cb->nb_node->cbs.apply_finish)(cb->dnode);
	}

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}
1119
1120 static int nb_oper_data_iter_children(const struct lys_node *snode,
1121 const char *xpath, const void *list_entry,
1122 const struct yang_list_keys *list_keys,
1123 struct yang_translator *translator,
1124 bool first, uint32_t flags,
1125 nb_oper_data_cb cb, void *arg)
1126 {
1127 struct lys_node *child;
1128
1129 LY_TREE_FOR (snode->child, child) {
1130 int ret;
1131
1132 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1133 list_keys, translator, false,
1134 flags, cb, arg);
1135 if (ret != NB_OK)
1136 return ret;
1137 }
1138
1139 return NB_OK;
1140 }
1141
1142 static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1143 const char *xpath, const void *list_entry,
1144 const struct yang_list_keys *list_keys,
1145 struct yang_translator *translator,
1146 uint32_t flags, nb_oper_data_cb cb, void *arg)
1147 {
1148 struct yang_data *data;
1149
1150 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1151 return NB_OK;
1152
1153 /* Ignore list keys. */
1154 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1155 return NB_OK;
1156
1157 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1158 if (data == NULL)
1159 /* Leaf of type "empty" is not present. */
1160 return NB_OK;
1161
1162 return (*cb)(nb_node->snode, translator, data, arg);
1163 }
1164
1165 static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1166 const char *xpath,
1167 const void *list_entry,
1168 const struct yang_list_keys *list_keys,
1169 struct yang_translator *translator,
1170 uint32_t flags, nb_oper_data_cb cb,
1171 void *arg)
1172 {
1173 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1174 return NB_OK;
1175
1176 /* Presence containers. */
1177 if (nb_node->cbs.get_elem) {
1178 struct yang_data *data;
1179 int ret;
1180
1181 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1182 if (data == NULL)
1183 /* Presence container is not present. */
1184 return NB_OK;
1185
1186 ret = (*cb)(nb_node->snode, translator, data, arg);
1187 if (ret != NB_OK)
1188 return ret;
1189 }
1190
1191 /* Iterate over the child nodes. */
1192 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1193 list_keys, translator, false, flags,
1194 cb, arg);
1195 }
1196
1197 static int
1198 nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1199 const void *parent_list_entry,
1200 const struct yang_list_keys *parent_list_keys,
1201 struct yang_translator *translator, uint32_t flags,
1202 nb_oper_data_cb cb, void *arg)
1203 {
1204 const void *list_entry = NULL;
1205
1206 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1207 return NB_OK;
1208
1209 do {
1210 struct yang_data *data;
1211 int ret;
1212
1213 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1214 list_entry);
1215 if (!list_entry)
1216 /* End of the list. */
1217 break;
1218
1219 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1220 if (data == NULL)
1221 continue;
1222
1223 ret = (*cb)(nb_node->snode, translator, data, arg);
1224 if (ret != NB_OK)
1225 return ret;
1226 } while (list_entry);
1227
1228 return NB_OK;
1229 }
1230
1231 static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1232 const char *xpath_list,
1233 const void *parent_list_entry,
1234 const struct yang_list_keys *parent_list_keys,
1235 struct yang_translator *translator,
1236 uint32_t flags, nb_oper_data_cb cb, void *arg)
1237 {
1238 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1239 const void *list_entry = NULL;
1240 uint32_t position = 1;
1241
1242 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1243 return NB_OK;
1244
1245 /* Iterate over all list entries. */
1246 do {
1247 struct yang_list_keys list_keys;
1248 char xpath[XPATH_MAXLEN * 2];
1249 int ret;
1250
1251 /* Obtain list entry. */
1252 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1253 list_entry);
1254 if (!list_entry)
1255 /* End of the list. */
1256 break;
1257
1258 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1259 /* Obtain the list entry keys. */
1260 if (nb_callback_get_keys(nb_node, list_entry,
1261 &list_keys)
1262 != NB_OK) {
1263 flog_warn(EC_LIB_NB_CB_STATE,
1264 "%s: failed to get list keys",
1265 __func__);
1266 return NB_ERR;
1267 }
1268
1269 /* Build XPath of the list entry. */
1270 strlcpy(xpath, xpath_list, sizeof(xpath));
1271 for (unsigned int i = 0; i < list_keys.num; i++) {
1272 snprintf(xpath + strlen(xpath),
1273 sizeof(xpath) - strlen(xpath),
1274 "[%s='%s']", slist->keys[i]->name,
1275 list_keys.key[i]);
1276 }
1277 } else {
1278 /*
1279 * Keyless list - build XPath using a positional index.
1280 */
1281 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1282 position);
1283 position++;
1284 }
1285
1286 /* Iterate over the child nodes. */
1287 ret = nb_oper_data_iter_children(
1288 nb_node->snode, xpath, list_entry, &list_keys,
1289 translator, false, flags, cb, arg);
1290 if (ret != NB_OK)
1291 return ret;
1292 } while (list_entry);
1293
1294 return NB_OK;
1295 }
1296
1297 static int nb_oper_data_iter_node(const struct lys_node *snode,
1298 const char *xpath_parent,
1299 const void *list_entry,
1300 const struct yang_list_keys *list_keys,
1301 struct yang_translator *translator,
1302 bool first, uint32_t flags,
1303 nb_oper_data_cb cb, void *arg)
1304 {
1305 struct nb_node *nb_node;
1306 char xpath[XPATH_MAXLEN];
1307 int ret = NB_OK;
1308
1309 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1310 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1311 return NB_OK;
1312
1313 /* Update XPath. */
1314 strlcpy(xpath, xpath_parent, sizeof(xpath));
1315 if (!first && snode->nodetype != LYS_USES)
1316 snprintf(xpath + strlen(xpath), sizeof(xpath) - strlen(xpath),
1317 "/%s", snode->name);
1318
1319 nb_node = snode->priv;
1320 switch (snode->nodetype) {
1321 case LYS_CONTAINER:
1322 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1323 list_keys, translator, flags,
1324 cb, arg);
1325 break;
1326 case LYS_LEAF:
1327 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1328 list_keys, translator, flags, cb,
1329 arg);
1330 break;
1331 case LYS_LEAFLIST:
1332 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1333 list_keys, translator, flags,
1334 cb, arg);
1335 break;
1336 case LYS_LIST:
1337 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1338 list_keys, translator, flags, cb,
1339 arg);
1340 break;
1341 case LYS_USES:
1342 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1343 list_keys, translator, false,
1344 flags, cb, arg);
1345 break;
1346 default:
1347 break;
1348 }
1349
1350 return ret;
1351 }
1352
/*
 * Iterate over operational data starting at 'xpath', invoking 'cb' for each
 * data node found.
 *
 * 'xpath' must point to a YANG container or list; all list ancestors in the
 * path must carry their full set of keys so that the corresponding list
 * entry pointers can be looked up via the northbound callbacks.
 *
 * Returns NB_OK on success, NB_ERR on invalid input or libyang failure, and
 * NB_ERR_NOT_FOUND when a referenced list entry does not exist.
 */
int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
			 uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	const void *list_entry = NULL;
	struct yang_list_keys list_keys;
	struct list *list_dnodes;
	struct lyd_node *dnode, *dn;
	struct listnode *ln;
	int ret;

	nb_node = nb_node_find(xpath);
	if (!nb_node) {
		flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
			  "%s: unknown data path: %s", __func__, xpath);
		return NB_ERR;
	}

	/* For now this function works only with containers and lists. */
	if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		flog_warn(
			EC_LIB_NB_OPERATIONAL_DATA,
			"%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
			__func__, xpath);
		return NB_ERR;
	}

	/*
	 * Create a data tree from the XPath so that we can parse the keys of
	 * all YANG lists (if any). The tree is temporary and freed before
	 * returning.
	 */
	ly_errno = 0;
	dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
			     LYD_PATH_OPT_UPDATE);
	if (!dnode && ly_errno) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
			  __func__);
		return NB_ERR;
	}
	/*
	 * We can remove the following two lines once we depend on
	 * libyang-v0.16-r2, which has the LYD_PATH_OPT_NOPARENTRET flag for
	 * lyd_new_path().
	 */
	dnode = yang_dnode_get(dnode, xpath);
	assert(dnode);

	/*
	 * Create a linked list to sort the data nodes starting from the root.
	 * Only list nodes that actually carry keys (have children) are added;
	 * insertion at the head yields root-first order.
	 */
	list_dnodes = list_new();
	for (dn = dnode; dn; dn = dn->parent) {
		if (dn->schema->nodetype != LYS_LIST || !dn->child)
			continue;
		listnode_add_head(list_dnodes, dn);
	}
	/*
	 * Use the northbound callbacks to find list entry pointer corresponding
	 * to the given XPath. After the loop, 'list_keys' holds the keys of
	 * the deepest list in the path.
	 */
	for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
		struct lyd_node *child;
		struct nb_node *nn;
		unsigned int n = 0;

		/* Obtain the list entry keys. */
		memset(&list_keys, 0, sizeof(list_keys));
		LY_TREE_FOR (dn->child, child) {
			if (!lys_is_key((struct lys_node_leaf *)child->schema,
					NULL))
				continue;
			strlcpy(list_keys.key[n],
				yang_dnode_get_string(child, NULL),
				sizeof(list_keys.key[n]));
			n++;
		}
		list_keys.num = n;
		/* All keys must be present for the lookup to make sense. */
		if (list_keys.num
		    != ((struct lys_node_list *)dn->schema)->keys_size) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}

		/* Find the list entry pointer. */
		nn = dn->schema->priv;
		list_entry =
			nb_callback_lookup_entry(nn, list_entry, &list_keys);
		if (list_entry == NULL) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}
	}

	/* If a list entry was given, iterate over that list entry only. */
	if (dnode->schema->nodetype == LYS_LIST && dnode->child)
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, true, flags, cb, arg);
	else
		ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
					     &list_keys, translator, true,
					     flags, cb, arg);

	/* Release the temporary data tree and the sorting list. */
	list_delete(&list_dnodes);
	yang_dnode_free(dnode);

	return ret;
}
1463
/*
 * Check whether the given northbound operation is valid for the given YANG
 * schema node — i.e. whether a northbound callback of this kind may be
 * defined for (or invoked on) this node. Used when validating module
 * callback tables and incoming configuration changes.
 */
bool nb_operation_is_valid(enum nb_operation operation,
			   const struct lys_node *snode)
{
	struct nb_node *nb_node = snode->priv;
	struct lys_node_container *scontainer;
	struct lys_node_leaf *sleaf;

	switch (operation) {
	case NB_OP_CREATE:
		/* Only configuration nodes can be created. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Only leafs of type "empty" are "created". */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base != LY_TYPE_EMPTY)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be created. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MODIFY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			/* Leafs of type "empty" are created, not modified. */
			sleaf = (struct lys_node_leaf *)snode;
			if (sleaf->type.base == LY_TYPE_EMPTY)
				return false;

			/* List keys can't be modified. */
			if (lys_is_key(sleaf, NULL))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_DESTROY:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
			sleaf = (struct lys_node_leaf *)snode;

			/* List keys can't be deleted. */
			if (lys_is_key(sleaf, NULL))
				return false;

			/*
			 * Only optional leafs can be deleted, or leafs whose
			 * parent is a case statement.
			 */
			if (snode->parent->nodetype == LYS_CASE)
				return true;
			if (sleaf->when)
				return true;
			/* Mandatory leafs and leafs with defaults stay put. */
			if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
			    || sleaf->dflt)
				return false;
			break;
		case LYS_CONTAINER:
			/* Only presence containers can be destroyed. */
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		case LYS_LIST:
		case LYS_LEAFLIST:
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_MOVE:
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;

		switch (snode->nodetype) {
		case LYS_LIST:
		case LYS_LEAFLIST:
			/* Only "ordered-by user" nodes can be moved. */
			if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_APPLY_FINISH:
		/* Valid on any configuration node. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
			return false;
		return true;
	case NB_OP_GET_ELEM:
		/* Only state (read-only) data can be fetched. */
		if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_LEAF:
		case LYS_LEAFLIST:
			break;
		case LYS_CONTAINER:
			scontainer = (struct lys_node_container *)snode;
			if (!scontainer->presence)
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_NEXT:
		switch (snode->nodetype) {
		case LYS_LIST:
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			break;
		case LYS_LEAFLIST:
			if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_GET_KEYS:
	case NB_OP_LOOKUP_ENTRY:
		switch (snode->nodetype) {
		case LYS_LIST:
			/* Keyless and config-only lists have no key lookup. */
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
				return false;
			if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
				return false;
			break;
		default:
			return false;
		}
		return true;
	case NB_OP_RPC:
		/* RPCs/actions are neither config nor state data. */
		if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
			return false;

		switch (snode->nodetype) {
		case LYS_RPC:
		case LYS_ACTION:
			break;
		default:
			return false;
		}
		return true;
	default:
		return false;
	}
}
1626
/* Hook called whenever the daemon emits a YANG notification; northbound
 * clients register on it to forward the notification to their peers. */
DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
	    (xpath, arguments));

/*
 * Send a YANG notification identified by 'xpath' with the given argument
 * list. Ownership of 'arguments' is consumed: the list is deleted here
 * after the hook runs, so callers must not free or reuse it.
 *
 * Returns the value produced by the hook chain.
 */
int nb_notification_send(const char *xpath, struct list *arguments)
{
	int ret;

	DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);

	ret = hook_call(nb_notification_send, xpath, arguments);
	if (arguments)
		list_delete(&arguments);

	return ret;
}
1642
/* Running configuration user pointers management. */
struct nb_config_entry {
	/*
	 * Data path of the node; doubles as the hash key. Must remain the
	 * FIRST member: running_config_entry_key_make() hashes the struct
	 * pointer directly as a string.
	 */
	char xpath[XPATH_MAXLEN];
	/* Daemon-specific user pointer associated with the node. */
	void *entry;
};
1648
1649 static bool running_config_entry_cmp(const void *value1, const void *value2)
1650 {
1651 const struct nb_config_entry *c1 = value1;
1652 const struct nb_config_entry *c2 = value2;
1653
1654 return strmatch(c1->xpath, c2->xpath);
1655 }
1656
/*
 * Hash key callback. Relies on 'xpath' being the first member of
 * struct nb_config_entry so the struct pointer can be hashed as a string.
 */
static unsigned int running_config_entry_key_make(const void *value)
{
	return string_hash_make(value);
}
1661
1662 static void *running_config_entry_alloc(void *p)
1663 {
1664 struct nb_config_entry *new, *key = p;
1665
1666 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1667 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1668
1669 return new;
1670 }
1671
/* Release the memory of a single running-configuration entry. */
static void running_config_entry_free(void *arg)
{
	XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
}
1676
1677 void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1678 {
1679 struct nb_config_entry *config, s;
1680
1681 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1682 config = hash_get(running_config_entries, &s,
1683 running_config_entry_alloc);
1684 config->entry = entry;
1685 }
1686
1687 static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
1688 {
1689 struct nb_config_entry *config, s;
1690 struct lyd_node *child;
1691 void *entry = NULL;
1692
1693 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1694 config = hash_release(running_config_entries, &s);
1695 if (config) {
1696 entry = config->entry;
1697 running_config_entry_free(config);
1698 }
1699
1700 /* Unset user pointers from the child nodes. */
1701 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
1702 LY_TREE_FOR (dnode->child, child) {
1703 (void)nb_running_unset_entry_helper(child);
1704 }
1705 }
1706
1707 return entry;
1708 }
1709
/*
 * Remove and return the user pointer associated with 'dnode', also clearing
 * the pointers of all descendants. Aborts if no pointer was set on 'dnode'.
 */
void *nb_running_unset_entry(const struct lyd_node *dnode)
{
	void *entry = nb_running_unset_entry_helper(dnode);

	assert(entry);
	return entry;
}
1719
1720 void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
1721 bool abort_if_not_found)
1722 {
1723 const struct lyd_node *orig_dnode = dnode;
1724 char xpath_buf[XPATH_MAXLEN];
1725
1726 assert(dnode || xpath);
1727
1728 if (!dnode)
1729 dnode = yang_dnode_get(running_config->dnode, xpath);
1730
1731 while (dnode) {
1732 struct nb_config_entry *config, s;
1733
1734 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1735 config = hash_lookup(running_config_entries, &s);
1736 if (config)
1737 return config->entry;
1738
1739 dnode = dnode->parent;
1740 }
1741
1742 if (!abort_if_not_found)
1743 return NULL;
1744
1745 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
1746 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
1747 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
1748 zlog_backtrace(LOG_ERR);
1749 abort();
1750 }
1751
1752 /* Logging functions. */
1753 const char *nb_event_name(enum nb_event event)
1754 {
1755 switch (event) {
1756 case NB_EV_VALIDATE:
1757 return "validate";
1758 case NB_EV_PREPARE:
1759 return "prepare";
1760 case NB_EV_ABORT:
1761 return "abort";
1762 case NB_EV_APPLY:
1763 return "apply";
1764 default:
1765 return "unknown";
1766 }
1767 }
1768
1769 const char *nb_operation_name(enum nb_operation operation)
1770 {
1771 switch (operation) {
1772 case NB_OP_CREATE:
1773 return "create";
1774 case NB_OP_MODIFY:
1775 return "modify";
1776 case NB_OP_DESTROY:
1777 return "destroy";
1778 case NB_OP_MOVE:
1779 return "move";
1780 case NB_OP_APPLY_FINISH:
1781 return "apply_finish";
1782 case NB_OP_GET_ELEM:
1783 return "get_elem";
1784 case NB_OP_GET_NEXT:
1785 return "get_next";
1786 case NB_OP_GET_KEYS:
1787 return "get_keys";
1788 case NB_OP_LOOKUP_ENTRY:
1789 return "lookup_entry";
1790 case NB_OP_RPC:
1791 return "rpc";
1792 default:
1793 return "unknown";
1794 }
1795 }
1796
1797 const char *nb_err_name(enum nb_error error)
1798 {
1799 switch (error) {
1800 case NB_OK:
1801 return "ok";
1802 case NB_ERR:
1803 return "generic error";
1804 case NB_ERR_NO_CHANGES:
1805 return "no changes";
1806 case NB_ERR_NOT_FOUND:
1807 return "element not found";
1808 case NB_ERR_LOCKED:
1809 return "resource is locked";
1810 case NB_ERR_VALIDATION:
1811 return "validation error";
1812 case NB_ERR_RESOURCE:
1813 return "failed to allocate resource";
1814 case NB_ERR_INCONSISTENCY:
1815 return "internal inconsistency";
1816 default:
1817 return "unknown";
1818 }
1819 }
1820
1821 const char *nb_client_name(enum nb_client client)
1822 {
1823 switch (client) {
1824 case NB_CLIENT_CLI:
1825 return "CLI";
1826 case NB_CLIENT_CONFD:
1827 return "ConfD";
1828 case NB_CLIENT_SYSREPO:
1829 return "Sysrepo";
1830 case NB_CLIENT_GRPC:
1831 return "gRPC";
1832 default:
1833 return "unknown";
1834 }
1835 }
1836
1837 static void nb_load_callbacks(const struct frr_yang_module_info *module)
1838 {
1839 for (size_t i = 0; module->nodes[i].xpath; i++) {
1840 struct nb_node *nb_node;
1841 uint32_t priority;
1842
1843 nb_node = nb_node_find(module->nodes[i].xpath);
1844 if (!nb_node) {
1845 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1846 "%s: unknown data path: %s", __func__,
1847 module->nodes[i].xpath);
1848 continue;
1849 }
1850
1851 nb_node->cbs = module->nodes[i].cbs;
1852 priority = module->nodes[i].priority;
1853 if (priority != 0)
1854 nb_node->priority = priority;
1855 }
1856 }
1857
/*
 * Initialize the northbound layer: load the given YANG modules, create
 * nb_nodes for all schema nodes, install and validate the module callbacks,
 * create an empty running configuration and start the northbound CLI.
 *
 * Exits the process (exit(1)) when any northbound callback fails validation.
 */
void nb_init(struct thread_master *tm,
	     const struct frr_yang_module_info *modules[], size_t nmodules)
{
	unsigned int errors = 0;

	/* Load YANG modules. */
	for (size_t i = 0; i < nmodules; i++)
		yang_module_load(modules[i]->name);

	/* Create a nb_node for all YANG schema nodes. */
	nb_nodes_create();

	/* Load northbound callbacks. */
	for (size_t i = 0; i < nmodules; i++)
		nb_load_callbacks(modules[i]);

	/* Validate northbound callbacks. */
	yang_snodes_iterate_all(nb_node_validate, 0, &errors);
	if (errors > 0) {
		flog_err(
			EC_LIB_NB_CBS_VALIDATION,
			"%s: failed to validate northbound callbacks: %u error(s)",
			__func__, errors);
		exit(1);
	}

	/* Create an empty running configuration. */
	running_config = nb_config_new(NULL);
	running_config_entries = hash_create(running_config_entry_key_make,
					     running_config_entry_cmp,
					     "Running Configuration Entries");
	pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);

	/* Initialize the northbound CLI. */
	nb_cli_init(tm);
}
1894
/*
 * Tear down the northbound layer: stop the CLI, delete all nb_nodes, and
 * free the running configuration together with its user-pointer hash table
 * and management lock. Mirrors the setup performed in nb_init().
 */
void nb_terminate(void)
{
	/* Terminate the northbound CLI. */
	nb_cli_terminate();

	/* Delete all nb_node's from all YANG modules. */
	nb_nodes_delete();

	/* Delete the running configuration. */
	hash_clean(running_config_entries, running_config_entry_free);
	hash_free(running_config_entries);
	nb_config_free(running_config);
	pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
}