/*
 * Copyright (C) 2018  NetDEF, Inc.
 *                     Renato Westphal
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>

#include "libfrr.h"
#include "log.h"
#include "lib_errors.h"
#include "hash.h"
#include "command.h"
#include "debug.h"
#include "db.h"
#include "frr_pthread.h"
#include "northbound.h"
#include "northbound_cli.h"
#include "northbound_db.h"
#include "frrstr.h"

DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")

/* Running configuration - shouldn't be modified directly. */
struct nb_config *running_config;

/* Hash table of user pointers associated with configuration entries. */
static struct hash *running_config_entries;

/* Management lock for the running configuration. */
static struct {
	/* Mutex protecting this structure. */
	pthread_mutex_t mtx;

	/* Actual lock. */
	bool locked;

	/* Northbound client who owns this lock. */
	enum nb_client owner_client;

	/* Northbound user who owns this lock. */
	const void *owner_user;
} running_config_mgmt_lock;

/*
 * Global lock used to prevent multiple configuration transactions from
 * happening concurrently.
 */
static bool transaction_in_progress;

static int nb_callback_pre_validate(struct nb_context *context,
				    const struct nb_node *nb_node,
				    const struct lyd_node *dnode, char *errmsg,
				    size_t errmsg_len);
static int nb_callback_configuration(struct nb_context *context,
				     const enum nb_event event,
				     struct nb_config_change *change,
				     char *errmsg, size_t errmsg_len);
static struct nb_transaction *
nb_transaction_new(struct nb_context *context, struct nb_config *config,
		   struct nb_config_cbs *changes, const char *comment,
		   char *errmsg, size_t errmsg_len);
static void nb_transaction_free(struct nb_transaction *transaction);
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction,
				  char *errmsg, size_t errmsg_len);
static void nb_transaction_apply_finish(struct nb_transaction *transaction,
					char *errmsg, size_t errmsg_len);
static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg);

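/*
 * Schema iteration callbacks: nb_node_check_config_only() detects whether a
 * subtree contains any operational (config false) node, while nb_node_new_cb()
 * and nb_node_del_cb() create and destroy the nb_node wrappers that are
 * attached to each libyang schema node through its private pointer.
 */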
static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
{
	bool *config_only = arg;

	if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
		*config_only = false;
		return YANG_ITER_STOP;
	}

	return YANG_ITER_CONTINUE;
}

static int nb_node_new_cb(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node;
	struct lys_node *sparent, *sparent_list;

	nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
	yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
			    sizeof(nb_node->xpath));
	nb_node->priority = NB_DFLT_PRIORITY;
	sparent = yang_snode_real_parent(snode);
	if (sparent)
		nb_node->parent = sparent->priv;
	sparent_list = yang_snode_parent_list(snode);
	if (sparent_list)
		nb_node->parent_list = sparent_list->priv;

	/* Set flags. */
	if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		bool config_only = true;

		yang_snodes_iterate_subtree(snode, nb_node_check_config_only,
					    YANG_ITER_ALLOW_AUGMENTATIONS,
					    &config_only);
		if (config_only)
			SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
	}
	if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
		struct lys_node_list *slist;

		slist = (struct lys_node_list *)snode;
		if (slist->keys_size == 0)
			SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
	}

	/*
	 * Link the northbound node and the libyang schema node with one
	 * another.
	 */
	nb_node->snode = snode;
	lys_set_private(snode, nb_node);

	return YANG_ITER_CONTINUE;
}

static int nb_node_del_cb(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node;

	nb_node = snode->priv;
	lys_set_private(snode, NULL);
	XFREE(MTYPE_NB_NODE, nb_node);

	return YANG_ITER_CONTINUE;
}

void nb_nodes_create(void)
{
	yang_snodes_iterate_all(nb_node_new_cb, 0, NULL);
}

void nb_nodes_delete(void)
{
	yang_snodes_iterate_all(nb_node_del_cb, 0, NULL);
}

struct nb_node *nb_node_find(const char *xpath)
{
	const struct lys_node *snode;

	/*
	 * Use libyang to find the schema node associated to the xpath and get
	 * the northbound node from there (snode private pointer).
	 */
	snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
	if (!snode)
		return NULL;

	return snode->priv;
}

static int nb_node_validate_cb(const struct nb_node *nb_node,
			       enum nb_operation operation,
			       int callback_implemented, bool optional)
{
	bool valid;

	valid = nb_operation_is_valid(operation, nb_node->snode);

	/*
	 * Add an exception for operational data callbacks. A rw list usually
	 * doesn't need any associated operational data callbacks. But if this
	 * rw list is augmented by another module which adds state nodes under
	 * it, then this list will need to have the 'get_next()', 'get_keys()'
	 * and 'lookup_entry()' callbacks. As such, never log a warning when
	 * these callbacks are implemented when they are not needed, since this
	 * depends on context (e.g. some daemons might augment "frr-interface"
	 * while others don't).
	 */
	if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
	    && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
		flog_warn(EC_LIB_NB_CB_UNNEEDED,
			  "unneeded '%s' callback for '%s'",
			  nb_operation_name(operation), nb_node->xpath);

	if (!optional && valid && !callback_implemented) {
		flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
			 nb_operation_name(operation), nb_node->xpath);
		return 1;
	}

	return 0;
}

/*
 * Check if the required callbacks were implemented for the given northbound
 * node.
 */
static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
{
	unsigned int error = 0;

	error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
				     !!nb_node->cbs.create, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
				     !!nb_node->cbs.modify, false);
	error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
				     !!nb_node->cbs.destroy, false);
	error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
				     false);
	error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
				     !!nb_node->cbs.pre_validate, true);
	error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
				     !!nb_node->cbs.apply_finish, true);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
				     !!nb_node->cbs.get_elem, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
				     !!nb_node->cbs.get_next, false);
	error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
				     !!nb_node->cbs.get_keys, false);
	error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
				     !!nb_node->cbs.lookup_entry, false);
	error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
				     false);

	return error;
}

static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
{
	/* Top-level nodes can have any priority. */
	if (!nb_node->parent)
		return 0;

	if (nb_node->priority < nb_node->parent->priority) {
		flog_err(EC_LIB_NB_CB_INVALID_PRIO,
			 "node has higher priority than its parent [xpath %s]",
			 nb_node->xpath);
		return 1;
	}

	return 0;
}

static int nb_node_validate(const struct lys_node *snode, void *arg)
{
	struct nb_node *nb_node = snode->priv;
	unsigned int *errors = arg;

	/* Validate callbacks and priority. */
	*errors += nb_node_validate_cbs(nb_node);
	*errors += nb_node_validate_priority(nb_node);

	return YANG_ITER_CONTINUE;
}

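/*
 * struct nb_config pairs a libyang data tree with a configuration version
 * number. The helpers below create, free, duplicate, merge and replace these
 * configuration objects.
 */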
struct nb_config *nb_config_new(struct lyd_node *dnode)
{
	struct nb_config *config;

	config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
	if (dnode)
		config->dnode = dnode;
	else
		config->dnode = yang_dnode_new(ly_native_ctx, true);
	config->version = 0;

	return config;
}

void nb_config_free(struct nb_config *config)
{
	if (config->dnode)
		yang_dnode_free(config->dnode);
	XFREE(MTYPE_NB_CONFIG, config);
}

struct nb_config *nb_config_dup(const struct nb_config *config)
{
	struct nb_config *dup;

	dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
	dup->dnode = yang_dnode_dup(config->dnode);
	dup->version = config->version;

	return dup;
}

int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
		    bool preserve_source)
{
	int ret;

	ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
	if (ret != 0)
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);

	if (!preserve_source)
		nb_config_free(config_src);

	return (ret == 0) ? NB_OK : NB_ERR;
}

void nb_config_replace(struct nb_config *config_dst,
		       struct nb_config *config_src, bool preserve_source)
{
	/* Update version. */
	if (config_src->version != 0)
		config_dst->version = config_src->version;

	/* Update dnode. */
	if (config_dst->dnode)
		yang_dnode_free(config_dst->dnode);
	if (preserve_source) {
		config_dst->dnode = yang_dnode_dup(config_src->dnode);
	} else {
		config_dst->dnode = config_src->dnode;
		config_src->dnode = NULL;
		nb_config_free(config_src);
	}
}

/* Generate the nb_config_cbs tree. */
static inline int nb_config_cb_compare(const struct nb_config_cb *a,
				       const struct nb_config_cb *b)
{
	/* Sort by priority first. */
	if (a->nb_node->priority < b->nb_node->priority)
		return -1;
	if (a->nb_node->priority > b->nb_node->priority)
		return 1;

	/*
	 * Preserve the order of the configuration changes as told by libyang.
	 */
	if (a->seq < b->seq)
		return -1;
	if (a->seq > b->seq)
		return 1;

	/*
	 * All 'apply_finish' callbacks have their sequence number set to zero.
	 * In this case, compare them using their dnode pointers (the order
	 * doesn't matter for callbacks that have the same priority).
	 */
	if (a->dnode < b->dnode)
		return -1;
	if (a->dnode > b->dnode)
		return 1;

	return 0;
}
RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);

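/*
 * Queue a single configuration change in the 'changes' tree. The monotonically
 * increasing 'seq' counter keeps changes of the same priority in the order in
 * which libyang reported them.
 */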
static void nb_config_diff_add_change(struct nb_config_cbs *changes,
				      enum nb_operation operation,
				      uint32_t *seq,
				      const struct lyd_node *dnode)
{
	struct nb_config_change *change;

	change = XCALLOC(MTYPE_TMP, sizeof(*change));
	change->cb.operation = operation;
	change->cb.seq = *seq;
	*seq = *seq + 1;
	change->cb.nb_node = dnode->schema->priv;
	change->cb.dnode = dnode;

	RB_INSERT(nb_config_cbs, changes, &change->cb);
}

static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
{
	while (!RB_EMPTY(nb_config_cbs, changes)) {
		struct nb_config_change *change;

		change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
							    changes);
		RB_REMOVE(nb_config_cbs, changes, &change->cb);
		XFREE(MTYPE_TMP, change);
	}
}

/*
 * Helper function used when calculating the delta between two different
 * configurations. Given a new subtree, calculate all new YANG data nodes,
 * excluding default leafs and leaf-lists. This is a recursive function.
 */
static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	enum nb_operation operation;
	struct lyd_node *child;

	switch (dnode->schema->nodetype) {
	case LYS_LEAF:
	case LYS_LEAFLIST:
		if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
			break;

		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			operation = NB_OP_CREATE;
		else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
			operation = NB_OP_MODIFY;
		else
			return;

		nb_config_diff_add_change(changes, operation, seq, dnode);
		break;
	case LYS_CONTAINER:
	case LYS_LIST:
		if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
			nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
						  dnode);

		/* Process child nodes recursively. */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_created(child, seq, changes);
		}
		break;
	default:
		break;
	}
}

static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
				   struct nb_config_cbs *changes)
{
	if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
		nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
	else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
		struct lyd_node *child;

		/*
		 * Non-presence containers need special handling since they
		 * don't have "destroy" callbacks. In this case, what we need to
		 * do is to call the "destroy" callbacks of their child nodes
		 * when applicable (i.e. optional nodes).
		 */
		LY_TREE_FOR (dnode->child, child) {
			nb_config_diff_deleted(child, seq, changes);
		}
	}
}

/* Calculate the delta between two different configurations. */
static void nb_config_diff(const struct nb_config *config1,
			   const struct nb_config *config2,
			   struct nb_config_cbs *changes)
{
	struct lyd_difflist *diff;
	uint32_t seq = 0;

	diff = lyd_diff(config1->dnode, config2->dnode,
			LYD_DIFFOPT_WITHDEFAULTS);
	assert(diff);

	for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
		LYD_DIFFTYPE type;
		struct lyd_node *dnode;

		type = diff->type[i];

		switch (type) {
		case LYD_DIFF_CREATED:
			dnode = diff->second[i];
			nb_config_diff_created(dnode, &seq, changes);
			break;
		case LYD_DIFF_DELETED:
			dnode = diff->first[i];
			nb_config_diff_deleted(dnode, &seq, changes);
			break;
		case LYD_DIFF_CHANGED:
			dnode = diff->second[i];
			nb_config_diff_add_change(changes, NB_OP_MODIFY, &seq,
						  dnode);
			break;
		case LYD_DIFF_MOVEDAFTER1:
		case LYD_DIFF_MOVEDAFTER2:
		default:
			continue;
		}
	}

	lyd_free_diff(diff);
}

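/*
 * Edit a candidate configuration in place: create/modify operations are
 * translated into lyd_new_path() calls, while destroy operations remove the
 * corresponding data node (returning NB_ERR_NOT_FOUND when it doesn't exist).
 */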
int nb_candidate_edit(struct nb_config *candidate,
		      const struct nb_node *nb_node,
		      enum nb_operation operation, const char *xpath,
		      const struct yang_data *previous,
		      const struct yang_data *data)
{
	struct lyd_node *dnode;
	char xpath_edit[XPATH_MAXLEN];

	/* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
	if (nb_node->snode->nodetype == LYS_LEAFLIST)
		snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
			 data->value);
	else
		strlcpy(xpath_edit, xpath, sizeof(xpath_edit));

	switch (operation) {
	case NB_OP_CREATE:
	case NB_OP_MODIFY:
		ly_errno = 0;
		dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
				     xpath_edit, (void *)data->value, 0,
				     LYD_PATH_OPT_UPDATE);
		if (!dnode && ly_errno) {
			flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
				  __func__);
			return NB_ERR;
		}
		break;
	case NB_OP_DESTROY:
		dnode = yang_dnode_get(candidate->dnode, xpath_edit);
		if (!dnode)
			/*
			 * Return a special error code so the caller can choose
			 * whether to ignore it or not.
			 */
			return NB_ERR_NOT_FOUND;
		lyd_free(dnode);
		break;
	case NB_OP_MOVE:
		/* TODO: update configuration. */
		break;
	default:
		flog_warn(EC_LIB_DEVELOPMENT,
			  "%s: unknown operation (%u) [xpath %s]", __func__,
			  operation, xpath_edit);
		return NB_ERR;
	}

	return NB_OK;
}

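/*
 * A candidate is considered outdated when the running configuration has moved
 * to a newer version since the candidate was created. nb_candidate_update()
 * rebases such a candidate by merging it on top of a fresh copy of running.
 */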
bool nb_candidate_needs_update(const struct nb_config *candidate)
{
	if (candidate->version < running_config->version)
		return true;

	return false;
}

int nb_candidate_update(struct nb_config *candidate)
{
	struct nb_config *updated_config;

	updated_config = nb_config_dup(running_config);
	if (nb_config_merge(updated_config, candidate, true) != NB_OK)
		return NB_ERR;

	nb_config_replace(candidate, updated_config, false);

	return NB_OK;
}

/*
 * Perform YANG syntactic and semantic validation.
 *
 * WARNING: lyd_validate() can change the configuration as part of the
 * validation process.
 */
static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
				      size_t errmsg_len)
{
	if (lyd_validate(&candidate->dnode,
			 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
			 ly_native_ctx)
	    != 0) {
		yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
		return NB_ERR_VALIDATION;
	}

	return NB_OK;
}

/* Perform code-level validation using the northbound callbacks. */
static int nb_candidate_validate_code(struct nb_context *context,
				      struct nb_config *candidate,
				      struct nb_config_cbs *changes,
				      char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;
	struct lyd_node *root, *next, *child;
	int ret;

	/* First validate the candidate as a whole. */
	LY_TREE_FOR (candidate->dnode, root) {
		LY_TREE_DFS_BEGIN (root, next, child) {
			struct nb_node *nb_node;

			nb_node = child->schema->priv;
			if (!nb_node->cbs.pre_validate)
				goto next;

			ret = nb_callback_pre_validate(context, nb_node, child,
						       errmsg, errmsg_len);
			if (ret != NB_OK)
				return NB_ERR_VALIDATION;

		next:
			LY_TREE_DFS_END(root, next, child);
		}
	}

	/* Now validate the configuration changes. */
	RB_FOREACH (cb, nb_config_cbs, changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;

		ret = nb_callback_configuration(context, NB_EV_VALIDATE, change,
						errmsg, errmsg_len);
		if (ret != NB_OK)
			return NB_ERR_VALIDATION;
	}

	return NB_OK;
}

int nb_candidate_validate(struct nb_context *context,
			  struct nb_config *candidate, char *errmsg,
			  size_t errmsg_len)
{
	struct nb_config_cbs changes;
	int ret;

	if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len) != NB_OK)
		return NB_ERR_VALIDATION;

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
					 errmsg_len);
	nb_config_diff_del_changes(&changes);

	return ret;
}

int nb_candidate_commit_prepare(struct nb_context *context,
				struct nb_config *candidate,
				const char *comment,
				struct nb_transaction **transaction,
				char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs changes;

	if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		return NB_ERR_VALIDATION;
	}

	RB_INIT(nb_config_cbs, &changes);
	nb_config_diff(running_config, candidate, &changes);
	if (RB_EMPTY(nb_config_cbs, &changes))
		return NB_ERR_NO_CHANGES;

	if (nb_candidate_validate_code(context, candidate, &changes, errmsg,
				       errmsg_len)
	    != NB_OK) {
		flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
			  "%s: failed to validate candidate configuration",
			  __func__);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_VALIDATION;
	}

	*transaction = nb_transaction_new(context, candidate, &changes, comment,
					  errmsg, errmsg_len);
	if (*transaction == NULL) {
		flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
			  "%s: failed to create transaction: %s", __func__,
			  errmsg);
		nb_config_diff_del_changes(&changes);
		return NB_ERR_LOCKED;
	}

	return nb_transaction_process(NB_EV_PREPARE, *transaction, errmsg,
				      errmsg_len);
}

void nb_candidate_commit_abort(struct nb_transaction *transaction)
{
	char errmsg[BUFSIZ] = {0};

	(void)nb_transaction_process(NB_EV_ABORT, transaction, errmsg,
				     sizeof(errmsg));
	nb_transaction_free(transaction);
}

void nb_candidate_commit_apply(struct nb_transaction *transaction,
			       bool save_transaction, uint32_t *transaction_id)
{
	char errmsg[BUFSIZ] = {0};

	(void)nb_transaction_process(NB_EV_APPLY, transaction, errmsg,
				     sizeof(errmsg));
	nb_transaction_apply_finish(transaction, errmsg, sizeof(errmsg));

	/* Replace running by candidate. */
	transaction->config->version++;
	nb_config_replace(running_config, transaction->config, true);

	/* Record transaction. */
	if (save_transaction
	    && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
		flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
			  "%s: failed to record transaction", __func__);

	nb_transaction_free(transaction);
}

int nb_candidate_commit(struct nb_context *context, struct nb_config *candidate,
			bool save_transaction, const char *comment,
			uint32_t *transaction_id, char *errmsg,
			size_t errmsg_len)
{
	struct nb_transaction *transaction = NULL;
	int ret;

	ret = nb_candidate_commit_prepare(context, candidate, comment,
					  &transaction, errmsg, errmsg_len);
	/*
	 * Apply the changes if the preparation phase succeeded. Otherwise abort
	 * the transaction.
	 */
	if (ret == NB_OK)
		nb_candidate_commit_apply(transaction, save_transaction,
					  transaction_id);
	else if (transaction != NULL)
		nb_candidate_commit_abort(transaction);

	return ret;
}

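/*
 * Exclusive ownership of the running configuration. Only the client/user pair
 * holding the lock (or any caller when the lock is free) passes
 * nb_running_lock_check(), which is consulted before a new transaction is
 * created.
 */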
int nb_running_lock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked) {
			running_config_mgmt_lock.locked = true;
			running_config_mgmt_lock.owner_client = client;
			running_config_mgmt_lock.owner_user = user;
			ret = 0;
		}
	}

	return ret;
}

int nb_running_unlock(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (running_config_mgmt_lock.locked
		    && running_config_mgmt_lock.owner_client == client
		    && running_config_mgmt_lock.owner_user == user) {
			running_config_mgmt_lock.locked = false;
			running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
			running_config_mgmt_lock.owner_user = NULL;
			ret = 0;
		}
	}

	return ret;
}

int nb_running_lock_check(enum nb_client client, const void *user)
{
	int ret = -1;

	frr_with_mutex (&running_config_mgmt_lock.mtx) {
		if (!running_config_mgmt_lock.locked
		    || (running_config_mgmt_lock.owner_client == client
			&& running_config_mgmt_lock.owner_user == user))
			ret = 0;
	}

	return ret;
}

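/*
 * Wrappers around the user-provided configuration callbacks (create, modify,
 * destroy, move, pre_validate and apply_finish). Besides filling the callback
 * argument structures, they verify that the returned error code is consistent
 * with the current commit phase and emit a debug message when an unexpected
 * value is seen.
 */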
static void nb_log_config_callback(const enum nb_event event,
				   enum nb_operation operation,
				   const struct lyd_node *dnode)
{
	const char *value;
	char xpath[XPATH_MAXLEN];

	if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
		return;

	yang_dnode_get_path(dnode, xpath, sizeof(xpath));
	if (yang_snode_is_typeless_data(dnode->schema))
		value = "(none)";
	else
		value = yang_dnode_get_string(dnode, NULL);

	zlog_debug(
		"northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
		nb_event_name(event), nb_operation_name(operation), xpath,
		value);
}

static int nb_callback_create(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_create_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_CREATE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.create(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_modify(struct nb_context *context,
			      const struct nb_node *nb_node,
			      enum nb_event event, const struct lyd_node *dnode,
			      union nb_resource *resource, char *errmsg,
			      size_t errmsg_len)
{
	struct nb_cb_modify_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MODIFY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.resource = resource;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.modify(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_RESOURCE:
		if (event != NB_EV_PREPARE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_destroy(struct nb_context *context,
			       const struct nb_node *nb_node,
			       enum nb_event event,
			       const struct lyd_node *dnode, char *errmsg,
			       size_t errmsg_len)
{
	struct nb_cb_destroy_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_DESTROY, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.destroy(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_move(struct nb_context *context,
			    const struct nb_node *nb_node, enum nb_event event,
			    const struct lyd_node *dnode, char *errmsg,
			    size_t errmsg_len)
{
	struct nb_cb_move_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(event, NB_OP_MOVE, dnode);

	args.context = context;
	args.event = event;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.move(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR:
		break;
	case NB_ERR_VALIDATION:
		if (event != NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	case NB_ERR_INCONSISTENCY:
		if (event == NB_EV_VALIDATE)
			unexpected_error = true;
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static int nb_callback_pre_validate(struct nb_context *context,
				    const struct nb_node *nb_node,
				    const struct lyd_node *dnode, char *errmsg,
				    size_t errmsg_len)
{
	struct nb_cb_pre_validate_args args = {};
	bool unexpected_error = false;
	int ret;

	nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);

	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	ret = nb_node->cbs.pre_validate(&args);

	/* Detect and log unexpected errors. */
	switch (ret) {
	case NB_OK:
	case NB_ERR_VALIDATION:
		break;
	default:
		unexpected_error = true;
		break;
	}
	if (unexpected_error)
		DEBUGD(&nb_dbg_cbs_config,
		       "northbound callback: unexpected return value: %s",
		       nb_err_name(ret));

	return ret;
}

static void nb_callback_apply_finish(struct nb_context *context,
				     const struct nb_node *nb_node,
				     const struct lyd_node *dnode, char *errmsg,
				     size_t errmsg_len)
{
	struct nb_cb_apply_finish_args args = {};

	nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);

	args.context = context;
	args.dnode = dnode;
	args.errmsg = errmsg;
	args.errmsg_len = errmsg_len;
	nb_node->cbs.apply_finish(&args);
}

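/*
 * Wrappers around the user-provided operational-data and RPC callbacks
 * (get_elem, get_next, get_keys, lookup_entry and rpc). They only fill the
 * argument structures and emit debug messages before dispatching.
 */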
struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry)
{
	struct nb_cb_get_elem_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_elem): xpath [%s] list_entry [%p]",
	       xpath, list_entry);

	args.xpath = xpath;
	args.list_entry = list_entry;
	return nb_node->cbs.get_elem(&args);
}

const void *nb_callback_get_next(const struct nb_node *nb_node,
				 const void *parent_list_entry,
				 const void *list_entry)
{
	struct nb_cb_get_next_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
	       nb_node->xpath, parent_list_entry, list_entry);

	args.parent_list_entry = parent_list_entry;
	args.list_entry = list_entry;
	return nb_node->cbs.get_next(&args);
}

int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
			 struct yang_list_keys *keys)
{
	struct nb_cb_get_keys_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (get_keys): node [%s] list_entry [%p]",
	       nb_node->xpath, list_entry);

	args.list_entry = list_entry;
	args.keys = keys;
	return nb_node->cbs.get_keys(&args);
}

const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
				     const void *parent_list_entry,
				     const struct yang_list_keys *keys)
{
	struct nb_cb_lookup_entry_args args = {};

	DEBUGD(&nb_dbg_cbs_state,
	       "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
	       nb_node->xpath, parent_list_entry);

	args.parent_list_entry = parent_list_entry;
	args.keys = keys;
	return nb_node->cbs.lookup_entry(&args);
}

int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
		    const struct list *input, struct list *output)
{
	struct nb_cb_rpc_args args = {};

	DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);

	args.xpath = xpath;
	args.input = input;
	args.output = output;
	return nb_node->cbs.rpc(&args);
}

/*
 * Call the northbound configuration callback associated to a given
 * configuration change.
 */
static int nb_callback_configuration(struct nb_context *context,
				     const enum nb_event event,
				     struct nb_config_change *change,
				     char *errmsg, size_t errmsg_len)
{
	enum nb_operation operation = change->cb.operation;
	char xpath[XPATH_MAXLEN];
	const struct nb_node *nb_node = change->cb.nb_node;
	const struct lyd_node *dnode = change->cb.dnode;
	union nb_resource *resource;
	int ret = NB_ERR;

	if (event == NB_EV_VALIDATE)
		resource = NULL;
	else
		resource = &change->resource;

	switch (operation) {
	case NB_OP_CREATE:
		ret = nb_callback_create(context, nb_node, event, dnode,
					 resource, errmsg, errmsg_len);
		break;
	case NB_OP_MODIFY:
		ret = nb_callback_modify(context, nb_node, event, dnode,
					 resource, errmsg, errmsg_len);
		break;
	case NB_OP_DESTROY:
		ret = nb_callback_destroy(context, nb_node, event, dnode,
					  errmsg, errmsg_len);
		break;
	case NB_OP_MOVE:
		ret = nb_callback_move(context, nb_node, event, dnode, errmsg,
				       errmsg_len);
		break;
	default:
		yang_dnode_get_path(dnode, xpath, sizeof(xpath));
		flog_err(EC_LIB_DEVELOPMENT,
			 "%s: unknown operation (%u) [xpath %s]", __func__,
			 operation, xpath);
		exit(1);
	}

	if (ret != NB_OK) {
		int priority;
		enum lib_log_refs ref;

		yang_dnode_get_path(dnode, xpath, sizeof(xpath));

		switch (event) {
		case NB_EV_VALIDATE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
			break;
		case NB_EV_PREPARE:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_PREPARE;
			break;
		case NB_EV_ABORT:
			priority = LOG_WARNING;
			ref = EC_LIB_NB_CB_CONFIG_ABORT;
			break;
		case NB_EV_APPLY:
			priority = LOG_ERR;
			ref = EC_LIB_NB_CB_CONFIG_APPLY;
			break;
		default:
			flog_err(EC_LIB_DEVELOPMENT,
				 "%s: unknown event (%u) [xpath %s]", __func__,
				 event, xpath);
			exit(1);
		}

		flog(priority, ref,
		     "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
		     nb_err_name(ret), nb_event_name(event),
		     nb_operation_name(operation), xpath);
		if (strlen(errmsg) > 0)
			flog(priority, ref,
			     "error processing configuration change: %s",
			     errmsg);
	}

	return ret;
}

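/*
 * Transaction lifecycle: nb_transaction_new() refuses to start when the
 * running configuration is locked by another client or when another
 * transaction is already in progress; nb_transaction_process() then walks the
 * sorted list of changes for the prepare/abort/apply phases.
 */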
static struct nb_transaction *
nb_transaction_new(struct nb_context *context, struct nb_config *config,
		   struct nb_config_cbs *changes, const char *comment,
		   char *errmsg, size_t errmsg_len)
{
	struct nb_transaction *transaction;

	if (nb_running_lock_check(context->client, context->user)) {
		strlcpy(errmsg,
			"running configuration is locked by another client",
			errmsg_len);
		return NULL;
	}

	if (transaction_in_progress) {
		strlcpy(errmsg,
			"there's already another transaction in progress",
			errmsg_len);
		return NULL;
	}
	transaction_in_progress = true;

	transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
	transaction->context = context;
	if (comment)
		strlcpy(transaction->comment, comment,
			sizeof(transaction->comment));
	transaction->config = config;
	transaction->changes = *changes;

	return transaction;
}

static void nb_transaction_free(struct nb_transaction *transaction)
{
	nb_config_diff_del_changes(&transaction->changes);
	XFREE(MTYPE_TMP, transaction);
	transaction_in_progress = false;
}

/* Process all configuration changes associated to a transaction. */
static int nb_transaction_process(enum nb_event event,
				  struct nb_transaction *transaction,
				  char *errmsg, size_t errmsg_len)
{
	struct nb_config_cb *cb;

	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		int ret;

		/*
		 * Only try to release resources that were allocated
		 * successfully.
		 */
		if (event == NB_EV_ABORT && !change->prepare_ok)
			break;

		/* Call the appropriate callback. */
		ret = nb_callback_configuration(transaction->context, event,
						change, errmsg, errmsg_len);
		switch (event) {
		case NB_EV_PREPARE:
			if (ret != NB_OK)
				return ret;
			change->prepare_ok = true;
			break;
		case NB_EV_ABORT:
		case NB_EV_APPLY:
			/*
			 * At this point it's not possible to reject the
			 * transaction anymore, so any failure here can lead to
			 * inconsistencies and should be treated as a bug.
			 * Operations prone to errors, like validations and
			 * resource allocations, should be performed during the
			 * 'prepare' phase.
			 */
			break;
		default:
			break;
		}
	}

	return NB_OK;
}

static struct nb_config_cb *
nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
		       const struct lyd_node *dnode)
{
	struct nb_config_cb *cb;

	cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
	cb->nb_node = nb_node;
	cb->dnode = dnode;
	RB_INSERT(nb_config_cbs, cbs, cb);

	return cb;
}

static struct nb_config_cb *
nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
			const struct nb_node *nb_node,
			const struct lyd_node *dnode)
{
	struct nb_config_cb s;

	s.seq = 0;
	s.nb_node = nb_node;
	s.dnode = dnode;
	return RB_FIND(nb_config_cbs, cbs, &s);
}

/* Call the 'apply_finish' callbacks. */
static void nb_transaction_apply_finish(struct nb_transaction *transaction,
					char *errmsg, size_t errmsg_len)
{
	struct nb_config_cbs cbs;
	struct nb_config_cb *cb;

	/* Initialize tree of 'apply_finish' callbacks. */
	RB_INIT(nb_config_cbs, &cbs);

	/* Identify the 'apply_finish' callbacks that need to be called. */
	RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
		struct nb_config_change *change = (struct nb_config_change *)cb;
		const struct lyd_node *dnode = change->cb.dnode;

		/*
		 * Iterate up to the root of the data tree. When a node is being
		 * deleted, skip its 'apply_finish' callback if one is defined
		 * (the 'apply_finish' callbacks from the node ancestors should
		 * be called though).
		 */
		if (change->cb.operation == NB_OP_DESTROY) {
			char xpath[XPATH_MAXLEN];

			dnode = dnode->parent;
			if (!dnode)
				break;

			/*
			 * The dnode from 'delete' callbacks points to elements
			 * from the running configuration. Use yang_dnode_get()
			 * to get the corresponding dnode from the candidate
			 * configuration that is being committed.
			 */
			yang_dnode_get_path(dnode, xpath, sizeof(xpath));
			dnode = yang_dnode_get(transaction->config->dnode,
					       xpath);
		}
		while (dnode) {
			struct nb_node *nb_node;

			nb_node = dnode->schema->priv;
			if (!nb_node->cbs.apply_finish)
				goto next;

			/*
			 * Don't call the callback more than once for the same
			 * data node.
			 */
			if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
				goto next;

			nb_apply_finish_cb_new(&cbs, nb_node, dnode);

		next:
			dnode = dnode->parent;
		}
	}

	/* Call the 'apply_finish' callbacks, sorted by their priorities. */
	RB_FOREACH (cb, nb_config_cbs, &cbs)
		nb_callback_apply_finish(transaction->context, cb->nb_node,
					 cb->dnode, errmsg, errmsg_len);

	/* Release memory. */
	while (!RB_EMPTY(nb_config_cbs, &cbs)) {
		cb = RB_ROOT(nb_config_cbs, &cbs);
		RB_REMOVE(nb_config_cbs, &cbs, cb);
		XFREE(MTYPE_TMP, cb);
	}
}

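/*
 * Recursive helpers behind nb_oper_data_iterate(): they walk the schema tree
 * (containers, leafs, leaf-lists and lists), query the operational-data
 * callbacks of each node and hand every retrieved yang_data element to the
 * caller-supplied nb_oper_data_cb.
 */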
static int nb_oper_data_iter_children(const struct lys_node *snode,
				      const char *xpath, const void *list_entry,
				      const struct yang_list_keys *list_keys,
				      struct yang_translator *translator,
				      bool first, uint32_t flags,
				      nb_oper_data_cb cb, void *arg)
{
	struct lys_node *child;

	LY_TREE_FOR (snode->child, child) {
		int ret;

		ret = nb_oper_data_iter_node(child, xpath, list_entry,
					     list_keys, translator, false,
					     flags, cb, arg);
		if (ret != NB_OK)
			return ret;
	}

	return NB_OK;
}

static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
				  const char *xpath, const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct yang_data *data;

	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	/* Ignore list keys. */
	if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
		return NB_OK;

	data = nb_callback_get_elem(nb_node, xpath, list_entry);
	if (data == NULL)
		/* Leaf of type "empty" is not present. */
		return NB_OK;

	return (*cb)(nb_node->snode, translator, data, arg);
}

static int nb_oper_data_iter_container(const struct nb_node *nb_node,
				       const char *xpath,
				       const void *list_entry,
				       const struct yang_list_keys *list_keys,
				       struct yang_translator *translator,
				       uint32_t flags, nb_oper_data_cb cb,
				       void *arg)
{
	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
		return NB_OK;

	/* Presence containers. */
	if (nb_node->cbs.get_elem) {
		struct yang_data *data;
		int ret;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			/* Presence container is not present. */
			return NB_OK;

		ret = (*cb)(nb_node->snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	}

	/* Iterate over the child nodes. */
	return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
					  list_keys, translator, false, flags,
					  cb, arg);
}

static int
nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
			   const void *parent_list_entry,
			   const struct yang_list_keys *parent_list_keys,
			   struct yang_translator *translator, uint32_t flags,
			   nb_oper_data_cb cb, void *arg)
{
	const void *list_entry = NULL;

	if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
		return NB_OK;

	do {
		struct yang_data *data;
		int ret;

		list_entry = nb_callback_get_next(nb_node, parent_list_entry,
						  list_entry);
		if (!list_entry)
			/* End of the list. */
			break;

		data = nb_callback_get_elem(nb_node, xpath, list_entry);
		if (data == NULL)
			continue;

		ret = (*cb)(nb_node->snode, translator, data, arg);
		if (ret != NB_OK)
			return ret;
	} while (list_entry);

	return NB_OK;
}

static int nb_oper_data_iter_list(const struct nb_node *nb_node,
				  const char *xpath_list,
				  const void *parent_list_entry,
				  const struct yang_list_keys *parent_list_keys,
				  struct yang_translator *translator,
				  uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
	const void *list_entry = NULL;
	uint32_t position = 1;

	if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
		return NB_OK;

	/* Iterate over all list entries. */
	do {
		struct yang_list_keys list_keys;
		char xpath[XPATH_MAXLEN * 2];
		int ret;

		/* Obtain list entry. */
		list_entry = nb_callback_get_next(nb_node, parent_list_entry,
						  list_entry);
		if (!list_entry)
			/* End of the list. */
			break;

		if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
			/* Obtain the list entry keys. */
			if (nb_callback_get_keys(nb_node, list_entry,
						 &list_keys)
			    != NB_OK) {
				flog_warn(EC_LIB_NB_CB_STATE,
					  "%s: failed to get list keys",
					  __func__);
				return NB_ERR;
			}

			/* Build XPath of the list entry. */
			strlcpy(xpath, xpath_list, sizeof(xpath));
			for (unsigned int i = 0; i < list_keys.num; i++) {
				snprintf(xpath + strlen(xpath),
					 sizeof(xpath) - strlen(xpath),
					 "[%s='%s']", slist->keys[i]->name,
					 list_keys.key[i]);
			}
		} else {
			/*
			 * Keyless list - build XPath using a positional index.
			 */
			snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
				 position);
			position++;
		}

		/* Iterate over the child nodes. */
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, false, flags, cb, arg);
		if (ret != NB_OK)
			return ret;
	} while (list_entry);

	return NB_OK;
}

static int nb_oper_data_iter_node(const struct lys_node *snode,
				  const char *xpath_parent,
				  const void *list_entry,
				  const struct yang_list_keys *list_keys,
				  struct yang_translator *translator,
				  bool first, uint32_t flags,
				  nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	char xpath[XPATH_MAXLEN];
	int ret = NB_OK;

	if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
	    && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
		return NB_OK;

	/* Update XPath. */
	strlcpy(xpath, xpath_parent, sizeof(xpath));
	if (!first && snode->nodetype != LYS_USES) {
		struct lys_node *parent;

		/* Get the real parent. */
		parent = snode->parent;
		while (parent && parent->nodetype == LYS_USES)
			parent = parent->parent;

		/*
		 * When necessary, include the namespace of the augmenting
		 * module.
		 */
		if (parent && parent->nodetype == LYS_AUGMENT)
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s:%s",
				 snode->module->name, snode->name);
		else
			snprintf(xpath + strlen(xpath),
				 sizeof(xpath) - strlen(xpath), "/%s",
				 snode->name);
	}

	nb_node = snode->priv;
	switch (snode->nodetype) {
	case LYS_CONTAINER:
		ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
						  list_keys, translator, flags,
						  cb, arg);
		break;
	case LYS_LEAF:
		ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_LEAFLIST:
		ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
						 list_keys, translator, flags,
						 cb, arg);
		break;
	case LYS_LIST:
		ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
					     list_keys, translator, flags, cb,
					     arg);
		break;
	case LYS_USES:
		ret = nb_oper_data_iter_children(snode, xpath, list_entry,
						 list_keys, translator, false,
						 flags, cb, arg);
		break;
	default:
		break;
	}

	return ret;
}

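/*
 * Entry point for operational-data retrieval: resolve the starting schema node
 * from the given XPath, use the lookup_entry() callbacks to find the list
 * entry matching any keys embedded in the path, and iterate from there.
 */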
int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
			 uint32_t flags, nb_oper_data_cb cb, void *arg)
{
	struct nb_node *nb_node;
	const void *list_entry = NULL;
	struct yang_list_keys list_keys;
	struct list *list_dnodes;
	struct lyd_node *dnode, *dn;
	struct listnode *ln;
	int ret;

	nb_node = nb_node_find(xpath);
	if (!nb_node) {
		flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
			  "%s: unknown data path: %s", __func__, xpath);
		return NB_ERR;
	}

	/* For now this function works only with containers and lists. */
	if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
		flog_warn(
			EC_LIB_NB_OPERATIONAL_DATA,
			"%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
			__func__, xpath);
		return NB_ERR;
	}

	/*
	 * Create a data tree from the XPath so that we can parse the keys of
	 * all YANG lists (if any).
	 */
	ly_errno = 0;
	dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
			     LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
	if (!dnode) {
		flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
			  __func__);
		return NB_ERR;
	}

	/*
	 * Create a linked list to sort the data nodes starting from the root.
	 */
	list_dnodes = list_new();
	for (dn = dnode; dn; dn = dn->parent) {
		if (dn->schema->nodetype != LYS_LIST || !dn->child)
			continue;
		listnode_add_head(list_dnodes, dn);
	}
	/*
	 * Use the northbound callbacks to find the list entry pointer
	 * corresponding to the given XPath.
	 */
	for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
		struct lyd_node *child;
		struct nb_node *nn;
		unsigned int n = 0;

		/* Obtain the list entry keys. */
		memset(&list_keys, 0, sizeof(list_keys));
		LY_TREE_FOR (dn->child, child) {
			if (!lys_is_key((struct lys_node_leaf *)child->schema,
					NULL))
				continue;
			strlcpy(list_keys.key[n],
				yang_dnode_get_string(child, NULL),
				sizeof(list_keys.key[n]));
			n++;
		}
		list_keys.num = n;
		if (list_keys.num
		    != ((struct lys_node_list *)dn->schema)->keys_size) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}

		/* Find the list entry pointer. */
		nn = dn->schema->priv;
		list_entry =
			nb_callback_lookup_entry(nn, list_entry, &list_keys);
		if (list_entry == NULL) {
			list_delete(&list_dnodes);
			yang_dnode_free(dnode);
			return NB_ERR_NOT_FOUND;
		}
	}

	/* If a list entry was given, iterate over that list entry only. */
	if (dnode->schema->nodetype == LYS_LIST && dnode->child)
		ret = nb_oper_data_iter_children(
			nb_node->snode, xpath, list_entry, &list_keys,
			translator, true, flags, cb, arg);
	else
		ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
					     &list_keys, translator, true,
					     flags, cb, arg);

	list_delete(&list_dnodes);
	yang_dnode_free(dnode);

	return ret;
}

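/*
 * Given a schema node, decide whether a northbound operation makes sense for
 * it (e.g. containers can only be created or destroyed when they are presence
 * containers, and get_elem applies only to state nodes). Used both by the
 * configuration diff code and by nb_node_validate_cb().
 */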
1c2facd1
RW
1769bool nb_operation_is_valid(enum nb_operation operation,
1770 const struct lys_node *snode)
1771{
544ca69a 1772 struct nb_node *nb_node = snode->priv;
1c2facd1
RW
1773 struct lys_node_container *scontainer;
1774 struct lys_node_leaf *sleaf;
1775
1776 switch (operation) {
1777 case NB_OP_CREATE:
db452508 1778 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1779 return false;
1780
1781 switch (snode->nodetype) {
1782 case LYS_LEAF:
1783 sleaf = (struct lys_node_leaf *)snode;
1784 if (sleaf->type.base != LY_TYPE_EMPTY)
1785 return false;
1786 break;
1787 case LYS_CONTAINER:
1788 scontainer = (struct lys_node_container *)snode;
1789 if (!scontainer->presence)
1790 return false;
1791 break;
1792 case LYS_LIST:
1793 case LYS_LEAFLIST:
1794 break;
1795 default:
1796 return false;
1797 }
1798 return true;
1799 case NB_OP_MODIFY:
db452508 1800 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1801 return false;
1802
1803 switch (snode->nodetype) {
1804 case LYS_LEAF:
1805 sleaf = (struct lys_node_leaf *)snode;
1806 if (sleaf->type.base == LY_TYPE_EMPTY)
1807 return false;
1808
1809 /* List keys can't be modified. */
1810 if (lys_is_key(sleaf, NULL))
1811 return false;
1812 break;
1813 default:
1814 return false;
1815 }
1816 return true;
95ce849b 1817 case NB_OP_DESTROY:
db452508 1818 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1819 return false;
1820
1821 switch (snode->nodetype) {
1822 case LYS_LEAF:
1823 sleaf = (struct lys_node_leaf *)snode;
1824
1825 /* List keys can't be deleted. */
1826 if (lys_is_key(sleaf, NULL))
1827 return false;
1828
1829 /*
1830 * Only optional leafs can be deleted, or leafs whose
1831 * parent is a case statement.
1832 */
1833 if (snode->parent->nodetype == LYS_CASE)
1834 return true;
1835 if (sleaf->when)
1836 return true;
1837 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
1838 || sleaf->dflt)
1839 return false;
1840 break;
1841 case LYS_CONTAINER:
1842 scontainer = (struct lys_node_container *)snode;
1843 if (!scontainer->presence)
1844 return false;
1845 break;
1846 case LYS_LIST:
1847 case LYS_LEAFLIST:
1848 break;
1849 default:
1850 return false;
1851 }
1852 return true;
1853 case NB_OP_MOVE:
1854 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1855 return false;
1856
1857 switch (snode->nodetype) {
1858 case LYS_LIST:
1859 case LYS_LEAFLIST:
1860 if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
1861 return false;
1862 break;
1863 default:
1864 return false;
1865 }
1866 return true;
1867 case NB_OP_PRE_VALIDATE:
1868 case NB_OP_APPLY_FINISH:
1869 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1870 return false;
1871 return true;
1872 case NB_OP_GET_ELEM:
1873 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1874 return false;
1875
1876 switch (snode->nodetype) {
1877 case LYS_LEAF:
1878 case LYS_LEAFLIST:
1879 break;
1880 case LYS_CONTAINER:
1881 scontainer = (struct lys_node_container *)snode;
1882 if (!scontainer->presence)
1883 return false;
1884 break;
1885 default:
1886 return false;
1887 }
1888 return true;
1889 case NB_OP_GET_NEXT:
1890 switch (snode->nodetype) {
1891 case LYS_LIST:
1892 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1893 return false;
1894 break;
1895 case LYS_LEAFLIST:
1896 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1897 return false;
1898 break;
1899 default:
1900 return false;
1901 }
1902 return true;
1903 case NB_OP_GET_KEYS:
1904 case NB_OP_LOOKUP_ENTRY:
1905 switch (snode->nodetype) {
1906 case LYS_LIST:
1907 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1908 return false;
1909 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
1910 return false;
1911 break;
1912 default:
1913 return false;
1914 }
1915 return true;
1916 case NB_OP_RPC:
1917 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1918 return false;
1919
1920 switch (snode->nodetype) {
1921 case LYS_RPC:
1922 case LYS_ACTION:
1923 break;
1924 default:
1925 return false;
1926 }
1927 return true;
1928 default:
1929 return false;
1930 }
1931}
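
/*
 * A small usage sketch (hypothetical caller code): check that a schema node
 * actually supports an operation before dispatching the corresponding
 * callback, so unsupported requests are rejected up front.
 */
static int example_dispatch_get_elem(const struct nb_node *nb_node)
{
        if (!nb_operation_is_valid(NB_OP_GET_ELEM, nb_node->snode))
                return NB_ERR_NOT_FOUND;

        /* ... invoke the node's get_elem callback here ... */
        return NB_OK;
}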
1932
1933DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1934 (xpath, arguments));
1935
1936int nb_notification_send(const char *xpath, struct list *arguments)
1937{
1938 int ret;
1939
1940 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1941
1942 ret = hook_call(nb_notification_send, xpath, arguments);
1943 if (arguments)
1944 list_delete(&arguments);
1945
1946 return ret;
1947}
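
/*
 * A minimal sketch of how a daemon might emit a YANG notification; the
 * notification XPath and argument leaf are hypothetical. Note that
 * nb_notification_send() consumes the arguments list, as shown above.
 */
static void example_notify_link_down(const char *ifname)
{
        const char *xpath = "/frr-example:link-down";
        struct list *arguments = yang_data_list_new();
        char xpath_arg[XPATH_MAXLEN];

        snprintf(xpath_arg, sizeof(xpath_arg), "%s/interface", xpath);
        listnode_add(arguments, yang_data_new(xpath_arg, ifname));

        nb_notification_send(xpath, arguments);
}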
1948
1949/* Management of user pointers associated with running configuration entries. */
1950struct nb_config_entry {
1951 char xpath[XPATH_MAXLEN];
1952 void *entry;
1953};
1954
1955static bool running_config_entry_cmp(const void *value1, const void *value2)
1956{
1957 const struct nb_config_entry *c1 = value1;
1958 const struct nb_config_entry *c2 = value2;
1959
1960 return strmatch(c1->xpath, c2->xpath);
1961}
1962
1963 static unsigned int running_config_entry_key_make(const void *value)
1964{
1965 return string_hash_make(value);
1966}
1967
1968static void *running_config_entry_alloc(void *p)
1969{
1970 struct nb_config_entry *new, *key = p;
1971
1972 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1973 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1974
1975 return new;
1976}
1977
1978static void running_config_entry_free(void *arg)
1979{
1980 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
1981}
1982
1983void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
1984{
1985 struct nb_config_entry *config, s;
1986
1987 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
1988 config = hash_get(running_config_entries, &s,
1989 running_config_entry_alloc);
1990 config->entry = entry;
1991}
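
/*
 * A usage sketch with a hypothetical daemon type and a hypothetical "name"
 * leaf: after allocating its object for a newly created configuration node,
 * the daemon stores the pointer so later callbacks can retrieve it from the
 * data node alone.
 */
struct example_instance {
        char name[32];
};

static void example_instance_attach(const struct lyd_node *dnode)
{
        struct example_instance *inst;

        inst = XCALLOC(MTYPE_TMP, sizeof(*inst));
        strlcpy(inst->name, yang_dnode_get_string(dnode, "./name"),
                sizeof(inst->name));

        /* Associate the daemon object with the configuration node. */
        nb_running_set_entry(dnode, inst);
}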
1992
1993void nb_running_move_tree(const char *xpath_from, const char *xpath_to)
1994{
1995 struct nb_config_entry *entry;
1996 struct list *entries = hash_to_list(running_config_entries);
1997 struct listnode *ln;
1998
1999 for (ALL_LIST_ELEMENTS_RO(entries, ln, entry)) {
2000 if (!frrstr_startswith(entry->xpath, xpath_from))
2001 continue;
2002
2003 hash_release(running_config_entries, entry);
2004
2005 char *newpath =
2006 frrstr_replace(entry->xpath, xpath_from, xpath_to);
2007 strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
2008 XFREE(MTYPE_TMP, newpath);
2009
2010 hash_get(running_config_entries, entry, hash_alloc_intern);
2011 }
2012
2013 list_delete(&entries);
2014}
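
/*
 * A usage sketch with hypothetical XPaths: when a subtree is effectively
 * renamed (e.g. a list key changes), all user pointers stored under the old
 * prefix are re-homed under the new one.
 */
static void example_instance_rename(void)
{
        nb_running_move_tree("/frr-example:instances/instance[name='old']",
                             "/frr-example:instances/instance[name='new']");
}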
2015
2016static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
2017{
2018 struct nb_config_entry *config, s;
2019 struct lyd_node *child;
2020 void *entry = NULL;
2021
2022 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2023 config = hash_release(running_config_entries, &s);
2024 if (config) {
2025 entry = config->entry;
2026 running_config_entry_free(config);
2027 }
2028
2029 /* Unset user pointers from the child nodes. */
2030 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
2031 LY_TREE_FOR (dnode->child, child) {
2032 (void)nb_running_unset_entry_helper(child);
2033 }
2034 }
2035
2036 return entry;
2037}
2038
2039void *nb_running_unset_entry(const struct lyd_node *dnode)
2040{
2041 void *entry;
2042
2043 entry = nb_running_unset_entry_helper(dnode);
2044 assert(entry);
2045
2046 return entry;
2047}
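
/*
 * A destroy-path sketch mirroring example_instance_attach() above: retrieve
 * the stored pointer (asserting that one exists) and release the object.
 */
static void example_instance_detach(const struct lyd_node *dnode)
{
        struct example_instance *inst;

        inst = nb_running_unset_entry(dnode);
        XFREE(MTYPE_TMP, inst);
}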
2048
2049void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
2050 bool abort_if_not_found)
2051{
2052 const struct lyd_node *orig_dnode = dnode;
2053 char xpath_buf[XPATH_MAXLEN];
2054
2055 assert(dnode || xpath);
2056
2057 if (!dnode)
2058 dnode = yang_dnode_get(running_config->dnode, xpath);
2059
2060 while (dnode) {
2061 struct nb_config_entry *config, s;
2062
2063 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2064 config = hash_lookup(running_config_entries, &s);
2065 if (config)
2066 return config->entry;
2067
2068 dnode = dnode->parent;
2069 }
2070
2071 if (!abort_if_not_found)
2072 return NULL;
2073
2074 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
2075 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
2076 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
2077 zlog_backtrace(LOG_ERR);
2078 abort();
2079}
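
/*
 * A lookup sketch: from any node inside a configuration subtree (e.g. a leaf
 * being modified), walk up to the closest ancestor that has a user pointer.
 * Passing abort_if_not_found = true treats a missing entry as a fatal
 * inconsistency, as implemented above.
 */
static struct example_instance *
example_instance_from_dnode(const struct lyd_node *dnode)
{
        return nb_running_get_entry(dnode, NULL, true);
}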
2080
2081/* Logging functions. */
2082const char *nb_event_name(enum nb_event event)
2083{
2084 switch (event) {
2085 case NB_EV_VALIDATE:
2086 return "validate";
2087 case NB_EV_PREPARE:
2088 return "prepare";
2089 case NB_EV_ABORT:
2090 return "abort";
2091 case NB_EV_APPLY:
2092 return "apply";
2093 default:
2094 return "unknown";
2095 }
2096}
2097
2098const char *nb_operation_name(enum nb_operation operation)
2099{
2100 switch (operation) {
2101 case NB_OP_CREATE:
2102 return "create";
2103 case NB_OP_MODIFY:
2104 return "modify";
2105 case NB_OP_DESTROY:
2106 return "destroy";
2107 case NB_OP_MOVE:
2108 return "move";
2109 case NB_OP_PRE_VALIDATE:
2110 return "pre_validate";
2111 case NB_OP_APPLY_FINISH:
2112 return "apply_finish";
2113 case NB_OP_GET_ELEM:
2114 return "get_elem";
2115 case NB_OP_GET_NEXT:
2116 return "get_next";
2117 case NB_OP_GET_KEYS:
2118 return "get_keys";
2119 case NB_OP_LOOKUP_ENTRY:
2120 return "lookup_entry";
2121 case NB_OP_RPC:
2122 return "rpc";
2123 default:
2124 return "unknown";
2125 }
2126}
2127
2128const char *nb_err_name(enum nb_error error)
2129{
2130 switch (error) {
2131 case NB_OK:
2132 return "ok";
2133 case NB_ERR:
2134 return "generic error";
2135 case NB_ERR_NO_CHANGES:
2136 return "no changes";
2137 case NB_ERR_NOT_FOUND:
2138 return "element not found";
2139 case NB_ERR_LOCKED:
2140 return "resource is locked";
2141 case NB_ERR_VALIDATION:
2142 return "validation error";
2143 case NB_ERR_RESOURCE:
2144 return "failed to allocate resource";
2145 case NB_ERR_INCONSISTENCY:
2146 return "internal inconsistency";
2147 default:
2148 return "unknown";
2149 }
2150}
2151
2152const char *nb_client_name(enum nb_client client)
2153{
2154 switch (client) {
2155 case NB_CLIENT_CLI:
2156 return "CLI";
2157 case NB_CLIENT_CONFD:
2158 return "ConfD";
2159 case NB_CLIENT_SYSREPO:
2160 return "Sysrepo";
2161 case NB_CLIENT_GRPC:
2162 return "gRPC";
2163 default:
2164 return "unknown";
2165 }
2166}
2167
2168static void nb_load_callbacks(const struct frr_yang_module_info *module)
2169{
2170 for (size_t i = 0; module->nodes[i].xpath; i++) {
2171 struct nb_node *nb_node;
2172 uint32_t priority;
2173
2174 if (i > YANG_MODULE_MAX_NODES) {
2175 zlog_err(
2176 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2177 __func__, module->name, YANG_MODULE_MAX_NODES);
2178 exit(1);
2179 }
2180
2181 nb_node = nb_node_find(module->nodes[i].xpath);
2182 if (!nb_node) {
2183 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
2184 "%s: unknown data path: %s", __func__,
2185 module->nodes[i].xpath);
2186 continue;
2187 }
2188
2189 nb_node->cbs = module->nodes[i].cbs;
2190 priority = module->nodes[i].priority;
2191 if (priority != 0)
2192 nb_node->priority = priority;
2193 }
2194}
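
/*
 * A sketch of the per-module table that nb_load_callbacks() walks. The module
 * name, XPath and handlers are hypothetical, and the callback prototypes
 * assume the nb_cb_*_args structures declared in northbound.h.
 */
static int example_instance_create(struct nb_cb_create_args *args)
{
        /* Allocate the daemon object and attach it with nb_running_set_entry(). */
        return NB_OK;
}

static int example_instance_destroy(struct nb_cb_destroy_args *args)
{
        /* Detach the object with nb_running_unset_entry() and free it. */
        return NB_OK;
}

static const struct frr_yang_module_info frr_example_info = {
        .name = "frr-example",
        .nodes = {
                {
                        .xpath = "/frr-example:instances/instance",
                        .cbs = {
                                .create = example_instance_create,
                                .destroy = example_instance_destroy,
                        },
                        /* .priority left at 0 keeps the node's default. */
                },
                {
                        .xpath = NULL,
                },
        }
};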
2195
2196 void nb_init(struct thread_master *tm,
2197 const struct frr_yang_module_info *const modules[],
2198 size_t nmodules)
2199{
2200 unsigned int errors = 0;
2201
2202 /* Load YANG modules. */
2203 for (size_t i = 0; i < nmodules; i++)
2204 yang_module_load(modules[i]->name);
2205
2206 /* Create a nb_node for all YANG schema nodes. */
2207 nb_nodes_create();
2208
2209 /* Load northbound callbacks. */
2210 for (size_t i = 0; i < nmodules; i++)
2211 nb_load_callbacks(modules[i]);
2212
2213 /* Validate northbound callbacks. */
2214 yang_snodes_iterate_all(nb_node_validate, 0, &errors);
2215 if (errors > 0) {
2216 flog_err(
2217 EC_LIB_NB_CBS_VALIDATION,
2218 "%s: failed to validate northbound callbacks: %u error(s)",
2219 __func__, errors);
2220 exit(1);
2221 }
2222
2223 /* Create an empty running configuration. */
2224 running_config = nb_config_new(NULL);
2225 running_config_entries = hash_create(running_config_entry_key_make,
2226 running_config_entry_cmp,
2227 "Running Configuration Entries");
2228 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
2229
2230 /* Initialize the northbound CLI. */
2231 nb_cli_init(tm);
2232}
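
/*
 * An initialization sketch: a daemon gathers its frr_yang_module_info tables
 * (such as the hypothetical frr_example_info above) into an array and hands
 * them to nb_init(). In practice this usually happens indirectly, through the
 * yang_modules field of FRR_DAEMON_INFO and frr_init().
 */
static const struct frr_yang_module_info *const example_yang_modules[] = {
        &frr_example_info,
};

static void example_northbound_init(struct thread_master *tm)
{
        nb_init(tm, example_yang_modules, array_size(example_yang_modules));
}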
2233
2234void nb_terminate(void)
2235{
2236 /* Terminate the northbound CLI. */
2237 nb_cli_terminate();
2238
2239 /* Delete all nb_node's from all YANG modules. */
544ca69a 2240 nb_nodes_delete();
1c2facd1
RW
2241
2242 /* Delete the running configuration. */
2243 hash_clean(running_config_entries, running_config_entry_free);
2244 hash_free(running_config_entries);
2245 nb_config_free(running_config);
2246 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
2247}