1/*
2 * Copyright (C) 2018 NetDEF, Inc.
3 * Renato Westphal
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
 20#include <zebra.h>
 21
 22#include "libfrr.h"
 23#include "log.h"
 24#include "lib_errors.h"
 25#include "hash.h"
 26#include "command.h"
 27#include "debug.h"
 28#include "db.h"
 29#include "frr_pthread.h"
 30#include "northbound.h"
 31#include "northbound_cli.h"
 32#include "northbound_db.h"
 33#include "frrstr.h"
 34
 35DEFINE_MTYPE_STATIC(LIB, NB_NODE, "Northbound Node")
 36DEFINE_MTYPE_STATIC(LIB, NB_CONFIG, "Northbound Configuration")
 37DEFINE_MTYPE_STATIC(LIB, NB_CONFIG_ENTRY, "Northbound Configuration Entry")
 38
39/* Running configuration - shouldn't be modified directly. */
40struct nb_config *running_config;
41
42/* Hash table of user pointers associated with configuration entries. */
43static struct hash *running_config_entries;
44
45/* Management lock for the running configuration. */
46static struct {
47 /* Mutex protecting this structure. */
48 pthread_mutex_t mtx;
49
50 /* Actual lock. */
51 bool locked;
52
53 /* Northbound client who owns this lock. */
54 enum nb_client owner_client;
55
56 /* Northbound user who owns this lock. */
57 const void *owner_user;
58} running_config_mgmt_lock;
59
60/* Knob to record config transaction */
61static bool nb_db_enabled;
62/*
63 * Global lock used to prevent multiple configuration transactions from
64 * happening concurrently.
65 */
66static bool transaction_in_progress;
67
68static int nb_callback_pre_validate(struct nb_context *context,
69 const struct nb_node *nb_node,
70 const struct lyd_node *dnode, char *errmsg,
71 size_t errmsg_len);
72static int nb_callback_configuration(struct nb_context *context,
73 const enum nb_event event,
74 struct nb_config_change *change,
75 char *errmsg, size_t errmsg_len);
76static struct nb_transaction *
77nb_transaction_new(struct nb_context *context, struct nb_config *config,
78 struct nb_config_cbs *changes, const char *comment,
79 char *errmsg, size_t errmsg_len);
80static void nb_transaction_free(struct nb_transaction *transaction);
81static int nb_transaction_process(enum nb_event event,
82 struct nb_transaction *transaction,
83 char *errmsg, size_t errmsg_len);
84static void nb_transaction_apply_finish(struct nb_transaction *transaction,
85 char *errmsg, size_t errmsg_len);
86static int nb_oper_data_iter_node(const struct lys_node *snode,
87 const char *xpath, const void *list_entry,
88 const struct yang_list_keys *list_keys,
89 struct yang_translator *translator,
90 bool first, uint32_t flags,
91 nb_oper_data_cb cb, void *arg);
 92
93static int nb_node_check_config_only(const struct lys_node *snode, void *arg)
94{
95 bool *config_only = arg;
96
97 if (CHECK_FLAG(snode->flags, LYS_CONFIG_R)) {
98 *config_only = false;
99 return YANG_ITER_STOP;
100 }
101
102 return YANG_ITER_CONTINUE;
103}
104
 105static int nb_node_new_cb(const struct lys_node *snode, void *arg)
106{
107 struct nb_node *nb_node;
108 struct lys_node *sparent, *sparent_list;
109
110 nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
111 yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
112 sizeof(nb_node->xpath));
113 nb_node->priority = NB_DFLT_PRIORITY;
114 sparent = yang_snode_real_parent(snode);
115 if (sparent)
116 nb_node->parent = sparent->priv;
117 sparent_list = yang_snode_parent_list(snode);
118 if (sparent_list)
119 nb_node->parent_list = sparent_list->priv;
120
121 /* Set flags. */
122 if (CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
123 bool config_only = true;
124
125 (void)yang_snodes_iterate_subtree(snode, NULL,
126 nb_node_check_config_only, 0,
127 &config_only);
128 if (config_only)
129 SET_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY);
130 }
131 if (CHECK_FLAG(snode->nodetype, LYS_LIST)) {
132 struct lys_node_list *slist;
133
134 slist = (struct lys_node_list *)snode;
135 if (slist->keys_size == 0)
136 SET_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST);
137 }
 138
139 /*
140 * Link the northbound node and the libyang schema node with one
141 * another.
142 */
143 nb_node->snode = snode;
 144 assert(snode->priv == NULL);
 145 lys_set_private(snode, nb_node);
146
147 return YANG_ITER_CONTINUE;
148}
149
 150static int nb_node_del_cb(const struct lys_node *snode, void *arg)
151{
152 struct nb_node *nb_node;
153
154 nb_node = snode->priv;
155 if (nb_node) {
156 lys_set_private(snode, NULL);
157 XFREE(MTYPE_NB_NODE, nb_node);
158 }
159
160 return YANG_ITER_CONTINUE;
161}
162
163void nb_nodes_create(void)
164{
 165 yang_snodes_iterate(NULL, nb_node_new_cb, 0, NULL);
166}
167
168void nb_nodes_delete(void)
169{
 170 yang_snodes_iterate(NULL, nb_node_del_cb, 0, NULL);
171}
172
173struct nb_node *nb_node_find(const char *xpath)
174{
175 const struct lys_node *snode;
176
177 /*
 178 * Use libyang to find the schema node associated with the xpath and get
179 * the northbound node from there (snode private pointer).
180 */
181 snode = ly_ctx_get_node(ly_native_ctx, NULL, xpath, 0);
182 if (!snode)
183 return NULL;
184
185 return snode->priv;
186}
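/*
 * Usage sketch (illustrative, not part of the original code): resolving an
 * XPath to its northbound node and checking which callbacks are registered.
 * The XPath below is only an example.
 *
 *	struct nb_node *nb_node = nb_node_find("/frr-interface:lib/interface");
 *
 *	if (nb_node && nb_node->cbs.create)
 *		zlog_debug("create callback registered for %s", nb_node->xpath);
 */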
187
188static int nb_node_validate_cb(const struct nb_node *nb_node,
189 enum nb_operation operation,
190 int callback_implemented, bool optional)
191{
192 bool valid;
193
194 valid = nb_operation_is_valid(operation, nb_node->snode);
195
196 /*
197 * Add an exception for operational data callbacks. A rw list usually
198 * doesn't need any associated operational data callbacks. But if this
199 * rw list is augmented by another module which adds state nodes under
200 * it, then this list will need to have the 'get_next()', 'get_keys()'
201 * and 'lookup_entry()' callbacks. As such, never log a warning when
202 * these callbacks are implemented when they are not needed, since this
203 * depends on context (e.g. some daemons might augment "frr-interface"
204 * while others don't).
205 */
206 if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
207 && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
208 flog_warn(EC_LIB_NB_CB_UNNEEDED,
209 "unneeded '%s' callback for '%s'",
210 nb_operation_name(operation), nb_node->xpath);
211
212 if (!optional && valid && !callback_implemented) {
213 flog_err(EC_LIB_NB_CB_MISSING, "missing '%s' callback for '%s'",
214 nb_operation_name(operation), nb_node->xpath);
215 return 1;
216 }
217
218 return 0;
219}
220
221/*
222 * Check if the required callbacks were implemented for the given northbound
223 * node.
224 */
225static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
226
227{
228 unsigned int error = 0;
229
230 error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
231 !!nb_node->cbs.create, false);
232 error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
233 !!nb_node->cbs.modify, false);
 234 error += nb_node_validate_cb(nb_node, NB_OP_DESTROY,
 235 !!nb_node->cbs.destroy, false);
236 error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
237 false);
238 error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
239 !!nb_node->cbs.pre_validate, true);
240 error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
241 !!nb_node->cbs.apply_finish, true);
242 error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
243 !!nb_node->cbs.get_elem, false);
244 error += nb_node_validate_cb(nb_node, NB_OP_GET_NEXT,
245 !!nb_node->cbs.get_next, false);
246 error += nb_node_validate_cb(nb_node, NB_OP_GET_KEYS,
247 !!nb_node->cbs.get_keys, false);
248 error += nb_node_validate_cb(nb_node, NB_OP_LOOKUP_ENTRY,
249 !!nb_node->cbs.lookup_entry, false);
250 error += nb_node_validate_cb(nb_node, NB_OP_RPC, !!nb_node->cbs.rpc,
251 false);
252
253 return error;
254}
255
256static unsigned int nb_node_validate_priority(const struct nb_node *nb_node)
257{
258 /* Top-level nodes can have any priority. */
259 if (!nb_node->parent)
260 return 0;
261
262 if (nb_node->priority < nb_node->parent->priority) {
263 flog_err(EC_LIB_NB_CB_INVALID_PRIO,
264 "node has higher priority than its parent [xpath %s]",
265 nb_node->xpath);
266 return 1;
267 }
268
269 return 0;
270}
271
 272static int nb_node_validate(const struct lys_node *snode, void *arg)
273{
274 struct nb_node *nb_node = snode->priv;
 275 unsigned int *errors = arg;
276
277 /* Validate callbacks and priority. */
278 if (nb_node) {
279 *errors += nb_node_validate_cbs(nb_node);
280 *errors += nb_node_validate_priority(nb_node);
281 }
282
283 return YANG_ITER_CONTINUE;
284}
285
286struct nb_config *nb_config_new(struct lyd_node *dnode)
287{
288 struct nb_config *config;
289
290 config = XCALLOC(MTYPE_NB_CONFIG, sizeof(*config));
291 if (dnode)
292 config->dnode = dnode;
293 else
 294 config->dnode = yang_dnode_new(ly_native_ctx, true);
295 config->version = 0;
296
297 return config;
298}
299
300void nb_config_free(struct nb_config *config)
301{
302 if (config->dnode)
303 yang_dnode_free(config->dnode);
304 XFREE(MTYPE_NB_CONFIG, config);
305}
306
307struct nb_config *nb_config_dup(const struct nb_config *config)
308{
309 struct nb_config *dup;
310
311 dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
312 dup->dnode = yang_dnode_dup(config->dnode);
313 dup->version = config->version;
314
315 return dup;
316}
317
318int nb_config_merge(struct nb_config *config_dst, struct nb_config *config_src,
319 bool preserve_source)
320{
321 int ret;
322
323 ret = lyd_merge(config_dst->dnode, config_src->dnode, LYD_OPT_EXPLICIT);
324 if (ret != 0)
325 flog_warn(EC_LIB_LIBYANG, "%s: lyd_merge() failed", __func__);
326
327 if (!preserve_source)
328 nb_config_free(config_src);
329
330 return (ret == 0) ? NB_OK : NB_ERR;
331}
332
333void nb_config_replace(struct nb_config *config_dst,
334 struct nb_config *config_src, bool preserve_source)
335{
336 /* Update version. */
337 if (config_src->version != 0)
338 config_dst->version = config_src->version;
339
340 /* Update dnode. */
e5dc8a44
RW
341 if (config_dst->dnode)
342 yang_dnode_free(config_dst->dnode);
1c2facd1
RW
343 if (preserve_source) {
344 config_dst->dnode = yang_dnode_dup(config_src->dnode);
345 } else {
346 config_dst->dnode = config_src->dnode;
347 config_src->dnode = NULL;
348 nb_config_free(config_src);
349 }
350}
351
352/* Generate the nb_config_cbs tree. */
353static inline int nb_config_cb_compare(const struct nb_config_cb *a,
354 const struct nb_config_cb *b)
355{
356 /* Sort by priority first. */
357 if (a->nb_node->priority < b->nb_node->priority)
358 return -1;
359 if (a->nb_node->priority > b->nb_node->priority)
360 return 1;
361
362 /*
 363 * Preserve the order of the configuration changes as told by libyang.
 364 */
365 if (a->seq < b->seq)
366 return -1;
367 if (a->seq > b->seq)
368 return 1;
369
370 /*
371 * All 'apply_finish' callbacks have their sequence number set to zero.
372 * In this case, compare them using their dnode pointers (the order
373 * doesn't matter for callbacks that have the same priority).
374 */
375 if (a->dnode < b->dnode)
376 return -1;
377 if (a->dnode > b->dnode)
378 return 1;
379
380 return 0;
381}
382RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
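/*
 * Note on ordering (summary of the comparator above): configuration changes
 * are processed in ascending priority order first (a lower numeric priority
 * runs earlier), then in the order libyang reported them; 'apply_finish'
 * entries (seq == 0) fall back to comparing dnode pointers so that entries
 * with equal priority still have a stable, arbitrary order.
 */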
383
384static void nb_config_diff_add_change(struct nb_config_cbs *changes,
385 enum nb_operation operation,
6b5d6e2d 386 uint32_t *seq,
1c2facd1
RW
387 const struct lyd_node *dnode)
388{
389 struct nb_config_change *change;
390
f267201b
RW
391 /* Ignore unimplemented nodes. */
392 if (!dnode->schema->priv)
393 return;
394
1c2facd1
RW
395 change = XCALLOC(MTYPE_TMP, sizeof(*change));
396 change->cb.operation = operation;
6b5d6e2d
RW
397 change->cb.seq = *seq;
398 *seq = *seq + 1;
1c2facd1 399 change->cb.nb_node = dnode->schema->priv;
1c2facd1
RW
400 change->cb.dnode = dnode;
401
402 RB_INSERT(nb_config_cbs, changes, &change->cb);
403}
404
405static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
406{
407 while (!RB_EMPTY(nb_config_cbs, changes)) {
408 struct nb_config_change *change;
409
410 change = (struct nb_config_change *)RB_ROOT(nb_config_cbs,
411 changes);
412 RB_REMOVE(nb_config_cbs, changes, &change->cb);
413 XFREE(MTYPE_TMP, change);
414 }
415}
416
417/*
418 * Helper function used when calculating the delta between two different
419 * configurations. Given a new subtree, calculate all new YANG data nodes,
420 * excluding default leafs and leaf-lists. This is a recursive function.
421 */
6b5d6e2d 422static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
cacbffaf 423 struct nb_config_cbs *changes)
1c2facd1 424{
cacbffaf 425 enum nb_operation operation;
1c2facd1
RW
426 struct lyd_node *child;
427
f267201b
RW
428 /* Ignore unimplemented nodes. */
429 if (!dnode->schema->priv)
430 return;
431
cacbffaf
RW
432 switch (dnode->schema->nodetype) {
433 case LYS_LEAF:
434 case LYS_LEAFLIST:
435 if (lyd_wd_default((struct lyd_node_leaf_list *)dnode))
436 break;
1c2facd1 437
cacbffaf
RW
438 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
439 operation = NB_OP_CREATE;
440 else if (nb_operation_is_valid(NB_OP_MODIFY, dnode->schema))
441 operation = NB_OP_MODIFY;
442 else
443 return;
1c2facd1 444
6b5d6e2d 445 nb_config_diff_add_change(changes, operation, seq, dnode);
cacbffaf
RW
446 break;
447 case LYS_CONTAINER:
448 case LYS_LIST:
449 if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
6b5d6e2d
RW
450 nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
451 dnode);
cacbffaf
RW
452
453 /* Process child nodes recursively. */
454 LY_TREE_FOR (dnode->child, child) {
6b5d6e2d 455 nb_config_diff_created(child, seq, changes);
1c2facd1 456 }
cacbffaf
RW
457 break;
458 default:
459 break;
1c2facd1
RW
460 }
461}
462
6b5d6e2d 463static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
1912caa2
RW
464 struct nb_config_cbs *changes)
465{
f267201b
RW
466 /* Ignore unimplemented nodes. */
467 if (!dnode->schema->priv)
468 return;
469
1912caa2 470 if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
6b5d6e2d 471 nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
1912caa2
RW
472 else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
473 struct lyd_node *child;
474
475 /*
476 * Non-presence containers need special handling since they
477 * don't have "destroy" callbacks. In this case, what we need to
478 * do is to call the "destroy" callbacks of their child nodes
479 * when applicable (i.e. optional nodes).
480 */
481 LY_TREE_FOR (dnode->child, child) {
6b5d6e2d 482 nb_config_diff_deleted(child, seq, changes);
1912caa2
RW
483 }
484 }
485}
486
1c2facd1
RW
487/* Calculate the delta between two different configurations. */
488static void nb_config_diff(const struct nb_config *config1,
489 const struct nb_config *config2,
490 struct nb_config_cbs *changes)
491{
492 struct lyd_difflist *diff;
6b5d6e2d 493 uint32_t seq = 0;
1c2facd1
RW
494
495 diff = lyd_diff(config1->dnode, config2->dnode,
496 LYD_DIFFOPT_WITHDEFAULTS);
497 assert(diff);
498
499 for (int i = 0; diff->type[i] != LYD_DIFF_END; i++) {
500 LYD_DIFFTYPE type;
501 struct lyd_node *dnode;
1c2facd1
RW
502
503 type = diff->type[i];
504
505 switch (type) {
506 case LYD_DIFF_CREATED:
507 dnode = diff->second[i];
6b5d6e2d 508 nb_config_diff_created(dnode, &seq, changes);
1c2facd1
RW
509 break;
510 case LYD_DIFF_DELETED:
511 dnode = diff->first[i];
6b5d6e2d 512 nb_config_diff_deleted(dnode, &seq, changes);
1c2facd1
RW
513 break;
514 case LYD_DIFF_CHANGED:
515 dnode = diff->second[i];
6b5d6e2d
RW
516 nb_config_diff_add_change(changes, NB_OP_MODIFY, &seq,
517 dnode);
1c2facd1
RW
518 break;
519 case LYD_DIFF_MOVEDAFTER1:
520 case LYD_DIFF_MOVEDAFTER2:
521 default:
522 continue;
523 }
524 }
525
526 lyd_free_diff(diff);
527}
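/*
 * Example (illustrative): if the candidate creates a new list entry and
 * changes one existing leaf, the resulting 'changes' tree will contain one
 * NB_OP_CREATE change for the new entry (plus one change per child node
 * added by nb_config_diff_created()) and one NB_OP_MODIFY change for the
 * edited leaf, sorted by the comparator defined earlier in this file.
 */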
528
529int nb_candidate_edit(struct nb_config *candidate,
530 const struct nb_node *nb_node,
531 enum nb_operation operation, const char *xpath,
532 const struct yang_data *previous,
533 const struct yang_data *data)
534{
535 struct lyd_node *dnode;
536 char xpath_edit[XPATH_MAXLEN];
537
1c2facd1
RW
538 /* Use special notation for leaf-lists (RFC 6020, section 9.13.5). */
539 if (nb_node->snode->nodetype == LYS_LEAFLIST)
540 snprintf(xpath_edit, sizeof(xpath_edit), "%s[.='%s']", xpath,
541 data->value);
542 else
543 strlcpy(xpath_edit, xpath, sizeof(xpath_edit));
544
545 switch (operation) {
546 case NB_OP_CREATE:
547 case NB_OP_MODIFY:
548 ly_errno = 0;
549 dnode = lyd_new_path(candidate->dnode, ly_native_ctx,
550 xpath_edit, (void *)data->value, 0,
551 LYD_PATH_OPT_UPDATE);
552 if (!dnode && ly_errno) {
553 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
554 __func__);
555 return NB_ERR;
556 }
1c2facd1 557 break;
95ce849b 558 case NB_OP_DESTROY:
1c2facd1
RW
559 dnode = yang_dnode_get(candidate->dnode, xpath_edit);
560 if (!dnode)
561 /*
562 * Return a special error code so the caller can choose
563 * whether to ignore it or not.
564 */
565 return NB_ERR_NOT_FOUND;
566 lyd_free(dnode);
567 break;
568 case NB_OP_MOVE:
569 /* TODO: update configuration. */
570 break;
571 default:
572 flog_warn(EC_LIB_DEVELOPMENT,
573 "%s: unknown operation (%u) [xpath %s]", __func__,
574 operation, xpath_edit);
575 return NB_ERR;
576 }
577
578 return NB_OK;
579}
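/*
 * Editing sketch (assumption, not from the original file): modifying a leaf
 * in a candidate configuration. The xpath variable and value are
 * placeholders.
 *
 *	struct nb_node *nb_node = nb_node_find(xpath);
 *	struct yang_data *data = yang_data_new(xpath, "true");
 *
 *	if (nb_candidate_edit(candidate, nb_node, NB_OP_MODIFY, xpath, NULL,
 *			      data) != NB_OK)
 *		zlog_warn("%s: failed to edit candidate configuration",
 *			  __func__);
 *	yang_data_free(data);
 */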
580
581bool nb_candidate_needs_update(const struct nb_config *candidate)
582{
 583 if (candidate->version < running_config->version)
 584 return true;
 585
 586 return false;
587}
588
589int nb_candidate_update(struct nb_config *candidate)
590{
591 struct nb_config *updated_config;
592
8685be73 593 updated_config = nb_config_dup(running_config);
1c2facd1
RW
594 if (nb_config_merge(updated_config, candidate, true) != NB_OK)
595 return NB_ERR;
596
597 nb_config_replace(candidate, updated_config, false);
598
599 return NB_OK;
600}
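/*
 * Rebase sketch (assumption): long-lived candidates can be checked against
 * the running configuration and rebased before being committed.
 *
 *	if (nb_candidate_needs_update(candidate)
 *	    && nb_candidate_update(candidate) != NB_OK)
 *		zlog_warn("%s: failed to update candidate configuration",
 *			  __func__);
 */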
601
1c2facd1
RW
602/*
603 * Perform YANG syntactic and semantic validation.
604 *
605 * WARNING: lyd_validate() can change the configuration as part of the
606 * validation process.
607 */
df5eda3d
RW
608static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
609 size_t errmsg_len)
1c2facd1 610{
cd327983
RW
611 if (lyd_validate(&candidate->dnode,
612 LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
1c2facd1 613 ly_native_ctx)
df5eda3d
RW
614 != 0) {
615 yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
1c2facd1 616 return NB_ERR_VALIDATION;
df5eda3d 617 }
1c2facd1
RW
618
619 return NB_OK;
620}
621
622/* Perform code-level validation using the northbound callbacks. */
13d6b9c1
RW
623static int nb_candidate_validate_code(struct nb_context *context,
624 struct nb_config *candidate,
df5eda3d
RW
625 struct nb_config_cbs *changes,
626 char *errmsg, size_t errmsg_len)
1c2facd1
RW
627{
628 struct nb_config_cb *cb;
34224f0c
RW
629 struct lyd_node *root, *next, *child;
630 int ret;
631
632 /* First validate the candidate as a whole. */
633 LY_TREE_FOR (candidate->dnode, root) {
634 LY_TREE_DFS_BEGIN (root, next, child) {
635 struct nb_node *nb_node;
636
637 nb_node = child->schema->priv;
f267201b 638 if (!nb_node || !nb_node->cbs.pre_validate)
34224f0c
RW
639 goto next;
640
df5eda3d
RW
641 ret = nb_callback_pre_validate(context, nb_node, child,
642 errmsg, errmsg_len);
34224f0c
RW
643 if (ret != NB_OK)
644 return NB_ERR_VALIDATION;
645
646 next:
647 LY_TREE_DFS_END(root, next, child);
648 }
649 }
650
651 /* Now validate the configuration changes. */
1c2facd1
RW
652 RB_FOREACH (cb, nb_config_cbs, changes) {
653 struct nb_config_change *change = (struct nb_config_change *)cb;
1c2facd1 654
df5eda3d
RW
655 ret = nb_callback_configuration(context, NB_EV_VALIDATE, change,
656 errmsg, errmsg_len);
1c2facd1
RW
657 if (ret != NB_OK)
658 return NB_ERR_VALIDATION;
659 }
660
661 return NB_OK;
662}
663
13d6b9c1 664int nb_candidate_validate(struct nb_context *context,
df5eda3d
RW
665 struct nb_config *candidate, char *errmsg,
666 size_t errmsg_len)
1c2facd1
RW
667{
668 struct nb_config_cbs changes;
669 int ret;
670
 671 if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
672 != NB_OK)
1c2facd1
RW
673 return NB_ERR_VALIDATION;
674
675 RB_INIT(nb_config_cbs, &changes);
8685be73 676 nb_config_diff(running_config, candidate, &changes);
df5eda3d
RW
677 ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
678 errmsg_len);
8685be73 679 nb_config_diff_del_changes(&changes);
1c2facd1
RW
680
681 return ret;
682}
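/*
 * Validation sketch (assumption): a candidate can be validated without being
 * committed, e.g. to implement a "validate-only" operation. The context and
 * buffer below are placeholders.
 *
 *	char errmsg[BUFSIZ] = {0};
 *
 *	if (nb_candidate_validate(&context, candidate, errmsg, sizeof(errmsg))
 *	    != NB_OK)
 *		zlog_warn("invalid candidate configuration: %s", errmsg);
 */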
683
13d6b9c1
RW
684int nb_candidate_commit_prepare(struct nb_context *context,
685 struct nb_config *candidate,
364ad673 686 const char *comment,
df5eda3d
RW
687 struct nb_transaction **transaction,
688 char *errmsg, size_t errmsg_len)
1c2facd1
RW
689{
690 struct nb_config_cbs changes;
691
df5eda3d
RW
692 if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
693 != NB_OK) {
1c2facd1
RW
694 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
695 "%s: failed to validate candidate configuration",
696 __func__);
697 return NB_ERR_VALIDATION;
698 }
699
700 RB_INIT(nb_config_cbs, &changes);
8685be73 701 nb_config_diff(running_config, candidate, &changes);
5bfb669b
QY
702 if (RB_EMPTY(nb_config_cbs, &changes)) {
703 snprintf(
704 errmsg, errmsg_len,
705 "No changes to apply were found during preparation phase");
8685be73 706 return NB_ERR_NO_CHANGES;
5bfb669b 707 }
1c2facd1 708
df5eda3d
RW
709 if (nb_candidate_validate_code(context, candidate, &changes, errmsg,
710 errmsg_len)
711 != NB_OK) {
8685be73
RW
712 flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
713 "%s: failed to validate candidate configuration",
714 __func__);
715 nb_config_diff_del_changes(&changes);
716 return NB_ERR_VALIDATION;
717 }
1c2facd1 718
df5eda3d
RW
719 *transaction = nb_transaction_new(context, candidate, &changes, comment,
720 errmsg, errmsg_len);
8685be73
RW
721 if (*transaction == NULL) {
722 flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
df5eda3d
RW
723 "%s: failed to create transaction: %s", __func__,
724 errmsg);
8685be73
RW
725 nb_config_diff_del_changes(&changes);
726 return NB_ERR_LOCKED;
1c2facd1
RW
727 }
728
df5eda3d
RW
729 return nb_transaction_process(NB_EV_PREPARE, *transaction, errmsg,
730 errmsg_len);
1c2facd1
RW
731}
732
0fe5b904
RW
733void nb_candidate_commit_abort(struct nb_transaction *transaction, char *errmsg,
734 size_t errmsg_len)
1c2facd1 735{
df5eda3d 736 (void)nb_transaction_process(NB_EV_ABORT, transaction, errmsg,
0fe5b904 737 errmsg_len);
1c2facd1
RW
738 nb_transaction_free(transaction);
739}
740
741void nb_candidate_commit_apply(struct nb_transaction *transaction,
0fe5b904
RW
742 bool save_transaction, uint32_t *transaction_id,
743 char *errmsg, size_t errmsg_len)
1c2facd1 744{
df5eda3d 745 (void)nb_transaction_process(NB_EV_APPLY, transaction, errmsg,
0fe5b904
RW
746 errmsg_len);
747 nb_transaction_apply_finish(transaction, errmsg, errmsg_len);
1c2facd1
RW
748
749 /* Replace running by candidate. */
750 transaction->config->version++;
8685be73 751 nb_config_replace(running_config, transaction->config, true);
1c2facd1
RW
752
753 /* Record transaction. */
390a8862 754 if (save_transaction && nb_db_enabled
1c2facd1
RW
755 && nb_db_transaction_save(transaction, transaction_id) != NB_OK)
756 flog_warn(EC_LIB_NB_TRANSACTION_RECORD_FAILED,
757 "%s: failed to record transaction", __func__);
758
759 nb_transaction_free(transaction);
760}
761
13d6b9c1
RW
762int nb_candidate_commit(struct nb_context *context, struct nb_config *candidate,
763 bool save_transaction, const char *comment,
df5eda3d
RW
764 uint32_t *transaction_id, char *errmsg,
765 size_t errmsg_len)
1c2facd1
RW
766{
767 struct nb_transaction *transaction = NULL;
768 int ret;
769
13d6b9c1 770 ret = nb_candidate_commit_prepare(context, candidate, comment,
df5eda3d 771 &transaction, errmsg, errmsg_len);
1c2facd1
RW
772 /*
773 * Apply the changes if the preparation phase succeeded. Otherwise abort
774 * the transaction.
775 */
776 if (ret == NB_OK)
777 nb_candidate_commit_apply(transaction, save_transaction,
0fe5b904 778 transaction_id, errmsg, errmsg_len);
1c2facd1 779 else if (transaction != NULL)
0fe5b904 780 nb_candidate_commit_abort(transaction, errmsg, errmsg_len);
1c2facd1
RW
781
782 return ret;
783}
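/*
 * Commit sketch (assumption): the full two-phase cycle in one call. Clients
 * that need finer control can instead call nb_candidate_commit_prepare(),
 * then nb_candidate_commit_apply() or nb_candidate_commit_abort() depending
 * on the outcome of the prepare phase.
 *
 *	struct nb_context context = {.client = NB_CLIENT_CLI};
 *	char errmsg[BUFSIZ] = {0};
 *	int ret;
 *
 *	ret = nb_candidate_commit(&context, candidate, true, "example commit",
 *				  NULL, errmsg, sizeof(errmsg));
 */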
784
364ad673
RW
785int nb_running_lock(enum nb_client client, const void *user)
786{
787 int ret = -1;
788
1be4decb 789 frr_with_mutex (&running_config_mgmt_lock.mtx) {
364ad673
RW
790 if (!running_config_mgmt_lock.locked) {
791 running_config_mgmt_lock.locked = true;
792 running_config_mgmt_lock.owner_client = client;
793 running_config_mgmt_lock.owner_user = user;
794 ret = 0;
795 }
796 }
364ad673
RW
797
798 return ret;
799}
800
801int nb_running_unlock(enum nb_client client, const void *user)
802{
803 int ret = -1;
804
1be4decb 805 frr_with_mutex (&running_config_mgmt_lock.mtx) {
364ad673
RW
806 if (running_config_mgmt_lock.locked
807 && running_config_mgmt_lock.owner_client == client
808 && running_config_mgmt_lock.owner_user == user) {
809 running_config_mgmt_lock.locked = false;
810 running_config_mgmt_lock.owner_client = NB_CLIENT_NONE;
811 running_config_mgmt_lock.owner_user = NULL;
812 ret = 0;
813 }
814 }
364ad673
RW
815
816 return ret;
817}
818
819int nb_running_lock_check(enum nb_client client, const void *user)
820{
821 int ret = -1;
822
1be4decb 823 frr_with_mutex (&running_config_mgmt_lock.mtx) {
364ad673
RW
824 if (!running_config_mgmt_lock.locked
825 || (running_config_mgmt_lock.owner_client == client
826 && running_config_mgmt_lock.owner_user == user))
827 ret = 0;
828 }
364ad673
RW
829
830 return ret;
831}
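/*
 * Locking sketch (assumption): a northbound client takes the running
 * configuration lock for the duration of an editing session and releases it
 * afterwards using the same (client, user) pair.
 *
 *	if (nb_running_lock(NB_CLIENT_GRPC, session) != 0)
 *		return -1;
 *	... perform one or more commits ...
 *	nb_running_unlock(NB_CLIENT_GRPC, session);
 */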
832
97cd8493
RW
833static void nb_log_config_callback(const enum nb_event event,
834 enum nb_operation operation,
835 const struct lyd_node *dnode)
1c2facd1 836{
97cd8493
RW
837 const char *value;
838 char xpath[XPATH_MAXLEN];
839
840 if (!DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
841 return;
842
843 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
844 if (yang_snode_is_typeless_data(dnode->schema))
845 value = "(none)";
846 else
847 value = yang_dnode_get_string(dnode, NULL);
848
1c2facd1
RW
849 zlog_debug(
850 "northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
851 nb_event_name(event), nb_operation_name(operation), xpath,
97cd8493
RW
852 value);
853}
854
13d6b9c1
RW
855static int nb_callback_create(struct nb_context *context,
856 const struct nb_node *nb_node,
97cd8493 857 enum nb_event event, const struct lyd_node *dnode,
df5eda3d
RW
858 union nb_resource *resource, char *errmsg,
859 size_t errmsg_len)
97cd8493 860{
60ee8be1 861 struct nb_cb_create_args args = {};
1abe6c53
RW
862 bool unexpected_error = false;
863 int ret;
60ee8be1 864
97cd8493
RW
865 nb_log_config_callback(event, NB_OP_CREATE, dnode);
866
13d6b9c1 867 args.context = context;
60ee8be1
RW
868 args.event = event;
869 args.dnode = dnode;
870 args.resource = resource;
df5eda3d
RW
871 args.errmsg = errmsg;
872 args.errmsg_len = errmsg_len;
1abe6c53
RW
873 ret = nb_node->cbs.create(&args);
874
875 /* Detect and log unexpected errors. */
876 switch (ret) {
877 case NB_OK:
878 case NB_ERR:
879 break;
880 case NB_ERR_VALIDATION:
881 if (event != NB_EV_VALIDATE)
882 unexpected_error = true;
883 break;
884 case NB_ERR_RESOURCE:
885 if (event != NB_EV_PREPARE)
886 unexpected_error = true;
887 break;
888 case NB_ERR_INCONSISTENCY:
889 if (event == NB_EV_VALIDATE)
890 unexpected_error = true;
891 break;
892 default:
893 unexpected_error = true;
894 break;
895 }
896 if (unexpected_error)
897 DEBUGD(&nb_dbg_cbs_config,
898 "northbound callback: unexpected return value: %s",
899 nb_err_name(ret));
900
901 return ret;
97cd8493
RW
902}
903
13d6b9c1
RW
904static int nb_callback_modify(struct nb_context *context,
905 const struct nb_node *nb_node,
97cd8493 906 enum nb_event event, const struct lyd_node *dnode,
df5eda3d
RW
907 union nb_resource *resource, char *errmsg,
908 size_t errmsg_len)
97cd8493 909{
60ee8be1 910 struct nb_cb_modify_args args = {};
1abe6c53
RW
911 bool unexpected_error = false;
912 int ret;
60ee8be1 913
97cd8493
RW
914 nb_log_config_callback(event, NB_OP_MODIFY, dnode);
915
13d6b9c1 916 args.context = context;
60ee8be1
RW
917 args.event = event;
918 args.dnode = dnode;
919 args.resource = resource;
df5eda3d
RW
920 args.errmsg = errmsg;
921 args.errmsg_len = errmsg_len;
1abe6c53
RW
922 ret = nb_node->cbs.modify(&args);
923
924 /* Detect and log unexpected errors. */
925 switch (ret) {
926 case NB_OK:
927 case NB_ERR:
928 break;
929 case NB_ERR_VALIDATION:
930 if (event != NB_EV_VALIDATE)
931 unexpected_error = true;
932 break;
933 case NB_ERR_RESOURCE:
934 if (event != NB_EV_PREPARE)
935 unexpected_error = true;
936 break;
937 case NB_ERR_INCONSISTENCY:
938 if (event == NB_EV_VALIDATE)
939 unexpected_error = true;
940 break;
941 default:
942 unexpected_error = true;
943 break;
944 }
945 if (unexpected_error)
946 DEBUGD(&nb_dbg_cbs_config,
947 "northbound callback: unexpected return value: %s",
948 nb_err_name(ret));
949
950 return ret;
97cd8493
RW
951}
952
13d6b9c1
RW
953static int nb_callback_destroy(struct nb_context *context,
954 const struct nb_node *nb_node,
97cd8493 955 enum nb_event event,
df5eda3d
RW
956 const struct lyd_node *dnode, char *errmsg,
957 size_t errmsg_len)
97cd8493 958{
60ee8be1 959 struct nb_cb_destroy_args args = {};
1abe6c53
RW
960 bool unexpected_error = false;
961 int ret;
60ee8be1 962
97cd8493
RW
963 nb_log_config_callback(event, NB_OP_DESTROY, dnode);
964
13d6b9c1 965 args.context = context;
60ee8be1
RW
966 args.event = event;
967 args.dnode = dnode;
df5eda3d
RW
968 args.errmsg = errmsg;
969 args.errmsg_len = errmsg_len;
1abe6c53
RW
970 ret = nb_node->cbs.destroy(&args);
971
972 /* Detect and log unexpected errors. */
973 switch (ret) {
974 case NB_OK:
975 case NB_ERR:
976 break;
977 case NB_ERR_VALIDATION:
978 if (event != NB_EV_VALIDATE)
979 unexpected_error = true;
980 break;
981 case NB_ERR_INCONSISTENCY:
982 if (event == NB_EV_VALIDATE)
983 unexpected_error = true;
984 break;
985 default:
986 unexpected_error = true;
987 break;
988 }
989 if (unexpected_error)
990 DEBUGD(&nb_dbg_cbs_config,
991 "northbound callback: unexpected return value: %s",
992 nb_err_name(ret));
993
994 return ret;
97cd8493
RW
995}
996
13d6b9c1
RW
997static int nb_callback_move(struct nb_context *context,
998 const struct nb_node *nb_node, enum nb_event event,
df5eda3d
RW
999 const struct lyd_node *dnode, char *errmsg,
1000 size_t errmsg_len)
97cd8493 1001{
60ee8be1 1002 struct nb_cb_move_args args = {};
1abe6c53
RW
1003 bool unexpected_error = false;
1004 int ret;
60ee8be1 1005
97cd8493
RW
1006 nb_log_config_callback(event, NB_OP_MOVE, dnode);
1007
13d6b9c1 1008 args.context = context;
60ee8be1
RW
1009 args.event = event;
1010 args.dnode = dnode;
df5eda3d
RW
1011 args.errmsg = errmsg;
1012 args.errmsg_len = errmsg_len;
1abe6c53
RW
1013 ret = nb_node->cbs.move(&args);
1014
1015 /* Detect and log unexpected errors. */
1016 switch (ret) {
1017 case NB_OK:
1018 case NB_ERR:
1019 break;
1020 case NB_ERR_VALIDATION:
1021 if (event != NB_EV_VALIDATE)
1022 unexpected_error = true;
1023 break;
1024 case NB_ERR_INCONSISTENCY:
1025 if (event == NB_EV_VALIDATE)
1026 unexpected_error = true;
1027 break;
1028 default:
1029 unexpected_error = true;
1030 break;
1031 }
1032 if (unexpected_error)
1033 DEBUGD(&nb_dbg_cbs_config,
1034 "northbound callback: unexpected return value: %s",
1035 nb_err_name(ret));
1036
1037 return ret;
97cd8493
RW
1038}
1039
13d6b9c1
RW
1040static int nb_callback_pre_validate(struct nb_context *context,
1041 const struct nb_node *nb_node,
df5eda3d
RW
1042 const struct lyd_node *dnode, char *errmsg,
1043 size_t errmsg_len)
97cd8493 1044{
60ee8be1 1045 struct nb_cb_pre_validate_args args = {};
1abe6c53
RW
1046 bool unexpected_error = false;
1047 int ret;
60ee8be1 1048
97cd8493
RW
1049 nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);
1050
60ee8be1 1051 args.dnode = dnode;
df5eda3d
RW
1052 args.errmsg = errmsg;
1053 args.errmsg_len = errmsg_len;
1abe6c53
RW
1054 ret = nb_node->cbs.pre_validate(&args);
1055
1056 /* Detect and log unexpected errors. */
1057 switch (ret) {
1058 case NB_OK:
1059 case NB_ERR_VALIDATION:
1060 break;
1061 default:
1062 unexpected_error = true;
1063 break;
1064 }
1065 if (unexpected_error)
1066 DEBUGD(&nb_dbg_cbs_config,
1067 "northbound callback: unexpected return value: %s",
1068 nb_err_name(ret));
1069
1070 return ret;
97cd8493
RW
1071}
1072
13d6b9c1
RW
1073static void nb_callback_apply_finish(struct nb_context *context,
1074 const struct nb_node *nb_node,
df5eda3d
RW
1075 const struct lyd_node *dnode, char *errmsg,
1076 size_t errmsg_len)
97cd8493 1077{
60ee8be1
RW
1078 struct nb_cb_apply_finish_args args = {};
1079
97cd8493
RW
1080 nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);
1081
13d6b9c1 1082 args.context = context;
60ee8be1 1083 args.dnode = dnode;
df5eda3d
RW
1084 args.errmsg = errmsg;
1085 args.errmsg_len = errmsg_len;
60ee8be1 1086 nb_node->cbs.apply_finish(&args);
97cd8493
RW
1087}
1088
1089struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
1090 const char *xpath,
1091 const void *list_entry)
1092{
60ee8be1
RW
1093 struct nb_cb_get_elem_args args = {};
1094
97cd8493
RW
1095 DEBUGD(&nb_dbg_cbs_state,
1096 "northbound callback (get_elem): xpath [%s] list_entry [%p]",
1097 xpath, list_entry);
1098
60ee8be1
RW
1099 args.xpath = xpath;
1100 args.list_entry = list_entry;
1101 return nb_node->cbs.get_elem(&args);
97cd8493
RW
1102}
1103
1104const void *nb_callback_get_next(const struct nb_node *nb_node,
1105 const void *parent_list_entry,
1106 const void *list_entry)
1107{
60ee8be1
RW
1108 struct nb_cb_get_next_args args = {};
1109
97cd8493
RW
1110 DEBUGD(&nb_dbg_cbs_state,
1111 "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
1112 nb_node->xpath, parent_list_entry, list_entry);
1113
60ee8be1
RW
1114 args.parent_list_entry = parent_list_entry;
1115 args.list_entry = list_entry;
1116 return nb_node->cbs.get_next(&args);
97cd8493
RW
1117}
1118
1119int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
1120 struct yang_list_keys *keys)
1121{
60ee8be1
RW
1122 struct nb_cb_get_keys_args args = {};
1123
97cd8493
RW
1124 DEBUGD(&nb_dbg_cbs_state,
1125 "northbound callback (get_keys): node [%s] list_entry [%p]",
1126 nb_node->xpath, list_entry);
1127
60ee8be1
RW
1128 args.list_entry = list_entry;
1129 args.keys = keys;
1130 return nb_node->cbs.get_keys(&args);
97cd8493
RW
1131}
1132
1133const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
1134 const void *parent_list_entry,
1135 const struct yang_list_keys *keys)
1136{
60ee8be1
RW
1137 struct nb_cb_lookup_entry_args args = {};
1138
97cd8493
RW
1139 DEBUGD(&nb_dbg_cbs_state,
1140 "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
1141 nb_node->xpath, parent_list_entry);
1142
60ee8be1
RW
1143 args.parent_list_entry = parent_list_entry;
1144 args.keys = keys;
1145 return nb_node->cbs.lookup_entry(&args);
97cd8493
RW
1146}
1147
1148int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
f63f5f19
CS
1149 const struct list *input, struct list *output, char *errmsg,
1150 size_t errmsg_len)
97cd8493 1151{
60ee8be1
RW
1152 struct nb_cb_rpc_args args = {};
1153
97cd8493
RW
1154 DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
1155
60ee8be1
RW
1156 args.xpath = xpath;
1157 args.input = input;
1158 args.output = output;
f63f5f19
CS
1159 args.errmsg = errmsg;
1160 args.errmsg_len = errmsg_len;
60ee8be1 1161 return nb_node->cbs.rpc(&args);
1c2facd1
RW
1162}
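/*
 * RPC sketch (assumption): northbound plugins invoke a module-defined RPC by
 * building input/output lists of yang_data elements. The xpath variable is a
 * placeholder.
 *
 *	struct list *input = yang_data_list_new();
 *	struct list *output = yang_data_list_new();
 *	char errmsg[BUFSIZ] = {0};
 *
 *	ret = nb_callback_rpc(nb_node, xpath, input, output, errmsg,
 *			      sizeof(errmsg));
 */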
1163
1164/*
 1165 * Call the northbound configuration callback associated with a given
1166 * configuration change.
1167 */
13d6b9c1
RW
1168static int nb_callback_configuration(struct nb_context *context,
1169 const enum nb_event event,
df5eda3d
RW
1170 struct nb_config_change *change,
1171 char *errmsg, size_t errmsg_len)
1c2facd1
RW
1172{
1173 enum nb_operation operation = change->cb.operation;
0de19c0e 1174 char xpath[XPATH_MAXLEN];
1c2facd1
RW
1175 const struct nb_node *nb_node = change->cb.nb_node;
1176 const struct lyd_node *dnode = change->cb.dnode;
1177 union nb_resource *resource;
1178 int ret = NB_ERR;
1179
1c2facd1
RW
1180 if (event == NB_EV_VALIDATE)
1181 resource = NULL;
1182 else
1183 resource = &change->resource;
1184
1185 switch (operation) {
1186 case NB_OP_CREATE:
13d6b9c1 1187 ret = nb_callback_create(context, nb_node, event, dnode,
df5eda3d 1188 resource, errmsg, errmsg_len);
1c2facd1
RW
1189 break;
1190 case NB_OP_MODIFY:
13d6b9c1 1191 ret = nb_callback_modify(context, nb_node, event, dnode,
df5eda3d 1192 resource, errmsg, errmsg_len);
1c2facd1 1193 break;
95ce849b 1194 case NB_OP_DESTROY:
df5eda3d
RW
1195 ret = nb_callback_destroy(context, nb_node, event, dnode,
1196 errmsg, errmsg_len);
1c2facd1
RW
1197 break;
1198 case NB_OP_MOVE:
df5eda3d
RW
1199 ret = nb_callback_move(context, nb_node, event, dnode, errmsg,
1200 errmsg_len);
1c2facd1
RW
1201 break;
1202 default:
0de19c0e 1203 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
c650e48c
RW
1204 flog_err(EC_LIB_DEVELOPMENT,
1205 "%s: unknown operation (%u) [xpath %s]", __func__,
1206 operation, xpath);
1207 exit(1);
1c2facd1
RW
1208 }
1209
625b70e3 1210 if (ret != NB_OK) {
c650e48c
RW
1211 int priority;
1212 enum lib_log_refs ref;
ec348d43 1213
0de19c0e
RW
1214 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1215
625b70e3
EDP
1216 switch (event) {
1217 case NB_EV_VALIDATE:
c650e48c 1218 priority = LOG_WARNING;
625b70e3
EDP
1219 ref = EC_LIB_NB_CB_CONFIG_VALIDATE;
1220 break;
1221 case NB_EV_PREPARE:
c650e48c 1222 priority = LOG_WARNING;
625b70e3
EDP
1223 ref = EC_LIB_NB_CB_CONFIG_PREPARE;
1224 break;
1225 case NB_EV_ABORT:
c650e48c 1226 priority = LOG_WARNING;
625b70e3
EDP
1227 ref = EC_LIB_NB_CB_CONFIG_ABORT;
1228 break;
1229 case NB_EV_APPLY:
c650e48c 1230 priority = LOG_ERR;
625b70e3
EDP
1231 ref = EC_LIB_NB_CB_CONFIG_APPLY;
1232 break;
c650e48c
RW
1233 default:
1234 flog_err(EC_LIB_DEVELOPMENT,
1be4decb
RW
1235 "%s: unknown event (%u) [xpath %s]", __func__,
1236 event, xpath);
c650e48c 1237 exit(1);
625b70e3 1238 }
c650e48c
RW
1239
1240 flog(priority, ref,
df5eda3d
RW
1241 "error processing configuration change: error [%s] event [%s] operation [%s] xpath [%s]",
1242 nb_err_name(ret), nb_event_name(event),
c650e48c 1243 nb_operation_name(operation), xpath);
df5eda3d
RW
1244 if (strlen(errmsg) > 0)
1245 flog(priority, ref,
1246 "error processing configuration change: %s",
1247 errmsg);
625b70e3 1248 }
1c2facd1
RW
1249
1250 return ret;
1251}
1252
364ad673 1253static struct nb_transaction *
df5eda3d
RW
1254nb_transaction_new(struct nb_context *context, struct nb_config *config,
1255 struct nb_config_cbs *changes, const char *comment,
1256 char *errmsg, size_t errmsg_len)
1c2facd1
RW
1257{
1258 struct nb_transaction *transaction;
1259
13d6b9c1 1260 if (nb_running_lock_check(context->client, context->user)) {
df5eda3d
RW
1261 strlcpy(errmsg,
1262 "running configuration is locked by another client",
1263 errmsg_len);
364ad673
RW
1264 return NULL;
1265 }
1266
1c2facd1 1267 if (transaction_in_progress) {
df5eda3d
RW
1268 strlcpy(errmsg,
1269 "there's already another transaction in progress",
1270 errmsg_len);
1c2facd1
RW
1271 return NULL;
1272 }
1273 transaction_in_progress = true;
1274
1275 transaction = XCALLOC(MTYPE_TMP, sizeof(*transaction));
13d6b9c1 1276 transaction->context = context;
1c2facd1
RW
1277 if (comment)
1278 strlcpy(transaction->comment, comment,
1279 sizeof(transaction->comment));
1280 transaction->config = config;
1281 transaction->changes = *changes;
1282
1283 return transaction;
1284}
1285
1286static void nb_transaction_free(struct nb_transaction *transaction)
1287{
1288 nb_config_diff_del_changes(&transaction->changes);
1289 XFREE(MTYPE_TMP, transaction);
1290 transaction_in_progress = false;
1291}
1292
1293/* Process all configuration changes associated to a transaction. */
1294static int nb_transaction_process(enum nb_event event,
df5eda3d
RW
1295 struct nb_transaction *transaction,
1296 char *errmsg, size_t errmsg_len)
1c2facd1
RW
1297{
1298 struct nb_config_cb *cb;
1299
8685be73
RW
1300 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1301 struct nb_config_change *change = (struct nb_config_change *)cb;
1302 int ret;
1c2facd1 1303
8685be73
RW
1304 /*
1305 * Only try to release resources that were allocated
1306 * successfully.
1307 */
a8f58eb6 1308 if (event == NB_EV_ABORT && !change->prepare_ok)
8685be73
RW
1309 break;
1310
1311 /* Call the appropriate callback. */
13d6b9c1 1312 ret = nb_callback_configuration(transaction->context, event,
df5eda3d 1313 change, errmsg, errmsg_len);
8685be73
RW
1314 switch (event) {
1315 case NB_EV_PREPARE:
1316 if (ret != NB_OK)
1317 return ret;
1318 change->prepare_ok = true;
1319 break;
1320 case NB_EV_ABORT:
1321 case NB_EV_APPLY:
1c2facd1 1322 /*
8685be73
RW
1323 * At this point it's not possible to reject the
1324 * transaction anymore, so any failure here can lead to
1325 * inconsistencies and should be treated as a bug.
1326 * Operations prone to errors, like validations and
1327 * resource allocations, should be performed during the
1328 * 'prepare' phase.
1c2facd1 1329 */
8685be73
RW
1330 break;
1331 default:
1332 break;
1c2facd1
RW
1333 }
1334 }
1335
1336 return NB_OK;
1337}
1338
1339static struct nb_config_cb *
0de19c0e 1340nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
1c2facd1
RW
1341 const struct lyd_node *dnode)
1342{
1343 struct nb_config_cb *cb;
1344
1345 cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
1c2facd1
RW
1346 cb->nb_node = nb_node;
1347 cb->dnode = dnode;
1348 RB_INSERT(nb_config_cbs, cbs, cb);
1349
1350 return cb;
1351}
1352
1353static struct nb_config_cb *
fe3f2c61
RW
1354nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
1355 const struct nb_node *nb_node,
1356 const struct lyd_node *dnode)
1c2facd1
RW
1357{
1358 struct nb_config_cb s;
1359
fe3f2c61 1360 s.seq = 0;
1c2facd1 1361 s.nb_node = nb_node;
fe3f2c61 1362 s.dnode = dnode;
1c2facd1
RW
1363 return RB_FIND(nb_config_cbs, cbs, &s);
1364}
1365
1366/* Call the 'apply_finish' callbacks. */
df5eda3d
RW
1367static void nb_transaction_apply_finish(struct nb_transaction *transaction,
1368 char *errmsg, size_t errmsg_len)
1c2facd1
RW
1369{
1370 struct nb_config_cbs cbs;
1371 struct nb_config_cb *cb;
1372
1373 /* Initialize tree of 'apply_finish' callbacks. */
1374 RB_INIT(nb_config_cbs, &cbs);
1375
1376 /* Identify the 'apply_finish' callbacks that need to be called. */
1377 RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
1378 struct nb_config_change *change = (struct nb_config_change *)cb;
1379 const struct lyd_node *dnode = change->cb.dnode;
1380
1381 /*
1382 * Iterate up to the root of the data tree. When a node is being
1383 * deleted, skip its 'apply_finish' callback if one is defined
1384 * (the 'apply_finish' callbacks from the node ancestors should
1385 * be called though).
1386 */
95ce849b 1387 if (change->cb.operation == NB_OP_DESTROY) {
97cd8493
RW
1388 char xpath[XPATH_MAXLEN];
1389
1c2facd1
RW
1390 dnode = dnode->parent;
1391 if (!dnode)
1392 break;
1393
1394 /*
 1395 * The dnode from 'delete' callbacks points to elements
1396 * from the running configuration. Use yang_dnode_get()
1397 * to get the corresponding dnode from the candidate
1398 * configuration that is being committed.
1399 */
1400 yang_dnode_get_path(dnode, xpath, sizeof(xpath));
1401 dnode = yang_dnode_get(transaction->config->dnode,
1402 xpath);
1403 }
1404 while (dnode) {
1c2facd1
RW
1405 struct nb_node *nb_node;
1406
1407 nb_node = dnode->schema->priv;
f267201b 1408 if (!nb_node || !nb_node->cbs.apply_finish)
1c2facd1
RW
1409 goto next;
1410
1411 /*
1412 * Don't call the callback more than once for the same
1413 * data node.
1414 */
fe3f2c61 1415 if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
1c2facd1
RW
1416 goto next;
1417
0de19c0e 1418 nb_apply_finish_cb_new(&cbs, nb_node, dnode);
1c2facd1
RW
1419
1420 next:
1421 dnode = dnode->parent;
1422 }
1423 }
1424
1425 /* Call the 'apply_finish' callbacks, sorted by their priorities. */
97cd8493 1426 RB_FOREACH (cb, nb_config_cbs, &cbs)
13d6b9c1 1427 nb_callback_apply_finish(transaction->context, cb->nb_node,
df5eda3d 1428 cb->dnode, errmsg, errmsg_len);
1c2facd1
RW
1429
1430 /* Release memory. */
1431 while (!RB_EMPTY(nb_config_cbs, &cbs)) {
1432 cb = RB_ROOT(nb_config_cbs, &cbs);
1433 RB_REMOVE(nb_config_cbs, &cbs, cb);
1434 XFREE(MTYPE_TMP, cb);
1435 }
1436}
1437
1a4bc045
RW
1438static int nb_oper_data_iter_children(const struct lys_node *snode,
1439 const char *xpath, const void *list_entry,
1440 const struct yang_list_keys *list_keys,
1441 struct yang_translator *translator,
1442 bool first, uint32_t flags,
1443 nb_oper_data_cb cb, void *arg)
1444{
1445 struct lys_node *child;
1446
1447 LY_TREE_FOR (snode->child, child) {
1448 int ret;
1449
1450 ret = nb_oper_data_iter_node(child, xpath, list_entry,
1451 list_keys, translator, false,
1452 flags, cb, arg);
1453 if (ret != NB_OK)
1454 return ret;
1455 }
1456
1457 return NB_OK;
1458}
1459
1460static int nb_oper_data_iter_leaf(const struct nb_node *nb_node,
1461 const char *xpath, const void *list_entry,
1462 const struct yang_list_keys *list_keys,
1463 struct yang_translator *translator,
1464 uint32_t flags, nb_oper_data_cb cb, void *arg)
1465{
1466 struct yang_data *data;
1467
1468 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1469 return NB_OK;
1470
1471 /* Ignore list keys. */
1472 if (lys_is_key((struct lys_node_leaf *)nb_node->snode, NULL))
1473 return NB_OK;
1474
9eb2c0a1 1475 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1a4bc045
RW
1476 if (data == NULL)
1477 /* Leaf of type "empty" is not present. */
1478 return NB_OK;
1479
1480 return (*cb)(nb_node->snode, translator, data, arg);
1481}
1482
1483static int nb_oper_data_iter_container(const struct nb_node *nb_node,
1484 const char *xpath,
1485 const void *list_entry,
1486 const struct yang_list_keys *list_keys,
1487 struct yang_translator *translator,
1488 uint32_t flags, nb_oper_data_cb cb,
1489 void *arg)
1490{
1491 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1492 return NB_OK;
1493
1494 /* Presence containers. */
1495 if (nb_node->cbs.get_elem) {
1496 struct yang_data *data;
1497 int ret;
1498
9eb2c0a1 1499 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1a4bc045
RW
1500 if (data == NULL)
1501 /* Presence container is not present. */
1502 return NB_OK;
1503
1504 ret = (*cb)(nb_node->snode, translator, data, arg);
1505 if (ret != NB_OK)
1506 return ret;
1507 }
1508
1509 /* Iterate over the child nodes. */
1510 return nb_oper_data_iter_children(nb_node->snode, xpath, list_entry,
1511 list_keys, translator, false, flags,
1512 cb, arg);
1513}
1514
1515static int
1516nb_oper_data_iter_leaflist(const struct nb_node *nb_node, const char *xpath,
1517 const void *parent_list_entry,
1518 const struct yang_list_keys *parent_list_keys,
1519 struct yang_translator *translator, uint32_t flags,
1520 nb_oper_data_cb cb, void *arg)
1521{
1522 const void *list_entry = NULL;
1523
1524 if (CHECK_FLAG(nb_node->snode->flags, LYS_CONFIG_W))
1525 return NB_OK;
1526
1527 do {
1528 struct yang_data *data;
1529 int ret;
1530
9eb2c0a1
RW
1531 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1532 list_entry);
1a4bc045
RW
1533 if (!list_entry)
1534 /* End of the list. */
1535 break;
1536
9eb2c0a1 1537 data = nb_callback_get_elem(nb_node, xpath, list_entry);
1a4bc045
RW
1538 if (data == NULL)
1539 continue;
1540
1541 ret = (*cb)(nb_node->snode, translator, data, arg);
1542 if (ret != NB_OK)
1543 return ret;
1544 } while (list_entry);
1545
1546 return NB_OK;
1547}
1548
1549static int nb_oper_data_iter_list(const struct nb_node *nb_node,
1550 const char *xpath_list,
1551 const void *parent_list_entry,
1552 const struct yang_list_keys *parent_list_keys,
1553 struct yang_translator *translator,
1554 uint32_t flags, nb_oper_data_cb cb, void *arg)
1555{
1556 struct lys_node_list *slist = (struct lys_node_list *)nb_node->snode;
1557 const void *list_entry = NULL;
99fb518f 1558 uint32_t position = 1;
1a4bc045
RW
1559
1560 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1561 return NB_OK;
1562
1563 /* Iterate over all list entries. */
1564 do {
1565 struct yang_list_keys list_keys;
f999f11e 1566 char xpath[XPATH_MAXLEN * 2];
1a4bc045
RW
1567 int ret;
1568
1569 /* Obtain list entry. */
9eb2c0a1
RW
1570 list_entry = nb_callback_get_next(nb_node, parent_list_entry,
1571 list_entry);
1a4bc045
RW
1572 if (!list_entry)
1573 /* End of the list. */
1574 break;
1575
99fb518f
RW
1576 if (!CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST)) {
1577 /* Obtain the list entry keys. */
9eb2c0a1
RW
1578 if (nb_callback_get_keys(nb_node, list_entry,
1579 &list_keys)
99fb518f
RW
1580 != NB_OK) {
1581 flog_warn(EC_LIB_NB_CB_STATE,
1582 "%s: failed to get list keys",
1583 __func__);
1584 return NB_ERR;
1585 }
1586
1587 /* Build XPath of the list entry. */
1588 strlcpy(xpath, xpath_list, sizeof(xpath));
1589 for (unsigned int i = 0; i < list_keys.num; i++) {
1590 snprintf(xpath + strlen(xpath),
1591 sizeof(xpath) - strlen(xpath),
1592 "[%s='%s']", slist->keys[i]->name,
1593 list_keys.key[i]);
1594 }
1595 } else {
1596 /*
1597 * Keyless list - build XPath using a positional index.
1598 */
1599 snprintf(xpath, sizeof(xpath), "%s[%u]", xpath_list,
1600 position);
1601 position++;
1a4bc045
RW
1602 }
1603
1604 /* Iterate over the child nodes. */
1605 ret = nb_oper_data_iter_children(
1606 nb_node->snode, xpath, list_entry, &list_keys,
1607 translator, false, flags, cb, arg);
1608 if (ret != NB_OK)
1609 return ret;
1610 } while (list_entry);
1611
1612 return NB_OK;
1613}
1614
1615static int nb_oper_data_iter_node(const struct lys_node *snode,
1616 const char *xpath_parent,
1617 const void *list_entry,
1618 const struct yang_list_keys *list_keys,
1619 struct yang_translator *translator,
1620 bool first, uint32_t flags,
1621 nb_oper_data_cb cb, void *arg)
1622{
1623 struct nb_node *nb_node;
1624 char xpath[XPATH_MAXLEN];
1625 int ret = NB_OK;
1626
1627 if (!first && CHECK_FLAG(flags, NB_OPER_DATA_ITER_NORECURSE)
1628 && CHECK_FLAG(snode->nodetype, LYS_CONTAINER | LYS_LIST))
1629 return NB_OK;
1630
1631 /* Update XPath. */
1632 strlcpy(xpath, xpath_parent, sizeof(xpath));
6cd301e0
RW
1633 if (!first && snode->nodetype != LYS_USES) {
1634 struct lys_node *parent;
1635
1636 /* Get the real parent. */
1637 parent = snode->parent;
1638 while (parent && parent->nodetype == LYS_USES)
1639 parent = parent->parent;
1640
1641 /*
1642 * When necessary, include the namespace of the augmenting
1643 * module.
1644 */
1645 if (parent && parent->nodetype == LYS_AUGMENT)
1646 snprintf(xpath + strlen(xpath),
1647 sizeof(xpath) - strlen(xpath), "/%s:%s",
1648 snode->module->name, snode->name);
1649 else
1650 snprintf(xpath + strlen(xpath),
1651 sizeof(xpath) - strlen(xpath), "/%s",
1652 snode->name);
1653 }
1a4bc045
RW
1654
1655 nb_node = snode->priv;
1656 switch (snode->nodetype) {
1657 case LYS_CONTAINER:
1658 ret = nb_oper_data_iter_container(nb_node, xpath, list_entry,
1659 list_keys, translator, flags,
1660 cb, arg);
1661 break;
1662 case LYS_LEAF:
1663 ret = nb_oper_data_iter_leaf(nb_node, xpath, list_entry,
1664 list_keys, translator, flags, cb,
1665 arg);
1666 break;
1667 case LYS_LEAFLIST:
1668 ret = nb_oper_data_iter_leaflist(nb_node, xpath, list_entry,
1669 list_keys, translator, flags,
1670 cb, arg);
1671 break;
1672 case LYS_LIST:
1673 ret = nb_oper_data_iter_list(nb_node, xpath, list_entry,
1674 list_keys, translator, flags, cb,
1675 arg);
1676 break;
1677 case LYS_USES:
1678 ret = nb_oper_data_iter_children(snode, xpath, list_entry,
1679 list_keys, translator, false,
1680 flags, cb, arg);
1681 break;
1682 default:
1683 break;
1684 }
1685
1686 return ret;
1687}
1688
1689int nb_oper_data_iterate(const char *xpath, struct yang_translator *translator,
1690 uint32_t flags, nb_oper_data_cb cb, void *arg)
1691{
1692 struct nb_node *nb_node;
1693 const void *list_entry = NULL;
1694 struct yang_list_keys list_keys;
1695 struct list *list_dnodes;
1696 struct lyd_node *dnode, *dn;
1697 struct listnode *ln;
1698 int ret;
1699
1700 nb_node = nb_node_find(xpath);
1701 if (!nb_node) {
1702 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
1703 "%s: unknown data path: %s", __func__, xpath);
1704 return NB_ERR;
1705 }
1706
1707 /* For now this function works only with containers and lists. */
1708 if (!CHECK_FLAG(nb_node->snode->nodetype, LYS_CONTAINER | LYS_LIST)) {
1709 flog_warn(
1710 EC_LIB_NB_OPERATIONAL_DATA,
1711 "%s: can't iterate over YANG leaf or leaf-list [xpath %s]",
1712 __func__, xpath);
1713 return NB_ERR;
1714 }
1715
1716 /*
1717 * Create a data tree from the XPath so that we can parse the keys of
1718 * all YANG lists (if any).
1719 */
1720 ly_errno = 0;
1721 dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
dfe22738
RW
1722 LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
1723 if (!dnode) {
1a4bc045
RW
1724 flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
1725 __func__);
1726 return NB_ERR;
1727 }
1a4bc045
RW
1728
1729 /*
1730 * Create a linked list to sort the data nodes starting from the root.
1731 */
1732 list_dnodes = list_new();
1733 for (dn = dnode; dn; dn = dn->parent) {
1734 if (dn->schema->nodetype != LYS_LIST || !dn->child)
1735 continue;
1736 listnode_add_head(list_dnodes, dn);
1737 }
1738 /*
 1739 * Use the northbound callbacks to find the list entry pointer corresponding
1740 * to the given XPath.
1741 */
1742 for (ALL_LIST_ELEMENTS_RO(list_dnodes, ln, dn)) {
1743 struct lyd_node *child;
1744 struct nb_node *nn;
1745 unsigned int n = 0;
1746
1747 /* Obtain the list entry keys. */
1748 memset(&list_keys, 0, sizeof(list_keys));
1749 LY_TREE_FOR (dn->child, child) {
1750 if (!lys_is_key((struct lys_node_leaf *)child->schema,
1751 NULL))
1752 continue;
1753 strlcpy(list_keys.key[n],
1754 yang_dnode_get_string(child, NULL),
1755 sizeof(list_keys.key[n]));
1756 n++;
1757 }
1758 list_keys.num = n;
9f6de299
RW
1759 if (list_keys.num
1760 != ((struct lys_node_list *)dn->schema)->keys_size) {
1761 list_delete(&list_dnodes);
1762 yang_dnode_free(dnode);
1763 return NB_ERR_NOT_FOUND;
1764 }
1a4bc045
RW
1765
1766 /* Find the list entry pointer. */
1767 nn = dn->schema->priv;
9eb2c0a1
RW
1768 list_entry =
1769 nb_callback_lookup_entry(nn, list_entry, &list_keys);
1a4bc045
RW
1770 if (list_entry == NULL) {
1771 list_delete(&list_dnodes);
1772 yang_dnode_free(dnode);
1773 return NB_ERR_NOT_FOUND;
1774 }
1775 }
1776
1777 /* If a list entry was given, iterate over that list entry only. */
1778 if (dnode->schema->nodetype == LYS_LIST && dnode->child)
1779 ret = nb_oper_data_iter_children(
1780 nb_node->snode, xpath, list_entry, &list_keys,
1781 translator, true, flags, cb, arg);
1782 else
1783 ret = nb_oper_data_iter_node(nb_node->snode, xpath, list_entry,
1784 &list_keys, translator, true,
1785 flags, cb, arg);
1786
1787 list_delete(&list_dnodes);
1788 yang_dnode_free(dnode);
1789
1790 return ret;
1791}
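/*
 * Iteration sketch (assumption): dumping operational data under a list by
 * passing a callback that receives one yang_data element at a time and takes
 * ownership of it. The callback name and XPath are placeholders.
 *
 *	static int dump_cb(const struct lys_node *snode,
 *			   struct yang_translator *translator,
 *			   struct yang_data *data, void *arg)
 *	{
 *		zlog_debug("%s = %s", data->xpath, data->value);
 *		yang_data_free(data);
 *		return NB_OK;
 *	}
 *
 *	nb_oper_data_iterate("/frr-interface:lib/interface", NULL, 0, dump_cb,
 *			     NULL);
 */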
1792
1c2facd1
RW
1793bool nb_operation_is_valid(enum nb_operation operation,
1794 const struct lys_node *snode)
1795{
544ca69a 1796 struct nb_node *nb_node = snode->priv;
1c2facd1
RW
1797 struct lys_node_container *scontainer;
1798 struct lys_node_leaf *sleaf;
1799
1800 switch (operation) {
1801 case NB_OP_CREATE:
db452508 1802 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1803 return false;
1804
1805 switch (snode->nodetype) {
1806 case LYS_LEAF:
1807 sleaf = (struct lys_node_leaf *)snode;
1808 if (sleaf->type.base != LY_TYPE_EMPTY)
1809 return false;
1810 break;
1811 case LYS_CONTAINER:
1812 scontainer = (struct lys_node_container *)snode;
1813 if (!scontainer->presence)
1814 return false;
1815 break;
1816 case LYS_LIST:
1817 case LYS_LEAFLIST:
1818 break;
1819 default:
1820 return false;
1821 }
1822 return true;
1823 case NB_OP_MODIFY:
db452508 1824 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1825 return false;
1826
1827 switch (snode->nodetype) {
1828 case LYS_LEAF:
1829 sleaf = (struct lys_node_leaf *)snode;
1830 if (sleaf->type.base == LY_TYPE_EMPTY)
1831 return false;
1832
1833 /* List keys can't be modified. */
1834 if (lys_is_key(sleaf, NULL))
1835 return false;
1836 break;
1837 default:
1838 return false;
1839 }
1840 return true;
95ce849b 1841 case NB_OP_DESTROY:
db452508 1842 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1843 return false;
1844
1845 switch (snode->nodetype) {
1846 case LYS_LEAF:
1847 sleaf = (struct lys_node_leaf *)snode;
1848
1849 /* List keys can't be deleted. */
1850 if (lys_is_key(sleaf, NULL))
1851 return false;
1852
1853 /*
1854 * Only optional leafs can be deleted: leafs under a case statement,
1855 * leafs with a 'when' condition, or leafs neither mandatory nor defaulted.
1856 */
1857 if (snode->parent->nodetype == LYS_CASE)
1858 return true;
1859 if (sleaf->when)
1860 return true;
db452508
RW
1861 if (CHECK_FLAG(sleaf->flags, LYS_MAND_TRUE)
1862 || sleaf->dflt)
1c2facd1
RW
1863 return false;
1864 break;
1865 case LYS_CONTAINER:
1866 scontainer = (struct lys_node_container *)snode;
1867 if (!scontainer->presence)
1868 return false;
1869 break;
1870 case LYS_LIST:
1871 case LYS_LEAFLIST:
1872 break;
1873 default:
1874 return false;
1875 }
1876 return true;
1877 case NB_OP_MOVE:
db452508 1878 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1879 return false;
1880
1881 switch (snode->nodetype) {
1882 case LYS_LIST:
1883 case LYS_LEAFLIST:
db452508 1884 if (!CHECK_FLAG(snode->flags, LYS_USERORDERED))
1c2facd1
RW
1885 return false;
1886 break;
1887 default:
1888 return false;
1889 }
1890 return true;
34224f0c 1891 case NB_OP_PRE_VALIDATE:
1c2facd1 1892 case NB_OP_APPLY_FINISH:
db452508 1893 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1c2facd1
RW
1894 return false;
1895 return true;
1896 case NB_OP_GET_ELEM:
db452508 1897 if (!CHECK_FLAG(snode->flags, LYS_CONFIG_R))
1c2facd1
RW
1898 return false;
1899
1900 switch (snode->nodetype) {
1901 case LYS_LEAF:
1a4bc045 1902 case LYS_LEAFLIST:
1c2facd1
RW
1903 break;
1904 case LYS_CONTAINER:
1905 scontainer = (struct lys_node_container *)snode;
1906 if (!scontainer->presence)
1907 return false;
1908 break;
1909 default:
1910 return false;
1911 }
1912 return true;
1913 case NB_OP_GET_NEXT:
1a4bc045
RW
1914 switch (snode->nodetype) {
1915 case LYS_LIST:
1916 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1917 return false;
1918 break;
1919 case LYS_LEAFLIST:
1920 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W))
1921 return false;
1922 break;
1923 default:
1924 return false;
1925 }
1926 return true;
1c2facd1
RW
1927 case NB_OP_GET_KEYS:
1928 case NB_OP_LOOKUP_ENTRY:
1c2facd1
RW
1929 switch (snode->nodetype) {
1930 case LYS_LIST:
544ca69a
RW
1931 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_CONFIG_ONLY))
1932 return false;
99fb518f
RW
1933 if (CHECK_FLAG(nb_node->flags, F_NB_NODE_KEYLESS_LIST))
1934 return false;
1c2facd1
RW
1935 break;
1936 default:
1937 return false;
1938 }
1939 return true;
1940 case NB_OP_RPC:
db452508 1941 if (CHECK_FLAG(snode->flags, LYS_CONFIG_W | LYS_CONFIG_R))
1c2facd1
RW
1942 return false;
1943
1944 switch (snode->nodetype) {
1945 case LYS_RPC:
1946 case LYS_ACTION:
1947 break;
1948 default:
1949 return false;
1950 }
1951 return true;
1952 default:
1953 return false;
1954 }
1955}
1956
1957DEFINE_HOOK(nb_notification_send, (const char *xpath, struct list *arguments),
1958 (xpath, arguments));
1959
1960int nb_notification_send(const char *xpath, struct list *arguments)
1961{
1962 int ret;
1963
9eb2c0a1
RW
1964 DEBUGD(&nb_dbg_notif, "northbound notification: %s", xpath);
1965
1c2facd1
RW
1966 ret = hook_call(nb_notification_send, xpath, arguments);
1967 if (arguments)
1968 list_delete(&arguments);
1969
1970 return ret;
1971}
1972
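/*
 * Editor's sketch, not part of northbound.c: the usual way a daemon emits a
 * YANG notification. The notification XPath, its "interface" argument and
 * the surrounding function are hypothetical; yang_data_list_new(),
 * yang_data_new() and listnode_add() build the argument list, which
 * nb_notification_send() consumes and deletes.
 */
static void example_notify_link_down(const char *ifname)
{
	const char *xpath = "/frr-example:link-down";
	char xpath_arg[XPATH_MAXLEN];
	struct list *arguments;
	struct yang_data *data;

	arguments = yang_data_list_new();

	/* Fill in the notification arguments (one leaf per argument). */
	snprintf(xpath_arg, sizeof(xpath_arg), "%s/interface", xpath);
	data = yang_data_new(xpath_arg, ifname);
	listnode_add(arguments, data);

	/* Hand the list over; nb_notification_send() frees it when done. */
	(void)nb_notification_send(xpath, arguments);
}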
ccd43ada
RW
1973/* Management of user pointers associated with running configuration entries. */
1974struct nb_config_entry {
1975 char xpath[XPATH_MAXLEN];
1976 void *entry;
1977};
1978
1979static bool running_config_entry_cmp(const void *value1, const void *value2)
1980{
1981 const struct nb_config_entry *c1 = value1;
1982 const struct nb_config_entry *c2 = value2;
1983
1984 return strmatch(c1->xpath, c2->xpath);
1985}
1986
d8b87afe 1987static unsigned int running_config_entry_key_make(const void *value)
ccd43ada
RW
1988{
1989 return string_hash_make(value);
1990}
1991
1992static void *running_config_entry_alloc(void *p)
1993{
1994 struct nb_config_entry *new, *key = p;
1995
1996 new = XCALLOC(MTYPE_NB_CONFIG_ENTRY, sizeof(*new));
1997 strlcpy(new->xpath, key->xpath, sizeof(new->xpath));
1998
1999 return new;
2000}
2001
2002static void running_config_entry_free(void *arg)
2003{
2004 XFREE(MTYPE_NB_CONFIG_ENTRY, arg);
2005}
2006
2007void nb_running_set_entry(const struct lyd_node *dnode, void *entry)
2008{
2009 struct nb_config_entry *config, s;
2010
2011 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2012 config = hash_get(running_config_entries, &s,
2013 running_config_entry_alloc);
2014 config->entry = entry;
2015}
2016
f7c20aa1
QY
2017void nb_running_move_tree(const char *xpath_from, const char *xpath_to)
2018{
2019 struct nb_config_entry *entry;
2020 struct list *entries = hash_to_list(running_config_entries);
2021 struct listnode *ln;
2022
2023 for (ALL_LIST_ELEMENTS_RO(entries, ln, entry)) {
2024 if (!frrstr_startswith(entry->xpath, xpath_from))
2025 continue;
2026
2027 hash_release(running_config_entries, entry);
2028
2029 char *newpath =
2030 frrstr_replace(entry->xpath, xpath_from, xpath_to);
2031 strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
2032 XFREE(MTYPE_TMP, newpath);
2033
2034 hash_get(running_config_entries, entry, hash_alloc_intern);
2035 }
2036
2037 list_delete(&entries);
2038}
2039
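/*
 * Editor's sketch, not part of northbound.c: nb_running_move_tree() lets a
 * daemon carry all user pointers registered under one XPath prefix over to
 * a new prefix, e.g. when an object is re-keyed. The "frr-example" XPaths
 * below are hypothetical.
 */
static void example_rekey_object(void)
{
	nb_running_move_tree("/frr-example:objects/object[name='old']",
			     "/frr-example:objects/object[name='new']");
}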
ccd43ada
RW
2040static void *nb_running_unset_entry_helper(const struct lyd_node *dnode)
2041{
2042 struct nb_config_entry *config, s;
2043 struct lyd_node *child;
2044 void *entry = NULL;
2045
2046 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2047 config = hash_release(running_config_entries, &s);
2048 if (config) {
2049 entry = config->entry;
2050 running_config_entry_free(config);
2051 }
2052
2053 /* Unset user pointers from the child nodes. */
2054 if (CHECK_FLAG(dnode->schema->nodetype, LYS_LIST | LYS_CONTAINER)) {
2055 LY_TREE_FOR (dnode->child, child) {
2056 (void)nb_running_unset_entry_helper(child);
2057 }
2058 }
2059
2060 return entry;
2061}
2062
2063void *nb_running_unset_entry(const struct lyd_node *dnode)
2064{
2065 void *entry;
2066
2067 entry = nb_running_unset_entry_helper(dnode);
2068 assert(entry);
2069
2070 return entry;
2071}
2072
b112b1ab
G
2073static void *nb_running_get_entry_worker(const struct lyd_node *dnode,
2074 const char *xpath,
2075 bool abort_if_not_found,
2076 bool rec_search)
ccd43ada
RW
2077{
2078 const struct lyd_node *orig_dnode = dnode;
2079 char xpath_buf[XPATH_MAXLEN];
b112b1ab 2080 bool rec_flag = true;
ccd43ada
RW
2081
2082 assert(dnode || xpath);
2083
2084 if (!dnode)
2085 dnode = yang_dnode_get(running_config->dnode, xpath);
2086
b112b1ab 2087 while (rec_flag && dnode) {
ccd43ada
RW
2088 struct nb_config_entry *config, s;
2089
2090 yang_dnode_get_path(dnode, s.xpath, sizeof(s.xpath));
2091 config = hash_lookup(running_config_entries, &s);
2092 if (config)
2093 return config->entry;
2094
b112b1ab
G
2095 rec_flag = rec_search;
2096
ccd43ada
RW
2097 dnode = dnode->parent;
2098 }
2099
2100 if (!abort_if_not_found)
2101 return NULL;
2102
2103 yang_dnode_get_path(orig_dnode, xpath_buf, sizeof(xpath_buf));
2104 flog_err(EC_LIB_YANG_DNODE_NOT_FOUND,
2105 "%s: failed to find entry [xpath %s]", __func__, xpath_buf);
2106 zlog_backtrace(LOG_ERR);
2107 abort();
2108}
2109
b112b1ab
G
2110void *nb_running_get_entry(const struct lyd_node *dnode, const char *xpath,
2111 bool abort_if_not_found)
2112{
2113 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2114 true);
2115}
2116
2117void *nb_running_get_entry_non_rec(const struct lyd_node *dnode,
2118 const char *xpath, bool abort_if_not_found)
2119{
2120 return nb_running_get_entry_worker(dnode, xpath, abort_if_not_found,
2121 false);
2122}
2123
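/*
 * Editor's sketch, not part of northbound.c: the usual pattern for the user
 * pointer helpers above. A daemon stores its object in the create callback,
 * looks it up in later callbacks and removes it on destroy. The
 * "example_obj" structure, the "frr-example" model and the callbacks are
 * hypothetical; the callback prototypes follow the args structs declared in
 * northbound.h.
 */
struct example_obj {
	char name[64];
	uint32_t value;
};

static int example_object_create(struct nb_cb_create_args *args)
{
	struct example_obj *obj;

	if (args->event != NB_EV_APPLY)
		return NB_OK;

	obj = XCALLOC(MTYPE_TMP, sizeof(*obj));
	strlcpy(obj->name, yang_dnode_get_string(args->dnode, "./name"),
		sizeof(obj->name));

	/* Associate the new object with its list entry in the running config. */
	nb_running_set_entry(args->dnode, obj);

	return NB_OK;
}

static int example_object_value_modify(struct nb_cb_modify_args *args)
{
	struct example_obj *obj;

	if (args->event != NB_EV_APPLY)
		return NB_OK;

	/* Walk up the data tree to find the pointer stored on the list entry. */
	obj = nb_running_get_entry(args->dnode, NULL, true);
	obj->value = yang_dnode_get_uint32(args->dnode, NULL);

	return NB_OK;
}

static int example_object_destroy(struct nb_cb_destroy_args *args)
{
	struct example_obj *obj;

	if (args->event != NB_EV_APPLY)
		return NB_OK;

	/* Detach the user pointer (and those of any child nodes), then free. */
	obj = nb_running_unset_entry(args->dnode);
	XFREE(MTYPE_TMP, obj);

	return NB_OK;
}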
ccd43ada 2124/* Northbound enum-to-string helpers (used mainly for logging). */
1c2facd1
RW
2125const char *nb_event_name(enum nb_event event)
2126{
2127 switch (event) {
2128 case NB_EV_VALIDATE:
2129 return "validate";
2130 case NB_EV_PREPARE:
2131 return "prepare";
2132 case NB_EV_ABORT:
2133 return "abort";
2134 case NB_EV_APPLY:
2135 return "apply";
2136 default:
2137 return "unknown";
2138 }
2139}
2140
2141const char *nb_operation_name(enum nb_operation operation)
2142{
2143 switch (operation) {
2144 case NB_OP_CREATE:
2145 return "create";
2146 case NB_OP_MODIFY:
2147 return "modify";
95ce849b
MS
2148 case NB_OP_DESTROY:
2149 return "destroy";
1c2facd1
RW
2150 case NB_OP_MOVE:
2151 return "move";
34224f0c
RW
2152 case NB_OP_PRE_VALIDATE:
2153 return "pre_validate";
1c2facd1
RW
2154 case NB_OP_APPLY_FINISH:
2155 return "apply_finish";
2156 case NB_OP_GET_ELEM:
2157 return "get_elem";
2158 case NB_OP_GET_NEXT:
2159 return "get_next";
2160 case NB_OP_GET_KEYS:
2161 return "get_keys";
2162 case NB_OP_LOOKUP_ENTRY:
2163 return "lookup_entry";
2164 case NB_OP_RPC:
2165 return "rpc";
2166 default:
2167 return "unknown";
2168 }
2169}
2170
2171const char *nb_err_name(enum nb_error error)
2172{
2173 switch (error) {
2174 case NB_OK:
2175 return "ok";
2176 case NB_ERR:
2177 return "generic error";
2178 case NB_ERR_NO_CHANGES:
2179 return "no changes";
2180 case NB_ERR_NOT_FOUND:
2181 return "element not found";
2182 case NB_ERR_LOCKED:
2183 return "resource is locked";
2184 case NB_ERR_VALIDATION:
df5eda3d 2185 return "validation";
1c2facd1
RW
2186 case NB_ERR_RESOURCE:
2187 return "failed to allocate resource";
2188 case NB_ERR_INCONSISTENCY:
2189 return "internal inconsistency";
2190 default:
2191 return "unknown";
2192 }
2193}
2194
2195const char *nb_client_name(enum nb_client client)
2196{
2197 switch (client) {
2198 case NB_CLIENT_CLI:
2199 return "CLI";
5bce33b3
RW
2200 case NB_CLIENT_CONFD:
2201 return "ConfD";
a7ca2199
RW
2202 case NB_CLIENT_SYSREPO:
2203 return "Sysrepo";
ec2ac5f2
RW
2204 case NB_CLIENT_GRPC:
2205 return "gRPC";
1c2facd1
RW
2206 default:
2207 return "unknown";
2208 }
2209}
2210
2211static void nb_load_callbacks(const struct frr_yang_module_info *module)
2212{
2213 for (size_t i = 0; module->nodes[i].xpath; i++) {
2214 struct nb_node *nb_node;
2215 uint32_t priority;
2216
dc397e4c
RW
2217 if (i > YANG_MODULE_MAX_NODES) {
2218 zlog_err(
2219 "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
2220 __func__, module->name, YANG_MODULE_MAX_NODES);
2221 exit(1);
2222 }
2223
1c2facd1
RW
2224 nb_node = nb_node_find(module->nodes[i].xpath);
2225 if (!nb_node) {
2226 flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
2227 "%s: unknown data path: %s", __func__,
2228 module->nodes[i].xpath);
2229 continue;
2230 }
2231
2232 nb_node->cbs = module->nodes[i].cbs;
2233 priority = module->nodes[i].priority;
2234 if (priority != 0)
2235 nb_node->priority = priority;
2236 }
2237}
2238
59e85ca1 2239void nb_validate_callbacks(void)
1c2facd1
RW
2240{
2241 unsigned int errors = 0;
2242
8d869d37 2243 yang_snodes_iterate(NULL, nb_node_validate, 0, &errors);
1c2facd1
RW
2244 if (errors > 0) {
2245 flog_err(
2246 EC_LIB_NB_CBS_VALIDATION,
2247 "%s: failed to validate northbound callbacks: %u error(s)",
2248 __func__, errors);
2249 exit(1);
2250 }
59e85ca1
RW
2251}
2252
2253void nb_load_module(const struct frr_yang_module_info *module_info)
2254{
2255 struct yang_module *module;
1c2facd1 2256
59e85ca1
RW
2257 DEBUGD(&nb_dbg_events, "northbound: loading %s.yang",
2258 module_info->name);
2259
2260 module = yang_module_load(module_info->name);
2261 yang_snodes_iterate(module->info, nb_node_new_cb, 0, NULL);
2262 nb_load_callbacks(module_info);
2263}
2264
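/*
 * Editor's sketch, not part of northbound.c: the shape of the module
 * description consumed by nb_load_module()/nb_load_callbacks(). The
 * "frr-example" module, its XPaths and the callbacks (from the sketches
 * above) are hypothetical; the nodes array is terminated by an entry whose
 * xpath is NULL. Daemons normally list such structures in their
 * FRR_DAEMON_INFO so that libfrr passes them to nb_init() below.
 */
static const struct frr_yang_module_info frr_example_info = {
	.name = "frr-example",
	.nodes = {
		{
			.xpath = "/frr-example:objects/object",
			.cbs = {
				.create = example_object_create,
				.destroy = example_object_destroy,
			},
		},
		{
			.xpath = "/frr-example:objects/object/value",
			.cbs = {
				.modify = example_object_value_modify,
			},
		},
		{
			.xpath = NULL,
		},
	},
};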
2265void nb_init(struct thread_master *tm,
2266 const struct frr_yang_module_info *const modules[],
2267 size_t nmodules, bool db_enabled)
2268{
390a8862
CS
2269 nb_db_enabled = db_enabled;
2270
59e85ca1
RW
2271 /* Load YANG modules and their corresponding northbound callbacks. */
2272 for (size_t i = 0; i < nmodules; i++)
2273 nb_load_module(modules[i]);
2274
2275 /* Validate northbound callbacks. */
2276 nb_validate_callbacks();
2277
1c2facd1
RW
2278 /* Create an empty running configuration. */
2279 running_config = nb_config_new(NULL);
ccd43ada
RW
2280 running_config_entries = hash_create(running_config_entry_key_make,
2281 running_config_entry_cmp,
2282 "Running Configuration Entries");
364ad673 2283 pthread_mutex_init(&running_config_mgmt_lock.mtx, NULL);
1c2facd1
RW
2284
2285 /* Initialize the northbound CLI. */
fbdc1c0a 2286 nb_cli_init(tm);
1c2facd1
RW
2287}
2288
2289void nb_terminate(void)
2290{
2291 /* Terminate the northbound CLI. */
2292 nb_cli_terminate();
2293
2294 /* Delete all nb_node's from all YANG modules. */
544ca69a 2295 nb_nodes_delete();
1c2facd1
RW
2296
2297 /* Delete the running configuration. */
ccd43ada
RW
2298 hash_clean(running_config_entries, running_config_entry_free);
2299 hash_free(running_config_entries);
1c2facd1 2300 nb_config_free(running_config);
364ad673 2301 pthread_mutex_destroy(&running_config_mgmt_lock.mtx);
1c2facd1 2302}