#include "command.h"
#include "debug.h"
#include "db.h"
+#include "frr_pthread.h"
#include "northbound.h"
#include "northbound_cli.h"
#include "northbound_db.h"
static int nb_callback_configuration(const enum nb_event event,
struct nb_config_change *change);
+static void nb_log_callback(const enum nb_event event,
+ enum nb_operation operation, const char *xpath,
+ const char *value);
static struct nb_transaction *nb_transaction_new(struct nb_config *config,
struct nb_config_cbs *changes,
enum nb_client client,
valid = nb_operation_is_valid(operation, nb_node->snode);
- if (!valid && callback_implemented)
+ /*
+ * Add an exception for operational data callbacks. A rw list usually
+ * doesn't need any associated operational data callbacks. But if this
+ * rw list is augmented by another module which adds state nodes under
+ * it, then this list will need to have the 'get_next()', 'get_keys()'
+ * and 'lookup_entry()' callbacks. As such, never log a warning when
+ * these callbacks are implemented when they are not needed, since this
+ * depends on context (e.g. some daemons might augment "frr-interface"
+ * while others don't).
+ */
+ if (!valid && callback_implemented && operation != NB_OP_GET_NEXT
+ && operation != NB_OP_GET_KEYS && operation != NB_OP_LOOKUP_ENTRY)
flog_warn(EC_LIB_NB_CB_UNNEEDED,
"unneeded '%s' callback for '%s'",
nb_operation_name(operation), nb_node->xpath);
!!nb_node->cbs.destroy, false);
error += nb_node_validate_cb(nb_node, NB_OP_MOVE, !!nb_node->cbs.move,
false);
+ error += nb_node_validate_cb(nb_node, NB_OP_PRE_VALIDATE,
+ !!nb_node->cbs.pre_validate, true);
error += nb_node_validate_cb(nb_node, NB_OP_APPLY_FINISH,
!!nb_node->cbs.apply_finish, true);
error += nb_node_validate_cb(nb_node, NB_OP_GET_ELEM,
else
config->dnode = yang_dnode_new(ly_native_ctx, true);
config->version = 0;
- pthread_rwlock_init(&config->lock, NULL);
return config;
}
{
if (config->dnode)
yang_dnode_free(config->dnode);
- pthread_rwlock_destroy(&config->lock);
XFREE(MTYPE_NB_CONFIG, config);
}
dup = XCALLOC(MTYPE_NB_CONFIG, sizeof(*dup));
dup->dnode = yang_dnode_dup(config->dnode);
dup->version = config->version;
- pthread_rwlock_init(&dup->lock, NULL);
return dup;
}
return 1;
/*
- * Use XPath as a tie-breaker. This will naturally sort parent nodes
- * before their children.
+ * Preserve the order of the configuration changes as told by libyang.
*/
- return strcmp(a->xpath, b->xpath);
+ if (a->seq < b->seq)
+ return -1;
+ if (a->seq > b->seq)
+ return 1;
+
+ /*
+ * All 'apply_finish' callbacks have their sequence number set to zero.
+ * In this case, compare them using their dnode pointers (the order
+ * doesn't matter for callbacks that have the same priority).
+ */
+ if (a->dnode < b->dnode)
+ return -1;
+ if (a->dnode > b->dnode)
+ return 1;
+
+ return 0;
}
RB_GENERATE(nb_config_cbs, nb_config_cb, entry, nb_config_cb_compare);
static void nb_config_diff_add_change(struct nb_config_cbs *changes,
enum nb_operation operation,
+ uint32_t *seq,
const struct lyd_node *dnode)
{
struct nb_config_change *change;
change = XCALLOC(MTYPE_TMP, sizeof(*change));
change->cb.operation = operation;
+ change->cb.seq = *seq;
+ *seq = *seq + 1;
change->cb.nb_node = dnode->schema->priv;
- yang_dnode_get_path(dnode, change->cb.xpath, sizeof(change->cb.xpath));
change->cb.dnode = dnode;
RB_INSERT(nb_config_cbs, changes, &change->cb);
* configurations. Given a new subtree, calculate all new YANG data nodes,
* excluding default leafs and leaf-lists. This is a recursive function.
*/
-static void nb_config_diff_created(const struct lyd_node *dnode,
+static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
struct nb_config_cbs *changes)
{
enum nb_operation operation;
else
return;
- nb_config_diff_add_change(changes, operation, dnode);
+ nb_config_diff_add_change(changes, operation, seq, dnode);
break;
case LYS_CONTAINER:
case LYS_LIST:
if (nb_operation_is_valid(NB_OP_CREATE, dnode->schema))
- nb_config_diff_add_change(changes, NB_OP_CREATE, dnode);
+ nb_config_diff_add_change(changes, NB_OP_CREATE, seq,
+ dnode);
/* Process child nodes recursively. */
LY_TREE_FOR (dnode->child, child) {
- nb_config_diff_created(child, changes);
+ nb_config_diff_created(child, seq, changes);
}
break;
default:
}
}
-static void nb_config_diff_deleted(const struct lyd_node *dnode,
+static void nb_config_diff_deleted(const struct lyd_node *dnode, uint32_t *seq,
struct nb_config_cbs *changes)
{
if (nb_operation_is_valid(NB_OP_DESTROY, dnode->schema))
- nb_config_diff_add_change(changes, NB_OP_DESTROY, dnode);
+ nb_config_diff_add_change(changes, NB_OP_DESTROY, seq, dnode);
else if (CHECK_FLAG(dnode->schema->nodetype, LYS_CONTAINER)) {
struct lyd_node *child;
* when applicable (i.e. optional nodes).
*/
LY_TREE_FOR (dnode->child, child) {
- nb_config_diff_deleted(child, changes);
+ nb_config_diff_deleted(child, seq, changes);
}
}
}
struct nb_config_cbs *changes)
{
struct lyd_difflist *diff;
+ uint32_t seq = 0;
diff = lyd_diff(config1->dnode, config2->dnode,
LYD_DIFFOPT_WITHDEFAULTS);
switch (type) {
case LYD_DIFF_CREATED:
dnode = diff->second[i];
- nb_config_diff_created(dnode, changes);
+ nb_config_diff_created(dnode, &seq, changes);
break;
case LYD_DIFF_DELETED:
dnode = diff->first[i];
- nb_config_diff_deleted(dnode, changes);
+ nb_config_diff_deleted(dnode, &seq, changes);
break;
case LYD_DIFF_CHANGED:
dnode = diff->second[i];
- nb_config_diff_add_change(changes, NB_OP_MODIFY, dnode);
+ nb_config_diff_add_change(changes, NB_OP_MODIFY, &seq,
+ dnode);
break;
case LYD_DIFF_MOVEDAFTER1:
case LYD_DIFF_MOVEDAFTER2:
__func__);
return NB_ERR;
}
-
- /*
- * If a new node was created, call lyd_validate() only to create
- * default child nodes.
- */
- if (dnode) {
- lyd_schema_sort(dnode, 0);
- lyd_validate(&dnode, LYD_OPT_CONFIG, ly_native_ctx);
- }
break;
case NB_OP_DESTROY:
dnode = yang_dnode_get(candidate->dnode, xpath_edit);
bool nb_candidate_needs_update(const struct nb_config *candidate)
{
- bool ret = false;
-
- pthread_rwlock_rdlock(&running_config->lock);
- {
- if (candidate->version < running_config->version)
- ret = true;
- }
- pthread_rwlock_unlock(&running_config->lock);
+ if (candidate->version < running_config->version)
+ return true;
- return ret;
+ return false;
}
int nb_candidate_update(struct nb_config *candidate)
{
struct nb_config *updated_config;
- pthread_rwlock_rdlock(&running_config->lock);
- {
- updated_config = nb_config_dup(running_config);
- }
- pthread_rwlock_unlock(&running_config->lock);
-
+ updated_config = nb_config_dup(running_config);
if (nb_config_merge(updated_config, candidate, true) != NB_OK)
return NB_ERR;
*/
static int nb_candidate_validate_yang(struct nb_config *candidate)
{
- if (lyd_validate(&candidate->dnode, LYD_OPT_STRICT | LYD_OPT_CONFIG,
+ if (lyd_validate(&candidate->dnode,
+ LYD_OPT_STRICT | LYD_OPT_CONFIG | LYD_OPT_WHENAUTODEL,
ly_native_ctx)
!= 0)
return NB_ERR_VALIDATION;
}
/* Perform code-level validation using the northbound callbacks. */
-static int nb_candidate_validate_changes(struct nb_config *candidate,
- struct nb_config_cbs *changes)
+static int nb_candidate_validate_code(struct nb_config *candidate,
+ struct nb_config_cbs *changes)
{
struct nb_config_cb *cb;
+ struct lyd_node *root, *next, *child;
+ int ret;
+
+ /* First validate the candidate as a whole. */
+ LY_TREE_FOR (candidate->dnode, root) {
+ LY_TREE_DFS_BEGIN (root, next, child) {
+ struct nb_node *nb_node;
+
+ nb_node = child->schema->priv;
+ if (!nb_node->cbs.pre_validate)
+ goto next;
+
+ if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config,
+ DEBUG_MODE_ALL)) {
+ char xpath[XPATH_MAXLEN];
+
+ yang_dnode_get_path(child, xpath,
+ sizeof(xpath));
+ nb_log_callback(NB_EV_VALIDATE,
+ NB_OP_PRE_VALIDATE, xpath,
+ NULL);
+ }
+
+ ret = (*nb_node->cbs.pre_validate)(child);
+ if (ret != NB_OK)
+ return NB_ERR_VALIDATION;
+
+ next:
+ LY_TREE_DFS_END(root, next, child);
+ }
+ }
+ /* Now validate the configuration changes. */
RB_FOREACH (cb, nb_config_cbs, changes) {
struct nb_config_change *change = (struct nb_config_change *)cb;
- int ret;
ret = nb_callback_configuration(NB_EV_VALIDATE, change);
if (ret != NB_OK)
return NB_ERR_VALIDATION;
RB_INIT(nb_config_cbs, &changes);
- pthread_rwlock_rdlock(&running_config->lock);
- {
- nb_config_diff(running_config, candidate, &changes);
- ret = nb_candidate_validate_changes(candidate, &changes);
- nb_config_diff_del_changes(&changes);
- }
- pthread_rwlock_unlock(&running_config->lock);
+ nb_config_diff(running_config, candidate, &changes);
+ ret = nb_candidate_validate_code(candidate, &changes);
+ nb_config_diff_del_changes(&changes);
return ret;
}
}
RB_INIT(nb_config_cbs, &changes);
- pthread_rwlock_rdlock(&running_config->lock);
- {
- nb_config_diff(running_config, candidate, &changes);
- if (RB_EMPTY(nb_config_cbs, &changes)) {
- pthread_rwlock_unlock(&running_config->lock);
- return NB_ERR_NO_CHANGES;
- }
+ nb_config_diff(running_config, candidate, &changes);
+ if (RB_EMPTY(nb_config_cbs, &changes))
+ return NB_ERR_NO_CHANGES;
- if (nb_candidate_validate_changes(candidate, &changes)
- != NB_OK) {
- flog_warn(
- EC_LIB_NB_CANDIDATE_INVALID,
- "%s: failed to validate candidate configuration",
- __func__);
- nb_config_diff_del_changes(&changes);
- pthread_rwlock_unlock(&running_config->lock);
- return NB_ERR_VALIDATION;
- }
+ if (nb_candidate_validate_code(candidate, &changes) != NB_OK) {
+ flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
+ "%s: failed to validate candidate configuration",
+ __func__);
+ nb_config_diff_del_changes(&changes);
+ return NB_ERR_VALIDATION;
+ }
- *transaction = nb_transaction_new(candidate, &changes, client,
- user, comment);
- if (*transaction == NULL) {
- flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
- "%s: failed to create transaction", __func__);
- nb_config_diff_del_changes(&changes);
- pthread_rwlock_unlock(&running_config->lock);
- return NB_ERR_LOCKED;
- }
+ *transaction =
+ nb_transaction_new(candidate, &changes, client, user, comment);
+ if (*transaction == NULL) {
+ flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
+ "%s: failed to create transaction", __func__);
+ nb_config_diff_del_changes(&changes);
+ return NB_ERR_LOCKED;
}
- pthread_rwlock_unlock(&running_config->lock);
return nb_transaction_process(NB_EV_PREPARE, *transaction);
}
/* Replace running by candidate. */
transaction->config->version++;
- pthread_rwlock_wrlock(&running_config->lock);
- {
- nb_config_replace(running_config, transaction->config, true);
- }
- pthread_rwlock_unlock(&running_config->lock);
+ nb_config_replace(running_config, transaction->config, true);
/* Record transaction. */
if (save_transaction
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (!running_config_mgmt_lock.locked) {
running_config_mgmt_lock.locked = true;
running_config_mgmt_lock.owner_client = client;
ret = 0;
}
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (running_config_mgmt_lock.locked
&& running_config_mgmt_lock.owner_client == client
&& running_config_mgmt_lock.owner_user == user) {
ret = 0;
}
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
{
int ret = -1;
- pthread_mutex_lock(&running_config_mgmt_lock.mtx);
- {
+ frr_with_mutex(&running_config_mgmt_lock.mtx) {
if (!running_config_mgmt_lock.locked
|| (running_config_mgmt_lock.owner_client == client
&& running_config_mgmt_lock.owner_user == user))
ret = 0;
}
- pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
return ret;
}
zlog_debug(
"northbound callback: event [%s] op [%s] xpath [%s] value [%s]",
nb_event_name(event), nb_operation_name(operation), xpath,
- value);
+ value ? value : "(NULL)");
}
/*
struct nb_config_change *change)
{
enum nb_operation operation = change->cb.operation;
- const char *xpath = change->cb.xpath;
+ char xpath[XPATH_MAXLEN];
const struct nb_node *nb_node = change->cb.nb_node;
const struct lyd_node *dnode = change->cb.dnode;
union nb_resource *resource;
if (dnode && !yang_snode_is_typeless_data(dnode->schema))
value = yang_dnode_get_string(dnode, NULL);
+ yang_dnode_get_path(dnode, xpath, sizeof(xpath));
nb_log_callback(event, operation, xpath, value);
}
ret = (*nb_node->cbs.move)(event, dnode);
break;
default:
+ yang_dnode_get_path(dnode, xpath, sizeof(xpath));
flog_err(EC_LIB_DEVELOPMENT,
"%s: unknown operation (%u) [xpath %s]", __func__,
operation, xpath);
int priority;
enum lib_log_refs ref;
+ yang_dnode_get_path(dnode, xpath, sizeof(xpath));
+
switch (event) {
case NB_EV_VALIDATE:
priority = LOG_WARNING;
{
struct nb_config_cb *cb;
- /*
- * Need to lock the running configuration since transaction->changes
- * can contain pointers to data nodes from the running configuration.
- */
- pthread_rwlock_rdlock(&running_config->lock);
- {
- RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
- struct nb_config_change *change =
- (struct nb_config_change *)cb;
- int ret;
+ RB_FOREACH (cb, nb_config_cbs, &transaction->changes) {
+ struct nb_config_change *change = (struct nb_config_change *)cb;
+ int ret;
+ /*
+ * Only try to release resources that were allocated
+ * successfully.
+ */
+ if (event == NB_EV_ABORT && change->prepare_ok == false)
+ break;
+
+ /* Call the appropriate callback. */
+ ret = nb_callback_configuration(event, change);
+ switch (event) {
+ case NB_EV_PREPARE:
+ if (ret != NB_OK)
+ return ret;
+ change->prepare_ok = true;
+ break;
+ case NB_EV_ABORT:
+ case NB_EV_APPLY:
/*
- * Only try to release resources that were allocated
- * successfully.
+ * At this point it's not possible to reject the
+ * transaction anymore, so any failure here can lead to
+ * inconsistencies and should be treated as a bug.
+ * Operations prone to errors, like validations and
+ * resource allocations, should be performed during the
+ * 'prepare' phase.
*/
- if (event == NB_EV_ABORT && change->prepare_ok == false)
- break;
-
- /* Call the appropriate callback. */
- ret = nb_callback_configuration(event, change);
- switch (event) {
- case NB_EV_PREPARE:
- if (ret != NB_OK) {
- pthread_rwlock_unlock(
- &running_config->lock);
- return ret;
- }
- change->prepare_ok = true;
- break;
- case NB_EV_ABORT:
- case NB_EV_APPLY:
- /*
- * At this point it's not possible to reject the
- * transaction anymore, so any failure here can
- * lead to inconsistencies and should be treated
- * as a bug. Operations prone to errors, like
- * validations and resource allocations, should
- * be performed during the 'prepare' phase.
- */
- break;
- default:
- break;
- }
+ break;
+ default:
+ break;
}
}
- pthread_rwlock_unlock(&running_config->lock);
return NB_OK;
}
static struct nb_config_cb *
-nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const char *xpath,
- const struct nb_node *nb_node,
+nb_apply_finish_cb_new(struct nb_config_cbs *cbs, const struct nb_node *nb_node,
const struct lyd_node *dnode)
{
struct nb_config_cb *cb;
cb = XCALLOC(MTYPE_TMP, sizeof(*cb));
- strlcpy(cb->xpath, xpath, sizeof(cb->xpath));
cb->nb_node = nb_node;
cb->dnode = dnode;
RB_INSERT(nb_config_cbs, cbs, cb);
}
static struct nb_config_cb *
-nb_apply_finish_cb_find(struct nb_config_cbs *cbs, const char *xpath,
- const struct nb_node *nb_node)
+nb_apply_finish_cb_find(struct nb_config_cbs *cbs,
+ const struct nb_node *nb_node,
+ const struct lyd_node *dnode)
{
struct nb_config_cb s;
- strlcpy(s.xpath, xpath, sizeof(s.xpath));
+ s.seq = 0;
s.nb_node = nb_node;
+ s.dnode = dnode;
return RB_FIND(nb_config_cbs, cbs, &s);
}
{
struct nb_config_cbs cbs;
struct nb_config_cb *cb;
+ char xpath[XPATH_MAXLEN];
/* Initialize tree of 'apply_finish' callbacks. */
RB_INIT(nb_config_cbs, &cbs);
* be called though).
*/
if (change->cb.operation == NB_OP_DESTROY) {
- char xpath[XPATH_MAXLEN];
-
dnode = dnode->parent;
if (!dnode)
break;
xpath);
}
while (dnode) {
- char xpath[XPATH_MAXLEN];
struct nb_node *nb_node;
nb_node = dnode->schema->priv;
* Don't call the callback more than once for the same
* data node.
*/
- yang_dnode_get_path(dnode, xpath, sizeof(xpath));
- if (nb_apply_finish_cb_find(&cbs, xpath, nb_node))
+ if (nb_apply_finish_cb_find(&cbs, nb_node, dnode))
goto next;
- nb_apply_finish_cb_new(&cbs, xpath, nb_node, dnode);
+ nb_apply_finish_cb_new(&cbs, nb_node, dnode);
next:
dnode = dnode->parent;
/* Call the 'apply_finish' callbacks, sorted by their priorities. */
RB_FOREACH (cb, nb_config_cbs, &cbs) {
- if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL))
- nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH,
- cb->xpath, NULL);
+ if (DEBUG_MODE_CHECK(&nb_dbg_cbs_config, DEBUG_MODE_ALL)) {
+ yang_dnode_get_path(cb->dnode, xpath, sizeof(xpath));
+ nb_log_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, xpath,
+ NULL);
+ }
(*cb->nb_node->cbs.apply_finish)(cb->dnode);
}
/* Update XPath. */
strlcpy(xpath, xpath_parent, sizeof(xpath));
- if (!first && snode->nodetype != LYS_USES)
- snprintf(xpath + strlen(xpath), sizeof(xpath) - strlen(xpath),
- "/%s", snode->name);
+ if (!first && snode->nodetype != LYS_USES) {
+ struct lys_node *parent;
+
+ /* Get the real parent. */
+ parent = snode->parent;
+ while (parent && parent->nodetype == LYS_USES)
+ parent = parent->parent;
+
+ /*
+ * When necessary, include the namespace of the augmenting
+ * module.
+ */
+ if (parent && parent->nodetype == LYS_AUGMENT)
+ snprintf(xpath + strlen(xpath),
+ sizeof(xpath) - strlen(xpath), "/%s:%s",
+ snode->module->name, snode->name);
+ else
+ snprintf(xpath + strlen(xpath),
+ sizeof(xpath) - strlen(xpath), "/%s",
+ snode->name);
+ }
nb_node = snode->priv;
switch (snode->nodetype) {
*/
ly_errno = 0;
dnode = lyd_new_path(NULL, ly_native_ctx, xpath, NULL, 0,
- LYD_PATH_OPT_UPDATE);
- if (!dnode && ly_errno) {
+ LYD_PATH_OPT_UPDATE | LYD_PATH_OPT_NOPARENTRET);
+ if (!dnode) {
flog_warn(EC_LIB_LIBYANG, "%s: lyd_new_path() failed",
__func__);
return NB_ERR;
}
- /*
- * We can remove the following two lines once we depend on
- * libyang-v0.16-r2, which has the LYD_PATH_OPT_NOPARENTRET flag for
- * lyd_new_path().
- */
- dnode = yang_dnode_get(dnode, xpath);
- assert(dnode);
/*
* Create a linked list to sort the data nodes starting from the root.
return false;
}
return true;
+ case NB_OP_PRE_VALIDATE:
case NB_OP_APPLY_FINISH:
if (!CHECK_FLAG(snode->flags, LYS_CONFIG_W))
return false;
return strmatch(c1->xpath, c2->xpath);
}
-static unsigned int running_config_entry_key_make(void *value)
+static unsigned int running_config_entry_key_make(const void *value)
{
return string_hash_make(value);
}
return "destroy";
case NB_OP_MOVE:
return "move";
+ case NB_OP_PRE_VALIDATE:
+ return "pre_validate";
case NB_OP_APPLY_FINISH:
return "apply_finish";
case NB_OP_GET_ELEM:
}
void nb_init(struct thread_master *tm,
- const struct frr_yang_module_info *modules[], size_t nmodules)
+ const struct frr_yang_module_info *const modules[],
+ size_t nmodules)
{
unsigned int errors = 0;