]> git.proxmox.com Git - mirror_frr.git/blame - mgmtd/mgmt_txn.c
*: Rename `struct thread` to `struct event`
[mirror_frr.git] / mgmtd / mgmt_txn.c
CommitLineData
74335ceb
YR
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * MGMTD Transactions
4 *
5 * Copyright (C) 2021 Vmware, Inc.
6 * Pushpasis Sarkar <spushpasis@vmware.com>
7 */
8
9#include <zebra.h>
10#include "hash.h"
11#include "jhash.h"
12#include "libfrr.h"
13#include "mgmtd/mgmt.h"
14#include "mgmtd/mgmt_memory.h"
15#include "mgmtd/mgmt_txn.h"
16
17#ifdef REDIRECT_DEBUG_TO_STDERR
18#define MGMTD_TXN_DBG(fmt, ...) \
19 fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
20#define MGMTD_TXN_ERR(fmt, ...) \
21 fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
22#else /* REDIRECT_DEBUG_TO_STDERR */
23#define MGMTD_TXN_DBG(fmt, ...) \
24 do { \
25 if (mgmt_debug_txn) \
26 zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
27 } while (0)
28#define MGMTD_TXN_ERR(fmt, ...) \
29 zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
30#endif /* REDIRECT_DEBUG_TO_STDERR */
31
32#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
33#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
34
/* Kinds of deferred work (and timers) scheduled on a transaction. */
enum mgmt_txn_event {
	MGMTD_TXN_PROC_SETCFG = 1,   /* process queued SET-CONFIG requests */
	MGMTD_TXN_PROC_COMMITCFG,    /* drive the COMMIT-CONFIG phases */
	MGMTD_TXN_PROC_GETCFG,	     /* process queued GET-CONFIG requests */
	MGMTD_TXN_PROC_GETDATA,	     /* process queued GET-DATA requests */
	MGMTD_TXN_COMMITCFG_TIMEOUT, /* overall commit timed out */
	MGMTD_TXN_CLEANUP	     /* deferred destruction of the txn */
};
43
44PREDECL_LIST(mgmt_txn_reqs);
45
/* A single SET-CONFIG request queued on a transaction. */
struct mgmt_set_cfg_req {
	Mgmtd__DatastoreId ds_id;   /* datastore being edited (source) */
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	uint16_t num_cfg_changes;   /* valid entries in cfg_changes[] */
	/* When set, a commit to dst_ds is kicked off right after the edit. */
	bool implicit_commit;
	Mgmtd__DatastoreId dst_ds_id;
	struct mgmt_ds_ctx *dst_ds_ctx;
	struct mgmt_setcfg_stats *setcfg_stats;
};
56
/*
 * Phases a COMMIT-CONFIG request walks through, in order. Each phase
 * must complete on all involved backend clients before the commit
 * advances to the next one.
 */
enum mgmt_commit_phase {
	MGMTD_COMMIT_PHASE_PREPARE_CFG = 0, /* build per-backend batches */
	MGMTD_COMMIT_PHASE_TXN_CREATE,	    /* create txn on backends */
	MGMTD_COMMIT_PHASE_SEND_CFG,	    /* push config data batches */
	MGMTD_COMMIT_PHASE_APPLY_CFG,	    /* ask backends to apply */
	MGMTD_COMMIT_PHASE_TXN_DELETE,	    /* tear down backend txns */
	MGMTD_COMMIT_PHASE_MAX		    /* sentinel, not a phase */
};
65
66static inline const char *
67mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
68{
69 switch (cmt_phase) {
70 case MGMTD_COMMIT_PHASE_PREPARE_CFG:
71 return "PREP-CFG";
72 case MGMTD_COMMIT_PHASE_TXN_CREATE:
73 return "CREATE-TXN";
74 case MGMTD_COMMIT_PHASE_SEND_CFG:
75 return "SEND-CFG";
76 case MGMTD_COMMIT_PHASE_APPLY_CFG:
77 return "APPLY-CFG";
78 case MGMTD_COMMIT_PHASE_TXN_DELETE:
79 return "DELETE-TXN";
80 case MGMTD_COMMIT_PHASE_MAX:
81 return "Invalid/Unknown";
82 }
83
84 return "Invalid/Unknown";
85}
86
87PREDECL_LIST(mgmt_txn_batches);
88
/*
 * One batch of config data destined for a single backend client.
 * Batches are capped both by entry count (MGMTD_MAX_CFG_CHANGES_IN_BATCH)
 * and by message buffer space (buf_space_left).
 */
struct mgmt_txn_be_cfg_batch {
	struct mgmt_txn_ctx *txn; /* owning transaction (ref held) */
	uint64_t batch_id;	  /* unique id, key in the batches hash */
	enum mgmt_be_client_id be_id;
	struct mgmt_be_client_adapter *be_adapter; /* ref held if non-NULL */
	/* Per-entry subscription flags for the xpath at the same index. */
	union mgmt_be_xpath_subscr_info
		xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	/* Protobuf request entries and the pointer array sent on the wire. */
	Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	size_t num_cfg_data;	/* entries used so far */
	int buf_space_left;	/* remaining room in the backend message */
	enum mgmt_commit_phase comm_phase; /* phase this batch is in */
	struct mgmt_txn_batches_item list_linkage;
};
105
106DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
107
108#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
109 frr_each_safe (mgmt_txn_batches, list, batch)
110
/* State of the single COMMIT-CONFIG request allowed per transaction. */
struct mgmt_commit_cfg_req {
	Mgmtd__DatastoreId src_ds_id;	/* usually candidate DS */
	struct mgmt_ds_ctx *src_ds_ctx;
	Mgmtd__DatastoreId dst_ds_id;	/* usually running DS */
	struct mgmt_ds_ctx *dst_ds_ctx;
	uint32_t nb_txn_id;
	uint8_t validate_only : 1; /* validate, don't apply */
	uint8_t abort : 1;	   /* discard candidate changes */
	uint8_t implicit : 1;	   /* triggered by SET-CONFIG */
	uint8_t rollback : 1;	   /* replay from commit history */

	/* Track commit phases */
	enum mgmt_commit_phase curr_phase;
	enum mgmt_commit_phase next_phase;

	/*
	 * Set of config changes to commit. This is used only
	 * when changes are NOT to be determined by comparing
	 * candidate and running DSs. This is typically used
	 * for downloading all relevant configs for a new backend
	 * client that has recently come up and connected with
	 * MGMTD.
	 */
	struct nb_config_cbs *cfg_chgs;

	/*
	 * Details on all the Backend Clients associated with
	 * this commit.
	 */
	struct mgmt_be_client_subscr_info subscr_info;

	/*
	 * List of backend batches for this commit to be validated
	 * and applied at the backend.
	 *
	 * FIXME: Need to re-think this design for the case set of
	 * validators for a given YANG data item is different from
	 * the set of notifiers for the same. We may need to have
	 * separate list of batches for VALIDATE and APPLY.
	 */
	struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
	struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
	/*
	 * The last batch added for any backend client. This is always on
	 * 'curr_batches'
	 */
	struct mgmt_txn_be_cfg_batch
		*last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
	struct hash *batches;	 /* batch-id -> batch lookup */
	uint64_t next_batch_id;	 /* id 0 is reserved, never assigned */

	struct mgmt_commit_stats *cmt_stats;
};
164
/* Scratch space for assembling one GET-CONFIG/GET-DATA reply batch. */
struct mgmt_get_data_reply {
	/* Buffer space for preparing data reply */
	int num_reply;	/* entries filled in this batch */
	int last_batch;	/* non-zero on the final batch of the reply */
	Mgmtd__YangDataReply data_reply;
	Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangData * reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
};
175
/* A GET-CONFIG or GET-DATA request queued on a transaction. */
struct mgmt_get_data_req {
	Mgmtd__DatastoreId ds_id;   /* datastore to read from */
	struct mgmt_ds_ctx *ds_ctx;
	char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH]; /* owned, free()d */
	int num_xpaths;

	/*
	 * Buffer space for preparing reply.
	 * NOTE: Should only be malloc-ed on demand to reduce
	 * memory footprint. Freed up via mgmt_trx_req_free()
	 */
	struct mgmt_get_data_reply *reply;

	int total_reply; /* running count across all reply batches */
};
191
/*
 * A single request (set/get/commit) queued on a transaction.
 * Exactly one member of the 'req' union is valid, selected by
 * 'req_event'; note commit_cfg is embedded while the others are
 * separately allocated.
 */
struct mgmt_txn_req {
	struct mgmt_txn_ctx *txn; /* owning transaction (ref held) */
	enum mgmt_txn_event req_event;
	uint64_t req_id;	  /* frontend-supplied request id */
	union {
		struct mgmt_set_cfg_req *set_cfg;
		struct mgmt_get_data_req *get_data;
		struct mgmt_commit_cfg_req commit_cfg;
	} req;

	/* True while the request awaits a backend response; selects which
	 * list (pending vs. request) the req is unlinked from on free. */
	bool pending_be_proc;
	struct mgmt_txn_reqs_item list_linkage;
};
205
206DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
207
208#define FOREACH_TXN_REQ_IN_LIST(list, req) \
209 frr_each_safe (mgmt_txn_reqs, list, req)
210
/* A transaction: all state for one client session's pending work. */
struct mgmt_txn_ctx {
	uint64_t session_id; /* One transaction per client session */
	uint64_t txn_id;
	enum mgmt_txn_type type;

	/* struct mgmt_master *mm; */

	/* Event-loop tasks, one per kind of deferred processing. */
	struct event *proc_set_cfg;
	struct event *proc_comm_cfg;
	struct event *proc_get_cfg;
	struct event *proc_get_data;
	struct event *comm_cfg_timeout; /* overall commit timeout timer */
	struct event *clnup;		/* deferred self-destruction */

	/* List of backend adapters involved in this transaction */
	struct mgmt_txn_badapters_head be_adapters;

	/* Managed via MGMTD_TXN_LOCK/MGMTD_TXN_UNLOCK. */
	int refcount;

	struct mgmt_txns_item list_linkage;

	/*
	 * List of pending set-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head set_cfg_reqs;
	/*
	 * List of pending get-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head get_cfg_reqs;
	/*
	 * List of pending get-data requests for a given
	 * transaction/session Two lists, one for requests
	 * not processed at all, and one for requests that
	 * has been sent to backend for processing.
	 */
	struct mgmt_txn_reqs_head get_data_reqs;
	struct mgmt_txn_reqs_head pending_get_datas;
	/*
	 * There will always be one commit-config allowed for a given
	 * transaction/session. No need to maintain lists for it.
	 */
	struct mgmt_txn_req *commit_cfg_req;
};
260
261DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
262
263#define FOREACH_TXN_IN_LIST(mm, txn) \
264 frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
265
266static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
267 enum mgmt_result result,
268 const char *error_if_any);
269
270static inline const char *
271mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
272{
273 if (!txn->commit_cfg_req)
274 return "None";
275
276 return (mgmt_commit_phase2str(
277 curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
278 : txn->commit_cfg_req->req.commit_cfg.next_phase));
279}
280
281static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
282 int line);
283static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
284 int line);
285static int
286mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
287 struct mgmt_be_client_adapter *adapter);
288
289static struct thread_master *mgmt_txn_tm;
290static struct mgmt_master *mgmt_txn_mm;
291
292static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
293 enum mgmt_txn_event event);
294
295static int
296mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
297 struct mgmt_be_client_adapter *adapter);
298
/*
 * Allocate a fresh config batch for backend client 'id' under the
 * transaction's ongoing commit: link it on the client's current batch
 * list, record it as the client's last batch, assign it a non-zero
 * batch-id and index it in the commit's batch hash.
 *
 * Takes a reference on 'txn' and (if given) on 'be_adapter'; both are
 * dropped again in mgmt_txn_cfg_batch_free().
 */
static struct mgmt_txn_be_cfg_batch *
mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
			 enum mgmt_be_client_id id,
			 struct mgmt_be_client_adapter *be_adapter)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch;

	cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
			   sizeof(struct mgmt_txn_be_cfg_batch));
	assert(cfg_btch);
	cfg_btch->be_id = id;

	cfg_btch->txn = txn;
	MGMTD_TXN_LOCK(txn);
	/* Batches only exist in the context of an ongoing commit. */
	assert(txn->commit_cfg_req);
	mgmt_txn_batches_add_tail(
		&txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
		cfg_btch);
	cfg_btch->be_adapter = be_adapter;
	cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
	if (be_adapter)
		mgmt_be_adapter_lock(be_adapter);

	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
		cfg_btch;
	/* Batch-id 0 is reserved; skip it on the very first allocation. */
	if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
		txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
	cfg_btch->batch_id =
		txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
	hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
		 hash_alloc_intern);

	return cfg_btch;
}
333
/*
 * Free a config batch: remove it from the batch hash and from
 * whichever (curr/next) list it is on, drop the adapter and txn
 * references taken at allocation, and release the xpath strings
 * duplicated into its data entries. Clears *cfg_btch.
 */
static void
mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
{
	size_t indx;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	MGMTD_TXN_DBG(" Batch: %p, Txn: %p", *cfg_btch, (*cfg_btch)->txn);

	/* Batches only ever belong to CONFIG transactions. */
	assert((*cfg_btch)->txn
	       && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);

	cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
	hash_release(cmtcfg_req->batches, *cfg_btch);
	/* The batch sits on exactly one of these; deleting from both is
	 * safe and avoids tracking which one. */
	mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
			     *cfg_btch);
	mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
			     *cfg_btch);

	if ((*cfg_btch)->be_adapter)
		mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);

	/* xpath strings were allocated per data entry; free them all. */
	for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
		if ((*cfg_btch)->data[indx].xpath) {
			free((*cfg_btch)->data[indx].xpath);
			(*cfg_btch)->data[indx].xpath = NULL;
		}
	}

	MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);

	XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
	*cfg_btch = NULL;
}
367
/* Hash key for the batches hash: jhash over the 64-bit batch-id. */
static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
{
	const struct mgmt_txn_be_cfg_batch *batch = data;

	return jhash2((uint32_t *) &batch->batch_id,
		      sizeof(batch->batch_id) / sizeof(uint32_t), 0);
}
375
376static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
377{
378 const struct mgmt_txn_be_cfg_batch *batch1 = d1;
379 const struct mgmt_txn_be_cfg_batch *batch2 = d2;
380
381 return (batch1->batch_id == batch2->batch_id);
382}
383
/* hash_clean() callback: fully release one batch entry. */
static void mgmt_txn_cfgbatch_hash_free(void *data)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch = data;

	mgmt_txn_cfg_batch_free(&cfg_btch);
}
390
391static inline struct mgmt_txn_be_cfg_batch *
392mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
393{
394 struct mgmt_txn_be_cfg_batch key = {0};
395 struct mgmt_txn_be_cfg_batch *batch;
396
397 if (!txn->commit_cfg_req)
398 return NULL;
399
400 key.batch_id = batch_id;
401 batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
402 &key);
403
404 return batch;
405}
406
/*
 * Free every batch (current and next lists) belonging to backend
 * client 'id' for the ongoing commit, and reset the client's
 * last-batch pointer.
 */
static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
					    enum mgmt_be_client_id id)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch;
	struct mgmt_txn_batches_head *list;

	list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
	/* Safe-iteration macro: entries are freed while walking. */
	FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
		mgmt_txn_cfg_batch_free(&cfg_btch);

	mgmt_txn_batches_fini(list);

	list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
	FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
		mgmt_txn_cfg_batch_free(&cfg_btch);

	mgmt_txn_batches_fini(list);

	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
}
427
/*
 * Allocate a new request of kind 'req_event' on transaction 'txn',
 * allocate its event-specific payload and queue it on the matching
 * request list (commit-config is a singleton stored directly on the
 * txn instead). Takes a reference on the transaction, dropped again
 * in mgmt_txn_req_free().
 */
static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
					       uint64_t req_id,
					       enum mgmt_txn_event req_event)
{
	struct mgmt_txn_req *txn_req;
	enum mgmt_be_client_id id;

	txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
	assert(txn_req);
	txn_req->txn = txn;
	txn_req->req_id = req_id;
	txn_req->req_event = req_event;
	txn_req->pending_be_proc = false;

	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
						sizeof(struct mgmt_set_cfg_req));
		assert(txn_req->req.set_cfg);
		mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
		MGMTD_TXN_DBG(
			"Added a new SETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		/* Only one commit per txn; stored directly, not listed. */
		txn->commit_cfg_req = txn_req;
		MGMTD_TXN_DBG(
			"Added a new COMMITCFG Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);

		/* Prepare empty batch lists for every backend client. */
		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.curr_batches[id]);
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.next_batches[id]);
		}

		txn_req->req.commit_cfg.batches =
			hash_create(mgmt_txn_cfgbatch_hash_key,
				    mgmt_txn_cfgbatch_hash_cmp,
				    "MGMT Config Batches");
		break;
	case MGMTD_TXN_PROC_GETCFG:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
		MGMTD_TXN_DBG(
			"Added a new GETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
		MGMTD_TXN_DBG(
			"Added a new GETDATA Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		/* Timer/cleanup events carry no request payload. */
		break;
	}

	MGMTD_TXN_LOCK(txn);

	return txn_req;
}
499
/*
 * Free a transaction request: release its event-specific payload
 * (for commits this also tears down backend transactions and
 * batches), unlink it from whichever list it sits on, drop the txn
 * reference and clear *txn_req.
 */
static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
{
	int indx;
	struct mgmt_txn_reqs_head *req_list = NULL;
	struct mgmt_txn_reqs_head *pending_list = NULL;
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;

	switch ((*txn_req)->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		/* Change values were strdup'ed when the req was built. */
		for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
		     indx++) {
			if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
				MGMTD_TXN_DBG(
					"Freeing value for %s at %p ==> '%s'",
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.xpath,
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.value,
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.value);
				free((void *)(*txn_req)
					     ->req.set_cfg->cfg_changes[indx]
					     .value);
			}
		}
		req_list = &(*txn_req)->txn->set_cfg_reqs;
		MGMTD_TXN_DBG("Deleting SETCFG Req: %p for Txn: %p",
			       *txn_req, (*txn_req)->txn);
		XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		MGMTD_TXN_DBG("Deleting COMMITCFG Req: %p for Txn: %p",
			       *txn_req, (*txn_req)->txn);
		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			/*
			 * Send TXN_DELETE to cleanup state for this
			 * transaction on backend
			 */
			if ((*txn_req)->req.commit_cfg.curr_phase
				    >= MGMTD_COMMIT_PHASE_TXN_CREATE
			    && (*txn_req)->req.commit_cfg.curr_phase
				       < MGMTD_COMMIT_PHASE_TXN_DELETE
			    && (*txn_req)
				       ->req.commit_cfg.subscr_info
				       .xpath_subscr[id]
				       .subscribed) {
				adapter = mgmt_be_get_adapter_by_id(id);
				if (adapter)
					mgmt_txn_send_be_txn_delete(
						(*txn_req)->txn, adapter);
			}

			mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
							id);
			/*
			 * NOTE(review): this hash cleanup lives inside the
			 * per-client loop but the NULL-guard makes it run
			 * only once (on the first iteration); it could be
			 * hoisted after the loop — confirm and simplify.
			 */
			if ((*txn_req)->req.commit_cfg.batches) {
				hash_clean((*txn_req)->req.commit_cfg.batches,
					   mgmt_txn_cfgbatch_hash_free);
				hash_free((*txn_req)->req.commit_cfg.batches);
				(*txn_req)->req.commit_cfg.batches = NULL;
			}
		}
		break;
	case MGMTD_TXN_PROC_GETCFG:
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)
					     ->req.get_data->xpaths[indx]);
		}
		req_list = &(*txn_req)->txn->get_cfg_reqs;
		MGMTD_TXN_DBG("Deleting GETCFG Req: %p for Txn: %p",
			       *txn_req, (*txn_req)->txn);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);
		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)
					     ->req.get_data->xpaths[indx]);
		}
		/* GETDATA may sit on either the fresh or the pending list. */
		pending_list = &(*txn_req)->txn->pending_get_datas;
		req_list = &(*txn_req)->txn->get_data_reqs;
		MGMTD_TXN_DBG("Deleting GETDATA Req: %p for Txn: %p",
			       *txn_req, (*txn_req)->txn);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);
		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		break;
	}

	/* Unlink from the pending list when awaiting a backend reply,
	 * otherwise from the plain request list (if any). */
	if ((*txn_req)->pending_be_proc && pending_list) {
		mgmt_txn_reqs_del(pending_list, *txn_req);
		MGMTD_TXN_DBG("Removed Req: %p from pending-list (left:%d)",
			      *txn_req, (int)mgmt_txn_reqs_count(pending_list));
	} else if (req_list) {
		mgmt_txn_reqs_del(req_list, *txn_req);
		MGMTD_TXN_DBG("Removed Req: %p from request-list (left:%d)",
			      *txn_req, (int)mgmt_txn_reqs_count(req_list));
	}

	(*txn_req)->pending_be_proc = false;
	MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
	XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
	*txn_req = NULL;
}
617
/*
 * Event handler: process up to MGMTD_TXN_MAX_NUM_SETCFG_PROC queued
 * SET-CONFIG requests on the transaction. Each request edits the
 * target datastore's candidate config tree; an implicit-commit
 * request then locks the destination DS and kicks off a commit.
 * Every processed request is freed; if any remain, the event is
 * re-registered to continue later.
 */
static void mgmt_txn_process_set_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_config *nb_config;
	char err_buf[1024];
	bool error;
	int num_processed = 0;
	size_t left;
	struct mgmt_commit_stats *cmt_stats;
	int ret = 0;

	txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
	assert(txn);
	cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);

	MGMTD_TXN_DBG(
		"Processing %d SET_CONFIG requests for Txn:%p Session:0x%llx",
		(int)mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn,
		(unsigned long long)txn->session_id);

	/* Safe iteration: requests are freed inside the loop body. */
	FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
		ds_ctx = txn_req->req.set_cfg->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, "No such datastore!",
				txn_req->req.set_cfg->implicit_commit);
			error = true;
			goto mgmt_txn_process_set_cfg_done;
		}

		nb_config = mgmt_ds_get_nb_config(ds_ctx);
		if (!nb_config) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR,
				"Unable to retrieve DS Config Tree!",
				txn_req->req.set_cfg->implicit_commit);
			error = true;
			goto mgmt_txn_process_set_cfg_done;
		}

		/* Apply all changes of this request to the config tree. */
		error = false;
		nb_candidate_edit_config_changes(
			nb_config, txn_req->req.set_cfg->cfg_changes,
			(size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
			NULL, 0, err_buf, sizeof(err_buf), &error);
		if (error) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, err_buf,
				txn_req->req.set_cfg->implicit_commit);
			goto mgmt_txn_process_set_cfg_done;
		}

		if (txn_req->req.set_cfg->implicit_commit) {
			/* Implicit commit must be the sole queued request;
			 * the SET-CONFIG reply is deferred until the commit
			 * concludes (see mgmt_txn_send_commit_cfg_reply). */
			assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
			assert(txn_req->req.set_cfg->dst_ds_ctx);

			ret = mgmt_ds_write_lock(
				txn_req->req.set_cfg->dst_ds_ctx);
			if (ret != 0) {
				MGMTD_TXN_ERR(
					"Failed to lock the DS %u for txn: %p session 0x%llx, errstr %s!",
					txn_req->req.set_cfg->dst_ds_id, txn,
					(unsigned long long)txn->session_id,
					strerror(ret));
				mgmt_txn_send_commit_cfg_reply(
					txn, MGMTD_DS_LOCK_FAILED,
					"Lock running DS before implicit commit failed!");
				goto mgmt_txn_process_set_cfg_done;
			}

			mgmt_txn_send_commit_config_req(
				txn->txn_id, txn_req->req_id,
				txn_req->req.set_cfg->ds_id,
				txn_req->req.set_cfg->ds_ctx,
				txn_req->req.set_cfg->dst_ds_id,
				txn_req->req.set_cfg->dst_ds_ctx, false,
				false, true);

			if (mm->perf_stats_en)
				gettimeofday(&cmt_stats->last_start, NULL);
			cmt_stats->commit_cnt++;
		} else if (mgmt_fe_send_set_cfg_reply(
				   txn->session_id, txn->txn_id,
				   txn_req->req.set_cfg->ds_id,
				   txn_req->req_id, MGMTD_SUCCESS, NULL, false)
			   != 0) {
			MGMTD_TXN_ERR(
				"Failed to send SET_CONFIG_REPLY for txn %p session 0x%llx",
				txn, (unsigned long long)txn->session_id);
			error = true;
		}

	mgmt_txn_process_set_cfg_done:

		/*
		 * Note: The following will remove it from the list as well.
		 */
		mgmt_txn_req_free(&txn_req);

		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
			break;
	}

	/* Reschedule ourselves if the per-run budget was exhausted. */
	left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
	if (left) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
			(int)left);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
	}
}
741
742static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
743 enum mgmt_result result,
744 const char *error_if_any)
745{
746 int ret = 0;
747 bool success, create_cmt_info_rec;
748
749 if (!txn->commit_cfg_req)
750 return -1;
751
752 success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
753
754 if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
755 && mgmt_fe_send_commit_cfg_reply(
756 txn->session_id, txn->txn_id,
757 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
758 txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
759 txn->commit_cfg_req->req_id,
760 txn->commit_cfg_req->req.commit_cfg.validate_only,
761 result, error_if_any)
762 != 0) {
763 MGMTD_TXN_ERR(
764 "Failed to send COMMIT-CONFIG-REPLY for Txn %p Sessn 0x%llx",
765 txn, (unsigned long long)txn->session_id);
766 }
767
768 if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
769 && mgmt_fe_send_set_cfg_reply(
770 txn->session_id, txn->txn_id,
771 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
772 txn->commit_cfg_req->req_id,
773 success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
774 error_if_any, true)
775 != 0) {
776 MGMTD_TXN_ERR(
777 "Failed to send SET-CONFIG-REPLY for Txn %p Sessn 0x%llx",
778 txn, (unsigned long long)txn->session_id);
779 }
780
781 if (success) {
782 /* Stop the commit-timeout timer */
783 THREAD_OFF(txn->comm_cfg_timeout);
784
785 create_cmt_info_rec =
786 (result != MGMTD_NO_CFG_CHANGES &&
787 !txn->commit_cfg_req->req.commit_cfg.rollback);
788
789 /*
790 * Successful commit: Merge Src DS into Dst DS if and only if
791 * this was not a validate-only or abort request.
792 */
793 if ((txn->session_id
794 && !txn->commit_cfg_req->req.commit_cfg.validate_only
795 && !txn->commit_cfg_req->req.commit_cfg.abort)
796 || txn->commit_cfg_req->req.commit_cfg.rollback) {
797 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
798 .src_ds_ctx,
799 txn->commit_cfg_req->req.commit_cfg
800 .dst_ds_ctx,
801 create_cmt_info_rec);
802 }
803
804 /*
805 * Restore Src DS back to Dest DS only through a commit abort
806 * request.
807 */
808 if (txn->session_id
809 && txn->commit_cfg_req->req.commit_cfg.abort)
810 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
811 .dst_ds_ctx,
812 txn->commit_cfg_req->req.commit_cfg
813 .src_ds_ctx,
814 false);
815 } else {
816 /*
817 * The commit has failied. For implicit commit requests restore
818 * back the contents of the candidate DS.
819 */
820 if (txn->commit_cfg_req->req.commit_cfg.implicit)
821 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
822 .dst_ds_ctx,
823 txn->commit_cfg_req->req.commit_cfg
824 .src_ds_ctx,
825 false);
826 }
827
828 if (txn->commit_cfg_req->req.commit_cfg.rollback) {
829 ret = mgmt_ds_unlock(
830 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
831 if (ret != 0)
832 MGMTD_TXN_ERR(
833 "Failed to unlock the dst DS during rollback : %s",
834 strerror(ret));
1401ee8b
PS
835
836 /*
837 * Resume processing the rollback command.
838 */
839 mgmt_history_rollback_complete(success);
74335ceb
YR
840 }
841
842 if (txn->commit_cfg_req->req.commit_cfg.implicit)
843 if (mgmt_ds_unlock(
844 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx)
845 != 0)
846 MGMTD_TXN_ERR(
847 "Failed to unlock the dst DS during implicit : %s",
848 strerror(ret));
849
850 txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
851 mgmt_txn_req_free(&txn->commit_cfg_req);
852
853 /*
854 * The CONFIG Transaction should be destroyed from Frontend-adapter.
855 * But in case the transaction is not triggered from a front-end session
856 * we need to cleanup by itself.
857 */
858 if (!txn->session_id)
859 mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
860
861 return 0;
862}
863
864static void
865mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
866 struct mgmt_txn_be_cfg_batch *cfg_btch,
867 struct mgmt_txn_batches_head *src_list,
868 struct mgmt_txn_batches_head *dst_list,
869 bool update_commit_phase,
870 enum mgmt_commit_phase to_phase)
871{
872 mgmt_txn_batches_del(src_list, cfg_btch);
873
874 if (update_commit_phase) {
875 MGMTD_TXN_DBG("Move Txn-Id %p Batch-Id %p from '%s' --> '%s'",
876 cfg_btch->txn, cfg_btch,
877 mgmt_commit_phase2str(cfg_btch->comm_phase),
878 mgmt_txn_commit_phase_str(cfg_btch->txn, false));
879 cfg_btch->comm_phase = to_phase;
880 }
881
882 mgmt_txn_batches_add_tail(dst_list, cfg_btch);
883}
884
885static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
886 struct mgmt_commit_cfg_req *cmtcfg_req,
887 struct mgmt_txn_batches_head *src_list,
888 struct mgmt_txn_batches_head *dst_list,
889 bool update_commit_phase,
890 enum mgmt_commit_phase to_phase)
891{
892 struct mgmt_txn_be_cfg_batch *cfg_btch;
893
894 FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, cfg_btch) {
895 mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, cfg_btch, src_list,
896 dst_list, update_commit_phase,
897 to_phase);
898 }
899}
900
/*
 * Advance the whole commit to its next phase if, and only if, every
 * subscribed backend client has already finished the current phase
 * (i.e. has no batches left on its curr_batches list). On success
 * the next_batches are folded back into curr_batches and the
 * COMMITCFG event is re-scheduled. Returns -1 if some client is
 * still behind, 0 once the commit has moved on.
 */
static int
mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				   struct mgmt_commit_cfg_req *cmtcfg_req)
{
	struct mgmt_txn_batches_head *curr_list, *next_list;
	enum mgmt_be_client_id id;

	MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * Check if all clients has moved to next phase or not.
	 */
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed &&
		    mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
			/*
			 * There's atleast once client who hasn't moved to
			 * next phase.
			 *
			 * TODO: Need to re-think this design for the case
			 * set of validators for a given YANG data item is
			 * different from the set of notifiers for the same.
			 */
			return -1;
		}
	}

	MGMTD_TXN_DBG("Move entire Txn-Id %p from '%s' to '%s'", txn,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * If we are here, it means all the clients has moved to next phase.
	 * So we can move the whole commit to next phase.
	 */
	cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
	cmtcfg_req->next_phase++;
	MGMTD_TXN_DBG(
		"Move back all config batches for Txn %p from next to current branch",
		txn);
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		curr_list = &cmtcfg_req->curr_batches[id];
		next_list = &cmtcfg_req->next_batches[id];
		/* No phase update here: batches already carry the phase. */
		mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
					  curr_list, false, 0);
	}

	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	return 0;
}
954
/*
 * One backend client ('adapter') has completed the current commit
 * phase: move all its current batches to the next-phase list, then
 * check whether the commit as a whole can advance. Returns -1 when
 * the transaction has no ongoing commit, 0 otherwise.
 */
static int
mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				  struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_commit_cfg_req *cmtcfg_req;
	struct mgmt_txn_batches_head *curr_list, *next_list;

	if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
		return -1;

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	MGMTD_TXN_DBG(
		"Move Txn-Id %p for '%s' Phase(current: '%s' next:'%s')", txn,
		adapter->name, mgmt_txn_commit_phase_str(txn, true),
		mgmt_txn_commit_phase_str(txn, false));

	MGMTD_TXN_DBG(
		"Move all config batches for '%s' from current to next list",
		adapter->name);
	curr_list = &cmtcfg_req->curr_batches[adapter->id];
	next_list = &cmtcfg_req->next_batches[adapter->id];
	/* Batches are stamped with the next phase as they move. */
	mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
				  cmtcfg_req->next_phase);

	MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * Check if all clients has moved to next phase or not.
	 */
	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
991
992static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
993 struct nb_config_cbs *changes)
994{
995 struct nb_config_cb *cb, *nxt;
996 struct nb_config_change *chg;
997 struct mgmt_txn_be_cfg_batch *cfg_btch;
998 struct mgmt_be_client_subscr_info subscr_info;
999 char *xpath = NULL, *value = NULL;
1000 char err_buf[1024];
1001 enum mgmt_be_client_id id;
1002 struct mgmt_be_client_adapter *adapter;
1003 struct mgmt_commit_cfg_req *cmtcfg_req;
1004 bool found_validator;
1005 int num_chgs = 0;
1006 int xpath_len, value_len;
1007
1008 cmtcfg_req = &txn_req->req.commit_cfg;
1009
1010 RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
1011 chg = (struct nb_config_change *)cb;
1012
1013 /*
1014 * Could have directly pointed to xpath in nb_node.
1015 * But dont want to mess with it now.
1016 * xpath = chg->cb.nb_node->xpath;
1017 */
1018 xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
1019 if (!xpath) {
1020 (void)mgmt_txn_send_commit_cfg_reply(
1021 txn_req->txn, MGMTD_INTERNAL_ERROR,
1022 "Internal error! Could not get Xpath from Ds node!");
1023 goto mgmt_txn_create_config_batches_failed;
1024 }
1025
1026 value = (char *)lyd_get_value(chg->cb.dnode);
1027 if (!value)
1028 value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
1029
1030 MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
1031 value ? value : "NIL");
1032
1033 if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info)
1034 != 0) {
1035 snprintf(err_buf, sizeof(err_buf),
1036 "No backend module found for XPATH: '%s",
1037 xpath);
1038 (void)mgmt_txn_send_commit_cfg_reply(
1039 txn_req->txn, MGMTD_INTERNAL_ERROR, err_buf);
1040 goto mgmt_txn_create_config_batches_failed;
1041 }
1042
1043 xpath_len = strlen(xpath) + 1;
1044 value_len = strlen(value) + 1;
1045 found_validator = false;
1046 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1047 if (!subscr_info.xpath_subscr[id].validate_config
1048 && !subscr_info.xpath_subscr[id].notify_config)
1049 continue;
1050
1051 adapter = mgmt_be_get_adapter_by_id(id);
1052 if (!adapter)
1053 continue;
1054
1055 cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
1056 if (!cfg_btch
1057 || (cfg_btch->num_cfg_data
1058 == MGMTD_MAX_CFG_CHANGES_IN_BATCH)
1059 || (cfg_btch->buf_space_left
1060 < (xpath_len + value_len))) {
1061 /* Allocate a new config batch */
1062 cfg_btch = mgmt_txn_cfg_batch_alloc(
1063 txn_req->txn, id, adapter);
1064 }
1065
1066 cfg_btch->buf_space_left -= (xpath_len + value_len);
1067 memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
1068 &subscr_info.xpath_subscr[id],
1069 sizeof(cfg_btch->xp_subscr[0]));
1070
1071 mgmt_yang_cfg_data_req_init(
1072 &cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
1073 cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
1074 &cfg_btch->cfg_data[cfg_btch->num_cfg_data];
1075
1076 if (chg->cb.operation == NB_OP_DESTROY)
1077 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1078 .req_type =
1079 MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
1080 else
1081 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1082 .req_type =
1083 MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
1084
1085 mgmt_yang_data_init(
1086 &cfg_btch->data[cfg_btch->num_cfg_data]);
1087 cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
1088 &cfg_btch->data[cfg_btch->num_cfg_data];
1089 cfg_btch->data[cfg_btch->num_cfg_data].xpath = xpath;
1090 xpath = NULL;
1091
1092 mgmt_yang_data_value_init(
1093 &cfg_btch->value[cfg_btch->num_cfg_data]);
1094 cfg_btch->data[cfg_btch->num_cfg_data].value =
1095 &cfg_btch->value[cfg_btch->num_cfg_data];
1096 cfg_btch->value[cfg_btch->num_cfg_data].value_case =
1097 MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1098 cfg_btch->value[cfg_btch->num_cfg_data]
1099 .encoded_str_val = value;
1100 value = NULL;
1101
1102 if (subscr_info.xpath_subscr[id].validate_config)
1103 found_validator = true;
1104
1105 cmtcfg_req->subscr_info.xpath_subscr[id].subscribed |=
1106 subscr_info.xpath_subscr[id].subscribed;
1107 MGMTD_TXN_DBG(
1108 " -- %s, {V:%d, N:%d}, Batch: %p, Item:%d",
1109 adapter->name,
1110 subscr_info.xpath_subscr[id].validate_config,
1111 subscr_info.xpath_subscr[id].notify_config,
1112 cfg_btch, (int)cfg_btch->num_cfg_data);
1113
1114 cfg_btch->num_cfg_data++;
1115 num_chgs++;
1116 }
1117
1118 if (!found_validator) {
1119 snprintf(err_buf, sizeof(err_buf),
1120 "No validator module found for XPATH: '%s",
1121 xpath);
1122 MGMTD_TXN_ERR("***** %s", err_buf);
1123 }
1124 }
1125
1126 cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
1127 if (!num_chgs) {
1128 (void)mgmt_txn_send_commit_cfg_reply(
1129 txn_req->txn, MGMTD_NO_CFG_CHANGES,
1130 "No changes found to commit!");
1131 goto mgmt_txn_create_config_batches_failed;
1132 }
1133
1134 cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
1135 return 0;
1136
1137mgmt_txn_create_config_batches_failed:
1138
1139 if (xpath)
1140 free(xpath);
1141
1142 return -1;
1143}
1144
/*
 * Validate a COMMIT_CONFIG request and build per-backend config batches
 * for it (the PREPARE_CFG phase of the commit state machine).
 *
 * The change-set comes from one of three places, in priority order:
 *   1. pre-computed changes attached to the request (req.commit_cfg.cfg_chgs),
 *   2. the candidate DS's scratch change buffer,
 *   3. a full candidate-vs-running diff (e.g. after a file load).
 *
 * On success the commit moves to the TXN_CREATE phase and the commit
 * timeout timer is armed. Returns 0 on success (including early success
 * replies for abort / validate-only requests), -1 on failure (an error
 * reply has already been sent to the front-end).
 */
static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
{
	struct nb_context nb_ctx;
	struct nb_config *nb_config;
	struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	int ret;
	bool del_cfg_chgs = false;

	ret = 0;
	memset(&nb_ctx, 0, sizeof(nb_ctx));
	memset(&changes, 0, sizeof(changes));
	/* Changes supplied with the request: skip DS checks and validation. */
	if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
		cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
		del_cfg_chgs = true;
		goto mgmt_txn_prep_config_validation_done;
	}

	/* Only candidate -> running commits are supported. */
	if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
	    != MGMTD_DS_CANDIDATE) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Source DS cannot be any other than CANDIDATE!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
	    != MGMTD_DS_RUNNING) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Destination DS cannot be any other than RUNNING!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM, "No such source datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"No such destination datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.abort) {
		/*
		 * This is a commit abort request. Return back success.
		 * That should trigger a restore of Candidate datastore to
		 * Running.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		goto mgmt_txn_prepare_config_done;
	}

	nb_config = mgmt_ds_get_nb_config(
		txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
	if (!nb_config) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			"Unable to retrieve Commit DS Config Tree!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/*
	 * Check for diffs from scratch buffer. If found empty
	 * get the diff from Candidate DS itself.
	 */
	cfg_chgs = &nb_config->cfg_chgs;
	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This could be the case when the config is directly
		 * loaded onto the candidate DS from a file. Get the
		 * diff from a full comparison of the candidate and
		 * running DSs.
		 */
		nb_config_diff(
			mgmt_ds_get_nb_config(txn->commit_cfg_req->req
						      .commit_cfg.dst_ds_ctx),
			nb_config, &changes);
		cfg_chgs = &changes;
		del_cfg_chgs = true;
	}

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_NO_CFG_CHANGES,
			"No changes found to be committed!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
				      ->validate_start,
			     NULL);
	/*
	 * Validate YANG contents of the source DS and get the diff
	 * between source and destination DS contents.
	 */
	char err_buf[1024] = {0};
	nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
	nb_ctx.user = (void *)txn;
	ret = nb_candidate_validate_yang(nb_config, false, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		/* Substitute a generic message when none was produced. */
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}
	/*
	 * Perform application level validations locally on the MGMTD
	 * process by calling application specific validation routines
	 * loaded onto MGMTD process using libraries.
	 */
	ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
		/*
		 * This was a validate-only COMMIT request return success.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		goto mgmt_txn_prepare_config_done;
	}
#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */

mgmt_txn_prep_config_validation_done:

	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
				      ->prep_cfg_start,
			     NULL);

	/*
	 * Iterate over the diffs and create ordered batches of config
	 * commands to be validated.
	 */
	ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
	if (ret != 0) {
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/* Move to the Transaction Create Phase */
	txn->commit_cfg_req->req.commit_cfg.curr_phase =
		MGMTD_COMMIT_PHASE_TXN_CREATE;
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	/*
	 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
	 * backend.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
mgmt_txn_prepare_config_done:

	/* Free the change-set when this function (or its caller) owned it. */
	if (cfg_chgs && del_cfg_chgs)
		nb_config_diff_del_changes(cfg_chgs);

	return ret;
}
1331
1332static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
1333{
1334 enum mgmt_be_client_id id;
1335 struct mgmt_be_client_adapter *adapter;
1336 struct mgmt_commit_cfg_req *cmtcfg_req;
1337 struct mgmt_txn_be_cfg_batch *cfg_btch;
1338
1339 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1340
1341 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1342 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1343 if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed) {
1344 adapter = mgmt_be_get_adapter_by_id(id);
1345 if (mgmt_be_create_txn(adapter, txn->txn_id)
1346 != 0) {
1347 (void)mgmt_txn_send_commit_cfg_reply(
1348 txn, MGMTD_INTERNAL_ERROR,
1349 "Could not send TXN_CREATE to backend adapter");
1350 return -1;
1351 }
1352
1353 FOREACH_TXN_CFG_BATCH_IN_LIST (
1354 &txn->commit_cfg_req->req.commit_cfg
1355 .curr_batches[id],
1356 cfg_btch)
1357 cfg_btch->comm_phase =
1358 MGMTD_COMMIT_PHASE_TXN_CREATE;
1359 }
1360 }
1361
1362 txn->commit_cfg_req->req.commit_cfg.next_phase =
1363 MGMTD_COMMIT_PHASE_SEND_CFG;
1364
1365 /*
1366 * Dont move the commit to next phase yet. Wait for the TXN_REPLY to
1367 * come back.
1368 */
1369
1370 MGMTD_TXN_DBG(
1371 "Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')", txn,
1372 (unsigned long long)txn->session_id,
1373 mgmt_txn_commit_phase_str(txn, true),
1374 mgmt_txn_commit_phase_str(txn, false));
1375
1376 return 0;
1377}
1378
1379static int
1380mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
1381 struct mgmt_be_client_adapter *adapter)
1382{
1383 struct mgmt_commit_cfg_req *cmtcfg_req;
1384 struct mgmt_txn_be_cfg_batch *cfg_btch;
1385 struct mgmt_be_cfgreq cfg_req = {0};
1386 size_t num_batches, indx;
1387
1388 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1389
1390 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1391 assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed);
1392
1393 indx = 0;
1394 num_batches =
1395 mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
1396 FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
1397 cfg_btch) {
1398 assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
1399
1400 cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
1401 cfg_req.num_reqs = cfg_btch->num_cfg_data;
1402 indx++;
1403 if (mgmt_be_send_cfg_data_create_req(
1404 adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
1405 indx == num_batches ? true : false)
1406 != 0) {
1407 (void)mgmt_txn_send_commit_cfg_reply(
1408 txn, MGMTD_INTERNAL_ERROR,
1409 "Internal Error! Could not send config data to backend!");
1410 MGMTD_TXN_ERR(
1411 "Could not send CFGDATA_CREATE for Txn %p Batch %p to client '%s",
1412 txn, cfg_btch, adapter->name);
1413 return -1;
1414 }
1415
1416 cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
1417 mgmt_move_txn_cfg_batch_to_next(
1418 cmtcfg_req, cfg_btch,
1419 &cmtcfg_req->curr_batches[adapter->id],
1420 &cmtcfg_req->next_batches[adapter->id], true,
1421 MGMTD_COMMIT_PHASE_SEND_CFG);
1422 }
1423
1424 /*
1425 * This could ne the last Backend Client to send CFGDATA_CREATE_REQ to.
1426 * Try moving the commit to next phase.
1427 */
1428 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
1429
1430 return 0;
1431}
1432
1433static int
1434mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
1435 struct mgmt_be_client_adapter *adapter)
1436{
1437 struct mgmt_commit_cfg_req *cmtcfg_req;
1438 struct mgmt_txn_be_cfg_batch *cfg_btch;
1439
1440 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1441
1442 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1443 if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed) {
1444 adapter = mgmt_be_get_adapter_by_id(adapter->id);
1445 (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
1446
1447 FOREACH_TXN_CFG_BATCH_IN_LIST (
1448 &txn->commit_cfg_req->req.commit_cfg
1449 .curr_batches[adapter->id],
1450 cfg_btch)
1451 cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
1452 }
1453
1454 return 0;
1455}
1456
e6685141 1457static void mgmt_txn_cfg_commit_timedout(struct event *thread)
74335ceb
YR
1458{
1459 struct mgmt_txn_ctx *txn;
1460
1461 txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
1462 assert(txn);
1463
1464 assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
1465
1466 if (!txn->commit_cfg_req)
1467 return;
1468
1469 MGMTD_TXN_ERR(
1470 "Backend operations for Config Txn %p has timedout! Aborting commit!!",
1471 txn);
1472
1473 /*
1474 * Send a COMMIT_CONFIG_REPLY with failure.
1475 * NOTE: The transaction cleanup will be triggered from Front-end
1476 * adapter.
1477 */
1478 mgmt_txn_send_commit_cfg_reply(
1479 txn, MGMTD_INTERNAL_ERROR,
1480 "Operation on the backend timed-out. Aborting commit!");
1481}
1482
1483/*
1484 * Send CFG_APPLY_REQs to all the backend client.
1485 *
1486 * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
1487 * for all backend clients has been generated. Please see
1488 * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
1489 * for details.
1490 */
1491static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
1492{
1493 enum mgmt_be_client_id id;
1494 struct mgmt_be_client_adapter *adapter;
1495 struct mgmt_commit_cfg_req *cmtcfg_req;
1496 struct mgmt_txn_batches_head *btch_list;
1497 struct mgmt_txn_be_cfg_batch *cfg_btch;
1498
1499 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1500
1501 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1502 if (cmtcfg_req->validate_only) {
1503 /*
1504 * If this was a validate-only COMMIT request return success.
1505 */
1506 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
1507 NULL);
1508 return 0;
1509 }
1510
1511 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1512 if (cmtcfg_req->subscr_info.xpath_subscr[id].notify_config) {
1513 adapter = mgmt_be_get_adapter_by_id(id);
1514 if (!adapter)
1515 return -1;
1516
1517 btch_list = &cmtcfg_req->curr_batches[id];
1518 if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
1519 != 0) {
1520 (void)mgmt_txn_send_commit_cfg_reply(
1521 txn, MGMTD_INTERNAL_ERROR,
1522 "Could not send CFG_APPLY_REQ to backend adapter");
1523 return -1;
1524 }
1525 cmtcfg_req->cmt_stats->last_num_apply_reqs++;
1526
1527 UNSET_FLAG(adapter->flags,
1528 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
1529
1530 FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
1531 cfg_btch->comm_phase =
1532 MGMTD_COMMIT_PHASE_APPLY_CFG;
1533 }
1534 }
1535
1536 txn->commit_cfg_req->req.commit_cfg.next_phase =
1537 MGMTD_COMMIT_PHASE_TXN_DELETE;
1538
1539 /*
1540 * Dont move the commit to next phase yet. Wait for all VALIDATE_REPLIES
1541 * to come back.
1542 */
1543
1544 return 0;
1545}
1546
/*
 * Commit-config state machine driver: dispatches work for the commit's
 * current phase (PREPARE_CFG -> TXN_CREATE -> SEND_CFG -> APPLY_CFG ->
 * TXN_DELETE). Scheduled via mgmt_txn_register_event(txn,
 * MGMTD_TXN_PROC_COMMITCFG).
 */
static void mgmt_txn_process_commit_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG(
		"Processing COMMIT_CONFIG for Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')",
		txn, (unsigned long long)txn->session_id,
		mgmt_txn_commit_phase_str(txn, true),
		mgmt_txn_commit_phase_str(txn, false));

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	switch (cmtcfg_req->curr_phase) {
	case MGMTD_COMMIT_PHASE_PREPARE_CFG:
		mgmt_txn_prepare_config(txn);
		break;
	case MGMTD_COMMIT_PHASE_TXN_CREATE:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
				     NULL);
		/*
		 * Send TXN_CREATE_REQ to all Backend now.
		 */
		mgmt_txn_send_be_txn_create(txn);
		break;
	case MGMTD_COMMIT_PHASE_SEND_CFG:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
				     NULL);
		/*
		 * All CFGDATA_CREATE_REQ should have been sent to
		 * Backend by now. Nothing to do here but wait for the
		 * replies; phase advances via the reply handlers.
		 */
#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
		MGMTD_TXN_DBG(
			"Txn:%p Session:0x%llx, trigger sending CFG_VALIDATE_REQ to all backend clients",
			txn, (unsigned long long)txn->session_id);
#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
		MGMTD_TXN_DBG(
			"Txn:%p Session:0x%llx, trigger sending CFG_APPLY_REQ to all backend clients",
			txn, (unsigned long long)txn->session_id);
#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
		break;
	case MGMTD_COMMIT_PHASE_APPLY_CFG:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
				     NULL);
		/*
		 * We should have received successful CFG_VALIDATE_REPLY from
		 * all concerned Backend Clients by now. Send out the
		 * CFG_APPLY_REQs now.
		 */
		mgmt_txn_send_be_cfg_apply(txn);
		break;
	case MGMTD_COMMIT_PHASE_TXN_DELETE:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
				     NULL);
		/*
		 * We would have sent TXN_DELETE_REQ to all backend by now.
		 * Send a successful CONFIG_COMMIT_REPLY back to front-end.
		 * NOTE: This should also trigger DS merge/unlock and Txn
		 * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
		 * more details.
		 */
		THREAD_OFF(txn->comm_cfg_timeout);
		mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
		break;
	case MGMTD_COMMIT_PHASE_MAX:
		break;
	}

	MGMTD_TXN_DBG(
		"Txn:%p Session:0x%llx, Phase updated to (Current:'%s', Next: '%s')",
		txn, (unsigned long long)txn->session_id,
		mgmt_txn_commit_phase_str(txn, true),
		mgmt_txn_commit_phase_str(txn, false));
}
1631
1632static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
1633{
1634 size_t indx;
1635
1636 for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
1637 get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
1638}
1639
1640static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
1641{
1642 int indx;
1643
1644 for (indx = 0; indx < get_reply->num_reply; indx++) {
1645 if (get_reply->reply_xpathp[indx]) {
1646 free(get_reply->reply_xpathp[indx]);
1647 get_reply->reply_xpathp[indx] = 0;
1648 }
1649 if (get_reply->reply_data[indx].xpath) {
1650 zlog_debug("%s free xpath %p", __func__,
1651 get_reply->reply_data[indx].xpath);
1652 free(get_reply->reply_data[indx].xpath);
1653 get_reply->reply_data[indx].xpath = 0;
1654 }
1655 }
1656
1657 get_reply->num_reply = 0;
1658 memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
1659 memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
1660 memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
1661
1662 memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
1663
1664 mgmt_init_get_data_reply(get_reply);
1665}
1666
1667static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
1668{
1669 if (get_data->reply)
1670 mgmt_reset_get_data_reply(get_data->reply);
1671}
1672
/*
 * Flush the accumulated reply batch of a GET-CONFIG/GET-DATA request to
 * the front-end session, then reset the reply buffers for the next
 * batch. next_indx signals to the client whether more data follows
 * (-1 means this was the last batch).
 */
static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
					    struct mgmt_get_data_req *get_req)
{
	struct mgmt_get_data_reply *get_reply;
	Mgmtd__YangDataReply *data_reply;

	get_reply = get_req->reply;
	if (!get_reply)
		return;

	data_reply = &get_reply->data_reply;
	mgmt_yang_data_reply_init(data_reply);
	data_reply->n_data = get_reply->num_reply;
	data_reply->data = get_reply->reply_datap;
	/* -1 tells the front-end there is no further batch to fetch. */
	data_reply->next_indx =
		(!get_reply->last_batch ? get_req->total_reply : -1);

	MGMTD_TXN_DBG("Sending %d Get-Config/Data replies (next-idx:%lld)",
		      (int) data_reply->n_data,
		      (long long)data_reply->next_indx);

	/* Pick the reply API matching the request type; failures are logged only. */
	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_GETCFG:
		if (mgmt_fe_send_get_cfg_reply(
			    txn_req->txn->session_id, txn_req->txn->txn_id,
			    get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
			    data_reply, NULL)
		    != 0) {
			MGMTD_TXN_ERR(
				"Failed to send GET-CONFIG-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
				txn_req->txn,
				(unsigned long long)txn_req->txn->session_id,
				(unsigned long long)txn_req->req_id);
		}
		break;
	case MGMTD_TXN_PROC_GETDATA:
		if (mgmt_fe_send_get_data_reply(
			    txn_req->txn->session_id, txn_req->txn->txn_id,
			    get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
			    data_reply, NULL)
		    != 0) {
			MGMTD_TXN_ERR(
				"Failed to send GET-DATA-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
				txn_req->txn,
				(unsigned long long)txn_req->txn->session_id,
				(unsigned long long)txn_req->req_id);
		}
		break;
	case MGMTD_TXN_PROC_SETCFG:
	case MGMTD_TXN_PROC_COMMITCFG:
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
			      txn_req->req_event);
		break;
	}

	/*
	 * Reset reply buffer for next reply.
	 */
	mgmt_reset_get_data_reply_buf(get_req);
}
1735
1736static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
1737 char *xpath,
1738 struct lyd_node *node,
1739 struct nb_node *nb_node,
1740 void *ctx)
1741{
1742 struct mgmt_txn_req *txn_req;
1743 struct mgmt_get_data_req *get_req;
1744 struct mgmt_get_data_reply *get_reply;
1745 Mgmtd__YangData *data;
1746 Mgmtd__YangDataValue *data_value;
1747
1748 txn_req = (struct mgmt_txn_req *)ctx;
1749 if (!txn_req)
1750 goto mgmtd_ignore_get_cfg_reply_data;
1751
1752 if (!(node->schema->nodetype & LYD_NODE_TERM))
1753 goto mgmtd_ignore_get_cfg_reply_data;
1754
1755 assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
1756 || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
1757
1758 get_req = txn_req->req.get_data;
1759 assert(get_req);
1760 get_reply = get_req->reply;
1761 data = &get_reply->reply_data[get_reply->num_reply];
1762 data_value = &get_reply->reply_value[get_reply->num_reply];
1763
1764 mgmt_yang_data_init(data);
1765 data->xpath = xpath;
1766 mgmt_yang_data_value_init(data_value);
1767 data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1768 data_value->encoded_str_val = (char *)lyd_get_value(node);
1769 data->value = data_value;
1770
1771 get_reply->num_reply++;
1772 get_req->total_reply++;
1773 MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
1774 data->xpath, data_value->encoded_str_val);
1775
1776 if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
1777 mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
1778
1779 return;
1780
1781mgmtd_ignore_get_cfg_reply_data:
1782 if (xpath)
1783 free(xpath);
1784}
1785
/*
 * Service one GET-CONFIG/GET-DATA request directly from a config
 * datastore: iterate each requested xpath, stream out reply batches via
 * mgmt_txn_iter_and_send_get_cfg_reply(), and finally free the request
 * (no backend round-trip is needed for config data).
 *
 * Always returns 0; per-xpath errors are reported to the front-end
 * before the request is freed.
 */
static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
			       struct mgmt_txn_req *txn_req,
			       struct mgmt_ds_ctx *ds_ctx)
{
	struct mgmt_txn_reqs_head *req_list = NULL;
	struct mgmt_txn_reqs_head *pending_list = NULL;
	int indx;
	struct mgmt_get_data_req *get_data;
	struct mgmt_get_data_reply *get_reply;

	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_GETCFG:
		req_list = &txn->get_cfg_reqs;
		break;
	case MGMTD_TXN_PROC_GETDATA:
		req_list = &txn->get_data_reqs;
		break;
	case MGMTD_TXN_PROC_SETCFG:
	case MGMTD_TXN_PROC_COMMITCFG:
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		assert(!"Wrong txn request type!");
		break;
	}

	get_data = txn_req->req.get_data;

	/* Allocate the reusable reply buffer on first use. */
	if (!get_data->reply) {
		get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
					  sizeof(struct mgmt_get_data_reply));
		if (!get_data->reply) {
			mgmt_fe_send_get_cfg_reply(
				txn->session_id, txn->txn_id,
				get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"Internal error: Unable to allocate reply buffers!");
			goto mgmt_txn_get_config_failed;
		}
	}

	/*
	 * Read data contents from the DS and respond back directly.
	 * No need to go to backend for getting data.
	 */
	get_reply = get_data->reply;
	for (indx = 0; indx < get_data->num_xpaths; indx++) {
		MGMTD_TXN_DBG("Trying to get all data under '%s'",
			      get_data->xpaths[indx]);
		mgmt_init_get_data_reply(get_reply);
		if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
				      mgmt_txn_iter_and_send_get_cfg_reply,
				      (void *)txn_req, true)
		    == -1) {
			MGMTD_TXN_DBG("Invalid Xpath '%s",
				      get_data->xpaths[indx]);
			mgmt_fe_send_get_cfg_reply(
				txn->session_id, txn->txn_id,
				get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
			goto mgmt_txn_get_config_failed;
		}
		MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
			      get_reply->num_reply, get_data->xpaths[indx]);
		/* Flush whatever is left in the buffer as the final batch. */
		get_reply->last_batch = true;
		mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
	}

mgmt_txn_get_config_failed:

	/*
	 * NOTE(review): pending_list is never assigned in this function, so
	 * the branch below is currently dead and the request is always
	 * freed — presumably a placeholder for future backend-driven GETs;
	 * verify before relying on the pending path.
	 */
	if (pending_list) {
		/*
		 * Move the transaction to corresponding pending list.
		 */
		if (req_list)
			mgmt_txn_reqs_del(req_list, txn_req);
		txn_req->pending_be_proc = true;
		mgmt_txn_reqs_add_tail(pending_list, txn_req);
		MGMTD_TXN_DBG(
			"Moved Req: %p for Txn: %p from Req-List to Pending-List",
			txn_req, txn_req->txn);
	} else {
		/*
		 * Delete the txn request. It will also remove it from request
		 * list.
		 */
		mgmt_txn_req_free(&txn_req);
	}

	return 0;
}
1876
/*
 * Event handler: drain up to MGMTD_TXN_MAX_NUM_GETCFG_PROC GET_CONFIG
 * requests from the transaction's queue, rescheduling itself if any
 * remain afterwards.
 */
static void mgmt_txn_process_get_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	int num_processed = 0;
	bool error;

	txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG(
		"Processing %d GET_CONFIG requests for Txn:%p Session:0x%llx",
		(int)mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn,
		(unsigned long long)txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
		ds_ctx = txn_req->req.get_data->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_get_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"No such datastore!");
			error = true;
			goto mgmt_txn_process_get_cfg_done;
		}

		if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
			MGMTD_TXN_ERR(
				"Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
				txn_req->req.get_data->ds_id, txn,
				(unsigned long long)txn->session_id,
				(unsigned long long)txn_req->req_id);
			error = true;
		}

	mgmt_txn_process_get_cfg_done:

		if (error) {
			/*
			 * Delete the txn request.
			 * Note: The following will remove it from the list
			 * as well.
			 */
			mgmt_txn_req_free(&txn_req);
		}

		/*
		 * Else the transaction would have been already deleted or
		 * moved to corresponding pending list. No need to delete it.
		 */
		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
			break;
	}

	/* Anything left over gets handled on the next scheduled run. */
	if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
	}
}
1943
/*
 * Event handler: drain up to MGMTD_TXN_MAX_NUM_GETDATA_PROC GET_DATA
 * requests from the transaction's queue. Config datastores are served
 * locally; operational datastores are not supported yet and get an
 * error reply. Reschedules itself if requests remain.
 */
static void mgmt_txn_process_get_data(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	int num_processed = 0;
	bool error;

	txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG(
		"Processing %d GET_DATA requests for Txn:%p Session:0x%llx",
		(int)mgmt_txn_reqs_count(&txn->get_data_reqs), txn,
		(unsigned long long)txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
		ds_ctx = txn_req->req.get_data->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_get_data_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"No such datastore!");
			error = true;
			goto mgmt_txn_process_get_data_done;
		}

		if (mgmt_ds_is_config(ds_ctx)) {
			if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
			    != 0) {
				MGMTD_TXN_ERR(
					"Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
					txn_req->req.get_data->ds_id, txn,
					(unsigned long long)txn->session_id,
					(unsigned long long)txn_req->req_id);
				error = true;
			}
		} else {
			/*
			 * TODO: Trigger GET procedures for Backend
			 * For now return back error.
			 */
			mgmt_fe_send_get_data_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"GET-DATA on Oper DS is not supported yet!");
			error = true;
		}

	mgmt_txn_process_get_data_done:

		if (error) {
			/*
			 * Delete the txn request.
			 * Note: The following will remove it from the list
			 * as well.
			 */
			mgmt_txn_req_free(&txn_req);
		}

		/*
		 * Else the transaction would have been already deleted or
		 * moved to corresponding pending list. No need to delete it.
		 */
		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
			break;
	}

	/* Anything left over gets handled on the next scheduled run. */
	if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
	}
}
2024
2025static struct mgmt_txn_ctx *
2026mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
2027 enum mgmt_txn_type type)
2028{
2029 struct mgmt_txn_ctx *txn;
2030
2031 FOREACH_TXN_IN_LIST (cm, txn) {
2032 if (txn->session_id == session_id && txn->type == type)
2033 return txn;
2034 }
2035
2036 return NULL;
2037}
2038
/*
 * Find or create a transaction for the given session and type.
 *
 * Only one CONFIG transaction may exist system-wide: if one is already
 * active, it is returned only when it belongs to this session (NULL
 * otherwise). A newly created transaction is added to the global list
 * and hash, holds one reference, and is remembered as the active config
 * transaction if of CONFIG type.
 */
static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
						enum mgmt_txn_type type)
{
	struct mgmt_txn_ctx *txn = NULL;

	/*
	 * For 'CONFIG' transaction check if one is already created
	 * or not.
	 */
	if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
		if (mgmt_config_txn_in_progress() == session_id)
			txn = mgmt_txn_mm->cfg_txn;
		goto mgmt_create_txn_done;
	}

	txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
					     type);
	if (!txn) {
		txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
		assert(txn);

		txn->session_id = session_id;
		txn->type = type;
		mgmt_txn_badapters_init(&txn->be_adapters);
		mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
		mgmt_txn_reqs_init(&txn->set_cfg_reqs);
		mgmt_txn_reqs_init(&txn->get_cfg_reqs);
		mgmt_txn_reqs_init(&txn->get_data_reqs);
		mgmt_txn_reqs_init(&txn->pending_get_datas);
		txn->commit_cfg_req = NULL;
		txn->refcount = 0;
		/* Skip 0 so txn-id 0 can serve as an invalid/unset value. */
		if (!mgmt_txn_mm->next_txn_id)
			mgmt_txn_mm->next_txn_id++;
		txn->txn_id = mgmt_txn_mm->next_txn_id++;
		hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);

		MGMTD_TXN_DBG("Added new '%s' MGMTD Transaction '%p'",
			      mgmt_txn_type2str(type), txn);

		if (type == MGMTD_TXN_TYPE_CONFIG)
			mgmt_txn_mm->cfg_txn = txn;

		MGMTD_TXN_LOCK(txn);
	}

mgmt_create_txn_done:
	return txn;
}
2087
/*
 * Drop the caller's reference on the transaction; the context is
 * actually freed (and *txn set to NULL) once the last reference is
 * released inside mgmt_txn_unlock().
 */
static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
{
	MGMTD_TXN_UNLOCK(txn);
}
2092
/*
 * Hash-table key function: hash the 64-bit txn-id as an array of
 * uint32_t words.
 * NOTE(review): the (uint32_t *) cast assumes txn_id is suitably
 * aligned and its size is a multiple of sizeof(uint32_t) -- holds for
 * a uint64_t member.
 */
static unsigned int mgmt_txn_hash_key(const void *data)
{
	const struct mgmt_txn_ctx *txn = data;

	return jhash2((uint32_t *) &txn->txn_id,
		      sizeof(txn->txn_id) / sizeof(uint32_t), 0);
}
2100
2101static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
2102{
2103 const struct mgmt_txn_ctx *txn1 = d1;
2104 const struct mgmt_txn_ctx *txn2 = d2;
2105
2106 return (txn1->txn_id == txn2->txn_id);
2107}
2108
/* Hash destructor callback: release the hash's reference on the txn. */
static void mgmt_txn_hash_free(void *data)
{
	struct mgmt_txn_ctx *ctx = data;

	mgmt_txn_delete(&ctx);
}
2115
2116static void mgmt_txn_hash_init(void)
2117{
2118 if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
2119 return;
2120
2121 mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
2122 mgmt_txn_hash_cmp,
2123 "MGMT Transactions");
2124}
2125
2126static void mgmt_txn_hash_destroy(void)
2127{
2128 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2129 return;
2130
2131 hash_clean(mgmt_txn_mm->txn_hash,
2132 mgmt_txn_hash_free);
2133 hash_free(mgmt_txn_mm->txn_hash);
2134 mgmt_txn_mm->txn_hash = NULL;
2135}
2136
2137static inline struct mgmt_txn_ctx *
2138mgmt_txn_id2ctx(uint64_t txn_id)
2139{
2140 struct mgmt_txn_ctx key = {0};
2141 struct mgmt_txn_ctx *txn;
2142
2143 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2144 return NULL;
2145
2146 key.txn_id = txn_id;
2147 txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
2148
2149 return txn;
2150}
2151
/*
 * Take a reference on the transaction.  file/line identify the call
 * site for debug tracing only (use the MGMTD_TXN_LOCK macro).
 */
static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
			  int line)
{
	txn->refcount++;
	MGMTD_TXN_DBG("%s:%d --> Lock %s Txn %p, Count: %d", file, line,
		      mgmt_txn_type2str(txn->type), txn, txn->refcount);
}
2159
2160static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
2161 int line)
2162{
2163 assert(*txn && (*txn)->refcount);
2164
2165 (*txn)->refcount--;
2166 MGMTD_TXN_DBG("%s:%d --> Unlock %s Txn %p, Count: %d", file, line,
2167 mgmt_txn_type2str((*txn)->type), *txn,
2168 (*txn)->refcount);
2169 if (!(*txn)->refcount) {
2170 if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
2171 if (mgmt_txn_mm->cfg_txn == *txn)
2172 mgmt_txn_mm->cfg_txn = NULL;
2173 THREAD_OFF((*txn)->proc_get_cfg);
2174 THREAD_OFF((*txn)->proc_get_data);
2175 THREAD_OFF((*txn)->proc_comm_cfg);
2176 THREAD_OFF((*txn)->comm_cfg_timeout);
2177 hash_release(mgmt_txn_mm->txn_hash, *txn);
2178 mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
2179
2180 MGMTD_TXN_DBG("Deleted %s Txn %p for Sessn: 0x%llx",
2181 mgmt_txn_type2str((*txn)->type), *txn,
2182 (unsigned long long)(*txn)->session_id);
2183
2184 XFREE(MTYPE_MGMTD_TXN, *txn);
2185 }
2186
2187 *txn = NULL;
2188}
2189
/*
 * Release a transaction as part of (deferred or bulk) cleanup.
 * Currently just drops the reference; *txn is NULLed by the callee.
 */
static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
{
	/* TODO: Any other cleanup applicable */

	mgmt_txn_delete(txn);
}
2196
2197static void
2198mgmt_txn_cleanup_all_txns(void)
2199{
2200 struct mgmt_txn_ctx *txn;
2201
2202 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2203 return;
2204
2205 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
2206 mgmt_txn_cleanup_txn(&txn);
2207}
2208
/* Event-loop callback: deferred cleanup of one transaction context. */
static void mgmt_txn_cleanup(struct event *thread)
{
	struct mgmt_txn_ctx *txn = THREAD_ARG(thread);

	assert(txn);
	mgmt_txn_cleanup_txn(&txn);
}
2218
/*
 * Schedule the processing routine for the given transaction event on
 * the event loop.  Most events fire after a short fixed delay
 * (MGMTD_TXN_PROC_DELAY_USEC); the commit timeout and cleanup events
 * use their own, longer delays.
 */
static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
				    enum mgmt_txn_event event)
{
	struct timeval tv = {.tv_sec = 0,
			     .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};

	assert(mgmt_txn_mm && mgmt_txn_tm);

	switch (event) {
	case MGMTD_TXN_PROC_SETCFG:
		thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
				    txn, &tv, &txn->proc_set_cfg);
		assert(txn->proc_set_cfg);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
				    txn, &tv, &txn->proc_comm_cfg);
		assert(txn->proc_comm_cfg);
		break;
	case MGMTD_TXN_PROC_GETCFG:
		thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
				    txn, &tv, &txn->proc_get_cfg);
		assert(txn->proc_get_cfg);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
				    txn, &tv, &txn->proc_get_data);
		assert(txn->proc_get_data);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
		/* Guard timer: aborts a commit stuck waiting on backends. */
		thread_add_timer_msec(mgmt_txn_tm,
				      mgmt_txn_cfg_commit_timedout, txn,
				      MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
				      &txn->comm_cfg_timeout);
		assert(txn->comm_cfg_timeout);
		break;
	case MGMTD_TXN_CLEANUP:
		/* Cleanup runs slightly later than normal processing. */
		tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
		thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
				    &txn->clnup);
		assert(txn->clnup);
	}
}
2262
/*
 * One-time initialization of the transaction subsystem.  Records the
 * master context and event loop, then sets up the txn list and hash.
 * Asserts if invoked more than once.  Always returns 0.
 */
int mgmt_txn_init(struct mgmt_master *mm, struct thread_master *tm)
{
	if (mgmt_txn_mm || mgmt_txn_tm)
		assert(!"MGMTD TXN: Call txn_init() only once");

	mgmt_txn_mm = mm;
	mgmt_txn_tm = tm;
	mgmt_txns_init(&mm->txn_list);
	mgmt_txn_hash_init();
	assert(!mm->cfg_txn);
	mm->cfg_txn = NULL;

	return 0;
}
2277
/* Shut down the transaction subsystem, releasing all transactions. */
void mgmt_txn_destroy(void)
{
	mgmt_txn_cleanup_all_txns();
	mgmt_txn_hash_destroy();
}
2283
2284uint64_t mgmt_config_txn_in_progress(void)
2285{
2286 if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
2287 return mgmt_txn_mm->cfg_txn->session_id;
2288
2289 return MGMTD_SESSION_ID_NONE;
2290}
2291
2292uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
2293{
2294 struct mgmt_txn_ctx *txn;
2295
2296 txn = mgmt_txn_create_new(session_id, type);
2297 return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
2298}
2299
/* Check whether a txn-id refers to a live transaction. */
bool mgmt_txn_id_is_valid(uint64_t txn_id)
{
	return mgmt_txn_id2ctx(txn_id) != NULL;
}
2304
2305void mgmt_destroy_txn(uint64_t *txn_id)
2306{
2307 struct mgmt_txn_ctx *txn;
2308
2309 txn = mgmt_txn_id2ctx(*txn_id);
2310 if (!txn)
2311 return;
2312
2313 mgmt_txn_delete(&txn);
2314 *txn_id = MGMTD_TXN_ID_NONE;
2315}
2316
2317enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
2318{
2319 struct mgmt_txn_ctx *txn;
2320
2321 txn = mgmt_txn_id2ctx(txn_id);
2322 if (!txn)
2323 return MGMTD_TXN_TYPE_NONE;
2324
2325 return txn->type;
2326}
2327
/*
 * Queue a SETCFG request on a transaction and schedule its processing.
 *
 * Translates the frontend's protobuf config-data requests into
 * northbound cfg-changes: DELETE_DATA maps to NB_OP_DESTROY; SET_DATA
 * maps to NB_OP_MODIFY when the xpath already exists in the datastore,
 * NB_OP_CREATE otherwise.  Other request types are skipped.  Values
 * are strdup'ed into the change entries (freed with the request).
 * Returns 0 on success, -1 on unknown txn or when a second SETCFG is
 * queued for an implicit commit.
 */
int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
				 Mgmtd__DatastoreId ds_id,
				 struct mgmt_ds_ctx *ds_ctx,
				 Mgmtd__YangCfgDataReq **cfg_req,
				 size_t num_req, bool implicit_commit,
				 Mgmtd__DatastoreId dst_ds_id,
				 struct mgmt_ds_ctx *dst_ds_ctx)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	size_t indx;
	uint16_t *num_chgs;
	struct nb_cfg_change *cfg_chg;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	/* An implicit commit must carry exactly one SETCFG request. */
	if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
		MGMTD_TXN_ERR(
			"For implicit commit config only one SETCFG-REQ can be allowed!");
		return -1;
	}

	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
	txn_req->req.set_cfg->ds_id = ds_id;
	txn_req->req.set_cfg->ds_ctx = ds_ctx;
	num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
	for (indx = 0; indx < num_req; indx++) {
		cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];

		if (cfg_req[indx]->req_type
		    == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
			cfg_chg->operation = NB_OP_DESTROY;
		else if (cfg_req[indx]->req_type
			 == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
			/* Existing node => modify, otherwise create. */
			cfg_chg->operation =
				mgmt_ds_find_data_node_by_xpath(
					ds_ctx, cfg_req[indx]->data->xpath)
					? NB_OP_MODIFY
					: NB_OP_CREATE;
		else
			continue;

		MGMTD_TXN_DBG(
			"XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
			(cfg_req[indx]->data->value
			 && cfg_req[indx]
				    ->data->value
				    ->encoded_str_val
				 ? cfg_req[indx]->data->value->encoded_str_val
				 : "NULL"));
		strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
			sizeof(cfg_chg->xpath));
		/* Own a private copy of the encoded value (may be NULL). */
		cfg_chg->value = (cfg_req[indx]->data->value
				  && cfg_req[indx]
					     ->data->value
					     ->encoded_str_val
				  ? strdup(cfg_req[indx]
						   ->data->value
						   ->encoded_str_val)
				  : NULL);
		if (cfg_chg->value)
			MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
				      cfg_chg->value, cfg_chg->value);

		(*num_chgs)++;
	}
	txn_req->req.set_cfg->implicit_commit = implicit_commit;
	txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
	txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
	txn_req->req.set_cfg->setcfg_stats =
		mgmt_fe_get_session_setcfg_stats(txn->session_id);
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);

	return 0;
}
2405
/*
 * Queue a COMMIT-CONFIG request on a transaction and trigger the
 * commit state machine.  Only one commit may be in progress per txn;
 * returns -1 on unknown txn or if a commit is already pending, 0 on
 * success.
 */
int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
				    Mgmtd__DatastoreId src_ds_id,
				    struct mgmt_ds_ctx *src_ds_ctx,
				    Mgmtd__DatastoreId dst_ds_id,
				    struct mgmt_ds_ctx *dst_ds_ctx,
				    bool validate_only, bool abort,
				    bool implicit)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	if (txn->commit_cfg_req) {
		MGMTD_TXN_ERR(
			"A commit is already in-progress for Txn %p, session 0x%llx. Cannot start another!",
			txn, (unsigned long long)txn->session_id);
		return -1;
	}

	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = src_ds_id;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = validate_only;
	txn_req->req.commit_cfg.abort = abort;
	txn_req->req.commit_cfg.implicit = implicit;
	txn_req->req.commit_cfg.cmt_stats =
		mgmt_fe_get_session_commit_stats(txn->session_id);

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}
2445
/*
 * Notification from the backend-adapter layer that a backend client
 * connected or disconnected.
 *
 * On connect: compute the config subset relevant to this backend from
 * the running datastore; if non-empty, spin up an internal CONFIG
 * transaction (session-id 0) to download it to the client, otherwise
 * mark the adapter as config-synced immediately.
 * On disconnect: fail any in-flight CONFIG commit that involves this
 * backend.  Returns 0 on success, -1 on txn-creation failure.
 */
int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
				    bool connect)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	static struct mgmt_commit_stats dummy_stats;
	struct nb_config_cbs *adapter_cfgs = NULL;

	memset(&dummy_stats, 0, sizeof(dummy_stats));
	if (connect) {
		/* Get config for this single backend client */
		mgmt_be_get_adapter_config(adapter, mm->running_ds,
					   &adapter_cfgs);

		/* Nothing to download: the backend is already in sync. */
		if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
			SET_FLAG(adapter->flags,
				 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
			return 0;
		}

		/*
		 * Create a CONFIG transaction to push the config changes
		 * provided to the backend client.
		 */
		txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
		if (!txn) {
			MGMTD_TXN_ERR(
				"Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
				adapter->name);
			return -1;
		}

		MGMTD_TXN_DBG("Created initial txn %" PRIu64
			      " for BE connection %s",
			      txn->txn_id, adapter->name);
		/*
		 * Set the changeset for transaction to commit and trigger the
		 * commit request.
		 */
		txn_req =
			mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
		txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.src_ds_ctx = 0;
		txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.dst_ds_ctx = 0;
		txn_req->req.commit_cfg.validate_only = false;
		txn_req->req.commit_cfg.abort = false;
		txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
		txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;

		/*
		 * Trigger a COMMIT-CONFIG process.
		 */
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	} else {
		/*
		 * Check if any transaction is currently on-going that
		 * involves this backend client. If so, report the transaction
		 * has failed.
		 */
		FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
			if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
				cmtcfg_req = txn->commit_cfg_req
					? &txn->commit_cfg_req
						->req.commit_cfg
					: NULL;
				if (cmtcfg_req
				    && cmtcfg_req->subscr_info
					.xpath_subscr[adapter->id]
					.subscribed) {
					mgmt_txn_send_commit_cfg_reply(
						txn, MGMTD_INTERNAL_ERROR,
						"Backend daemon disconnected while processing commit!");
				}
			}
		}
	}

	return 0;
}
2528
/*
 * Handle a TXN_CREATE/TXN_DELETE reply from a backend adapter for a
 * CONFIG transaction.  A successful TXN_CREATE immediately kicks off
 * CFGDATA download to that backend; a failed one aborts the commit.
 * Returns 0 on success (or benign late reply), -1 on unknown/non-CONFIG
 * txn.
 */
int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
				 bool success,
				 struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	/* A late TXN_DELETE reply after the commit wrapped up is benign. */
	if (!create && !txn->commit_cfg_req)
		return 0;

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	if (create) {
		if (success) {
			/*
			 * Done with TXN_CREATE. Move the backend client to
			 * next phase.
			 */
			assert(cmtcfg_req->curr_phase
			       == MGMTD_COMMIT_PHASE_TXN_CREATE);

			/*
			 * Send CFGDATA_CREATE-REQs to the backend immediately.
			 */
			mgmt_txn_send_be_cfg_data(txn, adapter);
		} else {
			mgmt_txn_send_commit_cfg_reply(
				txn, MGMTD_INTERNAL_ERROR,
				"Internal error! Failed to initiate transaction at backend!");
		}
	} else {
		/*
		 * Done with TXN_DELETE. Move the backend client to next phase.
		 * NOTE(review): the phase move below is deliberately compiled
		 * out with 'if (false)' -- confirm whether it should be
		 * re-enabled or removed outright.
		 */
		if (false)
			mgmt_move_be_commit_to_next_phase(txn, adapter);
	}

	return 0;
}
2573
/*
 * Handle a CFGDATA_CREATE reply from a backend for one config batch.
 * On failure the whole commit is aborted with the backend's error (or
 * a generic one); on success the batch advances to the APPLY phase and
 * the overall commit phase is re-evaluated.  Returns 0 when the reply
 * was consumed, -1 on lookup/validation failure.
 */
int mgmt_txn_notify_be_cfgdata_reply(
	uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
	struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_be_cfg_batch *cfg_btch;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	if (!txn->commit_cfg_req)
		return -1;
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	/* The batch must exist and belong to this transaction. */
	cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
	if (!cfg_btch || cfg_btch->txn != txn)
		return -1;

	if (!success) {
		MGMTD_TXN_ERR(
			"CFGDATA_CREATE_REQ sent to '%s' failed for Txn %p, Batch %p, Err: %s",
			adapter->name, txn, cfg_btch,
			error_if_any ? error_if_any : "None");
		mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			error_if_any ? error_if_any :
			"Internal error! Failed to download config data to backend!");
		return 0;
	}

	MGMTD_TXN_DBG(
		"CFGDATA_CREATE_REQ sent to '%s' was successful for Txn %p, Batch %p, Err: %s",
		adapter->name, txn, cfg_btch,
		error_if_any ? error_if_any : "None");
	mgmt_move_txn_cfg_batch_to_next(
		cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
		&cmtcfg_req->next_batches[adapter->id], true,
		MGMTD_COMMIT_PHASE_APPLY_CFG);

	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
2619
f82370b4 2620int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
74335ceb
YR
2621 uint64_t batch_ids[],
2622 size_t num_batch_ids, char *error_if_any,
2623 struct mgmt_be_client_adapter *adapter)
2624{
2625 struct mgmt_txn_ctx *txn;
2626 struct mgmt_txn_be_cfg_batch *cfg_btch;
2627 struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
2628 size_t indx;
2629
2630 txn = mgmt_txn_id2ctx(txn_id);
2631 if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
2632 || !txn->commit_cfg_req)
2633 return -1;
2634
2635 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
2636
2637 if (!success) {
2638 MGMTD_TXN_ERR(
2639 "CFGDATA_APPLY_REQ sent to '%s' failed for Txn %p, Batches [0x%llx - 0x%llx], Err: %s",
2640 adapter->name, txn, (unsigned long long)batch_ids[0],
2641 (unsigned long long)batch_ids[num_batch_ids - 1],
2642 error_if_any ? error_if_any : "None");
2643 mgmt_txn_send_commit_cfg_reply(
2644 txn, MGMTD_INTERNAL_ERROR,
1401ee8b 2645 error_if_any ? error_if_any :
74335ceb
YR
2646 "Internal error! Failed to apply config data on backend!");
2647 return 0;
2648 }
2649
2650 for (indx = 0; indx < num_batch_ids; indx++) {
2651 cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
2652 if (cfg_btch->txn != txn)
2653 return -1;
2654 mgmt_move_txn_cfg_batch_to_next(
2655 cmtcfg_req, cfg_btch,
2656 &cmtcfg_req->curr_batches[adapter->id],
2657 &cmtcfg_req->next_batches[adapter->id], true,
2658 MGMTD_COMMIT_PHASE_TXN_DELETE);
2659 }
2660
2661 if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
2662 /*
2663 * All configuration for the specific backend has been applied.
2664 * Send TXN-DELETE to wrap up the transaction for this backend.
2665 */
2666 SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
2667 mgmt_txn_send_be_txn_delete(txn, adapter);
2668 }
2669
2670 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
2671 if (mm->perf_stats_en)
2672 gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
2673
2674 return 0;
2675}
2676
2677int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
2678 enum mgmt_result result,
2679 const char *error_if_any)
2680{
2681 struct mgmt_txn_ctx *txn;
2682
2683 txn = mgmt_txn_id2ctx(txn_id);
2684 if (!txn)
2685 return -1;
2686
2687 if (!txn->commit_cfg_req) {
2688 MGMTD_TXN_ERR(
2689 "NO commit in-progress for Txn %p, session 0x%llx!",
2690 txn, (unsigned long long)txn->session_id);
2691 return -1;
2692 }
2693
2694 return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
2695}
2696
2697int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
2698 Mgmtd__DatastoreId ds_id,
2699 struct mgmt_ds_ctx *ds_ctx,
2700 Mgmtd__YangGetDataReq **data_req,
2701 size_t num_reqs)
2702{
2703 struct mgmt_txn_ctx *txn;
2704 struct mgmt_txn_req *txn_req;
2705 size_t indx;
2706
2707 txn = mgmt_txn_id2ctx(txn_id);
2708 if (!txn)
2709 return -1;
2710
2711 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
2712 txn_req->req.get_data->ds_id = ds_id;
2713 txn_req->req.get_data->ds_ctx = ds_ctx;
2714 for (indx = 0;
2715 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2716 indx++) {
2717 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2718 txn_req->req.get_data->xpaths[indx] =
2719 strdup(data_req[indx]->data->xpath);
2720 txn_req->req.get_data->num_xpaths++;
2721 }
2722
2723 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
2724
2725 return 0;
2726}
2727
2728int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
2729 Mgmtd__DatastoreId ds_id,
2730 struct mgmt_ds_ctx *ds_ctx,
2731 Mgmtd__YangGetDataReq **data_req,
2732 size_t num_reqs)
2733{
2734 struct mgmt_txn_ctx *txn;
2735 struct mgmt_txn_req *txn_req;
2736 size_t indx;
2737
2738 txn = mgmt_txn_id2ctx(txn_id);
2739 if (!txn)
2740 return -1;
2741
2742 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
2743 txn_req->req.get_data->ds_id = ds_id;
2744 txn_req->req.get_data->ds_ctx = ds_ctx;
2745 for (indx = 0;
2746 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2747 indx++) {
2748 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2749 txn_req->req.get_data->xpaths[indx] =
2750 strdup(data_req[indx]->data->xpath);
2751 txn_req->req.get_data->num_xpaths++;
2752 }
2753
2754 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
2755
2756 return 0;
2757}
2758
/*
 * Dump a summary of all live transactions to the given VTY
 * ('show' command support).
 */
void mgmt_txn_status_write(struct vty *vty)
{
	struct mgmt_txn_ctx *txn;

	vty_out(vty, "MGMTD Transactions\n");

	FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
		vty_out(vty, "  Txn: \t\t\t%p\n", txn);
		vty_out(vty, "    Txn-Id: \t\t\t%llu\n",
			(unsigned long long)txn->txn_id);
		vty_out(vty, "    Session-Id: \t\t%llu\n",
			(unsigned long long)txn->session_id);
		vty_out(vty, "    Type: \t\t\t%s\n",
			mgmt_txn_type2str(txn->type));
		vty_out(vty, "    Ref-Count: \t\t\t%d\n", txn->refcount);
	}
	vty_out(vty, "  Total: %d\n",
		(int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
}
2778
/*
 * Kick off applying a rollback: diff the two datastores and push the
 * resulting changes to the backends through an internal CONFIG
 * transaction (session-id 0) flagged as a rollback commit.
 *
 * NOTE(review): 'changes' and 'dummy_stats' are function-static and
 * memset on every call -- presumably safe because MGMTD runs these
 * paths single-threaded; confirm before reusing concurrently.
 * Returns 0 on success, -1 when there is nothing to roll back or the
 * transaction could not be created.
 */
int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
					struct mgmt_ds_ctx *dst_ds_ctx)
{
	static struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	static struct mgmt_commit_stats dummy_stats;

	memset(&changes, 0, sizeof(changes));
	memset(&dummy_stats, 0, sizeof(dummy_stats));
	/*
	 * This could be the case when the config is directly
	 * loaded onto the candidate DS from a file. Get the
	 * diff from a full comparison of the candidate and
	 * running DSs.
	 */
	nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
		       mgmt_ds_get_nb_config(src_ds_ctx), &changes);
	cfg_chgs = &changes;

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		return -1;
	}

	/*
	 * Create a CONFIG transaction to push the config changes
	 * provided to the backend client.
	 */
	txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
	if (!txn) {
		MGMTD_TXN_ERR(
			"Failed to create CONFIG Transaction for downloading CONFIGs");
		return -1;
	}

	MGMTD_TXN_DBG("Created rollback txn %" PRIu64, txn->txn_id);

	/*
	 * Set the changeset for transaction to commit and trigger the commit
	 * request.
	 */
	txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = false;
	txn_req->req.commit_cfg.abort = false;
	txn_req->req.commit_cfg.rollback = true;
	txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
	txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}