]> git.proxmox.com Git - mirror_frr.git/blob - mgmtd/mgmt_txn.c
Merge pull request #13060 from opensourcerouting/feature/allow_peering_with_127.0.0.1
[mirror_frr.git] / mgmtd / mgmt_txn.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * MGMTD Transactions
4 *
5 * Copyright (C) 2021 Vmware, Inc.
6 * Pushpasis Sarkar <spushpasis@vmware.com>
7 */
8
9 #include <zebra.h>
10 #include "hash.h"
11 #include "jhash.h"
12 #include "libfrr.h"
13 #include "mgmtd/mgmt.h"
14 #include "mgmtd/mgmt_memory.h"
15 #include "mgmtd/mgmt_txn.h"
16
/*
 * Debug/error logging helpers for this file.
 *
 * With REDIRECT_DEBUG_TO_STDERR defined all messages go straight to
 * stderr (handy under a debugger); otherwise they go through zlog.
 */
#ifdef REDIRECT_DEBUG_TO_STDERR
#define MGMTD_TXN_DBG(fmt, ...) \
	fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
#define MGMTD_TXN_ERR(fmt, ...) \
	fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
#else /* REDIRECT_DEBUG_TO_STDERR */
/*
 * NOTE(review): debug messages are gated by the mgmt_debug_txn flag but
 * emitted via zlog_err() rather than zlog_debug() -- presumably so they
 * are visible regardless of the configured log level; confirm intent.
 */
#define MGMTD_TXN_DBG(fmt, ...) \
	do { \
		if (mgmt_debug_txn) \
			zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
	} while (0)
#define MGMTD_TXN_ERR(fmt, ...) \
	zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
#endif /* REDIRECT_DEBUG_TO_STDERR */

/* Lock/unlock wrappers recording the call site for ref-count debugging. */
#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
34
/* Event types a transaction can schedule (see mgmt_txn_register_event()). */
enum mgmt_txn_event {
	MGMTD_TXN_PROC_SETCFG = 1,   /* process pending SET_CONFIG requests */
	MGMTD_TXN_PROC_COMMITCFG,    /* drive the COMMIT_CONFIG state machine */
	MGMTD_TXN_PROC_GETCFG,       /* process pending GET_CONFIG requests */
	MGMTD_TXN_PROC_GETDATA,      /* process pending GET_DATA requests */
	MGMTD_TXN_COMMITCFG_TIMEOUT, /* commit exceeded its time budget */
	MGMTD_TXN_CLEANUP            /* destroy the transaction itself */
};
43
44 PREDECL_LIST(mgmt_txn_reqs);
45
/*
 * One SET_CONFIG request: a batch of edits against a datastore's
 * candidate tree, optionally followed by an implicit commit into a
 * destination datastore (dst_ds_*).
 */
struct mgmt_set_cfg_req {
	Mgmtd__DatastoreId ds_id;          /* datastore being edited */
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	uint16_t num_cfg_changes;          /* valid entries in cfg_changes[] */
	bool implicit_commit;              /* commit right after the edit */
	Mgmtd__DatastoreId dst_ds_id;      /* only used for implicit commit */
	struct mgmt_ds_ctx *dst_ds_ctx;
	struct mgmt_setcfg_stats *setcfg_stats;
};
56
/*
 * Phases of the commit state machine, in execution order; the commit
 * advances by incrementing through these values (see
 * mgmt_try_move_commit_to_next_phase()).
 */
enum mgmt_commit_phase {
	MGMTD_COMMIT_PHASE_PREPARE_CFG = 0, /* build per-backend batches */
	MGMTD_COMMIT_PHASE_TXN_CREATE,      /* create txn on backends */
	MGMTD_COMMIT_PHASE_SEND_CFG,        /* push config data to backends */
	MGMTD_COMMIT_PHASE_APPLY_CFG,       /* backends apply the config */
	MGMTD_COMMIT_PHASE_TXN_DELETE,      /* delete txn on backends */
	MGMTD_COMMIT_PHASE_MAX              /* sentinel, not a real phase */
};
65
66 static inline const char *
67 mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
68 {
69 switch (cmt_phase) {
70 case MGMTD_COMMIT_PHASE_PREPARE_CFG:
71 return "PREP-CFG";
72 case MGMTD_COMMIT_PHASE_TXN_CREATE:
73 return "CREATE-TXN";
74 case MGMTD_COMMIT_PHASE_SEND_CFG:
75 return "SEND-CFG";
76 case MGMTD_COMMIT_PHASE_APPLY_CFG:
77 return "APPLY-CFG";
78 case MGMTD_COMMIT_PHASE_TXN_DELETE:
79 return "DELETE-TXN";
80 case MGMTD_COMMIT_PHASE_MAX:
81 return "Invalid/Unknown";
82 }
83
84 return "Invalid/Unknown";
85 }
86
87 PREDECL_LIST(mgmt_txn_batches);
88
/*
 * One batch of config data destined for a single backend client.
 * A batch is capped both by entry count (MGMTD_MAX_CFG_CHANGES_IN_BATCH)
 * and by encoded message size (buf_space_left).
 */
struct mgmt_txn_be_cfg_batch {
	struct mgmt_txn_ctx *txn;   /* owning transaction (ref held) */
	uint64_t batch_id;          /* unique id, key into the batches hash */
	enum mgmt_be_client_id be_id;
	struct mgmt_be_client_adapter *be_adapter; /* ref held if non-NULL */
	union mgmt_be_xpath_subscr_info
		xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	size_t num_cfg_data;        /* valid entries in the arrays above */
	int buf_space_left;         /* remaining encoded-message budget */
	enum mgmt_commit_phase comm_phase; /* phase this batch has reached */
	struct mgmt_txn_batches_item list_linkage;
};
105
106 DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
107
108 #define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
109 frr_each_safe (mgmt_txn_batches, list, batch)
110
/*
 * State for the single COMMIT_CONFIG request a transaction may carry.
 * Embedded directly in struct mgmt_txn_req (not heap-allocated).
 */
struct mgmt_commit_cfg_req {
	Mgmtd__DatastoreId src_ds_id;  /* usually candidate */
	struct mgmt_ds_ctx *src_ds_ctx;
	Mgmtd__DatastoreId dst_ds_id;  /* usually running */
	struct mgmt_ds_ctx *dst_ds_ctx;
	uint32_t nb_txn_id;            /* northbound-layer txn id */
	uint8_t validate_only : 1;     /* validate, do not apply */
	uint8_t abort : 1;             /* discard candidate changes */
	uint8_t implicit : 1;          /* commit initiated by SET_CONFIG */
	uint8_t rollback : 1;          /* commit driven by history rollback */

	/* Track commit phases */
	enum mgmt_commit_phase curr_phase;
	enum mgmt_commit_phase next_phase;

	/*
	 * Set of config changes to commit. This is used only
	 * when changes are NOT to be determined by comparing
	 * candidate and running DSs. This is typically used
	 * for downloading all relevant configs for a new backend
	 * client that has recently come up and connected with
	 * MGMTD.
	 */
	struct nb_config_cbs *cfg_chgs;

	/*
	 * Details on all the Backend Clients associated with
	 * this commit.
	 */
	struct mgmt_be_client_subscr_info subscr_info;

	/*
	 * List of backend batches for this commit to be validated
	 * and applied at the backend.
	 *
	 * FIXME: Need to re-think this design for the case set of
	 * validators for a given YANG data item is different from
	 * the set of notifiers for the same. We may need to have
	 * separate list of batches for VALIDATE and APPLY.
	 */
	struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
	struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
	/*
	 * The last batch added for any backend client. This is always on
	 * 'curr_batches'
	 */
	struct mgmt_txn_be_cfg_batch
		*last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
	struct hash *batches;     /* batch-id -> batch lookup */
	uint64_t next_batch_id;   /* id allocator; 0 is never handed out */

	struct mgmt_commit_stats *cmt_stats;
};
164
/*
 * Scratch space for assembling one GET data reply message.
 * Allocated on demand (see struct mgmt_get_data_req::reply).
 */
struct mgmt_get_data_reply {
	/* Buffer space for preparing data reply */
	int num_reply;   /* entries filled in the arrays below */
	int last_batch;  /* whether this is the final reply batch */
	Mgmtd__YangDataReply data_reply;
	Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangData * reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
};
175
/*
 * One GET_CONFIG / GET_DATA request: a set of xpaths to query from a
 * datastore, plus on-demand reply scratch space.
 */
struct mgmt_get_data_req {
	Mgmtd__DatastoreId ds_id;  /* datastore to read from */
	struct mgmt_ds_ctx *ds_ctx;
	char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH]; /* owned strings */
	int num_xpaths;            /* valid entries in xpaths[] */

	/*
	 * Buffer space for preparing reply.
	 * NOTE: Should only be malloc-ed on demand to reduce
	 * memory footprint. Freed up via mgmt_trx_req_free()
	 */
	struct mgmt_get_data_reply *reply;

	int total_reply;           /* running count across reply batches */
};
191
/*
 * A single request on a transaction. The payload union is selected by
 * req_event: SETCFG and GET* are heap-allocated pointers, COMMITCFG is
 * embedded by value (only one commit per transaction).
 */
struct mgmt_txn_req {
	struct mgmt_txn_ctx *txn;     /* owning transaction (ref held) */
	enum mgmt_txn_event req_event;
	uint64_t req_id;              /* frontend-supplied request id */
	union {
		struct mgmt_set_cfg_req *set_cfg;
		struct mgmt_get_data_req *get_data; /* GETCFG and GETDATA */
		struct mgmt_commit_cfg_req commit_cfg;
	} req;

	/* True while the request awaits a backend response; selects which
	 * list it lives on when freed. */
	bool pending_be_proc;
	struct mgmt_txn_reqs_item list_linkage;
};
205
206 DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
207
208 #define FOREACH_TXN_REQ_IN_LIST(list, req) \
209 frr_each_safe (mgmt_txn_reqs, list, req)
210
/*
 * A transaction context. At most one transaction exists per frontend
 * client session; it owns the per-event timers/tasks and the pending
 * request lists. Lifetime is reference-counted via MGMTD_TXN_LOCK/UNLOCK.
 */
struct mgmt_txn_ctx {
	uint64_t session_id; /* One transaction per client session */
	uint64_t txn_id;
	enum mgmt_txn_type type;

	/* struct mgmt_master *mm; */

	/* Scheduled event tasks, one per mgmt_txn_event type. */
	struct event *proc_set_cfg;
	struct event *proc_comm_cfg;
	struct event *proc_get_cfg;
	struct event *proc_get_data;
	struct event *comm_cfg_timeout;
	struct event *clnup;

	/* List of backend adapters involved in this transaction */
	struct mgmt_txn_badapters_head be_adapters;

	/* Reference count; the txn is destroyed when it drops to zero. */
	int refcount;

	struct mgmt_txns_item list_linkage;

	/*
	 * List of pending set-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head set_cfg_reqs;
	/*
	 * List of pending get-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head get_cfg_reqs;
	/*
	 * List of pending get-data requests for a given
	 * transaction/session Two lists, one for requests
	 * not processed at all, and one for requests that
	 * has been sent to backend for processing.
	 */
	struct mgmt_txn_reqs_head get_data_reqs;
	struct mgmt_txn_reqs_head pending_get_datas;
	/*
	 * There will always be one commit-config allowed for a given
	 * transaction/session. No need to maintain lists for it.
	 */
	struct mgmt_txn_req *commit_cfg_req;
};
260
261 DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
262
263 #define FOREACH_TXN_IN_LIST(mm, txn) \
264 frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
265
266 static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
267 enum mgmt_result result,
268 const char *error_if_any);
269
270 static inline const char *
271 mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
272 {
273 if (!txn->commit_cfg_req)
274 return "None";
275
276 return (mgmt_commit_phase2str(
277 curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
278 : txn->commit_cfg_req->req.commit_cfg.next_phase));
279 }
280
281 static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
282 int line);
283 static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
284 int line);
285 static int
286 mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
287 struct mgmt_be_client_adapter *adapter);
288
289 static struct event_loop *mgmt_txn_tm;
290 static struct mgmt_master *mgmt_txn_mm;
291
292 static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
293 enum mgmt_txn_event event);
294
295 static int
296 mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
297 struct mgmt_be_client_adapter *adapter);
298
299 static struct mgmt_txn_be_cfg_batch *
300 mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
301 enum mgmt_be_client_id id,
302 struct mgmt_be_client_adapter *be_adapter)
303 {
304 struct mgmt_txn_be_cfg_batch *cfg_btch;
305
306 cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
307 sizeof(struct mgmt_txn_be_cfg_batch));
308 assert(cfg_btch);
309 cfg_btch->be_id = id;
310
311 cfg_btch->txn = txn;
312 MGMTD_TXN_LOCK(txn);
313 assert(txn->commit_cfg_req);
314 mgmt_txn_batches_add_tail(
315 &txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
316 cfg_btch);
317 cfg_btch->be_adapter = be_adapter;
318 cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
319 if (be_adapter)
320 mgmt_be_adapter_lock(be_adapter);
321
322 txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
323 cfg_btch;
324 if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
325 txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
326 cfg_btch->batch_id =
327 txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
328 hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
329 hash_alloc_intern);
330
331 return cfg_btch;
332 }
333
334 static void
335 mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
336 {
337 size_t indx;
338 struct mgmt_commit_cfg_req *cmtcfg_req;
339
340 MGMTD_TXN_DBG(" Batch: %p, Txn: %p", *cfg_btch, (*cfg_btch)->txn);
341
342 assert((*cfg_btch)->txn
343 && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
344
345 cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
346 hash_release(cmtcfg_req->batches, *cfg_btch);
347 mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
348 *cfg_btch);
349 mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
350 *cfg_btch);
351
352 if ((*cfg_btch)->be_adapter)
353 mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);
354
355 for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
356 if ((*cfg_btch)->data[indx].xpath) {
357 free((*cfg_btch)->data[indx].xpath);
358 (*cfg_btch)->data[indx].xpath = NULL;
359 }
360 }
361
362 MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);
363
364 XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
365 *cfg_btch = NULL;
366 }
367
368 static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
369 {
370 const struct mgmt_txn_be_cfg_batch *batch = data;
371
372 return jhash2((uint32_t *) &batch->batch_id,
373 sizeof(batch->batch_id) / sizeof(uint32_t), 0);
374 }
375
376 static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
377 {
378 const struct mgmt_txn_be_cfg_batch *batch1 = d1;
379 const struct mgmt_txn_be_cfg_batch *batch2 = d2;
380
381 return (batch1->batch_id == batch2->batch_id);
382 }
383
/*
 * Hash teardown callback: free one batch (also unlinks it from its
 * list, drops the adapter ref and releases the txn reference).
 */
static void mgmt_txn_cfgbatch_hash_free(void *data)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch = data;

	mgmt_txn_cfg_batch_free(&cfg_btch);
}
390
391 static inline struct mgmt_txn_be_cfg_batch *
392 mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
393 {
394 struct mgmt_txn_be_cfg_batch key = {0};
395 struct mgmt_txn_be_cfg_batch *batch;
396
397 if (!txn->commit_cfg_req)
398 return NULL;
399
400 key.batch_id = batch_id;
401 batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
402 &key);
403
404 return batch;
405 }
406
407 static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
408 enum mgmt_be_client_id id)
409 {
410 struct mgmt_txn_be_cfg_batch *cfg_btch;
411 struct mgmt_txn_batches_head *list;
412
413 list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
414 FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
415 mgmt_txn_cfg_batch_free(&cfg_btch);
416
417 mgmt_txn_batches_fini(list);
418
419 list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
420 FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
421 mgmt_txn_cfg_batch_free(&cfg_btch);
422
423 mgmt_txn_batches_fini(list);
424
425 txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
426 }
427
/*
 * Allocate a new request of type 'req_event' for transaction 'txn',
 * allocate its per-type payload and enqueue it on the matching
 * per-transaction list. Takes a reference on the transaction, released
 * by mgmt_txn_req_free().
 */
static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
					       uint64_t req_id,
					       enum mgmt_txn_event req_event)
{
	struct mgmt_txn_req *txn_req;
	enum mgmt_be_client_id id;

	txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
	assert(txn_req);
	txn_req->txn = txn;
	txn_req->req_id = req_id;
	txn_req->req_event = req_event;
	txn_req->pending_be_proc = false;

	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
					       sizeof(struct mgmt_set_cfg_req));
		assert(txn_req->req.set_cfg);
		mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
		MGMTD_TXN_DBG(
			"Added a new SETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		/*
		 * Only one commit-config per transaction: stored directly on
		 * the txn, not on a list. The payload is embedded in the
		 * union, so there is nothing extra to allocate.
		 */
		txn->commit_cfg_req = txn_req;
		MGMTD_TXN_DBG(
			"Added a new COMMITCFG Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);

		/* Initialize the per-backend batch lists. */
		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.curr_batches[id]);
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.next_batches[id]);
		}

		/* Batch-id -> batch lookup used when backends reply. */
		txn_req->req.commit_cfg.batches =
			hash_create(mgmt_txn_cfgbatch_hash_key,
				    mgmt_txn_cfgbatch_hash_cmp,
				    "MGMT Config Batches");
		break;
	case MGMTD_TXN_PROC_GETCFG:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
		MGMTD_TXN_DBG(
			"Added a new GETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
		MGMTD_TXN_DBG(
			"Added a new GETDATA Req: %p for Txn: %p, Sessn: 0x%llx",
			txn_req, txn, (unsigned long long)txn->session_id);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		/* Not real requests; nothing to allocate or enqueue. */
		break;
	}

	/* Hold the transaction for as long as this request exists. */
	MGMTD_TXN_LOCK(txn);

	return txn_req;
}
499
/*
 * Free a transaction request and everything it owns: the per-type
 * payload, its membership on the pending/request list, and the
 * reference it holds on the owning transaction. For commit-config
 * requests this also asks the involved backends to delete their side
 * of the transaction when it had already been created there.
 * On return *txn_req is NULL.
 */
static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
{
	int indx;
	struct mgmt_txn_reqs_head *req_list = NULL;
	struct mgmt_txn_reqs_head *pending_list = NULL;
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;

	switch ((*txn_req)->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		/* Free the value strings owned by each config change. */
		for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
		     indx++) {
			if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
				MGMTD_TXN_DBG(
					"Freeing value for %s at %p ==> '%s'",
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.xpath,
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.value,
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.value);
				free((void *)(*txn_req)
					     ->req.set_cfg->cfg_changes[indx]
					     .value);
			}
		}
		req_list = &(*txn_req)->txn->set_cfg_reqs;
		MGMTD_TXN_DBG("Deleting SETCFG Req: %p for Txn: %p",
			      *txn_req, (*txn_req)->txn);
		XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		MGMTD_TXN_DBG("Deleting COMMITCFG Req: %p for Txn: %p",
			      *txn_req, (*txn_req)->txn);
		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			/*
			 * Send TXN_DELETE to cleanup state for this
			 * transaction on backend
			 */
			if ((*txn_req)->req.commit_cfg.curr_phase
				    >= MGMTD_COMMIT_PHASE_TXN_CREATE
			    && (*txn_req)->req.commit_cfg.curr_phase
				       < MGMTD_COMMIT_PHASE_TXN_DELETE
			    && (*txn_req)
				       ->req.commit_cfg.subscr_info
				       .xpath_subscr[id]
				       .subscribed) {
				adapter = mgmt_be_get_adapter_by_id(id);
				if (adapter)
					mgmt_txn_send_be_txn_delete(
						(*txn_req)->txn, adapter);
			}

			mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
							id);
			/*
			 * The batches hash is shared by all backends, so it
			 * is destroyed on the first loop iteration only
			 * (guarded by the NULL-check below).
			 */
			if ((*txn_req)->req.commit_cfg.batches) {
				hash_clean((*txn_req)->req.commit_cfg.batches,
					   mgmt_txn_cfgbatch_hash_free);
				hash_free((*txn_req)->req.commit_cfg.batches);
				(*txn_req)->req.commit_cfg.batches = NULL;
			}
		}
		break;
	case MGMTD_TXN_PROC_GETCFG:
		/* Free the xpath strings owned by the request. */
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)
					     ->req.get_data->xpaths[indx]);
		}
		req_list = &(*txn_req)->txn->get_cfg_reqs;
		MGMTD_TXN_DBG("Deleting GETCFG Req: %p for Txn: %p",
			      *txn_req, (*txn_req)->txn);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);
		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		/* Same shape as GETCFG, but may sit on the pending list. */
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)
					     ->req.get_data->xpaths[indx]);
		}
		pending_list = &(*txn_req)->txn->pending_get_datas;
		req_list = &(*txn_req)->txn->get_data_reqs;
		MGMTD_TXN_DBG("Deleting GETDATA Req: %p for Txn: %p",
			      *txn_req, (*txn_req)->txn);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);
		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		break;
	}

	/*
	 * A request waiting on a backend lives on the pending list;
	 * otherwise it is on its regular request list (if any).
	 */
	if ((*txn_req)->pending_be_proc && pending_list) {
		mgmt_txn_reqs_del(pending_list, *txn_req);
		MGMTD_TXN_DBG("Removed Req: %p from pending-list (left:%d)",
			      *txn_req, (int)mgmt_txn_reqs_count(pending_list));
	} else if (req_list) {
		mgmt_txn_reqs_del(req_list, *txn_req);
		MGMTD_TXN_DBG("Removed Req: %p from request-list (left:%d)",
			      *txn_req, (int)mgmt_txn_reqs_count(req_list));
	}

	(*txn_req)->pending_be_proc = false;
	/* Drop the txn reference taken in mgmt_txn_req_alloc(). */
	MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
	XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
	*txn_req = NULL;
}
617
/*
 * Event handler: process up to MGMTD_TXN_MAX_NUM_SETCFG_PROC pending
 * SET_CONFIG requests on the transaction, applying each request's edits
 * to the target datastore's config tree. An implicit-commit request
 * additionally locks the destination DS and kicks off a commit.
 * Reschedules itself if requests remain after the quota is reached.
 */
static void mgmt_txn_process_set_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_config *nb_config;
	char err_buf[1024];
	bool error;
	int num_processed = 0;
	size_t left;
	struct mgmt_commit_stats *cmt_stats;
	int ret = 0;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);
	cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);

	MGMTD_TXN_DBG(
		"Processing %d SET_CONFIG requests for Txn:%p Session:0x%llx",
		(int)mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn,
		(unsigned long long)txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
		ds_ctx = txn_req->req.set_cfg->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, "No such datastore!",
				txn_req->req.set_cfg->implicit_commit);
			error = true;
			goto mgmt_txn_process_set_cfg_done;
		}

		nb_config = mgmt_ds_get_nb_config(ds_ctx);
		if (!nb_config) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR,
				"Unable to retrieve DS Config Tree!",
				txn_req->req.set_cfg->implicit_commit);
			error = true;
			goto mgmt_txn_process_set_cfg_done;
		}

		/* Apply all of this request's edits to the config tree;
		 * 'error' is set by the northbound layer on failure. */
		error = false;
		nb_candidate_edit_config_changes(
			nb_config, txn_req->req.set_cfg->cfg_changes,
			(size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
			NULL, 0, err_buf, sizeof(err_buf), &error);
		if (error) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, err_buf,
				txn_req->req.set_cfg->implicit_commit);
			goto mgmt_txn_process_set_cfg_done;
		}

		if (txn_req->req.set_cfg->implicit_commit) {
			/* An implicit commit implies this is the only
			 * SET_CONFIG request on the transaction. */
			assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
			assert(txn_req->req.set_cfg->dst_ds_ctx);

			/* Lock the destination DS before committing. */
			ret = mgmt_ds_write_lock(
				txn_req->req.set_cfg->dst_ds_ctx);
			if (ret != 0) {
				MGMTD_TXN_ERR(
					"Failed to lock the DS %u for txn: %p session 0x%llx, errstr %s!",
					txn_req->req.set_cfg->dst_ds_id, txn,
					(unsigned long long)txn->session_id,
					strerror(ret));
				mgmt_txn_send_commit_cfg_reply(
					txn, MGMTD_DS_LOCK_FAILED,
					"Lock running DS before implicit commit failed!");
				goto mgmt_txn_process_set_cfg_done;
			}

			mgmt_txn_send_commit_config_req(
				txn->txn_id, txn_req->req_id,
				txn_req->req.set_cfg->ds_id,
				txn_req->req.set_cfg->ds_ctx,
				txn_req->req.set_cfg->dst_ds_id,
				txn_req->req.set_cfg->dst_ds_ctx, false,
				false, true);

			if (mm->perf_stats_en)
				gettimeofday(&cmt_stats->last_start, NULL);
			cmt_stats->commit_cnt++;
		} else if (mgmt_fe_send_set_cfg_reply(
				   txn->session_id, txn->txn_id,
				   txn_req->req.set_cfg->ds_id,
				   txn_req->req_id, MGMTD_SUCCESS, NULL, false)
			   != 0) {
			MGMTD_TXN_ERR(
				"Failed to send SET_CONFIG_REPLY for txn %p session 0x%llx",
				txn, (unsigned long long)txn->session_id);
			error = true;
		}

	mgmt_txn_process_set_cfg_done:

		/*
		 * Note: The following will remove it from the list as well.
		 */
		mgmt_txn_req_free(&txn_req);

		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
			break;
	}

	/* Quota exhausted with work remaining: reschedule ourselves. */
	left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
	if (left) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
			(int)left);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
	}
}
741
742 static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
743 enum mgmt_result result,
744 const char *error_if_any)
745 {
746 int ret = 0;
747 bool success, create_cmt_info_rec;
748
749 if (!txn->commit_cfg_req)
750 return -1;
751
752 success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
753
754 if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
755 && mgmt_fe_send_commit_cfg_reply(
756 txn->session_id, txn->txn_id,
757 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
758 txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
759 txn->commit_cfg_req->req_id,
760 txn->commit_cfg_req->req.commit_cfg.validate_only,
761 result, error_if_any)
762 != 0) {
763 MGMTD_TXN_ERR(
764 "Failed to send COMMIT-CONFIG-REPLY for Txn %p Sessn 0x%llx",
765 txn, (unsigned long long)txn->session_id);
766 }
767
768 if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
769 && mgmt_fe_send_set_cfg_reply(
770 txn->session_id, txn->txn_id,
771 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
772 txn->commit_cfg_req->req_id,
773 success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
774 error_if_any, true)
775 != 0) {
776 MGMTD_TXN_ERR(
777 "Failed to send SET-CONFIG-REPLY for Txn %p Sessn 0x%llx",
778 txn, (unsigned long long)txn->session_id);
779 }
780
781 if (success) {
782 /* Stop the commit-timeout timer */
783 EVENT_OFF(txn->comm_cfg_timeout);
784
785 create_cmt_info_rec =
786 (result != MGMTD_NO_CFG_CHANGES &&
787 !txn->commit_cfg_req->req.commit_cfg.rollback);
788
789 /*
790 * Successful commit: Merge Src DS into Dst DS if and only if
791 * this was not a validate-only or abort request.
792 */
793 if ((txn->session_id
794 && !txn->commit_cfg_req->req.commit_cfg.validate_only
795 && !txn->commit_cfg_req->req.commit_cfg.abort)
796 || txn->commit_cfg_req->req.commit_cfg.rollback) {
797 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
798 .src_ds_ctx,
799 txn->commit_cfg_req->req.commit_cfg
800 .dst_ds_ctx,
801 create_cmt_info_rec);
802 }
803
804 /*
805 * Restore Src DS back to Dest DS only through a commit abort
806 * request.
807 */
808 if (txn->session_id
809 && txn->commit_cfg_req->req.commit_cfg.abort)
810 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
811 .dst_ds_ctx,
812 txn->commit_cfg_req->req.commit_cfg
813 .src_ds_ctx,
814 false);
815 } else {
816 /*
817 * The commit has failied. For implicit commit requests restore
818 * back the contents of the candidate DS.
819 */
820 if (txn->commit_cfg_req->req.commit_cfg.implicit)
821 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
822 .dst_ds_ctx,
823 txn->commit_cfg_req->req.commit_cfg
824 .src_ds_ctx,
825 false);
826 }
827
828 if (txn->commit_cfg_req->req.commit_cfg.rollback) {
829 ret = mgmt_ds_unlock(
830 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
831 if (ret != 0)
832 MGMTD_TXN_ERR(
833 "Failed to unlock the dst DS during rollback : %s",
834 strerror(ret));
835
836 /*
837 * Resume processing the rollback command.
838 */
839 mgmt_history_rollback_complete(success);
840 }
841
842 if (txn->commit_cfg_req->req.commit_cfg.implicit)
843 if (mgmt_ds_unlock(
844 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx)
845 != 0)
846 MGMTD_TXN_ERR(
847 "Failed to unlock the dst DS during implicit : %s",
848 strerror(ret));
849
850 txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
851 mgmt_txn_req_free(&txn->commit_cfg_req);
852
853 /*
854 * The CONFIG Transaction should be destroyed from Frontend-adapter.
855 * But in case the transaction is not triggered from a front-end session
856 * we need to cleanup by itself.
857 */
858 if (!txn->session_id)
859 mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
860
861 return 0;
862 }
863
/*
 * Move one config batch from src_list to dst_list, optionally stamping
 * it with the new commit phase 'to_phase'.
 *
 * NOTE(review): the 'cmtcfg_req' parameter is not referenced in this
 * body -- presumably kept for signature symmetry with the callers;
 * confirm before removing.
 */
static void
mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
				struct mgmt_txn_be_cfg_batch *cfg_btch,
				struct mgmt_txn_batches_head *src_list,
				struct mgmt_txn_batches_head *dst_list,
				bool update_commit_phase,
				enum mgmt_commit_phase to_phase)
{
	mgmt_txn_batches_del(src_list, cfg_btch);

	if (update_commit_phase) {
		MGMTD_TXN_DBG("Move Txn-Id %p Batch-Id %p from '%s' --> '%s'",
			      cfg_btch->txn, cfg_btch,
			      mgmt_commit_phase2str(cfg_btch->comm_phase),
			      mgmt_txn_commit_phase_str(cfg_btch->txn, false));
		cfg_btch->comm_phase = to_phase;
	}

	mgmt_txn_batches_add_tail(dst_list, cfg_btch);
}
884
885 static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
886 struct mgmt_commit_cfg_req *cmtcfg_req,
887 struct mgmt_txn_batches_head *src_list,
888 struct mgmt_txn_batches_head *dst_list,
889 bool update_commit_phase,
890 enum mgmt_commit_phase to_phase)
891 {
892 struct mgmt_txn_be_cfg_batch *cfg_btch;
893
894 FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, cfg_btch) {
895 mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, cfg_btch, src_list,
896 dst_list, update_commit_phase,
897 to_phase);
898 }
899 }
900
/*
 * Advance the whole commit to its next phase, but only once every
 * subscribed backend has drained its current-phase batch list.
 * Returns -1 when at least one backend is still behind, 0 after the
 * commit has been advanced and the COMMITCFG event re-scheduled.
 */
static int
mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				   struct mgmt_commit_cfg_req *cmtcfg_req)
{
	struct mgmt_txn_batches_head *curr_list, *next_list;
	enum mgmt_be_client_id id;

	MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * Check if all clients has moved to next phase or not.
	 */
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed &&
		    mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
			/*
			 * There's atleast once client who hasn't moved to
			 * next phase.
			 *
			 * TODO: Need to re-think this design for the case
			 * set of validators for a given YANG data item is
			 * different from the set of notifiers for the same.
			 */
			return -1;
		}
	}

	MGMTD_TXN_DBG("Move entire Txn-Id %p from '%s' to '%s'", txn,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * If we are here, it means all the clients has moved to next phase.
	 * So we can move the whole commit to next phase.
	 */
	cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
	cmtcfg_req->next_phase++;
	MGMTD_TXN_DBG(
		"Move back all config batches for Txn %p from next to current branch",
		txn);
	/* Batches parked on next_batches become the new current set;
	 * their phase stamps are left untouched (update=false). */
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		curr_list = &cmtcfg_req->curr_batches[id];
		next_list = &cmtcfg_req->next_batches[id];
		mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
					  curr_list, false, 0);
	}

	/* Kick the commit state machine to run the new phase. */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	return 0;
}
954
/*
 * One backend ('adapter') has finished the current commit phase: move
 * all of its current batches onto its next-phase list, then try to
 * advance the whole commit (which succeeds only when every backend has
 * done the same). Returns -1 if the txn is not a CONFIG txn with a
 * pending commit, 0 otherwise.
 */
static int
mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				  struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_commit_cfg_req *cmtcfg_req;
	struct mgmt_txn_batches_head *curr_list, *next_list;

	if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
		return -1;

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	MGMTD_TXN_DBG(
		"Move Txn-Id %p for '%s' Phase(current: '%s' next:'%s')", txn,
		adapter->name, mgmt_txn_commit_phase_str(txn, true),
		mgmt_txn_commit_phase_str(txn, false));

	MGMTD_TXN_DBG(
		"Move all config batches for '%s' from current to next list",
		adapter->name);
	curr_list = &cmtcfg_req->curr_batches[adapter->id];
	next_list = &cmtcfg_req->next_batches[adapter->id];
	/* Stamp the batches with the upcoming phase as they move over. */
	mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
				  cmtcfg_req->next_phase);

	MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * Check if all clients has moved to next phase or not.
	 */
	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
991
992 static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
993 struct nb_config_cbs *changes)
994 {
995 struct nb_config_cb *cb, *nxt;
996 struct nb_config_change *chg;
997 struct mgmt_txn_be_cfg_batch *cfg_btch;
998 struct mgmt_be_client_subscr_info subscr_info;
999 char *xpath = NULL, *value = NULL;
1000 char err_buf[1024];
1001 enum mgmt_be_client_id id;
1002 struct mgmt_be_client_adapter *adapter;
1003 struct mgmt_commit_cfg_req *cmtcfg_req;
1004 bool found_validator;
1005 int num_chgs = 0;
1006 int xpath_len, value_len;
1007
1008 cmtcfg_req = &txn_req->req.commit_cfg;
1009
1010 RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
1011 chg = (struct nb_config_change *)cb;
1012
1013 /*
1014 * Could have directly pointed to xpath in nb_node.
1015 * But dont want to mess with it now.
1016 * xpath = chg->cb.nb_node->xpath;
1017 */
1018 xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
1019 if (!xpath) {
1020 (void)mgmt_txn_send_commit_cfg_reply(
1021 txn_req->txn, MGMTD_INTERNAL_ERROR,
1022 "Internal error! Could not get Xpath from Ds node!");
1023 goto mgmt_txn_create_config_batches_failed;
1024 }
1025
1026 value = (char *)lyd_get_value(chg->cb.dnode);
1027 if (!value)
1028 value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
1029
1030 MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
1031 value ? value : "NIL");
1032
1033 if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info)
1034 != 0) {
1035 snprintf(err_buf, sizeof(err_buf),
1036 "No backend module found for XPATH: '%s",
1037 xpath);
1038 (void)mgmt_txn_send_commit_cfg_reply(
1039 txn_req->txn, MGMTD_INTERNAL_ERROR, err_buf);
1040 goto mgmt_txn_create_config_batches_failed;
1041 }
1042
1043 xpath_len = strlen(xpath) + 1;
1044 value_len = strlen(value) + 1;
1045 found_validator = false;
1046 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1047 if (!subscr_info.xpath_subscr[id].validate_config
1048 && !subscr_info.xpath_subscr[id].notify_config)
1049 continue;
1050
1051 adapter = mgmt_be_get_adapter_by_id(id);
1052 if (!adapter)
1053 continue;
1054
1055 cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
1056 if (!cfg_btch
1057 || (cfg_btch->num_cfg_data
1058 == MGMTD_MAX_CFG_CHANGES_IN_BATCH)
1059 || (cfg_btch->buf_space_left
1060 < (xpath_len + value_len))) {
1061 /* Allocate a new config batch */
1062 cfg_btch = mgmt_txn_cfg_batch_alloc(
1063 txn_req->txn, id, adapter);
1064 }
1065
1066 cfg_btch->buf_space_left -= (xpath_len + value_len);
1067 memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
1068 &subscr_info.xpath_subscr[id],
1069 sizeof(cfg_btch->xp_subscr[0]));
1070
1071 mgmt_yang_cfg_data_req_init(
1072 &cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
1073 cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
1074 &cfg_btch->cfg_data[cfg_btch->num_cfg_data];
1075
1076 if (chg->cb.operation == NB_OP_DESTROY)
1077 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1078 .req_type =
1079 MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
1080 else
1081 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1082 .req_type =
1083 MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
1084
1085 mgmt_yang_data_init(
1086 &cfg_btch->data[cfg_btch->num_cfg_data]);
1087 cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
1088 &cfg_btch->data[cfg_btch->num_cfg_data];
1089 cfg_btch->data[cfg_btch->num_cfg_data].xpath = xpath;
1090 xpath = NULL;
1091
1092 mgmt_yang_data_value_init(
1093 &cfg_btch->value[cfg_btch->num_cfg_data]);
1094 cfg_btch->data[cfg_btch->num_cfg_data].value =
1095 &cfg_btch->value[cfg_btch->num_cfg_data];
1096 cfg_btch->value[cfg_btch->num_cfg_data].value_case =
1097 MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1098 cfg_btch->value[cfg_btch->num_cfg_data]
1099 .encoded_str_val = value;
1100 value = NULL;
1101
1102 if (subscr_info.xpath_subscr[id].validate_config)
1103 found_validator = true;
1104
1105 cmtcfg_req->subscr_info.xpath_subscr[id].subscribed |=
1106 subscr_info.xpath_subscr[id].subscribed;
1107 MGMTD_TXN_DBG(
1108 " -- %s, {V:%d, N:%d}, Batch: %p, Item:%d",
1109 adapter->name,
1110 subscr_info.xpath_subscr[id].validate_config,
1111 subscr_info.xpath_subscr[id].notify_config,
1112 cfg_btch, (int)cfg_btch->num_cfg_data);
1113
1114 cfg_btch->num_cfg_data++;
1115 num_chgs++;
1116 }
1117
1118 if (!found_validator) {
1119 snprintf(err_buf, sizeof(err_buf),
1120 "No validator module found for XPATH: '%s",
1121 xpath);
1122 MGMTD_TXN_ERR("***** %s", err_buf);
1123 }
1124 }
1125
1126 cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
1127 if (!num_chgs) {
1128 (void)mgmt_txn_send_commit_cfg_reply(
1129 txn_req->txn, MGMTD_NO_CFG_CHANGES,
1130 "No changes found to commit!");
1131 goto mgmt_txn_create_config_batches_failed;
1132 }
1133
1134 cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
1135 return 0;
1136
1137 mgmt_txn_create_config_batches_failed:
1138
1139 if (xpath)
1140 free(xpath);
1141
1142 return -1;
1143 }
1144
/*
 * Prepare a CONFIG commit: sanity-check the source (candidate) and
 * destination (running) datastores, compute (or reuse) the set of
 * config changes, optionally run local YANG/code validations, and
 * split the changes into per-backend batches. On success the commit
 * moves to the TXN_CREATE phase and the commit timeout timer is armed.
 *
 * Returns 0 on success (including validate-only and abort requests),
 * -1 on failure; every failure path has already sent an error reply to
 * the front-end.
 */
static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
{
	struct nb_context nb_ctx;
	struct nb_config *nb_config;
	struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	int ret;
	bool del_cfg_chgs = false;

	ret = 0;
	memset(&nb_ctx, 0, sizeof(nb_ctx));
	memset(&changes, 0, sizeof(changes));
	/*
	 * A pre-computed changeset attached to the request (e.g. a
	 * rollback) skips datastore checks and validation entirely.
	 */
	if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
		cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
		del_cfg_chgs = true;
		goto mgmt_txn_prep_config_validation_done;
	}

	/* Only CANDIDATE -> RUNNING commits are supported. */
	if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
	    != MGMTD_DS_CANDIDATE) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Source DS cannot be any other than CANDIDATE!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
	    != MGMTD_DS_RUNNING) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Destination DS cannot be any other than RUNNING!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM, "No such source datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"No such destination datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.abort) {
		/*
		 * This is a commit abort request. Return back success.
		 * That should trigger a restore of Candidate datastore to
		 * Running.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		goto mgmt_txn_prepare_config_done;
	}

	nb_config = mgmt_ds_get_nb_config(
		txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
	if (!nb_config) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			"Unable to retrieve Commit DS Config Tree!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/*
	 * Check for diffs from scratch buffer. If found empty
	 * get the diff from Candidate DS itself.
	 */
	cfg_chgs = &nb_config->cfg_chgs;
	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This could be the case when the config is directly
		 * loaded onto the candidate DS from a file. Get the
		 * diff from a full comparison of the candidate and
		 * running DSs.
		 */
		nb_config_diff(
			mgmt_ds_get_nb_config(txn->commit_cfg_req->req
						      .commit_cfg.dst_ds_ctx),
			nb_config, &changes);
		cfg_chgs = &changes;
		del_cfg_chgs = true;
	}

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_NO_CFG_CHANGES,
			"No changes found to be committed!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
				      ->validate_start,
			     NULL);
	/*
	 * Validate YANG contents of the source DS and get the diff
	 * between source and destination DS contents.
	 */
	char err_buf[1024] = {0};
	nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
	nb_ctx.user = (void *)txn;
	ret = nb_candidate_validate_yang(nb_config, false, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		/* Substitute a generic message if none was produced. */
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}
	/*
	 * Perform application level validations locally on the MGMTD
	 * process by calling application specific validation routines
	 * loaded onto MGMTD process using libraries.
	 */
	ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
		/*
		 * This was a validate-only COMMIT request return success.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		goto mgmt_txn_prepare_config_done;
	}
#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */

mgmt_txn_prep_config_validation_done:

	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
				      ->prep_cfg_start,
			     NULL);

	/*
	 * Iterate over the diffs and create ordered batches of config
	 * commands to be validated.
	 */
	ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
	if (ret != 0) {
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/* Move to the Transaction Create Phase */
	txn->commit_cfg_req->req.commit_cfg.curr_phase =
		MGMTD_COMMIT_PHASE_TXN_CREATE;
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	/*
	 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
	 * backend.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
mgmt_txn_prepare_config_done:

	if (cfg_chgs && del_cfg_chgs)
		nb_config_diff_del_changes(cfg_chgs);

	return ret;
}
1331
1332 static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
1333 {
1334 enum mgmt_be_client_id id;
1335 struct mgmt_be_client_adapter *adapter;
1336 struct mgmt_commit_cfg_req *cmtcfg_req;
1337 struct mgmt_txn_be_cfg_batch *cfg_btch;
1338
1339 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1340
1341 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1342 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1343 if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed) {
1344 adapter = mgmt_be_get_adapter_by_id(id);
1345 if (mgmt_be_create_txn(adapter, txn->txn_id)
1346 != 0) {
1347 (void)mgmt_txn_send_commit_cfg_reply(
1348 txn, MGMTD_INTERNAL_ERROR,
1349 "Could not send TXN_CREATE to backend adapter");
1350 return -1;
1351 }
1352
1353 FOREACH_TXN_CFG_BATCH_IN_LIST (
1354 &txn->commit_cfg_req->req.commit_cfg
1355 .curr_batches[id],
1356 cfg_btch)
1357 cfg_btch->comm_phase =
1358 MGMTD_COMMIT_PHASE_TXN_CREATE;
1359 }
1360 }
1361
1362 txn->commit_cfg_req->req.commit_cfg.next_phase =
1363 MGMTD_COMMIT_PHASE_SEND_CFG;
1364
1365 /*
1366 * Dont move the commit to next phase yet. Wait for the TXN_REPLY to
1367 * come back.
1368 */
1369
1370 MGMTD_TXN_DBG(
1371 "Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')", txn,
1372 (unsigned long long)txn->session_id,
1373 mgmt_txn_commit_phase_str(txn, true),
1374 mgmt_txn_commit_phase_str(txn, false));
1375
1376 return 0;
1377 }
1378
1379 static int
1380 mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
1381 struct mgmt_be_client_adapter *adapter)
1382 {
1383 struct mgmt_commit_cfg_req *cmtcfg_req;
1384 struct mgmt_txn_be_cfg_batch *cfg_btch;
1385 struct mgmt_be_cfgreq cfg_req = {0};
1386 size_t num_batches, indx;
1387
1388 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1389
1390 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1391 assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed);
1392
1393 indx = 0;
1394 num_batches =
1395 mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
1396 FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
1397 cfg_btch) {
1398 assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
1399
1400 cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
1401 cfg_req.num_reqs = cfg_btch->num_cfg_data;
1402 indx++;
1403 if (mgmt_be_send_cfg_data_create_req(
1404 adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
1405 indx == num_batches ? true : false)
1406 != 0) {
1407 (void)mgmt_txn_send_commit_cfg_reply(
1408 txn, MGMTD_INTERNAL_ERROR,
1409 "Internal Error! Could not send config data to backend!");
1410 MGMTD_TXN_ERR(
1411 "Could not send CFGDATA_CREATE for Txn %p Batch %p to client '%s",
1412 txn, cfg_btch, adapter->name);
1413 return -1;
1414 }
1415
1416 cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
1417 mgmt_move_txn_cfg_batch_to_next(
1418 cmtcfg_req, cfg_btch,
1419 &cmtcfg_req->curr_batches[adapter->id],
1420 &cmtcfg_req->next_batches[adapter->id], true,
1421 MGMTD_COMMIT_PHASE_SEND_CFG);
1422 }
1423
1424 /*
1425 * This could ne the last Backend Client to send CFGDATA_CREATE_REQ to.
1426 * Try moving the commit to next phase.
1427 */
1428 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
1429
1430 return 0;
1431 }
1432
1433 static int
1434 mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
1435 struct mgmt_be_client_adapter *adapter)
1436 {
1437 struct mgmt_commit_cfg_req *cmtcfg_req;
1438 struct mgmt_txn_be_cfg_batch *cfg_btch;
1439
1440 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1441
1442 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1443 if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed) {
1444 adapter = mgmt_be_get_adapter_by_id(adapter->id);
1445 (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
1446
1447 FOREACH_TXN_CFG_BATCH_IN_LIST (
1448 &txn->commit_cfg_req->req.commit_cfg
1449 .curr_batches[adapter->id],
1450 cfg_btch)
1451 cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
1452 }
1453
1454 return 0;
1455 }
1456
1457 static void mgmt_txn_cfg_commit_timedout(struct event *thread)
1458 {
1459 struct mgmt_txn_ctx *txn;
1460
1461 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1462 assert(txn);
1463
1464 assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
1465
1466 if (!txn->commit_cfg_req)
1467 return;
1468
1469 MGMTD_TXN_ERR(
1470 "Backend operations for Config Txn %p has timedout! Aborting commit!!",
1471 txn);
1472
1473 /*
1474 * Send a COMMIT_CONFIG_REPLY with failure.
1475 * NOTE: The transaction cleanup will be triggered from Front-end
1476 * adapter.
1477 */
1478 mgmt_txn_send_commit_cfg_reply(
1479 txn, MGMTD_INTERNAL_ERROR,
1480 "Operation on the backend timed-out. Aborting commit!");
1481 }
1482
1483 /*
1484 * Send CFG_APPLY_REQs to all the backend client.
1485 *
1486 * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
1487 * for all backend clients has been generated. Please see
1488 * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
1489 * for details.
1490 */
1491 static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
1492 {
1493 enum mgmt_be_client_id id;
1494 struct mgmt_be_client_adapter *adapter;
1495 struct mgmt_commit_cfg_req *cmtcfg_req;
1496 struct mgmt_txn_batches_head *btch_list;
1497 struct mgmt_txn_be_cfg_batch *cfg_btch;
1498
1499 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1500
1501 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1502 if (cmtcfg_req->validate_only) {
1503 /*
1504 * If this was a validate-only COMMIT request return success.
1505 */
1506 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
1507 NULL);
1508 return 0;
1509 }
1510
1511 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1512 if (cmtcfg_req->subscr_info.xpath_subscr[id].notify_config) {
1513 adapter = mgmt_be_get_adapter_by_id(id);
1514 if (!adapter)
1515 return -1;
1516
1517 btch_list = &cmtcfg_req->curr_batches[id];
1518 if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
1519 != 0) {
1520 (void)mgmt_txn_send_commit_cfg_reply(
1521 txn, MGMTD_INTERNAL_ERROR,
1522 "Could not send CFG_APPLY_REQ to backend adapter");
1523 return -1;
1524 }
1525 cmtcfg_req->cmt_stats->last_num_apply_reqs++;
1526
1527 UNSET_FLAG(adapter->flags,
1528 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
1529
1530 FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
1531 cfg_btch->comm_phase =
1532 MGMTD_COMMIT_PHASE_APPLY_CFG;
1533 }
1534 }
1535
1536 txn->commit_cfg_req->req.commit_cfg.next_phase =
1537 MGMTD_COMMIT_PHASE_TXN_DELETE;
1538
1539 /*
1540 * Dont move the commit to next phase yet. Wait for all VALIDATE_REPLIES
1541 * to come back.
1542 */
1543
1544 return 0;
1545 }
1546
/*
 * Event handler driving a CONFIG commit through its phases. Each
 * invocation performs the work of the commit's *current* phase only;
 * the phase transitions themselves happen when backend replies arrive
 * (via mgmt_try_move_commit_to_next_phase() and friends).
 */
static void mgmt_txn_process_commit_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG(
		"Processing COMMIT_CONFIG for Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')",
		txn, (unsigned long long)txn->session_id,
		mgmt_txn_commit_phase_str(txn, true),
		mgmt_txn_commit_phase_str(txn, false));

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	switch (cmtcfg_req->curr_phase) {
	case MGMTD_COMMIT_PHASE_PREPARE_CFG:
		mgmt_txn_prepare_config(txn);
		break;
	case MGMTD_COMMIT_PHASE_TXN_CREATE:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
				     NULL);
		/*
		 * Send TXN_CREATE_REQ to all Backend now.
		 */
		mgmt_txn_send_be_txn_create(txn);
		break;
	case MGMTD_COMMIT_PHASE_SEND_CFG:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
				     NULL);
		/*
		 * All CFGDATA_CREATE_REQ should have been sent to
		 * Backend by now. Nothing to do here but log; the
		 * replies drive the move to the next phase.
		 */
#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
		MGMTD_TXN_DBG(
			"Txn:%p Session:0x%llx, trigger sending CFG_VALIDATE_REQ to all backend clients",
			txn, (unsigned long long)txn->session_id);
#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
		MGMTD_TXN_DBG(
			"Txn:%p Session:0x%llx, trigger sending CFG_APPLY_REQ to all backend clients",
			txn, (unsigned long long)txn->session_id);
#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
		break;
	case MGMTD_COMMIT_PHASE_APPLY_CFG:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
				     NULL);
		/*
		 * We should have received successful CFG_VALIDATE_REPLY from
		 * all concerned Backend Clients by now. Send out the
		 * CFG_APPLY_REQs now.
		 */
		mgmt_txn_send_be_cfg_apply(txn);
		break;
	case MGMTD_COMMIT_PHASE_TXN_DELETE:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
				     NULL);
		/*
		 * We would have sent TXN_DELETE_REQ to all backend by now.
		 * Send a successful CONFIG_COMMIT_REPLY back to front-end.
		 * NOTE: This should also trigger DS merge/unlock and Txn
		 * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
		 * more details.
		 */
		EVENT_OFF(txn->comm_cfg_timeout);
		mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
		break;
	case MGMTD_COMMIT_PHASE_MAX:
		break;
	}

	MGMTD_TXN_DBG(
		"Txn:%p Session:0x%llx, Phase updated to (Current:'%s', Next: '%s')",
		txn, (unsigned long long)txn->session_id,
		mgmt_txn_commit_phase_str(txn, true),
		mgmt_txn_commit_phase_str(txn, false));
}
1631
1632 static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
1633 {
1634 size_t indx;
1635
1636 for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
1637 get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
1638 }
1639
1640 static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
1641 {
1642 int indx;
1643
1644 for (indx = 0; indx < get_reply->num_reply; indx++) {
1645 if (get_reply->reply_xpathp[indx]) {
1646 free(get_reply->reply_xpathp[indx]);
1647 get_reply->reply_xpathp[indx] = 0;
1648 }
1649 if (get_reply->reply_data[indx].xpath) {
1650 zlog_debug("%s free xpath %p", __func__,
1651 get_reply->reply_data[indx].xpath);
1652 free(get_reply->reply_data[indx].xpath);
1653 get_reply->reply_data[indx].xpath = 0;
1654 }
1655 }
1656
1657 get_reply->num_reply = 0;
1658 memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
1659 memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
1660 memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
1661
1662 memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
1663
1664 mgmt_init_get_data_reply(get_reply);
1665 }
1666
1667 static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
1668 {
1669 if (get_data->reply)
1670 mgmt_reset_get_data_reply(get_data->reply);
1671 }
1672
1673 static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
1674 struct mgmt_get_data_req *get_req)
1675 {
1676 struct mgmt_get_data_reply *get_reply;
1677 Mgmtd__YangDataReply *data_reply;
1678
1679 get_reply = get_req->reply;
1680 if (!get_reply)
1681 return;
1682
1683 data_reply = &get_reply->data_reply;
1684 mgmt_yang_data_reply_init(data_reply);
1685 data_reply->n_data = get_reply->num_reply;
1686 data_reply->data = get_reply->reply_datap;
1687 data_reply->next_indx =
1688 (!get_reply->last_batch ? get_req->total_reply : -1);
1689
1690 MGMTD_TXN_DBG("Sending %d Get-Config/Data replies (next-idx:%lld)",
1691 (int) data_reply->n_data,
1692 (long long)data_reply->next_indx);
1693
1694 switch (txn_req->req_event) {
1695 case MGMTD_TXN_PROC_GETCFG:
1696 if (mgmt_fe_send_get_cfg_reply(
1697 txn_req->txn->session_id, txn_req->txn->txn_id,
1698 get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
1699 data_reply, NULL)
1700 != 0) {
1701 MGMTD_TXN_ERR(
1702 "Failed to send GET-CONFIG-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
1703 txn_req->txn,
1704 (unsigned long long)txn_req->txn->session_id,
1705 (unsigned long long)txn_req->req_id);
1706 }
1707 break;
1708 case MGMTD_TXN_PROC_GETDATA:
1709 if (mgmt_fe_send_get_data_reply(
1710 txn_req->txn->session_id, txn_req->txn->txn_id,
1711 get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
1712 data_reply, NULL)
1713 != 0) {
1714 MGMTD_TXN_ERR(
1715 "Failed to send GET-DATA-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
1716 txn_req->txn,
1717 (unsigned long long)txn_req->txn->session_id,
1718 (unsigned long long)txn_req->req_id);
1719 }
1720 break;
1721 case MGMTD_TXN_PROC_SETCFG:
1722 case MGMTD_TXN_PROC_COMMITCFG:
1723 case MGMTD_TXN_COMMITCFG_TIMEOUT:
1724 case MGMTD_TXN_CLEANUP:
1725 MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
1726 txn_req->req_event);
1727 break;
1728 }
1729
1730 /*
1731 * Reset reply buffer for next reply.
1732 */
1733 mgmt_reset_get_data_reply_buf(get_req);
1734 }
1735
1736 static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
1737 char *xpath,
1738 struct lyd_node *node,
1739 struct nb_node *nb_node,
1740 void *ctx)
1741 {
1742 struct mgmt_txn_req *txn_req;
1743 struct mgmt_get_data_req *get_req;
1744 struct mgmt_get_data_reply *get_reply;
1745 Mgmtd__YangData *data;
1746 Mgmtd__YangDataValue *data_value;
1747
1748 txn_req = (struct mgmt_txn_req *)ctx;
1749 if (!txn_req)
1750 goto mgmtd_ignore_get_cfg_reply_data;
1751
1752 if (!(node->schema->nodetype & LYD_NODE_TERM))
1753 goto mgmtd_ignore_get_cfg_reply_data;
1754
1755 assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
1756 || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
1757
1758 get_req = txn_req->req.get_data;
1759 assert(get_req);
1760 get_reply = get_req->reply;
1761 data = &get_reply->reply_data[get_reply->num_reply];
1762 data_value = &get_reply->reply_value[get_reply->num_reply];
1763
1764 mgmt_yang_data_init(data);
1765 data->xpath = xpath;
1766 mgmt_yang_data_value_init(data_value);
1767 data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1768 data_value->encoded_str_val = (char *)lyd_get_value(node);
1769 data->value = data_value;
1770
1771 get_reply->num_reply++;
1772 get_req->total_reply++;
1773 MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
1774 data->xpath, data_value->encoded_str_val);
1775
1776 if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
1777 mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
1778
1779 return;
1780
1781 mgmtd_ignore_get_cfg_reply_data:
1782 if (xpath)
1783 free(xpath);
1784 }
1785
1786 static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
1787 struct mgmt_txn_req *txn_req,
1788 struct mgmt_ds_ctx *ds_ctx)
1789 {
1790 struct mgmt_txn_reqs_head *req_list = NULL;
1791 struct mgmt_txn_reqs_head *pending_list = NULL;
1792 int indx;
1793 struct mgmt_get_data_req *get_data;
1794 struct mgmt_get_data_reply *get_reply;
1795
1796 switch (txn_req->req_event) {
1797 case MGMTD_TXN_PROC_GETCFG:
1798 req_list = &txn->get_cfg_reqs;
1799 break;
1800 case MGMTD_TXN_PROC_GETDATA:
1801 req_list = &txn->get_data_reqs;
1802 break;
1803 case MGMTD_TXN_PROC_SETCFG:
1804 case MGMTD_TXN_PROC_COMMITCFG:
1805 case MGMTD_TXN_COMMITCFG_TIMEOUT:
1806 case MGMTD_TXN_CLEANUP:
1807 assert(!"Wrong txn request type!");
1808 break;
1809 }
1810
1811 get_data = txn_req->req.get_data;
1812
1813 if (!get_data->reply) {
1814 get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
1815 sizeof(struct mgmt_get_data_reply));
1816 if (!get_data->reply) {
1817 mgmt_fe_send_get_cfg_reply(
1818 txn->session_id, txn->txn_id,
1819 get_data->ds_id, txn_req->req_id,
1820 MGMTD_INTERNAL_ERROR, NULL,
1821 "Internal error: Unable to allocate reply buffers!");
1822 goto mgmt_txn_get_config_failed;
1823 }
1824 }
1825
1826 /*
1827 * Read data contents from the DS and respond back directly.
1828 * No need to go to backend for getting data.
1829 */
1830 get_reply = get_data->reply;
1831 for (indx = 0; indx < get_data->num_xpaths; indx++) {
1832 MGMTD_TXN_DBG("Trying to get all data under '%s'",
1833 get_data->xpaths[indx]);
1834 mgmt_init_get_data_reply(get_reply);
1835 if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
1836 mgmt_txn_iter_and_send_get_cfg_reply,
1837 (void *)txn_req, true)
1838 == -1) {
1839 MGMTD_TXN_DBG("Invalid Xpath '%s",
1840 get_data->xpaths[indx]);
1841 mgmt_fe_send_get_cfg_reply(
1842 txn->session_id, txn->txn_id,
1843 get_data->ds_id, txn_req->req_id,
1844 MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
1845 goto mgmt_txn_get_config_failed;
1846 }
1847 MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
1848 get_reply->num_reply, get_data->xpaths[indx]);
1849 get_reply->last_batch = true;
1850 mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
1851 }
1852
1853 mgmt_txn_get_config_failed:
1854
1855 if (pending_list) {
1856 /*
1857 * Move the transaction to corresponding pending list.
1858 */
1859 if (req_list)
1860 mgmt_txn_reqs_del(req_list, txn_req);
1861 txn_req->pending_be_proc = true;
1862 mgmt_txn_reqs_add_tail(pending_list, txn_req);
1863 MGMTD_TXN_DBG(
1864 "Moved Req: %p for Txn: %p from Req-List to Pending-List",
1865 txn_req, txn_req->txn);
1866 } else {
1867 /*
1868 * Delete the txn request. It will also remove it from request
1869 * list.
1870 */
1871 mgmt_txn_req_free(&txn_req);
1872 }
1873
1874 return 0;
1875 }
1876
1877 static void mgmt_txn_process_get_cfg(struct event *thread)
1878 {
1879 struct mgmt_txn_ctx *txn;
1880 struct mgmt_txn_req *txn_req;
1881 struct mgmt_ds_ctx *ds_ctx;
1882 int num_processed = 0;
1883 bool error;
1884
1885 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1886 assert(txn);
1887
1888 MGMTD_TXN_DBG(
1889 "Processing %d GET_CONFIG requests for Txn:%p Session:0x%llx",
1890 (int)mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn,
1891 (unsigned long long)txn->session_id);
1892
1893 FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
1894 error = false;
1895 assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
1896 ds_ctx = txn_req->req.get_data->ds_ctx;
1897 if (!ds_ctx) {
1898 mgmt_fe_send_get_cfg_reply(
1899 txn->session_id, txn->txn_id,
1900 txn_req->req.get_data->ds_id, txn_req->req_id,
1901 MGMTD_INTERNAL_ERROR, NULL,
1902 "No such datastore!");
1903 error = true;
1904 goto mgmt_txn_process_get_cfg_done;
1905 }
1906
1907 if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
1908 MGMTD_TXN_ERR(
1909 "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
1910 txn_req->req.get_data->ds_id, txn,
1911 (unsigned long long)txn->session_id,
1912 (unsigned long long)txn_req->req_id);
1913 error = true;
1914 }
1915
1916 mgmt_txn_process_get_cfg_done:
1917
1918 if (error) {
1919 /*
1920 * Delete the txn request.
1921 * Note: The following will remove it from the list
1922 * as well.
1923 */
1924 mgmt_txn_req_free(&txn_req);
1925 }
1926
1927 /*
1928 * Else the transaction would have been already deleted or
1929 * moved to corresponding pending list. No need to delete it.
1930 */
1931 num_processed++;
1932 if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
1933 break;
1934 }
1935
1936 if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
1937 MGMTD_TXN_DBG(
1938 "Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
1939 num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
1940 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
1941 }
1942 }
1943
1944 static void mgmt_txn_process_get_data(struct event *thread)
1945 {
1946 struct mgmt_txn_ctx *txn;
1947 struct mgmt_txn_req *txn_req;
1948 struct mgmt_ds_ctx *ds_ctx;
1949 int num_processed = 0;
1950 bool error;
1951
1952 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1953 assert(txn);
1954
1955 MGMTD_TXN_DBG(
1956 "Processing %d GET_DATA requests for Txn:%p Session:0x%llx",
1957 (int)mgmt_txn_reqs_count(&txn->get_data_reqs), txn,
1958 (unsigned long long)txn->session_id);
1959
1960 FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
1961 error = false;
1962 assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
1963 ds_ctx = txn_req->req.get_data->ds_ctx;
1964 if (!ds_ctx) {
1965 mgmt_fe_send_get_data_reply(
1966 txn->session_id, txn->txn_id,
1967 txn_req->req.get_data->ds_id, txn_req->req_id,
1968 MGMTD_INTERNAL_ERROR, NULL,
1969 "No such datastore!");
1970 error = true;
1971 goto mgmt_txn_process_get_data_done;
1972 }
1973
1974 if (mgmt_ds_is_config(ds_ctx)) {
1975 if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
1976 != 0) {
1977 MGMTD_TXN_ERR(
1978 "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
1979 txn_req->req.get_data->ds_id, txn,
1980 (unsigned long long)txn->session_id,
1981 (unsigned long long)txn_req->req_id);
1982 error = true;
1983 }
1984 } else {
1985 /*
1986 * TODO: Trigger GET procedures for Backend
1987 * For now return back error.
1988 */
1989 mgmt_fe_send_get_data_reply(
1990 txn->session_id, txn->txn_id,
1991 txn_req->req.get_data->ds_id, txn_req->req_id,
1992 MGMTD_INTERNAL_ERROR, NULL,
1993 "GET-DATA on Oper DS is not supported yet!");
1994 error = true;
1995 }
1996
1997 mgmt_txn_process_get_data_done:
1998
1999 if (error) {
2000 /*
2001 * Delete the txn request.
2002 * Note: The following will remove it from the list
2003 * as well.
2004 */
2005 mgmt_txn_req_free(&txn_req);
2006 }
2007
2008 /*
2009 * Else the transaction would have been already deleted or
2010 * moved to corresponding pending list. No need to delete it.
2011 */
2012 num_processed++;
2013 if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
2014 break;
2015 }
2016
2017 if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
2018 MGMTD_TXN_DBG(
2019 "Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
2020 num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
2021 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
2022 }
2023 }
2024
2025 static struct mgmt_txn_ctx *
2026 mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
2027 enum mgmt_txn_type type)
2028 {
2029 struct mgmt_txn_ctx *txn;
2030
2031 FOREACH_TXN_IN_LIST (cm, txn) {
2032 if (txn->session_id == session_id && txn->type == type)
2033 return txn;
2034 }
2035
2036 return NULL;
2037 }
2038
/*
 * Create (or reuse) a transaction context for the given frontend
 * session.
 *
 * For MGMTD_TXN_TYPE_CONFIG only one transaction may exist at a time
 * globally; if one is already running it is returned only when it
 * belongs to the same session, otherwise NULL is returned.  For other
 * types an existing transaction for the same (session, type) pair is
 * reused.  A freshly created transaction holds one module-owned
 * reference (dropped via mgmt_txn_delete()).
 */
static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
						enum mgmt_txn_type type)
{
	struct mgmt_txn_ctx *txn = NULL;

	/*
	 * For 'CONFIG' transaction check if one is already created
	 * or not.
	 */
	if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
		if (mgmt_config_txn_in_progress() == session_id)
			txn = mgmt_txn_mm->cfg_txn;
		goto mgmt_create_txn_done;
	}

	txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
					     type);
	if (!txn) {
		txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
		assert(txn);

		txn->session_id = session_id;
		txn->type = type;
		mgmt_txn_badapters_init(&txn->be_adapters);
		mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
		mgmt_txn_reqs_init(&txn->set_cfg_reqs);
		mgmt_txn_reqs_init(&txn->get_cfg_reqs);
		mgmt_txn_reqs_init(&txn->get_data_reqs);
		mgmt_txn_reqs_init(&txn->pending_get_datas);
		txn->commit_cfg_req = NULL;
		txn->refcount = 0;
		/* Txn-id 0 appears reserved; skip it on wrap-around. */
		if (!mgmt_txn_mm->next_txn_id)
			mgmt_txn_mm->next_txn_id++;
		txn->txn_id = mgmt_txn_mm->next_txn_id++;
		/* Index by txn_id; the entry is the txn itself. */
		hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);

		MGMTD_TXN_DBG("Added new '%s' MGMTD Transaction '%p'",
			      mgmt_txn_type2str(type), txn);

		if (type == MGMTD_TXN_TYPE_CONFIG)
			mgmt_txn_mm->cfg_txn = txn;

		/* Module's own reference on the new transaction. */
		MGMTD_TXN_LOCK(txn);
	}

mgmt_create_txn_done:
	return txn;
}
2087
/*
 * Drop the caller's reference on the transaction and NULL the pointer.
 * Actual teardown happens in mgmt_txn_unlock() once the refcount
 * reaches zero.
 */
static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
{
	MGMTD_TXN_UNLOCK(txn);
}
2092
2093 static unsigned int mgmt_txn_hash_key(const void *data)
2094 {
2095 const struct mgmt_txn_ctx *txn = data;
2096
2097 return jhash2((uint32_t *) &txn->txn_id,
2098 sizeof(txn->txn_id) / sizeof(uint32_t), 0);
2099 }
2100
2101 static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
2102 {
2103 const struct mgmt_txn_ctx *txn1 = d1;
2104 const struct mgmt_txn_ctx *txn2 = d2;
2105
2106 return (txn1->txn_id == txn2->txn_id);
2107 }
2108
/*
 * Hash-table destructor callback: releases the module's reference on
 * the transaction stored in the bucket.
 */
static void mgmt_txn_hash_free(void *data)
{
	struct mgmt_txn_ctx *txn = data;

	mgmt_txn_delete(&txn);
}
2115
2116 static void mgmt_txn_hash_init(void)
2117 {
2118 if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
2119 return;
2120
2121 mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
2122 mgmt_txn_hash_cmp,
2123 "MGMT Transactions");
2124 }
2125
2126 static void mgmt_txn_hash_destroy(void)
2127 {
2128 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2129 return;
2130
2131 hash_clean(mgmt_txn_mm->txn_hash,
2132 mgmt_txn_hash_free);
2133 hash_free(mgmt_txn_mm->txn_hash);
2134 mgmt_txn_mm->txn_hash = NULL;
2135 }
2136
2137 static inline struct mgmt_txn_ctx *
2138 mgmt_txn_id2ctx(uint64_t txn_id)
2139 {
2140 struct mgmt_txn_ctx key = {0};
2141 struct mgmt_txn_ctx *txn;
2142
2143 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2144 return NULL;
2145
2146 key.txn_id = txn_id;
2147 txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
2148
2149 return txn;
2150 }
2151
/*
 * Take a reference on the transaction; file/line identify the caller
 * (passed in via the MGMTD_TXN_LOCK macro) for the debug trace.
 */
static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
			  int line)
{
	txn->refcount++;
	MGMTD_TXN_DBG("%s:%d --> Lock %s Txn %p, Count: %d", file, line,
		      mgmt_txn_type2str(txn->type), txn, txn->refcount);
}
2159
/*
 * Drop one reference from the transaction.  When the last reference
 * is released the transaction is fully torn down: pending timer
 * events are cancelled, the entry is removed from the hash table and
 * the global list, and the memory is freed.  The caller's pointer is
 * always NULLed, even when the context itself survives.
 */
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
			    int line)
{
	assert(*txn && (*txn)->refcount);

	(*txn)->refcount--;
	MGMTD_TXN_DBG("%s:%d --> Unlock %s Txn %p, Count: %d", file, line,
		      mgmt_txn_type2str((*txn)->type), *txn,
		      (*txn)->refcount);
	if (!(*txn)->refcount) {
		/* Clear the global CONFIG-txn slot if this was it. */
		if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
			if (mgmt_txn_mm->cfg_txn == *txn)
				mgmt_txn_mm->cfg_txn = NULL;
		/* Cancel any still-scheduled processing events. */
		EVENT_OFF((*txn)->proc_get_cfg);
		EVENT_OFF((*txn)->proc_get_data);
		EVENT_OFF((*txn)->proc_comm_cfg);
		EVENT_OFF((*txn)->comm_cfg_timeout);
		hash_release(mgmt_txn_mm->txn_hash, *txn);
		mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);

		MGMTD_TXN_DBG("Deleted %s Txn %p for Sessn: 0x%llx",
			      mgmt_txn_type2str((*txn)->type), *txn,
			      (unsigned long long)(*txn)->session_id);

		XFREE(MTYPE_MGMTD_TXN, *txn);
	}

	*txn = NULL;
}
2189
/*
 * Clean up a single transaction; currently just drops the module's
 * reference (which frees the txn when it is the last one).
 */
static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
{
	/* TODO: Any other cleanup applicable */

	mgmt_txn_delete(txn);
}
2196
2197 static void
2198 mgmt_txn_cleanup_all_txns(void)
2199 {
2200 struct mgmt_txn_ctx *txn;
2201
2202 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2203 return;
2204
2205 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
2206 mgmt_txn_cleanup_txn(&txn);
2207 }
2208
/*
 * Timer callback for the MGMTD_TXN_CLEANUP event; the event argument
 * is the transaction to clean up.
 */
static void mgmt_txn_cleanup(struct event *thread)
{
	struct mgmt_txn_ctx *txn = EVENT_ARG(thread);

	assert(txn);
	mgmt_txn_cleanup_txn(&txn);
}
2218
2219 static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
2220 enum mgmt_txn_event event)
2221 {
2222 struct timeval tv = {.tv_sec = 0,
2223 .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};
2224
2225 assert(mgmt_txn_mm && mgmt_txn_tm);
2226
2227 switch (event) {
2228 case MGMTD_TXN_PROC_SETCFG:
2229 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
2230 txn, &tv, &txn->proc_set_cfg);
2231 break;
2232 case MGMTD_TXN_PROC_COMMITCFG:
2233 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
2234 txn, &tv, &txn->proc_comm_cfg);
2235 break;
2236 case MGMTD_TXN_PROC_GETCFG:
2237 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
2238 txn, &tv, &txn->proc_get_cfg);
2239 break;
2240 case MGMTD_TXN_PROC_GETDATA:
2241 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
2242 txn, &tv, &txn->proc_get_data);
2243 break;
2244 case MGMTD_TXN_COMMITCFG_TIMEOUT:
2245 event_add_timer_msec(mgmt_txn_tm,
2246 mgmt_txn_cfg_commit_timedout, txn,
2247 MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
2248 &txn->comm_cfg_timeout);
2249 break;
2250 case MGMTD_TXN_CLEANUP:
2251 tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
2252 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
2253 &txn->clnup);
2254 }
2255 }
2256
/*
 * One-time initialization of the transaction module.
 *
 * Records the master context and event loop, initializes the global
 * transaction list and hash table.  Calling it twice is a programming
 * error (asserted).  Always returns 0.
 */
int mgmt_txn_init(struct mgmt_master *mm, struct event_loop *tm)
{
	if (mgmt_txn_mm || mgmt_txn_tm)
		assert(!"MGMTD TXN: Call txn_init() only once");

	mgmt_txn_mm = mm;
	mgmt_txn_tm = tm;
	mgmt_txns_init(&mm->txn_list);
	mgmt_txn_hash_init();
	/* No CONFIG transaction may pre-exist at init time. */
	assert(!mm->cfg_txn);
	mm->cfg_txn = NULL;

	return 0;
}
2271
/*
 * Module teardown: clean up every outstanding transaction, then
 * destroy the hash table (order matters — cleanup walks the table).
 */
void mgmt_txn_destroy(void)
{
	mgmt_txn_cleanup_all_txns();
	mgmt_txn_hash_destroy();
}
2277
2278 uint64_t mgmt_config_txn_in_progress(void)
2279 {
2280 if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
2281 return mgmt_txn_mm->cfg_txn->session_id;
2282
2283 return MGMTD_SESSION_ID_NONE;
2284 }
2285
2286 uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
2287 {
2288 struct mgmt_txn_ctx *txn;
2289
2290 txn = mgmt_txn_create_new(session_id, type);
2291 return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
2292 }
2293
/* True when the given id resolves to a live transaction. */
bool mgmt_txn_id_is_valid(uint64_t txn_id)
{
	/* Idiom cleanup: `ptr ? true : false` is a redundant ternary. */
	return mgmt_txn_id2ctx(txn_id) != NULL;
}
2298
2299 void mgmt_destroy_txn(uint64_t *txn_id)
2300 {
2301 struct mgmt_txn_ctx *txn;
2302
2303 txn = mgmt_txn_id2ctx(*txn_id);
2304 if (!txn)
2305 return;
2306
2307 mgmt_txn_delete(&txn);
2308 *txn_id = MGMTD_TXN_ID_NONE;
2309 }
2310
2311 enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
2312 {
2313 struct mgmt_txn_ctx *txn;
2314
2315 txn = mgmt_txn_id2ctx(txn_id);
2316 if (!txn)
2317 return MGMTD_TXN_TYPE_NONE;
2318
2319 return txn->type;
2320 }
2321
2322 int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
2323 Mgmtd__DatastoreId ds_id,
2324 struct mgmt_ds_ctx *ds_ctx,
2325 Mgmtd__YangCfgDataReq **cfg_req,
2326 size_t num_req, bool implicit_commit,
2327 Mgmtd__DatastoreId dst_ds_id,
2328 struct mgmt_ds_ctx *dst_ds_ctx)
2329 {
2330 struct mgmt_txn_ctx *txn;
2331 struct mgmt_txn_req *txn_req;
2332 size_t indx;
2333 uint16_t *num_chgs;
2334 struct nb_cfg_change *cfg_chg;
2335
2336 txn = mgmt_txn_id2ctx(txn_id);
2337 if (!txn)
2338 return -1;
2339
2340 if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
2341 MGMTD_TXN_ERR(
2342 "For implicit commit config only one SETCFG-REQ can be allowed!");
2343 return -1;
2344 }
2345
2346 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
2347 txn_req->req.set_cfg->ds_id = ds_id;
2348 txn_req->req.set_cfg->ds_ctx = ds_ctx;
2349 num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
2350 for (indx = 0; indx < num_req; indx++) {
2351 cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
2352
2353 if (cfg_req[indx]->req_type
2354 == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
2355 cfg_chg->operation = NB_OP_DESTROY;
2356 else if (cfg_req[indx]->req_type
2357 == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
2358 cfg_chg->operation =
2359 mgmt_ds_find_data_node_by_xpath(
2360 ds_ctx, cfg_req[indx]->data->xpath)
2361 ? NB_OP_MODIFY
2362 : NB_OP_CREATE;
2363 else
2364 continue;
2365
2366 MGMTD_TXN_DBG(
2367 "XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
2368 (cfg_req[indx]->data->value
2369 && cfg_req[indx]
2370 ->data->value
2371 ->encoded_str_val
2372 ? cfg_req[indx]->data->value->encoded_str_val
2373 : "NULL"));
2374 strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
2375 sizeof(cfg_chg->xpath));
2376 cfg_chg->value = (cfg_req[indx]->data->value
2377 && cfg_req[indx]
2378 ->data->value
2379 ->encoded_str_val
2380 ? strdup(cfg_req[indx]
2381 ->data->value
2382 ->encoded_str_val)
2383 : NULL);
2384 if (cfg_chg->value)
2385 MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
2386 cfg_chg->value, cfg_chg->value);
2387
2388 (*num_chgs)++;
2389 }
2390 txn_req->req.set_cfg->implicit_commit = implicit_commit;
2391 txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
2392 txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
2393 txn_req->req.set_cfg->setcfg_stats =
2394 mgmt_fe_get_session_setcfg_stats(txn->session_id);
2395 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
2396
2397 return 0;
2398 }
2399
/*
 * Queue a COMMIT-CONFIG request on the given transaction.
 *
 * Only one commit may be outstanding per transaction.  Returns -1 when
 * the txn-id is unknown or a commit is already in progress, 0 once the
 * COMMITCFG processing event has been scheduled.
 */
int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
				    Mgmtd__DatastoreId src_ds_id,
				    struct mgmt_ds_ctx *src_ds_ctx,
				    Mgmtd__DatastoreId dst_ds_id,
				    struct mgmt_ds_ctx *dst_ds_ctx,
				    bool validate_only, bool abort,
				    bool implicit)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	if (txn->commit_cfg_req) {
		MGMTD_TXN_ERR(
			"A commit is already in-progress for Txn %p, session 0x%llx. Cannot start another!",
			txn, (unsigned long long)txn->session_id);
		return -1;
	}

	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = src_ds_id;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = validate_only;
	txn_req->req.commit_cfg.abort = abort;
	txn_req->req.commit_cfg.implicit = implicit;
	txn_req->req.commit_cfg.cmt_stats =
		mgmt_fe_get_session_commit_stats(txn->session_id);

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}
2439
/*
 * Handle a backend adapter connect/disconnect notification.
 *
 * On connect: fetch the config slice relevant to this backend from
 * the running datastore and, if non-empty, spin up an internal CONFIG
 * transaction (session-id 0) to push it down.  On disconnect: fail
 * any in-flight CONFIG transaction that had this backend subscribed.
 *
 * Returns 0 on success, -1 when the internal transaction could not be
 * created.
 */
int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
				    bool connect)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	/* Static so the pointer stays valid for the async commit. */
	static struct mgmt_commit_stats dummy_stats;
	struct nb_config_cbs *adapter_cfgs = NULL;

	memset(&dummy_stats, 0, sizeof(dummy_stats));
	if (connect) {
		/* Get config for this single backend client */
		mgmt_be_get_adapter_config(adapter, mm->running_ds,
					   &adapter_cfgs);

		/* Nothing to push: mark the backend as already synced. */
		if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
			SET_FLAG(adapter->flags,
				 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
			return 0;
		}

		/*
		 * Create a CONFIG transaction to push the config changes
		 * provided to the backend client.
		 */
		txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
		if (!txn) {
			MGMTD_TXN_ERR(
				"Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
				adapter->name);
			return -1;
		}

		MGMTD_TXN_DBG("Created initial txn %" PRIu64
			      " for BE connection %s",
			      txn->txn_id, adapter->name);
		/*
		 * Set the changeset for transaction to commit and trigger the
		 * commit request.
		 */
		txn_req =
			mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
		txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.src_ds_ctx = 0;
		txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.dst_ds_ctx = 0;
		txn_req->req.commit_cfg.validate_only = false;
		txn_req->req.commit_cfg.abort = false;
		txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
		txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;

		/*
		 * Trigger a COMMIT-CONFIG process.
		 */
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	} else {
		/*
		 * Check if any transaction is currently on-going that
		 * involves this backend client. If so, report the transaction
		 * has failed.
		 */
		FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
			if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
				cmtcfg_req = txn->commit_cfg_req
						     ? &txn->commit_cfg_req
								->req.commit_cfg
						     : NULL;
				if (cmtcfg_req
				    && cmtcfg_req->subscr_info
					       .xpath_subscr[adapter->id]
					       .subscribed) {
					mgmt_txn_send_commit_cfg_reply(
						txn, MGMTD_INTERNAL_ERROR,
						"Backend daemon disconnected while processing commit!");
				}
			}
		}
	}

	return 0;
}
2522
/*
 * Handle a backend's TXN_CREATE / TXN_DELETE reply.
 *
 * For a successful create, push the backend's config data right away;
 * a failed create aborts the whole commit.  Delete replies are
 * effectively ignored (see the disabled call below).
 *
 * Returns -1 when the txn-id is unknown or not a CONFIG transaction,
 * 0 otherwise.
 */
int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
				 bool success,
				 struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	/* A late TXN_DELETE reply after the commit finished is fine. */
	if (!create && !txn->commit_cfg_req)
		return 0;

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	if (create) {
		if (success) {
			/*
			 * Done with TXN_CREATE. Move the backend client to
			 * next phase.
			 */
			assert(cmtcfg_req->curr_phase
			       == MGMTD_COMMIT_PHASE_TXN_CREATE);

			/*
			 * Send CFGDATA_CREATE-REQs to the backend immediately.
			 */
			mgmt_txn_send_be_cfg_data(txn, adapter);
		} else {
			mgmt_txn_send_commit_cfg_reply(
				txn, MGMTD_INTERNAL_ERROR,
				"Internal error! Failed to initiate transaction at backend!");
		}
	} else {
		/*
		 * Done with TXN_DELETE. Move the backend client to next phase.
		 */
		/* NOTE(review): deliberately disabled upstream via
		 * `if (false)` — phase advance on TXN_DELETE appears to
		 * happen elsewhere; confirm before re-enabling.
		 */
		if (false)
			mgmt_move_be_commit_to_next_phase(txn, adapter);
	}

	return 0;
}
2567
/*
 * Handle a backend's CFGDATA_CREATE reply for one config batch.
 *
 * On success the batch advances to the APPLY_CFG phase and the overall
 * commit is nudged forward; on failure the whole commit is aborted
 * with the backend-provided (or a generic) error.
 *
 * Returns -1 on an unknown txn/batch or a batch from a different txn,
 * 0 otherwise (including the handled-failure path).
 */
int mgmt_txn_notify_be_cfgdata_reply(
	uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
	struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_be_cfg_batch *cfg_btch;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	if (!txn->commit_cfg_req)
		return -1;
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
	if (!cfg_btch || cfg_btch->txn != txn)
		return -1;

	if (!success) {
		MGMTD_TXN_ERR(
			"CFGDATA_CREATE_REQ sent to '%s' failed for Txn %p, Batch %p, Err: %s",
			adapter->name, txn, cfg_btch,
			error_if_any ? error_if_any : "None");
		/* Abort the entire commit on any batch failure. */
		mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			error_if_any ? error_if_any :
			"Internal error! Failed to download config data to backend!");
		return 0;
	}

	MGMTD_TXN_DBG(
		"CFGDATA_CREATE_REQ sent to '%s' was successful for Txn %p, Batch %p, Err: %s",
		adapter->name, txn, cfg_btch,
		error_if_any ? error_if_any : "None");
	/* Move this batch from the SEND_CFG list to the APPLY_CFG list. */
	mgmt_move_txn_cfg_batch_to_next(
		cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
		&cmtcfg_req->next_batches[adapter->id], true,
		MGMTD_COMMIT_PHASE_APPLY_CFG);

	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
2613
2614 int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
2615 uint64_t batch_ids[],
2616 size_t num_batch_ids, char *error_if_any,
2617 struct mgmt_be_client_adapter *adapter)
2618 {
2619 struct mgmt_txn_ctx *txn;
2620 struct mgmt_txn_be_cfg_batch *cfg_btch;
2621 struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
2622 size_t indx;
2623
2624 txn = mgmt_txn_id2ctx(txn_id);
2625 if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
2626 || !txn->commit_cfg_req)
2627 return -1;
2628
2629 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
2630
2631 if (!success) {
2632 MGMTD_TXN_ERR(
2633 "CFGDATA_APPLY_REQ sent to '%s' failed for Txn %p, Batches [0x%llx - 0x%llx], Err: %s",
2634 adapter->name, txn, (unsigned long long)batch_ids[0],
2635 (unsigned long long)batch_ids[num_batch_ids - 1],
2636 error_if_any ? error_if_any : "None");
2637 mgmt_txn_send_commit_cfg_reply(
2638 txn, MGMTD_INTERNAL_ERROR,
2639 error_if_any ? error_if_any :
2640 "Internal error! Failed to apply config data on backend!");
2641 return 0;
2642 }
2643
2644 for (indx = 0; indx < num_batch_ids; indx++) {
2645 cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
2646 if (cfg_btch->txn != txn)
2647 return -1;
2648 mgmt_move_txn_cfg_batch_to_next(
2649 cmtcfg_req, cfg_btch,
2650 &cmtcfg_req->curr_batches[adapter->id],
2651 &cmtcfg_req->next_batches[adapter->id], true,
2652 MGMTD_COMMIT_PHASE_TXN_DELETE);
2653 }
2654
2655 if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
2656 /*
2657 * All configuration for the specific backend has been applied.
2658 * Send TXN-DELETE to wrap up the transaction for this backend.
2659 */
2660 SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
2661 mgmt_txn_send_be_txn_delete(txn, adapter);
2662 }
2663
2664 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
2665 if (mm->perf_stats_en)
2666 gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
2667
2668 return 0;
2669 }
2670
2671 int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
2672 enum mgmt_result result,
2673 const char *error_if_any)
2674 {
2675 struct mgmt_txn_ctx *txn;
2676
2677 txn = mgmt_txn_id2ctx(txn_id);
2678 if (!txn)
2679 return -1;
2680
2681 if (!txn->commit_cfg_req) {
2682 MGMTD_TXN_ERR(
2683 "NO commit in-progress for Txn %p, session 0x%llx!",
2684 txn, (unsigned long long)txn->session_id);
2685 return -1;
2686 }
2687
2688 return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
2689 }
2690
2691 int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
2692 Mgmtd__DatastoreId ds_id,
2693 struct mgmt_ds_ctx *ds_ctx,
2694 Mgmtd__YangGetDataReq **data_req,
2695 size_t num_reqs)
2696 {
2697 struct mgmt_txn_ctx *txn;
2698 struct mgmt_txn_req *txn_req;
2699 size_t indx;
2700
2701 txn = mgmt_txn_id2ctx(txn_id);
2702 if (!txn)
2703 return -1;
2704
2705 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
2706 txn_req->req.get_data->ds_id = ds_id;
2707 txn_req->req.get_data->ds_ctx = ds_ctx;
2708 for (indx = 0;
2709 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2710 indx++) {
2711 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2712 txn_req->req.get_data->xpaths[indx] =
2713 strdup(data_req[indx]->data->xpath);
2714 txn_req->req.get_data->num_xpaths++;
2715 }
2716
2717 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
2718
2719 return 0;
2720 }
2721
2722 int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
2723 Mgmtd__DatastoreId ds_id,
2724 struct mgmt_ds_ctx *ds_ctx,
2725 Mgmtd__YangGetDataReq **data_req,
2726 size_t num_reqs)
2727 {
2728 struct mgmt_txn_ctx *txn;
2729 struct mgmt_txn_req *txn_req;
2730 size_t indx;
2731
2732 txn = mgmt_txn_id2ctx(txn_id);
2733 if (!txn)
2734 return -1;
2735
2736 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
2737 txn_req->req.get_data->ds_id = ds_id;
2738 txn_req->req.get_data->ds_ctx = ds_ctx;
2739 for (indx = 0;
2740 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2741 indx++) {
2742 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2743 txn_req->req.get_data->xpaths[indx] =
2744 strdup(data_req[indx]->data->xpath);
2745 txn_req->req.get_data->num_xpaths++;
2746 }
2747
2748 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
2749
2750 return 0;
2751 }
2752
2753 void mgmt_txn_status_write(struct vty *vty)
2754 {
2755 struct mgmt_txn_ctx *txn;
2756
2757 vty_out(vty, "MGMTD Transactions\n");
2758
2759 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
2760 vty_out(vty, " Txn: \t\t\t%p\n", txn);
2761 vty_out(vty, " Txn-Id: \t\t\t%llu\n",
2762 (unsigned long long)txn->txn_id);
2763 vty_out(vty, " Session-Id: \t\t%llu\n",
2764 (unsigned long long)txn->session_id);
2765 vty_out(vty, " Type: \t\t\t%s\n",
2766 mgmt_txn_type2str(txn->type));
2767 vty_out(vty, " Ref-Count: \t\t\t%d\n", txn->refcount);
2768 }
2769 vty_out(vty, " Total: %d\n",
2770 (int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
2771 }
2772
/*
 * Trigger application of a rollback: diff the two datastores and push
 * the resulting changeset through an internal CONFIG transaction.
 *
 * Returns -1 when the diff is empty or the transaction could not be
 * created, 0 once the COMMITCFG event has been scheduled.
 */
int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
					struct mgmt_ds_ctx *dst_ds_ctx)
{
	/*
	 * Static so the changeset and stats remain valid while the
	 * asynchronous commit (scheduled below) consumes them — which
	 * also means only one rollback can safely be in flight at a
	 * time.
	 */
	static struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	static struct mgmt_commit_stats dummy_stats;

	memset(&changes, 0, sizeof(changes));
	memset(&dummy_stats, 0, sizeof(dummy_stats));
	/*
	 * This could be the case when the config is directly
	 * loaded onto the candidate DS from a file. Get the
	 * diff from a full comparison of the candidate and
	 * running DSs.
	 */
	nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
		       mgmt_ds_get_nb_config(src_ds_ctx), &changes);
	cfg_chgs = &changes;

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		return -1;
	}

	/*
	 * Create a CONFIG transaction to push the config changes
	 * provided to the backend client.
	 */
	txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
	if (!txn) {
		MGMTD_TXN_ERR(
			"Failed to create CONFIG Transaction for downloading CONFIGs");
		return -1;
	}

	MGMTD_TXN_DBG("Created rollback txn %" PRIu64, txn->txn_id);

	/*
	 * Set the changeset for transaction to commit and trigger the commit
	 * request.
	 */
	txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = false;
	txn_req->req.commit_cfg.abort = false;
	/* Mark this commit as a rollback so it is not re-recorded. */
	txn_req->req.commit_cfg.rollback = true;
	txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
	txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}