/*
 * Source: mirror_frr.git - mgmtd/mgmt_txn.c
 * (blob 3d818cb4c2598ae8f82a3615aa151738d03c4aa8, via git.proxmox.com)
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * MGMTD Transactions
4 *
5 * Copyright (C) 2021 Vmware, Inc.
6 * Pushpasis Sarkar <spushpasis@vmware.com>
7 */
8
9 #include <zebra.h>
10 #include "hash.h"
11 #include "jhash.h"
12 #include "libfrr.h"
13 #include "mgmtd/mgmt.h"
14 #include "mgmtd/mgmt_memory.h"
15 #include "mgmtd/mgmt_txn.h"
16
/* Debug/error logging helpers; both prefix the calling function's name. */
#define MGMTD_TXN_DBG(fmt, ...) \
	DEBUGD(&mgmt_debug_txn, "%s:" fmt, __func__, ##__VA_ARGS__)
#define MGMTD_TXN_ERR(fmt, ...) \
	zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)

/* Transaction ref-count lock/unlock wrappers that record the call site. */
#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
24
/*
 * Events a transaction can schedule/process. The first four identify
 * the kind of request being processed; the last two are internal
 * housekeeping triggers (commit timeout and context cleanup).
 */
enum mgmt_txn_event {
	MGMTD_TXN_PROC_SETCFG = 1,   /* process pending SET_CONFIG reqs */
	MGMTD_TXN_PROC_COMMITCFG,    /* drive the COMMIT_CONFIG machinery */
	MGMTD_TXN_PROC_GETCFG,	     /* process pending GET_CONFIG reqs */
	MGMTD_TXN_PROC_GETDATA,	     /* process pending GET_DATA reqs */
	MGMTD_TXN_COMMITCFG_TIMEOUT, /* commit-config timed out */
	MGMTD_TXN_CLEANUP	     /* destroy the transaction context */
};
33
PREDECL_LIST(mgmt_txn_reqs);

/*
 * One SET_CONFIG request: a bounded set of config changes to apply to
 * a datastore, optionally followed by an implicit commit into a
 * destination datastore.
 */
struct mgmt_set_cfg_req {
	Mgmtd__DatastoreId ds_id;	/* datastore being edited */
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	uint16_t num_cfg_changes;	/* valid entries in cfg_changes[] */
	bool implicit_commit;		/* commit right after the edit */
	Mgmtd__DatastoreId dst_ds_id;	/* implicit-commit destination */
	struct mgmt_ds_ctx *dst_ds_ctx;
	struct mgmt_setcfg_stats *setcfg_stats;
};
46
/*
 * Phases a config commit walks through, in order. Both the commit as a
 * whole and each per-backend config batch track progress with these.
 */
enum mgmt_commit_phase {
	MGMTD_COMMIT_PHASE_PREPARE_CFG = 0, /* build per-client batches */
	MGMTD_COMMIT_PHASE_TXN_CREATE,	    /* create txn on backends */
	MGMTD_COMMIT_PHASE_SEND_CFG,	    /* send config data to backends */
	MGMTD_COMMIT_PHASE_APPLY_CFG,	    /* backends apply the config */
	MGMTD_COMMIT_PHASE_TXN_DELETE,	    /* delete txn on backends */
	MGMTD_COMMIT_PHASE_MAX
};
55
56 static inline const char *
57 mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
58 {
59 switch (cmt_phase) {
60 case MGMTD_COMMIT_PHASE_PREPARE_CFG:
61 return "PREP-CFG";
62 case MGMTD_COMMIT_PHASE_TXN_CREATE:
63 return "CREATE-TXN";
64 case MGMTD_COMMIT_PHASE_SEND_CFG:
65 return "SEND-CFG";
66 case MGMTD_COMMIT_PHASE_APPLY_CFG:
67 return "APPLY-CFG";
68 case MGMTD_COMMIT_PHASE_TXN_DELETE:
69 return "DELETE-TXN";
70 case MGMTD_COMMIT_PHASE_MAX:
71 return "Invalid/Unknown";
72 }
73
74 return "Invalid/Unknown";
75 }
76
PREDECL_LIST(mgmt_txn_batches);

/*
 * One batch of config data destined for a single backend client.
 * A commit's changes are split into such batches per client, bounded
 * both by MGMTD_MAX_CFG_CHANGES_IN_BATCH entries and by message buffer
 * space (buf_space_left).
 */
struct mgmt_txn_be_cfg_batch {
	struct mgmt_txn_ctx *txn;	/* owning txn (holds a ref) */
	uint64_t batch_id;		/* non-zero id, unique per commit */
	enum mgmt_be_client_id be_id;
	struct mgmt_be_client_adapter *be_adapter;
	uint xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH]; /* per-entry subscription flags */
	Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
	size_t num_cfg_data;		/* valid entries in arrays above */
	int buf_space_left;		/* remaining encodable bytes */
	enum mgmt_commit_phase comm_phase; /* phase this batch has reached */
	struct mgmt_txn_batches_item list_linkage;
};

DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);

/* Safe iteration: the loop body may free the current batch. */
#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
	frr_each_safe (mgmt_txn_batches, list, batch)
99
/*
 * State for a single COMMIT_CONFIG request: the source/destination
 * datastores, commit mode flags, phase tracking, and the per-backend
 * config batches built for it.
 */
struct mgmt_commit_cfg_req {
	Mgmtd__DatastoreId src_ds_id;	/* typically candidate */
	struct mgmt_ds_ctx *src_ds_ctx;
	Mgmtd__DatastoreId dst_ds_id;	/* typically running */
	struct mgmt_ds_ctx *dst_ds_ctx;
	uint32_t nb_txn_id;
	uint8_t validate_only : 1;	/* validate, do not apply */
	uint8_t abort : 1;		/* discard candidate changes */
	uint8_t implicit : 1;		/* triggered by a SET_CONFIG */
	uint8_t rollback : 1;		/* triggered by history rollback */

	/* Track commit phases */
	enum mgmt_commit_phase curr_phase;
	enum mgmt_commit_phase next_phase;

	/*
	 * Set of config changes to commit. This is used only
	 * when changes are NOT to be determined by comparing
	 * candidate and running DSs. This is typically used
	 * for downloading all relevant configs for a new backend
	 * client that has recently come up and connected with
	 * MGMTD.
	 */
	struct nb_config_cbs *cfg_chgs;

	/*
	 * Details on all the Backend Clients associated with
	 * this commit.
	 */
	struct mgmt_be_client_subscr_info subscr_info;

	/*
	 * List of backend batches for this commit to be validated
	 * and applied at the backend.
	 *
	 * FIXME: Need to re-think this design for the case set of
	 * validators for a given YANG data item is different from
	 * the set of notifiers for the same. We may need to have
	 * separate list of batches for VALIDATE and APPLY.
	 */
	struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
	struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
	/*
	 * The last batch added for any backend client. This is always on
	 * 'curr_batches'
	 */
	struct mgmt_txn_be_cfg_batch
		*last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
	struct hash *batches;		/* batch-id -> batch lookup */
	uint64_t next_batch_id;		/* id allocator; skips 0 */

	struct mgmt_commit_stats *cmt_stats;
};
153
/*
 * Scratch space for assembling one batch of GET_CONFIG/GET_DATA reply
 * data before sending it to the frontend.
 */
struct mgmt_get_data_reply {
	/* Buffer space for preparing data reply */
	int num_reply;		/* entries filled in the arrays below */
	int last_batch;		/* non-zero on the final reply batch */
	Mgmtd__YangDataReply data_reply;
	Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangData * reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
	char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
};
164
/*
 * One GET_CONFIG/GET_DATA request: the xpaths to fetch from a datastore
 * plus (lazily allocated) reply-assembly state.
 */
struct mgmt_get_data_req {
	Mgmtd__DatastoreId ds_id;	/* datastore to read from */
	struct mgmt_ds_ctx *ds_ctx;
	char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH]; /* owned strings */
	int num_xpaths;

	/*
	 * Buffer space for preparing reply.
	 * NOTE: Should only be malloc-ed on demand to reduce
	 * memory footprint. Freed up via mgmt_trx_req_free()
	 */
	struct mgmt_get_data_reply *reply;

	int total_reply;	/* running count of data items replied */
};
180
/*
 * A single frontend request tracked by a transaction. Which union
 * member is active is determined by req_event.
 */
struct mgmt_txn_req {
	struct mgmt_txn_ctx *txn;	/* owning txn (holds a ref) */
	enum mgmt_txn_event req_event;	/* selects the union member */
	uint64_t req_id;		/* frontend request id for replies */
	union {
		struct mgmt_set_cfg_req *set_cfg;
		struct mgmt_get_data_req *get_data;
		struct mgmt_commit_cfg_req commit_cfg; /* inline, not a ptr */
	} req;

	bool pending_be_proc;	/* on the txn's pending (backend) list */
	struct mgmt_txn_reqs_item list_linkage;
};

DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);

/* Safe iteration: the loop body may free the current request. */
#define FOREACH_TXN_REQ_IN_LIST(list, req) \
	frr_each_safe (mgmt_txn_reqs, list, req)
199
/*
 * A transaction context: one per client session, holding the scheduled
 * events and the per-type request lists being processed for it.
 */
struct mgmt_txn_ctx {
	uint64_t session_id; /* One transaction per client session */
	uint64_t txn_id;
	enum mgmt_txn_type type;

	/* struct mgmt_master *mm; */

	/* Scheduled-event handles, one per mgmt_txn_event. */
	struct event *proc_set_cfg;
	struct event *proc_comm_cfg;
	struct event *proc_get_cfg;
	struct event *proc_get_data;
	struct event *comm_cfg_timeout;
	struct event *clnup;

	/* List of backend adapters involved in this transaction */
	struct mgmt_txn_badapters_head be_adapters;

	int refcount;	/* manipulated via MGMTD_TXN_LOCK/UNLOCK */

	struct mgmt_txns_item list_linkage;

	/*
	 * List of pending set-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head set_cfg_reqs;
	/*
	 * List of pending get-config requests for a given
	 * transaction/session. Just one list for requests
	 * not processed at all. There's no backend interaction
	 * involved.
	 */
	struct mgmt_txn_reqs_head get_cfg_reqs;
	/*
	 * List of pending get-data requests for a given
	 * transaction/session Two lists, one for requests
	 * not processed at all, and one for requests that
	 * has been sent to backend for processing.
	 */
	struct mgmt_txn_reqs_head get_data_reqs;
	struct mgmt_txn_reqs_head pending_get_datas;
	/*
	 * There will always be one commit-config allowed for a given
	 * transaction/session. No need to maintain lists for it.
	 */
	struct mgmt_txn_req *commit_cfg_req;
};

DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);

/* Safe iteration over all transactions; body may free the current one. */
#define FOREACH_TXN_IN_LIST(mm, txn) \
	frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
254
255 static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
256 enum mgmt_result result,
257 const char *error_if_any);
258
259 static inline const char *
260 mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
261 {
262 if (!txn->commit_cfg_req)
263 return "None";
264
265 return (mgmt_commit_phase2str(
266 curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
267 : txn->commit_cfg_req->req.commit_cfg.next_phase));
268 }
269
/* Forward declarations for helpers defined later in this file. */
static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
			  int line);
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
			    int line);
static int
mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
			    struct mgmt_be_client_adapter *adapter);

/* Event loop and global mgmtd state this module operates on. */
static struct event_loop *mgmt_txn_tm;
static struct mgmt_master *mgmt_txn_mm;

static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
				    enum mgmt_txn_event event);

static int
mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				  struct mgmt_be_client_adapter *adapter);
287
/*
 * Allocate a new backend config batch for client 'id', append it to the
 * ongoing commit's current-batches list, register it in the commit's
 * batch hash, and take references on the transaction and (if given) the
 * adapter. Requires an active commit_cfg_req on the transaction.
 */
static struct mgmt_txn_be_cfg_batch *
mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
			 enum mgmt_be_client_id id,
			 struct mgmt_be_client_adapter *be_adapter)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch;

	cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
			   sizeof(struct mgmt_txn_be_cfg_batch));
	assert(cfg_btch);
	cfg_btch->be_id = id;

	cfg_btch->txn = txn;
	MGMTD_TXN_LOCK(txn);
	assert(txn->commit_cfg_req);
	mgmt_txn_batches_add_tail(
		&txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
		cfg_btch);
	cfg_btch->be_adapter = be_adapter;
	cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
	if (be_adapter)
		mgmt_be_adapter_lock(be_adapter);

	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
		cfg_btch;
	/* Skip id 0 so a batch-id of zero is never handed out. */
	if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
		txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
	cfg_btch->batch_id =
		txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
	hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
		 hash_alloc_intern);

	return cfg_btch;
}
322
/*
 * Free a config batch: remove it from the commit's hash and from
 * whichever batch list currently holds it, drop the adapter and
 * transaction references, and free the xpath strings duplicated into
 * it. Clears the caller's pointer. Only valid on CONFIG transactions.
 */
static void
mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
{
	size_t indx;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	MGMTD_TXN_DBG(" freeing batch-id: %" PRIu64 " txn-id %" PRIu64,
		      (*cfg_btch)->batch_id, (*cfg_btch)->txn->txn_id);

	assert((*cfg_btch)->txn
	       && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);

	cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
	hash_release(cmtcfg_req->batches, *cfg_btch);
	/* The batch may sit on either list; unlink from both. */
	mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
			     *cfg_btch);
	mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
			     *cfg_btch);

	if ((*cfg_btch)->be_adapter)
		mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);

	/* xpath strings were allocated with strdup() when batching. */
	for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
		if ((*cfg_btch)->data[indx].xpath) {
			free((*cfg_btch)->data[indx].xpath);
			(*cfg_btch)->data[indx].xpath = NULL;
		}
	}

	MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);

	XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
	*cfg_btch = NULL;
}
357
/* Hash key for a config batch: Jenkins hash of its 64-bit batch-id. */
static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
{
	const struct mgmt_txn_be_cfg_batch *batch = data;

	return jhash2((uint32_t *) &batch->batch_id,
		      sizeof(batch->batch_id) / sizeof(uint32_t), 0);
}
365
366 static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
367 {
368 const struct mgmt_txn_be_cfg_batch *batch1 = d1;
369 const struct mgmt_txn_be_cfg_batch *batch2 = d2;
370
371 return (batch1->batch_id == batch2->batch_id);
372 }
373
/* hash_clean() callback: release one batch still left in the hash. */
static void mgmt_txn_cfgbatch_hash_free(void *data)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch = data;

	mgmt_txn_cfg_batch_free(&cfg_btch);
}
380
381 static inline struct mgmt_txn_be_cfg_batch *
382 mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
383 {
384 struct mgmt_txn_be_cfg_batch key = {0};
385 struct mgmt_txn_be_cfg_batch *batch;
386
387 if (!txn->commit_cfg_req)
388 return NULL;
389
390 key.batch_id = batch_id;
391 batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
392 &key);
393
394 return batch;
395 }
396
/*
 * Free every batch (on both the current and next lists) built for
 * backend client 'id' in the ongoing commit, finalize the lists and
 * clear the last-batch shortcut.
 */
static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
					    enum mgmt_be_client_id id)
{
	struct mgmt_txn_be_cfg_batch *cfg_btch;
	struct mgmt_txn_batches_head *list;

	list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
	FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
		mgmt_txn_cfg_batch_free(&cfg_btch);

	mgmt_txn_batches_fini(list);

	list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
	FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
		mgmt_txn_cfg_batch_free(&cfg_btch);

	mgmt_txn_batches_fini(list);

	txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
}
417
/*
 * Allocate a request of the given type for a transaction, initialize
 * its type-specific state and queue it on the matching request list
 * (COMMITCFG is stored directly on the txn instead — only one is
 * allowed per transaction). Takes a reference on the transaction.
 */
static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
					       uint64_t req_id,
					       enum mgmt_txn_event req_event)
{
	struct mgmt_txn_req *txn_req;
	enum mgmt_be_client_id id;

	txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
	assert(txn_req);
	txn_req->txn = txn;
	txn_req->req_id = req_id;
	txn_req->req_event = req_event;
	txn_req->pending_be_proc = false;

	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
					       sizeof(struct mgmt_set_cfg_req));
		assert(txn_req->req.set_cfg);
		mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
		MGMTD_TXN_DBG("Added a new SETCFG req-id: %" PRIu64
			      " txn-id: %" PRIu64 ", session-id: %" PRIu64,
			      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		/* Single commit slot — not kept on any list. */
		txn->commit_cfg_req = txn_req;
		MGMTD_TXN_DBG("Added a new COMMITCFG req-id: %" PRIu64
			      " txn-id: %" PRIu64 " session-id: %" PRIu64,
			      txn_req->req_id, txn->txn_id, txn->session_id);

		/* Prepare empty batch lists for every backend client. */
		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.curr_batches[id]);
			mgmt_txn_batches_init(
				&txn_req->req.commit_cfg.next_batches[id]);
		}

		txn_req->req.commit_cfg.batches =
			hash_create(mgmt_txn_cfgbatch_hash_key,
				    mgmt_txn_cfgbatch_hash_cmp,
				    "MGMT Config Batches");
		break;
	case MGMTD_TXN_PROC_GETCFG:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
		MGMTD_TXN_DBG("Added a new GETCFG req-id: %" PRIu64
			      " txn-id: %" PRIu64 " session-id: %" PRIu64,
			      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		txn_req->req.get_data =
			XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
				sizeof(struct mgmt_get_data_req));
		assert(txn_req->req.get_data);
		mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
		MGMTD_TXN_DBG("Added a new GETDATA req-id: %" PRIu64
			      " txn-id: %" PRIu64 " session-id: %" PRIu64,
			      txn_req->req_id, txn->txn_id, txn->session_id);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		/* Internal events carry no request payload. */
		break;
	}

	MGMTD_TXN_LOCK(txn);

	return txn_req;
}
489
/*
 * Free a transaction request: release its type-specific payload
 * (including any backend cleanup a partially-completed commit needs),
 * unlink it from whichever request list holds it, drop the transaction
 * reference and clear the caller's pointer.
 */
static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
{
	int indx;
	struct mgmt_txn_reqs_head *req_list = NULL;
	struct mgmt_txn_reqs_head *pending_list = NULL;
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;

	switch ((*txn_req)->req_event) {
	case MGMTD_TXN_PROC_SETCFG:
		/* Free the value strings owned by the change set. */
		for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
		     indx++) {
			if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
				MGMTD_TXN_DBG(
					"Freeing value for %s at %p ==> '%s'",
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.xpath,
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.value,
					(*txn_req)
						->req.set_cfg->cfg_changes[indx]
						.value);
				free((void *)(*txn_req)
					     ->req.set_cfg->cfg_changes[indx]
					     .value);
			}
		}
		req_list = &(*txn_req)->txn->set_cfg_reqs;
		MGMTD_TXN_DBG("Deleting SETCFG req-id: %" PRIu64
			      " txn-id: %" PRIu64,
			      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
		break;
	case MGMTD_TXN_PROC_COMMITCFG:
		MGMTD_TXN_DBG("Deleting COMMITCFG req-id: %" PRIu64
			      " txn-id: %" PRIu64,
			      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		FOREACH_MGMTD_BE_CLIENT_ID (id) {
			/*
			 * Send TXN_DELETE to cleanup state for this
			 * transaction on backend
			 */
			if ((*txn_req)->req.commit_cfg.curr_phase >=
				    MGMTD_COMMIT_PHASE_TXN_CREATE &&
			    (*txn_req)->req.commit_cfg.curr_phase <
				    MGMTD_COMMIT_PHASE_TXN_DELETE &&
			    (*txn_req)
				    ->req.commit_cfg.subscr_info
				    .xpath_subscr[id]) {
				adapter = mgmt_be_get_adapter_by_id(id);
				if (adapter)
					mgmt_txn_send_be_txn_delete(
						(*txn_req)->txn, adapter);
			}

			/* Free all batches built for this client. */
			mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
							id);
			if ((*txn_req)->req.commit_cfg.batches) {
				hash_clean((*txn_req)->req.commit_cfg.batches,
					   mgmt_txn_cfgbatch_hash_free);
				hash_free((*txn_req)->req.commit_cfg.batches);
				(*txn_req)->req.commit_cfg.batches = NULL;
			}
		}
		break;
	case MGMTD_TXN_PROC_GETCFG:
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)
					     ->req.get_data->xpaths[indx]);
		}
		req_list = &(*txn_req)->txn->get_cfg_reqs;
		MGMTD_TXN_DBG("Deleting GETCFG req-id: %" PRIu64
			      " txn-id: %" PRIu64,
			      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);
		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_PROC_GETDATA:
		for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
		     indx++) {
			if ((*txn_req)->req.get_data->xpaths[indx])
				free((void *)(*txn_req)
					     ->req.get_data->xpaths[indx]);
		}
		/* GETDATA may be on either the fresh or the pending list. */
		pending_list = &(*txn_req)->txn->pending_get_datas;
		req_list = &(*txn_req)->txn->get_data_reqs;
		MGMTD_TXN_DBG("Deleting GETDATA req-id: %" PRIu64
			      " txn-id: %" PRIu64,
			      (*txn_req)->req_id, (*txn_req)->txn->txn_id);
		if ((*txn_req)->req.get_data->reply)
			XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
			      (*txn_req)->req.get_data->reply);
		XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
		break;
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		break;
	}

	/* Unlink from the pending list if handed to the backend,
	 * otherwise from the regular request list (if any). */
	if ((*txn_req)->pending_be_proc && pending_list) {
		mgmt_txn_reqs_del(pending_list, *txn_req);
		MGMTD_TXN_DBG("Removed req-id: %" PRIu64
			      " from pending-list (left:%zu)",
			      (*txn_req)->req_id,
			      mgmt_txn_reqs_count(pending_list));
	} else if (req_list) {
		mgmt_txn_reqs_del(req_list, *txn_req);
		MGMTD_TXN_DBG("Removed req-id: %" PRIu64
			      " from request-list (left:%zu)",
			      (*txn_req)->req_id,
			      mgmt_txn_reqs_count(req_list));
	}

	(*txn_req)->pending_be_proc = false;
	MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
	XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
	*txn_req = NULL;
}
614
/*
 * Event handler: process up to MGMTD_TXN_MAX_NUM_SETCFG_PROC pending
 * SET_CONFIG requests for this transaction. Each request edits the
 * target datastore's candidate config and either replies to the
 * frontend directly or kicks off an implicit commit. Re-schedules
 * itself if requests remain after the per-run limit.
 */
static void mgmt_txn_process_set_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	struct nb_config *nb_config;
	char err_buf[1024];
	bool error;
	int num_processed = 0;
	size_t left;
	struct mgmt_commit_stats *cmt_stats;
	int ret = 0;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);
	cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);

	MGMTD_TXN_DBG("Processing %zu SET_CONFIG requests txn-id:%" PRIu64
		      " session-id: %" PRIu64,
		      mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn->txn_id,
		      txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
		ds_ctx = txn_req->req.set_cfg->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, "No such datastore!",
				txn_req->req.set_cfg->implicit_commit);
			error = true;
			goto mgmt_txn_process_set_cfg_done;
		}

		nb_config = mgmt_ds_get_nb_config(ds_ctx);
		if (!nb_config) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR,
				"Unable to retrieve DS Config Tree!",
				txn_req->req.set_cfg->implicit_commit);
			error = true;
			goto mgmt_txn_process_set_cfg_done;
		}

		/* Apply the whole change set onto the candidate tree. */
		error = false;
		nb_candidate_edit_config_changes(
			nb_config, txn_req->req.set_cfg->cfg_changes,
			(size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
			NULL, 0, err_buf, sizeof(err_buf), &error);
		if (error) {
			mgmt_fe_send_set_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.set_cfg->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, err_buf,
				txn_req->req.set_cfg->implicit_commit);
			goto mgmt_txn_process_set_cfg_done;
		}

		if (txn_req->req.set_cfg->implicit_commit) {
			/* Implicit commit only valid as the sole request. */
			assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
			assert(txn_req->req.set_cfg->dst_ds_ctx);

			/* Lock the destination (running) DS for the commit. */
			ret = mgmt_ds_write_lock(
				txn_req->req.set_cfg->dst_ds_ctx);
			if (ret != 0) {
				MGMTD_TXN_ERR(
					"Failed to lock DS %u txn-id: %" PRIu64
					" session-id: %" PRIu64 " err: %s",
					txn_req->req.set_cfg->dst_ds_id,
					txn->txn_id, txn->session_id,
					strerror(ret));
				mgmt_txn_send_commit_cfg_reply(
					txn, MGMTD_DS_LOCK_FAILED,
					"Lock running DS before implicit commit failed!");
				goto mgmt_txn_process_set_cfg_done;
			}

			mgmt_txn_send_commit_config_req(
				txn->txn_id, txn_req->req_id,
				txn_req->req.set_cfg->ds_id,
				txn_req->req.set_cfg->ds_ctx,
				txn_req->req.set_cfg->dst_ds_id,
				txn_req->req.set_cfg->dst_ds_ctx, false,
				false, true);

			if (mm->perf_stats_en)
				gettimeofday(&cmt_stats->last_start, NULL);
			cmt_stats->commit_cnt++;
		} else if (mgmt_fe_send_set_cfg_reply(
				   txn->session_id, txn->txn_id,
				   txn_req->req.set_cfg->ds_id,
				   txn_req->req_id, MGMTD_SUCCESS, NULL, false)
			   != 0) {
			MGMTD_TXN_ERR(
				"Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
				" session-id: %" PRIu64,
				txn->txn_id, txn->session_id);
			error = true;
		}

	mgmt_txn_process_set_cfg_done:

		/*
		 * Note: The following will remove it from the list as well.
		 */
		mgmt_txn_req_free(&txn_req);

		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
			break;
	}

	/* Reschedule if the per-run processing limit cut us short. */
	left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
	if (left) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
			(int)left);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
	}
}
740
741 static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
742 enum mgmt_result result,
743 const char *error_if_any)
744 {
745 int ret = 0;
746 bool success, create_cmt_info_rec;
747
748 if (!txn->commit_cfg_req)
749 return -1;
750
751 success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
752
753 if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
754 && mgmt_fe_send_commit_cfg_reply(
755 txn->session_id, txn->txn_id,
756 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
757 txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
758 txn->commit_cfg_req->req_id,
759 txn->commit_cfg_req->req.commit_cfg.validate_only,
760 result, error_if_any)
761 != 0) {
762 MGMTD_TXN_ERR(
763 "Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
764 " session-id: %" PRIu64,
765 txn->txn_id, txn->session_id);
766 }
767
768 if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
769 && mgmt_fe_send_set_cfg_reply(
770 txn->session_id, txn->txn_id,
771 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
772 txn->commit_cfg_req->req_id,
773 success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
774 error_if_any, true)
775 != 0) {
776 MGMTD_TXN_ERR("Failed to send SET-CONFIG-REPLY txn-id: %" PRIu64
777 " session-id: %" PRIu64,
778 txn->txn_id, txn->session_id);
779 }
780
781 if (success) {
782 /* Stop the commit-timeout timer */
783 EVENT_OFF(txn->comm_cfg_timeout);
784
785 create_cmt_info_rec =
786 (result != MGMTD_NO_CFG_CHANGES &&
787 !txn->commit_cfg_req->req.commit_cfg.rollback);
788
789 /*
790 * Successful commit: Merge Src DS into Dst DS if and only if
791 * this was not a validate-only or abort request.
792 */
793 if ((txn->session_id
794 && !txn->commit_cfg_req->req.commit_cfg.validate_only
795 && !txn->commit_cfg_req->req.commit_cfg.abort)
796 || txn->commit_cfg_req->req.commit_cfg.rollback) {
797 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
798 .src_ds_ctx,
799 txn->commit_cfg_req->req.commit_cfg
800 .dst_ds_ctx,
801 create_cmt_info_rec);
802 }
803
804 /*
805 * Restore Src DS back to Dest DS only through a commit abort
806 * request.
807 */
808 if (txn->session_id
809 && txn->commit_cfg_req->req.commit_cfg.abort)
810 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
811 .dst_ds_ctx,
812 txn->commit_cfg_req->req.commit_cfg
813 .src_ds_ctx,
814 false);
815 } else {
816 /*
817 * The commit has failied. For implicit commit requests restore
818 * back the contents of the candidate DS.
819 */
820 if (txn->commit_cfg_req->req.commit_cfg.implicit)
821 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
822 .dst_ds_ctx,
823 txn->commit_cfg_req->req.commit_cfg
824 .src_ds_ctx,
825 false);
826 }
827
828 if (txn->commit_cfg_req->req.commit_cfg.rollback) {
829 ret = mgmt_ds_unlock(
830 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
831 if (ret != 0)
832 MGMTD_TXN_ERR(
833 "Failed to unlock the dst DS during rollback : %s",
834 strerror(ret));
835
836 /*
837 * Resume processing the rollback command.
838 */
839 mgmt_history_rollback_complete(success);
840 }
841
842 if (txn->commit_cfg_req->req.commit_cfg.implicit)
843 if (mgmt_ds_unlock(
844 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx)
845 != 0)
846 MGMTD_TXN_ERR(
847 "Failed to unlock the dst DS during implicit : %s",
848 strerror(ret));
849
850 txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
851 mgmt_txn_req_free(&txn->commit_cfg_req);
852
853 /*
854 * The CONFIG Transaction should be destroyed from Frontend-adapter.
855 * But in case the transaction is not triggered from a front-end session
856 * we need to cleanup by itself.
857 */
858 if (!txn->session_id)
859 mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
860
861 return 0;
862 }
863
/*
 * Move one batch from src_list to dst_list, optionally updating its
 * commit phase to 'to_phase'. The debug log must run before the phase
 * assignment — it prints the batch's old phase.
 * NOTE: cmtcfg_req is currently unused here; kept for call symmetry.
 */
static void
mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
				struct mgmt_txn_be_cfg_batch *cfg_btch,
				struct mgmt_txn_batches_head *src_list,
				struct mgmt_txn_batches_head *dst_list,
				bool update_commit_phase,
				enum mgmt_commit_phase to_phase)
{
	mgmt_txn_batches_del(src_list, cfg_btch);

	if (update_commit_phase) {
		MGMTD_TXN_DBG("Move txn-id %" PRIu64 " batch-id: %" PRIu64
			      " from '%s' --> '%s'",
			      cfg_btch->txn->txn_id, cfg_btch->batch_id,
			      mgmt_commit_phase2str(cfg_btch->comm_phase),
			      mgmt_txn_commit_phase_str(cfg_btch->txn, false));
		cfg_btch->comm_phase = to_phase;
	}

	mgmt_txn_batches_add_tail(dst_list, cfg_btch);
}
885
886 static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
887 struct mgmt_commit_cfg_req *cmtcfg_req,
888 struct mgmt_txn_batches_head *src_list,
889 struct mgmt_txn_batches_head *dst_list,
890 bool update_commit_phase,
891 enum mgmt_commit_phase to_phase)
892 {
893 struct mgmt_txn_be_cfg_batch *cfg_btch;
894
895 FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, cfg_btch) {
896 mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, cfg_btch, src_list,
897 dst_list, update_commit_phase,
898 to_phase);
899 }
900 }
901
/*
 * Advance the whole commit to the next phase if — and only if — every
 * subscribed backend client has drained its current-batches list.
 * On advancing, batches parked on the next lists are moved back to the
 * current lists and COMMITCFG processing is re-scheduled.
 * Returns -1 while at least one client is still pending, else 0.
 */
static int
mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				   struct mgmt_commit_cfg_req *cmtcfg_req)
{
	struct mgmt_txn_batches_head *curr_list, *next_list;
	enum mgmt_be_client_id id;

	MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
		      txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * Check if all clients has moved to next phase or not.
	 */
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (cmtcfg_req->subscr_info.xpath_subscr[id] &&
		    mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
			/*
			 * There's atleast once client who hasn't moved to
			 * next phase.
			 *
			 * TODO: Need to re-think this design for the case
			 * set of validators for a given YANG data item is
			 * different from the set of notifiers for the same.
			 */
			return -1;
		}
	}

	MGMTD_TXN_DBG("Move entire txn-id: %" PRIu64 " from '%s' to '%s'",
		      txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * If we are here, it means all the clients has moved to next phase.
	 * So we can move the whole commit to next phase.
	 */
	cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
	cmtcfg_req->next_phase++;
	MGMTD_TXN_DBG("Move back all config batches for txn-id: %" PRIu64
		      " from next to current branch",
		      txn->txn_id);
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		curr_list = &cmtcfg_req->curr_batches[id];
		next_list = &cmtcfg_req->next_batches[id];
		mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
					  curr_list, false, 0);
	}

	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	return 0;
}
955
/*
 * One backend adapter finished the current commit phase: move all of
 * its batches from the current to the next list, then try advancing the
 * whole commit (which succeeds once every client has done the same).
 * Returns -1 for non-CONFIG transactions or when no commit is active.
 */
static int
mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
				  struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_commit_cfg_req *cmtcfg_req;
	struct mgmt_txn_batches_head *curr_list, *next_list;

	if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
		return -1;

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	MGMTD_TXN_DBG("Move txn-id: %" PRIu64
		      " for '%s' Phase(current: '%s' next:'%s')",
		      txn->txn_id, adapter->name,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	MGMTD_TXN_DBG(
		"Move all config batches for '%s' from current to next list",
		adapter->name);
	curr_list = &cmtcfg_req->curr_batches[adapter->id];
	next_list = &cmtcfg_req->next_batches[adapter->id];
	mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
				  cmtcfg_req->next_phase);

	MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
		      txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	/*
	 * Check if all clients has moved to next phase or not.
	 */
	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
993
994 static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
995 struct nb_config_cbs *changes)
996 {
997 struct nb_config_cb *cb, *nxt;
998 struct nb_config_change *chg;
999 struct mgmt_txn_be_cfg_batch *cfg_btch;
1000 struct mgmt_be_client_subscr_info subscr_info;
1001 char *xpath = NULL, *value = NULL;
1002 char err_buf[1024];
1003 enum mgmt_be_client_id id;
1004 struct mgmt_be_client_adapter *adapter;
1005 struct mgmt_commit_cfg_req *cmtcfg_req;
1006 bool found_validator;
1007 int num_chgs = 0;
1008 int xpath_len, value_len;
1009
1010 cmtcfg_req = &txn_req->req.commit_cfg;
1011
1012 RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
1013 chg = (struct nb_config_change *)cb;
1014
1015 /*
1016 * Could have directly pointed to xpath in nb_node.
1017 * But dont want to mess with it now.
1018 * xpath = chg->cb.nb_node->xpath;
1019 */
1020 xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
1021 if (!xpath) {
1022 (void)mgmt_txn_send_commit_cfg_reply(
1023 txn_req->txn, MGMTD_INTERNAL_ERROR,
1024 "Internal error! Could not get Xpath from Ds node!");
1025 return -1;
1026 }
1027
1028 value = (char *)lyd_get_value(chg->cb.dnode);
1029 if (!value)
1030 value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
1031
1032 MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
1033 value ? value : "NIL");
1034
1035 mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info);
1036
1037 xpath_len = strlen(xpath) + 1;
1038 value_len = strlen(value) + 1;
1039 found_validator = false;
1040 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1041 if (!(subscr_info.xpath_subscr[id] &
1042 (MGMT_SUBSCR_VALIDATE_CFG |
1043 MGMT_SUBSCR_NOTIFY_CFG)))
1044 continue;
1045
1046 adapter = mgmt_be_get_adapter_by_id(id);
1047 if (!adapter)
1048 continue;
1049
1050 cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
1051 if (!cfg_btch
1052 || (cfg_btch->num_cfg_data
1053 == MGMTD_MAX_CFG_CHANGES_IN_BATCH)
1054 || (cfg_btch->buf_space_left
1055 < (xpath_len + value_len))) {
1056 /* Allocate a new config batch */
1057 cfg_btch = mgmt_txn_cfg_batch_alloc(
1058 txn_req->txn, id, adapter);
1059 }
1060
1061 cfg_btch->buf_space_left -= (xpath_len + value_len);
1062 memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
1063 &subscr_info.xpath_subscr[id],
1064 sizeof(cfg_btch->xp_subscr[0]));
1065
1066 mgmt_yang_cfg_data_req_init(
1067 &cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
1068 cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
1069 &cfg_btch->cfg_data[cfg_btch->num_cfg_data];
1070
1071 if (chg->cb.operation == NB_OP_DESTROY)
1072 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1073 .req_type =
1074 MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
1075 else
1076 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1077 .req_type =
1078 MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
1079
1080 mgmt_yang_data_init(
1081 &cfg_btch->data[cfg_btch->num_cfg_data]);
1082 cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
1083 &cfg_btch->data[cfg_btch->num_cfg_data];
1084 cfg_btch->data[cfg_btch->num_cfg_data].xpath =
1085 strdup(xpath);
1086
1087 mgmt_yang_data_value_init(
1088 &cfg_btch->value[cfg_btch->num_cfg_data]);
1089 cfg_btch->data[cfg_btch->num_cfg_data].value =
1090 &cfg_btch->value[cfg_btch->num_cfg_data];
1091 cfg_btch->value[cfg_btch->num_cfg_data].value_case =
1092 MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1093 cfg_btch->value[cfg_btch->num_cfg_data]
1094 .encoded_str_val = value;
1095 value = NULL;
1096
1097 if (subscr_info.xpath_subscr[id] &
1098 MGMT_SUBSCR_VALIDATE_CFG)
1099 found_validator = true;
1100
1101 cmtcfg_req->subscr_info.xpath_subscr[id] |=
1102 subscr_info.xpath_subscr[id];
1103 MGMTD_TXN_DBG(" -- %s, {V:%d, N:%d}, batch-id: %" PRIu64
1104 " item:%d",
1105 adapter->name,
1106 (subscr_info.xpath_subscr[id] &
1107 MGMT_SUBSCR_VALIDATE_CFG) != 0,
1108 (subscr_info.xpath_subscr[id] &
1109 MGMT_SUBSCR_NOTIFY_CFG) != 0,
1110 cfg_btch->batch_id,
1111 (int)cfg_btch->num_cfg_data);
1112
1113 cfg_btch->num_cfg_data++;
1114 num_chgs++;
1115 }
1116
1117 if (!found_validator) {
1118 snprintf(err_buf, sizeof(err_buf),
1119 "No validator module found for XPATH: '%s",
1120 xpath);
1121 MGMTD_TXN_ERR("***** %s", err_buf);
1122 }
1123
1124 free(xpath);
1125 xpath = NULL;
1126 }
1127
1128 cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
1129 if (!num_chgs) {
1130 (void)mgmt_txn_send_commit_cfg_reply(
1131 txn_req->txn, MGMTD_NO_CFG_CHANGES,
1132 "No changes found to commit!");
1133 return -1;
1134 }
1135
1136 cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
1137 return 0;
1138 }
1139
/*
 * Prepare a COMMIT_CONFIG request: validate the source/destination
 * datastores, compute (or reuse) the set of config changes, batch them
 * per backend client via mgmt_txn_create_config_batches() and schedule
 * the TXN_CREATE phase plus the commit timeout timer.
 *
 * Returns 0 on success, -1 on error. Every failure path first sends a
 * COMMIT_CONFIG_REPLY carrying the cause back to the front-end.
 */
static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
{
	struct nb_context nb_ctx;
	struct nb_config *nb_config;
	struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	int ret;
	bool del_cfg_chgs = false;

	ret = 0;
	memset(&nb_ctx, 0, sizeof(nb_ctx));
	memset(&changes, 0, sizeof(changes));
	/*
	 * A pre-computed change-set skips datastore checks and validation
	 * entirely — presumably set by a rollback-style caller; confirm
	 * against the code that populates cfg_chgs.
	 */
	if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
		cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
		del_cfg_chgs = true;
		goto mgmt_txn_prep_config_validation_done;
	}

	/* Only CANDIDATE -> RUNNING commits are supported. */
	if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
	    != MGMTD_DS_CANDIDATE) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Source DS cannot be any other than CANDIDATE!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
	    != MGMTD_DS_RUNNING) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"Destination DS cannot be any other than RUNNING!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM, "No such source datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INVALID_PARAM,
			"No such destination datastore!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.abort) {
		/*
		 * This is a commit abort request. Return back success.
		 * That should trigger a restore of Candidate datastore to
		 * Running.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		goto mgmt_txn_prepare_config_done;
	}

	nb_config = mgmt_ds_get_nb_config(
		txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
	if (!nb_config) {
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			"Unable to retrieve Commit DS Config Tree!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/*
	 * Check for diffs from scratch buffer. If found empty
	 * get the diff from Candidate DS itself.
	 */
	cfg_chgs = &nb_config->cfg_chgs;
	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This could be the case when the config is directly
		 * loaded onto the candidate DS from a file. Get the
		 * diff from a full comparison of the candidate and
		 * running DSs.
		 */
		nb_config_diff(
			mgmt_ds_get_nb_config(txn->commit_cfg_req->req
						      .commit_cfg.dst_ds_ctx),
			nb_config, &changes);
		cfg_chgs = &changes;
		del_cfg_chgs = true;
	}

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_NO_CFG_CHANGES,
			"No changes found to be committed!");
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
				      ->validate_start,
			     NULL);
	/*
	 * Validate YANG contents of the source DS and get the diff
	 * between source and destination DS contents.
	 */
	char err_buf[1024] = {0};
	nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
	nb_ctx.user = (void *)txn;

	ret = nb_candidate_validate_yang(nb_config, false, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		/* Substitute a generic message when err_buf is empty/blank. */
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}
	/*
	 * Perform application level validations locally on the MGMTD
	 * process by calling application specific validation routines
	 * loaded onto MGMTD process using libraries.
	 */
	ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
					 sizeof(err_buf) - 1);
	if (ret != NB_OK) {
		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
						     err_buf);
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
		/*
		 * This was a validate-only COMMIT request return success.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		goto mgmt_txn_prepare_config_done;
	}
#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */

mgmt_txn_prep_config_validation_done:

	if (mm->perf_stats_en)
		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
				      ->prep_cfg_start,
			     NULL);

	/*
	 * Iterate over the diffs and create ordered batches of config
	 * commands to be validated.
	 */
	ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
	if (ret != 0) {
		ret = -1;
		goto mgmt_txn_prepare_config_done;
	}

	/* Move to the Transaction Create Phase */
	txn->commit_cfg_req->req.commit_cfg.curr_phase =
		MGMTD_COMMIT_PHASE_TXN_CREATE;
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	/*
	 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
	 * backend.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
mgmt_txn_prepare_config_done:

	if (cfg_chgs && del_cfg_chgs)
		nb_config_diff_del_changes(cfg_chgs);

	return ret;
}
1327
/*
 * Send TXN_CREATE_REQ to every backend client subscribed to any xpath of
 * this commit, and mark that client's current config batches as being in
 * the TXN_CREATE phase. The commit stays in this phase until TXN_REPLYs
 * arrive from the backends.
 *
 * Returns 0 on success, -1 if sending to any backend failed (an error
 * COMMIT_CONFIG_REPLY is sent to the front-end first).
 */
static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
{
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	struct mgmt_txn_be_cfg_batch *cfg_btch;

	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (cmtcfg_req->subscr_info.xpath_subscr[id]) {
			/*
			 * NOTE(review): unlike mgmt_txn_send_be_cfg_apply(),
			 * the adapter returned here is not NULL-checked
			 * before use — confirm mgmt_be_create_txn() tolerates
			 * a NULL adapter if the client disconnected.
			 */
			adapter = mgmt_be_get_adapter_by_id(id);
			if (mgmt_be_create_txn(adapter, txn->txn_id)
			    != 0) {
				(void)mgmt_txn_send_commit_cfg_reply(
					txn, MGMTD_INTERNAL_ERROR,
					"Could not send TXN_CREATE to backend adapter");
				return -1;
			}

			FOREACH_TXN_CFG_BATCH_IN_LIST (
				&txn->commit_cfg_req->req.commit_cfg
					 .curr_batches[id],
				cfg_btch)
				cfg_btch->comm_phase =
					MGMTD_COMMIT_PHASE_TXN_CREATE;
		}
	}

	txn->commit_cfg_req->req.commit_cfg.next_phase =
		MGMTD_COMMIT_PHASE_SEND_CFG;

	/*
	 * Dont move the commit to next phase yet. Wait for the TXN_REPLY to
	 * come back.
	 */

	MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
		      " Phase(Current:'%s', Next: '%s')",
		      txn->txn_id, txn->session_id,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	return 0;
}
1374
1375 static int
1376 mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
1377 struct mgmt_be_client_adapter *adapter)
1378 {
1379 struct mgmt_commit_cfg_req *cmtcfg_req;
1380 struct mgmt_txn_be_cfg_batch *cfg_btch;
1381 struct mgmt_be_cfgreq cfg_req = {0};
1382 size_t num_batches, indx;
1383
1384 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1385
1386 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1387 assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id]);
1388
1389 indx = 0;
1390 num_batches =
1391 mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
1392 FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
1393 cfg_btch) {
1394 assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
1395
1396 cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
1397 cfg_req.num_reqs = cfg_btch->num_cfg_data;
1398 indx++;
1399 if (mgmt_be_send_cfg_data_create_req(
1400 adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
1401 indx == num_batches ? true : false)
1402 != 0) {
1403 (void)mgmt_txn_send_commit_cfg_reply(
1404 txn, MGMTD_INTERNAL_ERROR,
1405 "Internal Error! Could not send config data to backend!");
1406 MGMTD_TXN_ERR(
1407 "Could not send CFGDATA_CREATE txn-id: %" PRIu64
1408 " batch-id: %" PRIu64 " to client '%s",
1409 txn->txn_id, cfg_btch->batch_id, adapter->name);
1410 return -1;
1411 }
1412
1413 cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
1414 mgmt_move_txn_cfg_batch_to_next(
1415 cmtcfg_req, cfg_btch,
1416 &cmtcfg_req->curr_batches[adapter->id],
1417 &cmtcfg_req->next_batches[adapter->id], true,
1418 MGMTD_COMMIT_PHASE_SEND_CFG);
1419 }
1420
1421 /*
1422 * This could ne the last Backend Client to send CFGDATA_CREATE_REQ to.
1423 * Try moving the commit to next phase.
1424 */
1425 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
1426
1427 return 0;
1428 }
1429
1430 static int
1431 mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
1432 struct mgmt_be_client_adapter *adapter)
1433 {
1434 struct mgmt_commit_cfg_req *cmtcfg_req;
1435 struct mgmt_txn_be_cfg_batch *cfg_btch;
1436
1437 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1438
1439 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1440 if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id]) {
1441 adapter = mgmt_be_get_adapter_by_id(adapter->id);
1442 (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
1443
1444 FOREACH_TXN_CFG_BATCH_IN_LIST (
1445 &txn->commit_cfg_req->req.commit_cfg
1446 .curr_batches[adapter->id],
1447 cfg_btch)
1448 cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
1449 }
1450
1451 return 0;
1452 }
1453
1454 static void mgmt_txn_cfg_commit_timedout(struct event *thread)
1455 {
1456 struct mgmt_txn_ctx *txn;
1457
1458 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1459 assert(txn);
1460
1461 assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
1462
1463 if (!txn->commit_cfg_req)
1464 return;
1465
1466 MGMTD_TXN_ERR("Backend timeout txn-id: %" PRIu64 " aborting commit",
1467 txn->txn_id);
1468
1469 /*
1470 * Send a COMMIT_CONFIG_REPLY with failure.
1471 * NOTE: The transaction cleanup will be triggered from Front-end
1472 * adapter.
1473 */
1474 mgmt_txn_send_commit_cfg_reply(
1475 txn, MGMTD_INTERNAL_ERROR,
1476 "Operation on the backend timed-out. Aborting commit!");
1477 }
1478
/*
 * Send CFG_APPLY_REQs to all the backend clients.
 *
 * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
 * for all backend clients have been generated. Please see
 * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
 * for details.
 */
/*
 * Send CFG_APPLY_REQ to every backend client subscribed with NOTIFY_CFG,
 * mark its batches as APPLY_CFG phase and arm the next phase (TXN_DELETE).
 * For a validate-only commit, short-circuit with a success reply instead.
 *
 * Returns 0 on success, -1 on a missing adapter or send failure (an error
 * COMMIT_CONFIG_REPLY is sent to the front-end on send failure).
 */
static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
{
	enum mgmt_be_client_id id;
	struct mgmt_be_client_adapter *adapter;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	struct mgmt_txn_batches_head *btch_list;
	struct mgmt_txn_be_cfg_batch *cfg_btch;

	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);

	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	if (cmtcfg_req->validate_only) {
		/*
		 * If this was a validate-only COMMIT request return success.
		 */
		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
						     NULL);
		return 0;
	}

	FOREACH_MGMTD_BE_CLIENT_ID (id) {
		if (cmtcfg_req->subscr_info.xpath_subscr[id] &
		    MGMT_SUBSCR_NOTIFY_CFG) {
			adapter = mgmt_be_get_adapter_by_id(id);
			if (!adapter)
				return -1;

			btch_list = &cmtcfg_req->curr_batches[id];
			if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
			    != 0) {
				(void)mgmt_txn_send_commit_cfg_reply(
					txn, MGMTD_INTERNAL_ERROR,
					"Could not send CFG_APPLY_REQ to backend adapter");
				return -1;
			}
			cmtcfg_req->cmt_stats->last_num_apply_reqs++;

			/* Running config is changing; adapter is out of sync
			 * until it re-syncs. */
			UNSET_FLAG(adapter->flags,
				   MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);

			FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
				cfg_btch->comm_phase =
					MGMTD_COMMIT_PHASE_APPLY_CFG;
		}
	}

	txn->commit_cfg_req->req.commit_cfg.next_phase =
		MGMTD_COMMIT_PHASE_TXN_DELETE;

	/*
	 * Dont move the commit to next phase yet. Wait for all VALIDATE_REPLIES
	 * to come back.
	 */

	return 0;
}
1543
/*
 * Event handler driving the COMMIT_CONFIG state machine. Dispatches the
 * action for the current phase; phase transitions themselves happen when
 * backend replies arrive (see mgmt_try_move_commit_to_next_phase()).
 */
static void mgmt_txn_process_commit_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG("Processing COMMIT_CONFIG for txn-id: %" PRIu64
		      " session-id: %" PRIu64
		      " Phase(Current:'%s', Next: '%s')",
		      txn->txn_id, txn->session_id,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	switch (cmtcfg_req->curr_phase) {
	case MGMTD_COMMIT_PHASE_PREPARE_CFG:
		mgmt_txn_prepare_config(txn);
		break;
	case MGMTD_COMMIT_PHASE_TXN_CREATE:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
				     NULL);
		/*
		 * Send TXN_CREATE_REQ to all Backend now.
		 */
		mgmt_txn_send_be_txn_create(txn);
		break;
	case MGMTD_COMMIT_PHASE_SEND_CFG:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
				     NULL);
		/*
		 * All CFGDATA_CREATE_REQ should have been sent to
		 * Backend by now. Nothing to dispatch here — both build
		 * variants only assert and log.
		 */
#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
		MGMTD_TXN_DBG(
			"txn-id: %" PRIu64 " session-id: %" PRIu64
			" trigger sending CFG_VALIDATE_REQ to all backend clients",
			txn->txn_id, txn->session_id);
#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
		MGMTD_TXN_DBG(
			"txn-id: %" PRIu64 " session-id: %" PRIu64
			" trigger sending CFG_APPLY_REQ to all backend clients",
			txn->txn_id, txn->session_id);
#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
		break;
	case MGMTD_COMMIT_PHASE_APPLY_CFG:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
				     NULL);
		/*
		 * We should have received successful CFG_VALIDATE_REPLY from
		 * all concerned Backend Clients by now. Send out the
		 * CFG_APPLY_REQs now.
		 */
		mgmt_txn_send_be_cfg_apply(txn);
		break;
	case MGMTD_COMMIT_PHASE_TXN_DELETE:
		if (mm->perf_stats_en)
			gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
				     NULL);
		/*
		 * We would have sent TXN_DELETE_REQ to all backend by now.
		 * Send a successful CONFIG_COMMIT_REPLY back to front-end.
		 * NOTE: This should also trigger DS merge/unlock and Txn
		 * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
		 * more details.
		 */
		EVENT_OFF(txn->comm_cfg_timeout);
		mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
		break;
	case MGMTD_COMMIT_PHASE_MAX:
		break;
	}

	MGMTD_TXN_DBG("txn-id:%" PRIu64 " session-id: %" PRIu64
		      " phase updated to (current:'%s', next: '%s')",
		      txn->txn_id, txn->session_id,
		      mgmt_txn_commit_phase_str(txn, true),
		      mgmt_txn_commit_phase_str(txn, false));
}
1631
1632 static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
1633 {
1634 size_t indx;
1635
1636 for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
1637 get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
1638 }
1639
1640 static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
1641 {
1642 int indx;
1643
1644 for (indx = 0; indx < get_reply->num_reply; indx++) {
1645 if (get_reply->reply_xpathp[indx]) {
1646 free(get_reply->reply_xpathp[indx]);
1647 get_reply->reply_xpathp[indx] = 0;
1648 }
1649 if (get_reply->reply_data[indx].xpath) {
1650 zlog_debug("%s free xpath %p", __func__,
1651 get_reply->reply_data[indx].xpath);
1652 free(get_reply->reply_data[indx].xpath);
1653 get_reply->reply_data[indx].xpath = 0;
1654 }
1655 }
1656
1657 get_reply->num_reply = 0;
1658 memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
1659 memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
1660 memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
1661
1662 memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
1663
1664 mgmt_init_get_data_reply(get_reply);
1665 }
1666
1667 static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
1668 {
1669 if (get_data->reply)
1670 mgmt_reset_get_data_reply(get_data->reply);
1671 }
1672
/*
 * Flush the replies accumulated so far for a GET_CONFIG/GET_DATA request
 * to the front-end, then reset the reply buffer for the next batch.
 * next_indx of -1 signals the final batch to the front-end.
 */
static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
					    struct mgmt_get_data_req *get_req)
{
	struct mgmt_get_data_reply *get_reply;
	Mgmtd__YangDataReply *data_reply;

	get_reply = get_req->reply;
	if (!get_reply)
		return;

	data_reply = &get_reply->data_reply;
	mgmt_yang_data_reply_init(data_reply);
	data_reply->n_data = get_reply->num_reply;
	data_reply->data = get_reply->reply_datap;
	data_reply->next_indx =
		(!get_reply->last_batch ? get_req->total_reply : -1);

	MGMTD_TXN_DBG("Sending %zu Get-Config/Data replies next-index:%" PRId64,
		      data_reply->n_data, data_reply->next_indx);

	/* Pick the reply type matching the original request event. */
	switch (txn_req->req_event) {
	case MGMTD_TXN_PROC_GETCFG:
		if (mgmt_fe_send_get_cfg_reply(
			    txn_req->txn->session_id, txn_req->txn->txn_id,
			    get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
			    data_reply, NULL)
		    != 0) {
			MGMTD_TXN_ERR(
				"Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
				" session-id: %" PRIu64 " req-id: %" PRIu64,
				txn_req->txn->txn_id, txn_req->txn->session_id,
				txn_req->req_id);
		}
		break;
	case MGMTD_TXN_PROC_GETDATA:
		if (mgmt_fe_send_get_data_reply(
			    txn_req->txn->session_id, txn_req->txn->txn_id,
			    get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
			    data_reply, NULL)
		    != 0) {
			MGMTD_TXN_ERR(
				"Failed to send GET-DATA-REPLY txn-id: %" PRIu64
				" session-id: %" PRIu64 " req-id: %" PRIu64,
				txn_req->txn->txn_id, txn_req->txn->session_id,
				txn_req->req_id);
		}
		break;
	case MGMTD_TXN_PROC_SETCFG:
	case MGMTD_TXN_PROC_COMMITCFG:
	case MGMTD_TXN_COMMITCFG_TIMEOUT:
	case MGMTD_TXN_CLEANUP:
		MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
			      txn_req->req_event);
		break;
	}

	/*
	 * Reset reply buffer for next reply.
	 */
	mgmt_reset_get_data_reply_buf(get_req);
}
1734
1735 static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
1736 const char *xpath,
1737 struct lyd_node *node,
1738 struct nb_node *nb_node,
1739 void *ctx)
1740 {
1741 struct mgmt_txn_req *txn_req;
1742 struct mgmt_get_data_req *get_req;
1743 struct mgmt_get_data_reply *get_reply;
1744 Mgmtd__YangData *data;
1745 Mgmtd__YangDataValue *data_value;
1746
1747 txn_req = (struct mgmt_txn_req *)ctx;
1748 if (!txn_req)
1749 return;
1750
1751 if (!(node->schema->nodetype & LYD_NODE_TERM))
1752 return;
1753
1754 assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
1755 || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
1756
1757 get_req = txn_req->req.get_data;
1758 assert(get_req);
1759 get_reply = get_req->reply;
1760 data = &get_reply->reply_data[get_reply->num_reply];
1761 data_value = &get_reply->reply_value[get_reply->num_reply];
1762
1763 mgmt_yang_data_init(data);
1764 data->xpath = strdup(xpath);
1765 mgmt_yang_data_value_init(data_value);
1766 data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1767 data_value->encoded_str_val = (char *)lyd_get_value(node);
1768 data->value = data_value;
1769
1770 get_reply->num_reply++;
1771 get_req->total_reply++;
1772 MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
1773 data->xpath, data_value->encoded_str_val);
1774
1775 if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
1776 mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
1777 }
1778
1779 static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
1780 struct mgmt_txn_req *txn_req,
1781 struct mgmt_ds_ctx *ds_ctx)
1782 {
1783 int indx;
1784 struct mgmt_get_data_req *get_data;
1785 struct mgmt_get_data_reply *get_reply;
1786
1787 get_data = txn_req->req.get_data;
1788
1789 if (!get_data->reply) {
1790 get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
1791 sizeof(struct mgmt_get_data_reply));
1792 if (!get_data->reply) {
1793 mgmt_fe_send_get_cfg_reply(
1794 txn->session_id, txn->txn_id,
1795 get_data->ds_id, txn_req->req_id,
1796 MGMTD_INTERNAL_ERROR, NULL,
1797 "Internal error: Unable to allocate reply buffers!");
1798 goto mgmt_txn_get_config_failed;
1799 }
1800 }
1801
1802 /*
1803 * Read data contents from the DS and respond back directly.
1804 * No need to go to backend for getting data.
1805 */
1806 get_reply = get_data->reply;
1807 for (indx = 0; indx < get_data->num_xpaths; indx++) {
1808 MGMTD_TXN_DBG("Trying to get all data under '%s'",
1809 get_data->xpaths[indx]);
1810 mgmt_init_get_data_reply(get_reply);
1811 /*
1812 * mgmt_ds_iter_data works on path prefixes, but the user may
1813 * want to also use an xpath regexp we need to add this
1814 * functionality.
1815 */
1816 if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
1817 mgmt_txn_iter_and_send_get_cfg_reply,
1818 (void *)txn_req) == -1) {
1819 MGMTD_TXN_DBG("Invalid Xpath '%s",
1820 get_data->xpaths[indx]);
1821 mgmt_fe_send_get_cfg_reply(
1822 txn->session_id, txn->txn_id,
1823 get_data->ds_id, txn_req->req_id,
1824 MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
1825 goto mgmt_txn_get_config_failed;
1826 }
1827 MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
1828 get_reply->num_reply, get_data->xpaths[indx]);
1829 get_reply->last_batch = true;
1830 mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
1831 }
1832
1833 mgmt_txn_get_config_failed:
1834
1835 /*
1836 * Delete the txn request. It will also remove it from request
1837 * list.
1838 */
1839 mgmt_txn_req_free(&txn_req);
1840
1841 return 0;
1842 }
1843
/*
 * Event handler: process up to MGMTD_TXN_MAX_NUM_GETCFG_PROC pending
 * GET_CONFIG requests of a transaction, re-scheduling itself if more
 * remain afterwards.
 */
static void mgmt_txn_process_get_cfg(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	int num_processed = 0;
	bool error;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG("Processing %zu GET_CONFIG requests txn-id: %" PRIu64
		      " session-id: %" PRIu64,
		      mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn->txn_id,
		      txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
		ds_ctx = txn_req->req.get_data->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_get_cfg_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"No such datastore!");
			error = true;
			goto mgmt_txn_process_get_cfg_done;
		}

		if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
			MGMTD_TXN_ERR(
				"Unable to retrieve config from DS %d txn-id: %" PRIu64
				" session-id: %" PRIu64 " req-id: %" PRIu64,
				txn_req->req.get_data->ds_id, txn->txn_id,
				txn->session_id, txn_req->req_id);
			error = true;
		}

	mgmt_txn_process_get_cfg_done:

		if (error) {
			/*
			 * Delete the txn request.
			 * Note: The following will remove it from the list
			 * as well.
			 */
			mgmt_txn_req_free(&txn_req);
		}

		/*
		 * Else the transaction would have been already deleted or
		 * moved to corresponding pending list. No need to delete it.
		 */
		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
			break;
	}

	/* More left over — yield and reschedule rather than hog the loop. */
	if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
	}
}
1910
/*
 * Event handler: process up to MGMTD_TXN_MAX_NUM_GETDATA_PROC pending
 * GET_DATA requests of a transaction. Config datastores are answered
 * locally via mgmt_txn_get_config(); operational-DS queries are not yet
 * supported and are rejected. Reschedules itself if requests remain.
 */
static void mgmt_txn_process_get_data(struct event *thread)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_ds_ctx *ds_ctx;
	int num_processed = 0;
	bool error;

	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
	assert(txn);

	MGMTD_TXN_DBG("Processing %zu GET_DATA requests txn-id: %" PRIu64
		      " session-id: %" PRIu64,
		      mgmt_txn_reqs_count(&txn->get_data_reqs), txn->txn_id,
		      txn->session_id);

	FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
		error = false;
		assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
		ds_ctx = txn_req->req.get_data->ds_ctx;
		if (!ds_ctx) {
			mgmt_fe_send_get_data_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"No such datastore!");
			error = true;
			goto mgmt_txn_process_get_data_done;
		}

		if (mgmt_ds_is_config(ds_ctx)) {
			if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
			    != 0) {
				MGMTD_TXN_ERR(
					"Unable to retrieve config from DS %d txn-id %" PRIu64
					" session-id: %" PRIu64
					" req-id: %" PRIu64,
					txn_req->req.get_data->ds_id,
					txn->txn_id, txn->session_id,
					txn_req->req_id);
				error = true;
			}
		} else {
			/*
			 * TODO: Trigger GET procedures for Backend
			 * For now return back error.
			 */
			mgmt_fe_send_get_data_reply(
				txn->session_id, txn->txn_id,
				txn_req->req.get_data->ds_id, txn_req->req_id,
				MGMTD_INTERNAL_ERROR, NULL,
				"GET-DATA on Oper DS is not supported yet!");
			error = true;
		}

	mgmt_txn_process_get_data_done:

		if (error) {
			/*
			 * Delete the txn request.
			 * Note: The following will remove it from the list
			 * as well.
			 */
			mgmt_txn_req_free(&txn_req);
		}

		/*
		 * Else the transaction would have been already deleted or
		 * moved to corresponding pending list. No need to delete it.
		 */
		num_processed++;
		if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
			break;
	}

	/* More left over — yield and reschedule rather than hog the loop. */
	if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
		MGMTD_TXN_DBG(
			"Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
			num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
	}
}
1993
1994 static struct mgmt_txn_ctx *
1995 mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
1996 enum mgmt_txn_type type)
1997 {
1998 struct mgmt_txn_ctx *txn;
1999
2000 FOREACH_TXN_IN_LIST (cm, txn) {
2001 if (txn->session_id == session_id && txn->type == type)
2002 return txn;
2003 }
2004
2005 return NULL;
2006 }
2007
/*
 * Find or create a transaction for a front-end session. Only one CONFIG
 * transaction may exist system-wide: if one is active it is returned only
 * to its owning session, otherwise NULL. A newly created transaction is
 * list-linked, hashed by txn-id and holds one initial reference.
 */
static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
						enum mgmt_txn_type type)
{
	struct mgmt_txn_ctx *txn = NULL;

	/*
	 * For 'CONFIG' transaction check if one is already created
	 * or not.
	 */
	if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
		if (mgmt_config_txn_in_progress() == session_id)
			txn = mgmt_txn_mm->cfg_txn;
		goto mgmt_create_txn_done;
	}

	txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
					     type);
	if (!txn) {
		txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
		assert(txn);

		txn->session_id = session_id;
		txn->type = type;
		mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
		mgmt_txn_reqs_init(&txn->set_cfg_reqs);
		mgmt_txn_reqs_init(&txn->get_cfg_reqs);
		mgmt_txn_reqs_init(&txn->get_data_reqs);
		mgmt_txn_reqs_init(&txn->pending_get_datas);
		txn->commit_cfg_req = NULL;
		txn->refcount = 0;
		/* Skip txn-id 0 so it can serve as an invalid/unset value. */
		if (!mgmt_txn_mm->next_txn_id)
			mgmt_txn_mm->next_txn_id++;
		txn->txn_id = mgmt_txn_mm->next_txn_id++;
		hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);

		MGMTD_TXN_DBG("Added new '%s' txn-id: %" PRIu64,
			      mgmt_txn_type2str(type), txn->txn_id);

		if (type == MGMTD_TXN_TYPE_CONFIG)
			mgmt_txn_mm->cfg_txn = txn;

		MGMTD_TXN_LOCK(txn);
	}

mgmt_create_txn_done:
	return txn;
}
2055
/*
 * Release one reference on the transaction; the final unlock performs
 * the full teardown and frees it (see mgmt_txn_unlock()). The caller's
 * pointer is NULLed in all cases.
 */
static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
{
	MGMTD_TXN_UNLOCK(txn);
}
2060
2061 static unsigned int mgmt_txn_hash_key(const void *data)
2062 {
2063 const struct mgmt_txn_ctx *txn = data;
2064
2065 return jhash2((uint32_t *) &txn->txn_id,
2066 sizeof(txn->txn_id) / sizeof(uint32_t), 0);
2067 }
2068
2069 static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
2070 {
2071 const struct mgmt_txn_ctx *txn1 = d1;
2072 const struct mgmt_txn_ctx *txn2 = d2;
2073
2074 return (txn1->txn_id == txn2->txn_id);
2075 }
2076
/*
 * Hash-table free callback: drops the entry's reference, which frees
 * the transaction when it is the last one held.
 */
static void mgmt_txn_hash_free(void *data)
{
	struct mgmt_txn_ctx *txn = data;

	mgmt_txn_delete(&txn);
}
2083
2084 static void mgmt_txn_hash_init(void)
2085 {
2086 if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
2087 return;
2088
2089 mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
2090 mgmt_txn_hash_cmp,
2091 "MGMT Transactions");
2092 }
2093
2094 static void mgmt_txn_hash_destroy(void)
2095 {
2096 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2097 return;
2098
2099 hash_clean(mgmt_txn_mm->txn_hash,
2100 mgmt_txn_hash_free);
2101 hash_free(mgmt_txn_mm->txn_hash);
2102 mgmt_txn_mm->txn_hash = NULL;
2103 }
2104
2105 static inline struct mgmt_txn_ctx *
2106 mgmt_txn_id2ctx(uint64_t txn_id)
2107 {
2108 struct mgmt_txn_ctx key = {0};
2109 struct mgmt_txn_ctx *txn;
2110
2111 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2112 return NULL;
2113
2114 key.txn_id = txn_id;
2115 txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
2116
2117 return txn;
2118 }
2119
/*
 * Take a reference on the transaction. 'file'/'line' identify the call
 * site (supplied via the MGMTD_TXN_LOCK macro) for debug tracing.
 */
static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
			  int line)
{
	txn->refcount++;
	MGMTD_TXN_DBG("%s:%d --> Lock %s txn-id: %" PRIu64 " refcnt: %d", file,
		      line, mgmt_txn_type2str(txn->type), txn->txn_id,
		      txn->refcount);
}
2128
/*
 * Drop one reference from the transaction. When the refcount reaches
 * zero the transaction is fully torn down: scheduled events are
 * cancelled, it is removed from the hash and the global list, and
 * freed. The caller's pointer is always set to NULL.
 */
static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
			    int line)
{
	assert(*txn && (*txn)->refcount);

	(*txn)->refcount--;
	MGMTD_TXN_DBG("%s:%d --> Unlock %s txn-id: %" PRIu64 " refcnt: %d",
		      file, line, mgmt_txn_type2str((*txn)->type),
		      (*txn)->txn_id, (*txn)->refcount);
	if (!(*txn)->refcount) {
		/* Clear the global CONFIG-txn pointer if it is this txn. */
		if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
			if (mgmt_txn_mm->cfg_txn == *txn)
				mgmt_txn_mm->cfg_txn = NULL;
		/* Cancel any still-pending processing/timeout events. */
		EVENT_OFF((*txn)->proc_get_cfg);
		EVENT_OFF((*txn)->proc_get_data);
		EVENT_OFF((*txn)->proc_comm_cfg);
		EVENT_OFF((*txn)->comm_cfg_timeout);
		hash_release(mgmt_txn_mm->txn_hash, *txn);
		mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);

		MGMTD_TXN_DBG("Deleted %s txn-id: %" PRIu64
			      " session-id: %" PRIu64,
			      mgmt_txn_type2str((*txn)->type), (*txn)->txn_id,
			      (*txn)->session_id);

		XFREE(MTYPE_MGMTD_TXN, *txn);
	}

	*txn = NULL;
}
2159
/*
 * Tear down a single transaction by dropping the reference taken at
 * creation time; frees it when no other references remain.
 */
static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
{
	/* TODO: Any other cleanup applicable */

	mgmt_txn_delete(txn);
}
2166
2167 static void
2168 mgmt_txn_cleanup_all_txns(void)
2169 {
2170 struct mgmt_txn_ctx *txn;
2171
2172 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2173 return;
2174
2175 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
2176 mgmt_txn_cleanup_txn(&txn);
2177 }
2178
/* Event-loop callback: clean up the transaction passed as event arg. */
static void mgmt_txn_cleanup(struct event *thread)
{
	struct mgmt_txn_ctx *txn = EVENT_ARG(thread);

	assert(txn);
	mgmt_txn_cleanup_txn(&txn);
}
2188
2189 static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
2190 enum mgmt_txn_event event)
2191 {
2192 struct timeval tv = {.tv_sec = 0,
2193 .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};
2194
2195 assert(mgmt_txn_mm && mgmt_txn_tm);
2196
2197 switch (event) {
2198 case MGMTD_TXN_PROC_SETCFG:
2199 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
2200 txn, &tv, &txn->proc_set_cfg);
2201 break;
2202 case MGMTD_TXN_PROC_COMMITCFG:
2203 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
2204 txn, &tv, &txn->proc_comm_cfg);
2205 break;
2206 case MGMTD_TXN_PROC_GETCFG:
2207 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
2208 txn, &tv, &txn->proc_get_cfg);
2209 break;
2210 case MGMTD_TXN_PROC_GETDATA:
2211 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
2212 txn, &tv, &txn->proc_get_data);
2213 break;
2214 case MGMTD_TXN_COMMITCFG_TIMEOUT:
2215 event_add_timer_msec(mgmt_txn_tm,
2216 mgmt_txn_cfg_commit_timedout, txn,
2217 MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
2218 &txn->comm_cfg_timeout);
2219 break;
2220 case MGMTD_TXN_CLEANUP:
2221 tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
2222 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
2223 &txn->clnup);
2224 }
2225 }
2226
/*
 * One-time initialization of the transaction subsystem. 'mm' is the
 * global mgmtd master and 'tm' the event loop used for scheduling
 * transaction processing. Must be called exactly once; a second call
 * asserts. Always returns 0.
 */
int mgmt_txn_init(struct mgmt_master *mm, struct event_loop *tm)
{
	if (mgmt_txn_mm || mgmt_txn_tm)
		assert(!"MGMTD TXN: Call txn_init() only once");

	mgmt_txn_mm = mm;
	mgmt_txn_tm = tm;
	mgmt_txns_init(&mm->txn_list);
	mgmt_txn_hash_init();
	assert(!mm->cfg_txn);
	mm->cfg_txn = NULL;

	return 0;
}
2241
/*
 * Tear down the transaction subsystem: clean up all live transactions
 * and destroy the txn-id hash table.
 */
void mgmt_txn_destroy(void)
{
	mgmt_txn_cleanup_all_txns();
	mgmt_txn_hash_destroy();
}
2247
2248 uint64_t mgmt_config_txn_in_progress(void)
2249 {
2250 if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
2251 return mgmt_txn_mm->cfg_txn->session_id;
2252
2253 return MGMTD_SESSION_ID_NONE;
2254 }
2255
2256 uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
2257 {
2258 struct mgmt_txn_ctx *txn;
2259
2260 txn = mgmt_txn_create_new(session_id, type);
2261 return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
2262 }
2263
2264 bool mgmt_txn_id_is_valid(uint64_t txn_id)
2265 {
2266 return mgmt_txn_id2ctx(txn_id) ? true : false;
2267 }
2268
2269 void mgmt_destroy_txn(uint64_t *txn_id)
2270 {
2271 struct mgmt_txn_ctx *txn;
2272
2273 txn = mgmt_txn_id2ctx(*txn_id);
2274 if (!txn)
2275 return;
2276
2277 mgmt_txn_delete(&txn);
2278 *txn_id = MGMTD_TXN_ID_NONE;
2279 }
2280
2281 enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
2282 {
2283 struct mgmt_txn_ctx *txn;
2284
2285 txn = mgmt_txn_id2ctx(txn_id);
2286 if (!txn)
2287 return MGMTD_TXN_TYPE_NONE;
2288
2289 return txn->type;
2290 }
2291
2292 int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
2293 Mgmtd__DatastoreId ds_id,
2294 struct mgmt_ds_ctx *ds_ctx,
2295 Mgmtd__YangCfgDataReq **cfg_req,
2296 size_t num_req, bool implicit_commit,
2297 Mgmtd__DatastoreId dst_ds_id,
2298 struct mgmt_ds_ctx *dst_ds_ctx)
2299 {
2300 struct mgmt_txn_ctx *txn;
2301 struct mgmt_txn_req *txn_req;
2302 size_t indx;
2303 uint16_t *num_chgs;
2304 struct nb_cfg_change *cfg_chg;
2305
2306 txn = mgmt_txn_id2ctx(txn_id);
2307 if (!txn)
2308 return -1;
2309
2310 if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
2311 MGMTD_TXN_ERR(
2312 "For implicit commit config only one SETCFG-REQ can be allowed!");
2313 return -1;
2314 }
2315
2316 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
2317 txn_req->req.set_cfg->ds_id = ds_id;
2318 txn_req->req.set_cfg->ds_ctx = ds_ctx;
2319 num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
2320 for (indx = 0; indx < num_req; indx++) {
2321 cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
2322
2323 if (cfg_req[indx]->req_type
2324 == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
2325 cfg_chg->operation = NB_OP_DESTROY;
2326 else if (cfg_req[indx]->req_type
2327 == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
2328 cfg_chg->operation =
2329 mgmt_ds_find_data_node_by_xpath(
2330 ds_ctx, cfg_req[indx]->data->xpath)
2331 ? NB_OP_MODIFY
2332 : NB_OP_CREATE;
2333 else
2334 continue;
2335
2336 MGMTD_TXN_DBG(
2337 "XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
2338 (cfg_req[indx]->data->value
2339 && cfg_req[indx]
2340 ->data->value
2341 ->encoded_str_val
2342 ? cfg_req[indx]->data->value->encoded_str_val
2343 : "NULL"));
2344 strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
2345 sizeof(cfg_chg->xpath));
2346 cfg_chg->value = (cfg_req[indx]->data->value
2347 && cfg_req[indx]
2348 ->data->value
2349 ->encoded_str_val
2350 ? strdup(cfg_req[indx]
2351 ->data->value
2352 ->encoded_str_val)
2353 : NULL);
2354 if (cfg_chg->value)
2355 MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
2356 cfg_chg->value, cfg_chg->value);
2357
2358 (*num_chgs)++;
2359 }
2360 txn_req->req.set_cfg->implicit_commit = implicit_commit;
2361 txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
2362 txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
2363 txn_req->req.set_cfg->setcfg_stats =
2364 mgmt_fe_get_session_setcfg_stats(txn->session_id);
2365 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
2366
2367 return 0;
2368 }
2369
/*
 * Queue a COMMIT-CONFIG request (src DS -> dst DS) on the transaction
 * and schedule its processing. Only one commit may be in progress per
 * transaction at a time.
 *
 * Returns 0 on success, -1 when the txn-id is unknown or a commit is
 * already in progress.
 */
int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
				    Mgmtd__DatastoreId src_ds_id,
				    struct mgmt_ds_ctx *src_ds_ctx,
				    Mgmtd__DatastoreId dst_ds_id,
				    struct mgmt_ds_ctx *dst_ds_ctx,
				    bool validate_only, bool abort,
				    bool implicit)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	if (txn->commit_cfg_req) {
		MGMTD_TXN_ERR("Commit already in-progress txn-id: %" PRIu64
			      " session-id: %" PRIu64 ". Cannot start another",
			      txn->txn_id, txn->session_id);
		return -1;
	}

	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = src_ds_id;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = validate_only;
	txn_req->req.commit_cfg.abort = abort;
	txn_req->req.commit_cfg.implicit = implicit;
	txn_req->req.commit_cfg.cmt_stats =
		mgmt_fe_get_session_commit_stats(txn->session_id);

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}
2409
/*
 * Handle a backend client adapter coming up (connect == true) or going
 * down (connect == false).
 *
 * On connect: compute this adapter's share of the running config and,
 * if non-empty, create a CONFIG transaction to push it down. On
 * disconnect: fail any in-flight CONFIG commit that involves this
 * adapter.
 */
int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
				    bool connect)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	static struct mgmt_commit_stats dummy_stats;
	struct nb_config_cbs *adapter_cfgs = NULL;

	memset(&dummy_stats, 0, sizeof(dummy_stats));
	if (connect) {
		/* Get config for this single backend client */
		mgmt_be_get_adapter_config(adapter, mm->running_ds,
					   &adapter_cfgs);

		/* Nothing to push: mark the adapter as already in sync. */
		if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
			SET_FLAG(adapter->flags,
				 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
			return 0;
		}

		/*
		 * Create a CONFIG transaction to push the config changes
		 * provided to the backend client.
		 */
		txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
		if (!txn) {
			MGMTD_TXN_ERR(
				"Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
				adapter->name);
			return -1;
		}

		MGMTD_TXN_DBG("Created initial txn-id: %" PRIu64
			      " for BE client '%s'",
			      txn->txn_id, adapter->name);
		/*
		 * Set the changeset for transaction to commit and trigger the
		 * commit request.
		 */
		txn_req =
			mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
		txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.src_ds_ctx = 0;
		txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.dst_ds_ctx = 0;
		txn_req->req.commit_cfg.validate_only = false;
		txn_req->req.commit_cfg.abort = false;
		/* Stats are discarded for this internal commit. */
		txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
		txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;

		/*
		 * Trigger a COMMIT-CONFIG process.
		 */
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	} else {
		/*
		 * Check if any transaction is currently on-going that
		 * involves this backend client. If so, report the transaction
		 * has failed.
		 */
		FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
			/* TODO: update with operational state when that is
			 * completed */
			if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
				cmtcfg_req = txn->commit_cfg_req
					? &txn->commit_cfg_req
						->req.commit_cfg
					: NULL;
				/* Fail the commit only when this adapter is
				 * subscribed to some of its changes. */
				if (cmtcfg_req &&
				    cmtcfg_req->subscr_info
					    .xpath_subscr[adapter->id]) {
					mgmt_txn_send_commit_cfg_reply(
						txn, MGMTD_INTERNAL_ERROR,
						"Backend daemon disconnected while processing commit!");
				}
			}
		}
	}

	return 0;
}
2493
/*
 * Process a TXN_CREATE (create == true) or TXN_DELETE (create == false)
 * reply from a backend client for the given transaction.
 *
 * On successful TXN_CREATE the config data is pushed to the backend
 * right away; on failure the whole commit is failed. TXN_DELETE
 * replies currently require no further action.
 */
int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
				 bool success,
				 struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	/* A TXN_DELETE reply without a pending commit is harmless. */
	if (!create && !txn->commit_cfg_req)
		return 0;

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	if (create) {
		if (success) {
			/*
			 * Done with TXN_CREATE. Move the backend client to
			 * next phase.
			 */
			assert(cmtcfg_req->curr_phase
			       == MGMTD_COMMIT_PHASE_TXN_CREATE);

			/*
			 * Send CFGDATA_CREATE-REQs to the backend immediately.
			 */
			mgmt_txn_send_be_cfg_data(txn, adapter);
		} else {
			mgmt_txn_send_commit_cfg_reply(
				txn, MGMTD_INTERNAL_ERROR,
				"Internal error! Failed to initiate transaction at backend!");
		}
	} else {
		/*
		 * Done with TXN_DELETE. Move the backend client to next phase.
		 *
		 * NOTE(review): the phase move below is deliberately
		 * disabled via 'if (false)' — dead code. Confirm whether it
		 * should be re-enabled or the branch removed entirely.
		 */
		if (false)
			mgmt_move_be_commit_to_next_phase(txn, adapter);
	}

	return 0;
}
2538
/*
 * Process a CFGDATA_CREATE reply from a backend client for a single
 * config batch. On success the batch advances to the APPLY-CFG phase;
 * on failure the whole commit is aborted with an error reply.
 */
int mgmt_txn_notify_be_cfgdata_reply(
	uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
	struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_be_cfg_batch *cfg_btch;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	if (!txn->commit_cfg_req)
		return -1;
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	/* The batch must exist and belong to this transaction. */
	cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
	if (!cfg_btch || cfg_btch->txn != txn)
		return -1;

	if (!success) {
		MGMTD_TXN_ERR(
			"CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
			" batch-id %" PRIu64 " err: %s",
			adapter->name, txn->txn_id, cfg_btch->batch_id,
			error_if_any ? error_if_any : "None");
		mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			error_if_any ? error_if_any :
			"Internal error! Failed to download config data to backend!");
		return 0;
	}

	MGMTD_TXN_DBG(
		"CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
		" batch-id %" PRIu64 " err: %s",
		adapter->name, txn->txn_id, cfg_btch->batch_id,
		error_if_any ? error_if_any : "None");
	/* Advance this batch from SEND-CFG to APPLY-CFG for this backend. */
	mgmt_move_txn_cfg_batch_to_next(
		cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
		&cmtcfg_req->next_batches[adapter->id], true,
		MGMTD_COMMIT_PHASE_APPLY_CFG);

	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
2586
2587 int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
2588 uint64_t batch_ids[],
2589 size_t num_batch_ids, char *error_if_any,
2590 struct mgmt_be_client_adapter *adapter)
2591 {
2592 struct mgmt_txn_ctx *txn;
2593 struct mgmt_txn_be_cfg_batch *cfg_btch;
2594 struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
2595 size_t indx;
2596
2597 txn = mgmt_txn_id2ctx(txn_id);
2598 if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
2599 || !txn->commit_cfg_req)
2600 return -1;
2601
2602 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
2603
2604 if (!success) {
2605 MGMTD_TXN_ERR(
2606 "CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
2607 " batch ids %" PRIu64 " - %" PRIu64 " err: %s",
2608 adapter->name, txn->txn_id, batch_ids[0],
2609 batch_ids[num_batch_ids - 1],
2610 error_if_any ? error_if_any : "None");
2611 mgmt_txn_send_commit_cfg_reply(
2612 txn, MGMTD_INTERNAL_ERROR,
2613 error_if_any ? error_if_any :
2614 "Internal error! Failed to apply config data on backend!");
2615 return 0;
2616 }
2617
2618 for (indx = 0; indx < num_batch_ids; indx++) {
2619 cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
2620 if (cfg_btch->txn != txn)
2621 return -1;
2622 mgmt_move_txn_cfg_batch_to_next(
2623 cmtcfg_req, cfg_btch,
2624 &cmtcfg_req->curr_batches[adapter->id],
2625 &cmtcfg_req->next_batches[adapter->id], true,
2626 MGMTD_COMMIT_PHASE_TXN_DELETE);
2627 }
2628
2629 if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
2630 /*
2631 * All configuration for the specific backend has been applied.
2632 * Send TXN-DELETE to wrap up the transaction for this backend.
2633 */
2634 SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
2635 mgmt_txn_send_be_txn_delete(txn, adapter);
2636 }
2637
2638 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
2639 if (mm->perf_stats_en)
2640 gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
2641
2642 return 0;
2643 }
2644
2645 int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
2646 enum mgmt_result result,
2647 const char *error_if_any)
2648 {
2649 struct mgmt_txn_ctx *txn;
2650
2651 txn = mgmt_txn_id2ctx(txn_id);
2652 if (!txn)
2653 return -1;
2654
2655 if (!txn->commit_cfg_req) {
2656 MGMTD_TXN_ERR("NO commit in-progress txn-id: %" PRIu64
2657 " session-id: %" PRIu64,
2658 txn->txn_id, txn->session_id);
2659 return -1;
2660 }
2661
2662 return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
2663 }
2664
2665 int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
2666 Mgmtd__DatastoreId ds_id,
2667 struct mgmt_ds_ctx *ds_ctx,
2668 Mgmtd__YangGetDataReq **data_req,
2669 size_t num_reqs)
2670 {
2671 struct mgmt_txn_ctx *txn;
2672 struct mgmt_txn_req *txn_req;
2673 size_t indx;
2674
2675 txn = mgmt_txn_id2ctx(txn_id);
2676 if (!txn)
2677 return -1;
2678
2679 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
2680 txn_req->req.get_data->ds_id = ds_id;
2681 txn_req->req.get_data->ds_ctx = ds_ctx;
2682 for (indx = 0;
2683 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2684 indx++) {
2685 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2686 txn_req->req.get_data->xpaths[indx] =
2687 strdup(data_req[indx]->data->xpath);
2688 txn_req->req.get_data->num_xpaths++;
2689 }
2690
2691 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
2692
2693 return 0;
2694 }
2695
2696 int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
2697 Mgmtd__DatastoreId ds_id,
2698 struct mgmt_ds_ctx *ds_ctx,
2699 Mgmtd__YangGetDataReq **data_req,
2700 size_t num_reqs)
2701 {
2702 struct mgmt_txn_ctx *txn;
2703 struct mgmt_txn_req *txn_req;
2704 size_t indx;
2705
2706 txn = mgmt_txn_id2ctx(txn_id);
2707 if (!txn)
2708 return -1;
2709
2710 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
2711 txn_req->req.get_data->ds_id = ds_id;
2712 txn_req->req.get_data->ds_ctx = ds_ctx;
2713 for (indx = 0;
2714 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2715 indx++) {
2716 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2717 txn_req->req.get_data->xpaths[indx] =
2718 strdup(data_req[indx]->data->xpath);
2719 txn_req->req.get_data->num_xpaths++;
2720 }
2721
2722 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
2723
2724 return 0;
2725 }
2726
2727 void mgmt_txn_status_write(struct vty *vty)
2728 {
2729 struct mgmt_txn_ctx *txn;
2730
2731 vty_out(vty, "MGMTD Transactions\n");
2732
2733 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
2734 vty_out(vty, " Txn: \t\t\t0x%p\n", txn);
2735 vty_out(vty, " Txn-Id: \t\t\t%" PRIu64 "\n", txn->txn_id);
2736 vty_out(vty, " Session-Id: \t\t%" PRIu64 "\n",
2737 txn->session_id);
2738 vty_out(vty, " Type: \t\t\t%s\n",
2739 mgmt_txn_type2str(txn->type));
2740 vty_out(vty, " Ref-Count: \t\t\t%d\n", txn->refcount);
2741 }
2742 vty_out(vty, " Total: %d\n",
2743 (int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
2744 }
2745
/*
 * Trigger application of a config rollback: diff the candidate (src)
 * datastore against running (dst) and, if there are differences,
 * create a CONFIG transaction that commits them with the 'rollback'
 * flag set.
 *
 * Returns 0 when a rollback commit was scheduled, -1 when there is
 * nothing to commit or the transaction could not be created.
 *
 * NOTE(review): 'changes' is function-static and memset on entry, so
 * this helper is not reentrant — presumably only ever called from the
 * single mgmtd thread; confirm before using elsewhere.
 */
int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
					struct mgmt_ds_ctx *dst_ds_ctx)
{
	static struct nb_config_cbs changes;
	struct nb_config_cbs *cfg_chgs = NULL;
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	static struct mgmt_commit_stats dummy_stats;

	memset(&changes, 0, sizeof(changes));
	memset(&dummy_stats, 0, sizeof(dummy_stats));
	/*
	 * This could be the case when the config is directly
	 * loaded onto the candidate DS from a file. Get the
	 * diff from a full comparison of the candidate and
	 * running DSs.
	 */
	nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
		       mgmt_ds_get_nb_config(src_ds_ctx), &changes);
	cfg_chgs = &changes;

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		return -1;
	}

	/*
	 * Create a CONFIG transaction to push the config changes
	 * provided to the backend client.
	 */
	txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
	if (!txn) {
		MGMTD_TXN_ERR(
			"Failed to create CONFIG Transaction for downloading CONFIGs");
		return -1;
	}

	MGMTD_TXN_DBG("Created rollback txn-id: %" PRIu64, txn->txn_id);

	/*
	 * Set the changeset for transaction to commit and trigger the commit
	 * request.
	 */
	txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = false;
	txn_req->req.commit_cfg.abort = false;
	txn_req->req.commit_cfg.rollback = true;
	/* Stats are discarded for rollback commits. */
	txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
	txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}