1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * MGMTD Transactions
4 *
5 * Copyright (C) 2021 Vmware, Inc.
6 * Pushpasis Sarkar <spushpasis@vmware.com>
7 */
8
9 #include <zebra.h>
10 #include "hash.h"
11 #include "jhash.h"
12 #include "libfrr.h"
13 #include "mgmtd/mgmt.h"
14 #include "mgmtd/mgmt_memory.h"
15 #include "mgmtd/mgmt_txn.h"
16
17 #define MGMTD_TXN_DBG(fmt, ...) \
18 DEBUGD(&mgmt_debug_txn, "%s:" fmt, __func__, ##__VA_ARGS__)
19 #define MGMTD_TXN_ERR(fmt, ...) \
20 zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
21
22 #define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
23 #define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
24
25 enum mgmt_txn_event {
26 MGMTD_TXN_PROC_SETCFG = 1,
27 MGMTD_TXN_PROC_COMMITCFG,
28 MGMTD_TXN_PROC_GETCFG,
29 MGMTD_TXN_PROC_GETDATA,
30 MGMTD_TXN_COMMITCFG_TIMEOUT,
31 MGMTD_TXN_CLEANUP
32 };
33
34 PREDECL_LIST(mgmt_txn_reqs);
35
36 struct mgmt_set_cfg_req {
37 Mgmtd__DatastoreId ds_id;
38 struct mgmt_ds_ctx *ds_ctx;
39 struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
40 uint16_t num_cfg_changes;
41 bool implicit_commit;
42 Mgmtd__DatastoreId dst_ds_id;
43 struct mgmt_ds_ctx *dst_ds_ctx;
44 struct mgmt_setcfg_stats *setcfg_stats;
45 };
46
47 enum mgmt_commit_phase {
48 MGMTD_COMMIT_PHASE_PREPARE_CFG = 0,
49 MGMTD_COMMIT_PHASE_TXN_CREATE,
50 MGMTD_COMMIT_PHASE_SEND_CFG,
51 MGMTD_COMMIT_PHASE_APPLY_CFG,
52 MGMTD_COMMIT_PHASE_TXN_DELETE,
53 MGMTD_COMMIT_PHASE_MAX
54 };
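/*
 * Phase progression sketch (for reference; this mirrors the handling in
 * mgmt_try_move_commit_to_next_phase() further below): a commit normally
 * walks the phases in declaration order, i.e.
 *
 *   PREPARE_CFG -> TXN_CREATE -> SEND_CFG -> APPLY_CFG -> TXN_DELETE
 *
 * with the commit's 'curr_phase'/'next_phase' advanced roughly as:
 *
 *   cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
 *   cmtcfg_req->next_phase++;
 */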
55
56 static inline const char *
57 mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
58 {
59 switch (cmt_phase) {
60 case MGMTD_COMMIT_PHASE_PREPARE_CFG:
61 return "PREP-CFG";
62 case MGMTD_COMMIT_PHASE_TXN_CREATE:
63 return "CREATE-TXN";
64 case MGMTD_COMMIT_PHASE_SEND_CFG:
65 return "SEND-CFG";
66 case MGMTD_COMMIT_PHASE_APPLY_CFG:
67 return "APPLY-CFG";
68 case MGMTD_COMMIT_PHASE_TXN_DELETE:
69 return "DELETE-TXN";
70 case MGMTD_COMMIT_PHASE_MAX:
71 return "Invalid/Unknown";
72 }
73
74 return "Invalid/Unknown";
75 }
76
77 PREDECL_LIST(mgmt_txn_batches);
78
79 struct mgmt_txn_be_cfg_batch {
80 struct mgmt_txn_ctx *txn;
81 uint64_t batch_id;
82 enum mgmt_be_client_id be_id;
83 struct mgmt_be_client_adapter *be_adapter;
84 union mgmt_be_xpath_subscr_info
85 xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
86 Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
87 Mgmtd__YangCfgDataReq *cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
88 Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
89 Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
90 size_t num_cfg_data;
91 int buf_space_left;
92 enum mgmt_commit_phase comm_phase;
93 struct mgmt_txn_batches_item list_linkage;
94 };
95
96 DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
97
98 #define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
99 frr_each_safe (mgmt_txn_batches, list, batch)
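/*
 * Usage sketch: the '_safe' iteration allows freeing the current batch while
 * walking the list, e.g. as done in mgmt_txn_cleanup_be_cfg_batches() below:
 *
 *   struct mgmt_txn_be_cfg_batch *batch;
 *
 *   FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
 *           mgmt_txn_cfg_batch_free(&batch);
 */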
100
101 struct mgmt_commit_cfg_req {
102 Mgmtd__DatastoreId src_ds_id;
103 struct mgmt_ds_ctx *src_ds_ctx;
104 Mgmtd__DatastoreId dst_ds_id;
105 struct mgmt_ds_ctx *dst_ds_ctx;
106 uint32_t nb_txn_id;
107 uint8_t validate_only : 1;
108 uint8_t abort : 1;
109 uint8_t implicit : 1;
110 uint8_t rollback : 1;
111
112 /* Track commit phases */
113 enum mgmt_commit_phase curr_phase;
114 enum mgmt_commit_phase next_phase;
115
116 /*
117 * Set of config changes to commit. This is used only
118 * when changes are NOT to be determined by comparing
119 * candidate and running DSs. This is typically used
120 * for downloading all relevant configs for a new backend
121 * client that has recently come up and connected with
122 * MGMTD.
123 */
124 struct nb_config_cbs *cfg_chgs;
125
126 /*
127 * Details on all the Backend Clients associated with
128 * this commit.
129 */
130 struct mgmt_be_client_subscr_info subscr_info;
131
132 /*
133 * List of backend batches for this commit to be validated
134 * and applied at the backend.
135 *
136 * FIXME: Need to re-think this design for the case where the set
137 * of validators for a given YANG data item is different from
138 * the set of notifiers for the same. We may need to maintain
139 * separate lists of batches for VALIDATE and APPLY.
140 */
141 struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
142 struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
143 /*
144 * The last batch added for any backend client. This is always on
145 * 'curr_batches'
146 */
147 struct mgmt_txn_be_cfg_batch
148 *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
149 struct hash *batches;
150 uint64_t next_batch_id;
151
152 struct mgmt_commit_stats *cmt_stats;
153 };
154
155 struct mgmt_get_data_reply {
156 /* Buffer space for preparing data reply */
157 int num_reply;
158 int last_batch;
159 Mgmtd__YangDataReply data_reply;
160 Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
161 Mgmtd__YangData *reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
162 Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
163 char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
164 };
165
166 struct mgmt_get_data_req {
167 Mgmtd__DatastoreId ds_id;
168 struct mgmt_ds_ctx *ds_ctx;
169 char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
170 int num_xpaths;
171
172 /*
173 * Buffer space for preparing reply.
174 * NOTE: Should only be malloc-ed on demand to reduce
175 * memory footprint. Freed up via mgmt_txn_req_free()
176 */
177 struct mgmt_get_data_reply *reply;
178
179 int total_reply;
180 };
181
182 struct mgmt_txn_req {
183 struct mgmt_txn_ctx *txn;
184 enum mgmt_txn_event req_event;
185 uint64_t req_id;
186 union {
187 struct mgmt_set_cfg_req *set_cfg;
188 struct mgmt_get_data_req *get_data;
189 struct mgmt_commit_cfg_req commit_cfg;
190 } req;
191
192 bool pending_be_proc;
193 struct mgmt_txn_reqs_item list_linkage;
194 };
195
196 DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
197
198 #define FOREACH_TXN_REQ_IN_LIST(list, req) \
199 frr_each_safe (mgmt_txn_reqs, list, req)
200
201 struct mgmt_txn_ctx {
202 uint64_t session_id; /* One transaction per client session */
203 uint64_t txn_id;
204 enum mgmt_txn_type type;
205
206 /* struct mgmt_master *mm; */
207
208 struct event *proc_set_cfg;
209 struct event *proc_comm_cfg;
210 struct event *proc_get_cfg;
211 struct event *proc_get_data;
212 struct event *comm_cfg_timeout;
213 struct event *clnup;
214
215 /* List of backend adapters involved in this transaction */
216 struct mgmt_txn_badapters_head be_adapters;
217
218 int refcount;
219
220 struct mgmt_txns_item list_linkage;
221
222 /*
223 * List of pending set-config requests for a given
224 * transaction/session. Just one list for requests
225 * not processed at all. There's no backend interaction
226 * involved.
227 */
228 struct mgmt_txn_reqs_head set_cfg_reqs;
229 /*
230 * List of pending get-config requests for a given
231 * transaction/session. Just one list for requests
232 * not processed at all. There's no backend interaction
233 * involved.
234 */
235 struct mgmt_txn_reqs_head get_cfg_reqs;
236 /*
237 * List of pending get-data requests for a given
238 * transaction/session. Two lists: one for requests
239 * not processed at all, and one for requests that
240 * have been sent to the backend for processing.
241 */
242 struct mgmt_txn_reqs_head get_data_reqs;
243 struct mgmt_txn_reqs_head pending_get_datas;
244 /*
245 * Only one commit-config request is allowed at a time for a given
246 * transaction/session. No need to maintain lists for it.
247 */
248 struct mgmt_txn_req *commit_cfg_req;
249 };
250
251 DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
252
253 #define FOREACH_TXN_IN_LIST(mm, txn) \
254 frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
255
256 static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
257 enum mgmt_result result,
258 const char *error_if_any);
259
260 static inline const char *
261 mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
262 {
263 if (!txn->commit_cfg_req)
264 return "None";
265
266 return (mgmt_commit_phase2str(
267 curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
268 : txn->commit_cfg_req->req.commit_cfg.next_phase));
269 }
270
271 static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
272 int line);
273 static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
274 int line);
275 static int
276 mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
277 struct mgmt_be_client_adapter *adapter);
278
279 static struct event_loop *mgmt_txn_tm;
280 static struct mgmt_master *mgmt_txn_mm;
281
282 static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
283 enum mgmt_txn_event event);
284
285 static int
286 mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
287 struct mgmt_be_client_adapter *adapter);
288
289 static struct mgmt_txn_be_cfg_batch *
290 mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
291 enum mgmt_be_client_id id,
292 struct mgmt_be_client_adapter *be_adapter)
293 {
294 struct mgmt_txn_be_cfg_batch *cfg_btch;
295
296 cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
297 sizeof(struct mgmt_txn_be_cfg_batch));
298 assert(cfg_btch);
299 cfg_btch->be_id = id;
300
301 cfg_btch->txn = txn;
302 MGMTD_TXN_LOCK(txn);
303 assert(txn->commit_cfg_req);
304 mgmt_txn_batches_add_tail(
305 &txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
306 cfg_btch);
307 cfg_btch->be_adapter = be_adapter;
308 cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
309 if (be_adapter)
310 mgmt_be_adapter_lock(be_adapter);
311
312 txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
313 cfg_btch;
314 if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
315 txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
316 cfg_btch->batch_id =
317 txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
318 hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
319 hash_alloc_intern);
320
321 return cfg_btch;
322 }
323
324 static void
325 mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
326 {
327 size_t indx;
328 struct mgmt_commit_cfg_req *cmtcfg_req;
329
330 MGMTD_TXN_DBG(" Batch: %p, Txn: %p", *cfg_btch, (*cfg_btch)->txn);
331
332 assert((*cfg_btch)->txn
333 && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
334
335 cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
336 hash_release(cmtcfg_req->batches, *cfg_btch);
337 mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
338 *cfg_btch);
339 mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
340 *cfg_btch);
341
342 if ((*cfg_btch)->be_adapter)
343 mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);
344
345 for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
346 if ((*cfg_btch)->data[indx].xpath) {
347 free((*cfg_btch)->data[indx].xpath);
348 (*cfg_btch)->data[indx].xpath = NULL;
349 }
350 }
351
352 MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);
353
354 XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
355 *cfg_btch = NULL;
356 }
357
358 static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
359 {
360 const struct mgmt_txn_be_cfg_batch *batch = data;
361
362 return jhash2((uint32_t *) &batch->batch_id,
363 sizeof(batch->batch_id) / sizeof(uint32_t), 0);
364 }
365
366 static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
367 {
368 const struct mgmt_txn_be_cfg_batch *batch1 = d1;
369 const struct mgmt_txn_be_cfg_batch *batch2 = d2;
370
371 return (batch1->batch_id == batch2->batch_id);
372 }
373
374 static void mgmt_txn_cfgbatch_hash_free(void *data)
375 {
376 struct mgmt_txn_be_cfg_batch *batch = data;
377
378 mgmt_txn_cfg_batch_free(&batch);
379 }
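/*
 * Batch hash wiring sketch (matching the calls made elsewhere in this file,
 * e.g. mgmt_txn_req_alloc() and mgmt_txn_cfg_batch_alloc()):
 *
 *   batches = hash_create(mgmt_txn_cfgbatch_hash_key,
 *                         mgmt_txn_cfgbatch_hash_cmp,
 *                         "MGMT Config Batches");
 *   hash_get(batches, cfg_btch, hash_alloc_intern);  // insert a batch
 *   batch = hash_lookup(batches, &key);              // lookup by batch_id
 */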
380
381 static inline struct mgmt_txn_be_cfg_batch *
382 mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
383 {
384 struct mgmt_txn_be_cfg_batch key = {0};
385 struct mgmt_txn_be_cfg_batch *batch;
386
387 if (!txn->commit_cfg_req)
388 return NULL;
389
390 key.batch_id = batch_id;
391 batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
392 &key);
393
394 return batch;
395 }
396
397 static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
398 enum mgmt_be_client_id id)
399 {
400 struct mgmt_txn_be_cfg_batch *cfg_btch;
401 struct mgmt_txn_batches_head *list;
402
403 list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
404 FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
405 mgmt_txn_cfg_batch_free(&cfg_btch);
406
407 mgmt_txn_batches_fini(list);
408
409 list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
410 FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
411 mgmt_txn_cfg_batch_free(&cfg_btch);
412
413 mgmt_txn_batches_fini(list);
414
415 txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
416 }
417
418 static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
419 uint64_t req_id,
420 enum mgmt_txn_event req_event)
421 {
422 struct mgmt_txn_req *txn_req;
423 enum mgmt_be_client_id id;
424
425 txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
426 assert(txn_req);
427 txn_req->txn = txn;
428 txn_req->req_id = req_id;
429 txn_req->req_event = req_event;
430 txn_req->pending_be_proc = false;
431
432 switch (txn_req->req_event) {
433 case MGMTD_TXN_PROC_SETCFG:
434 txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
435 sizeof(struct mgmt_set_cfg_req));
436 assert(txn_req->req.set_cfg);
437 mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
438 MGMTD_TXN_DBG(
439 "Added a new SETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
440 txn_req, txn, (unsigned long long)txn->session_id);
441 break;
442 case MGMTD_TXN_PROC_COMMITCFG:
443 txn->commit_cfg_req = txn_req;
444 MGMTD_TXN_DBG(
445 "Added a new COMMITCFG Req: %p for Txn: %p, Sessn: 0x%llx",
446 txn_req, txn, (unsigned long long)txn->session_id);
447
448 FOREACH_MGMTD_BE_CLIENT_ID (id) {
449 mgmt_txn_batches_init(
450 &txn_req->req.commit_cfg.curr_batches[id]);
451 mgmt_txn_batches_init(
452 &txn_req->req.commit_cfg.next_batches[id]);
453 }
454
455 txn_req->req.commit_cfg.batches =
456 hash_create(mgmt_txn_cfgbatch_hash_key,
457 mgmt_txn_cfgbatch_hash_cmp,
458 "MGMT Config Batches");
459 break;
460 case MGMTD_TXN_PROC_GETCFG:
461 txn_req->req.get_data =
462 XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
463 sizeof(struct mgmt_get_data_req));
464 assert(txn_req->req.get_data);
465 mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
466 MGMTD_TXN_DBG(
467 "Added a new GETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
468 txn_req, txn, (unsigned long long)txn->session_id);
469 break;
470 case MGMTD_TXN_PROC_GETDATA:
471 txn_req->req.get_data =
472 XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
473 sizeof(struct mgmt_get_data_req));
474 assert(txn_req->req.get_data);
475 mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
476 MGMTD_TXN_DBG(
477 "Added a new GETDATA Req: %p for Txn: %p, Sessn: 0x%llx",
478 txn_req, txn, (unsigned long long)txn->session_id);
479 break;
480 case MGMTD_TXN_COMMITCFG_TIMEOUT:
481 case MGMTD_TXN_CLEANUP:
482 break;
483 }
484
485 MGMTD_TXN_LOCK(txn);
486
487 return txn_req;
488 }
489
490 static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
491 {
492 int indx;
493 struct mgmt_txn_reqs_head *req_list = NULL;
494 struct mgmt_txn_reqs_head *pending_list = NULL;
495 enum mgmt_be_client_id id;
496 struct mgmt_be_client_adapter *adapter;
497
498 switch ((*txn_req)->req_event) {
499 case MGMTD_TXN_PROC_SETCFG:
500 for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
501 indx++) {
502 if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
503 MGMTD_TXN_DBG(
504 "Freeing value for %s at %p ==> '%s'",
505 (*txn_req)
506 ->req.set_cfg->cfg_changes[indx]
507 .xpath,
508 (*txn_req)
509 ->req.set_cfg->cfg_changes[indx]
510 .value,
511 (*txn_req)
512 ->req.set_cfg->cfg_changes[indx]
513 .value);
514 free((void *)(*txn_req)
515 ->req.set_cfg->cfg_changes[indx]
516 .value);
517 }
518 }
519 req_list = &(*txn_req)->txn->set_cfg_reqs;
520 MGMTD_TXN_DBG("Deleting SETCFG Req: %p for Txn: %p",
521 *txn_req, (*txn_req)->txn);
522 XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
523 break;
524 case MGMTD_TXN_PROC_COMMITCFG:
525 MGMTD_TXN_DBG("Deleting COMMITCFG Req: %p for Txn: %p",
526 *txn_req, (*txn_req)->txn);
527 FOREACH_MGMTD_BE_CLIENT_ID (id) {
528 /*
529 * Send TXN_DELETE to cleanup state for this
530 * transaction on backend
531 */
532 if ((*txn_req)->req.commit_cfg.curr_phase
533 >= MGMTD_COMMIT_PHASE_TXN_CREATE
534 && (*txn_req)->req.commit_cfg.curr_phase
535 < MGMTD_COMMIT_PHASE_TXN_DELETE
536 && (*txn_req)
537 ->req.commit_cfg.subscr_info
538 .xpath_subscr[id]
539 .subscribed) {
540 adapter = mgmt_be_get_adapter_by_id(id);
541 if (adapter)
542 mgmt_txn_send_be_txn_delete(
543 (*txn_req)->txn, adapter);
544 }
545
546 mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
547 id);
548 if ((*txn_req)->req.commit_cfg.batches) {
549 hash_clean((*txn_req)->req.commit_cfg.batches,
550 mgmt_txn_cfgbatch_hash_free);
551 hash_free((*txn_req)->req.commit_cfg.batches);
552 (*txn_req)->req.commit_cfg.batches = NULL;
553 }
554 }
555 break;
556 case MGMTD_TXN_PROC_GETCFG:
557 for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
558 indx++) {
559 if ((*txn_req)->req.get_data->xpaths[indx])
560 free((void *)(*txn_req)
561 ->req.get_data->xpaths[indx]);
562 }
563 req_list = &(*txn_req)->txn->get_cfg_reqs;
564 MGMTD_TXN_DBG("Deleting GETCFG Req: %p for Txn: %p",
565 *txn_req, (*txn_req)->txn);
566 if ((*txn_req)->req.get_data->reply)
567 XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
568 (*txn_req)->req.get_data->reply);
569 XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
570 break;
571 case MGMTD_TXN_PROC_GETDATA:
572 for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
573 indx++) {
574 if ((*txn_req)->req.get_data->xpaths[indx])
575 free((void *)(*txn_req)
576 ->req.get_data->xpaths[indx]);
577 }
578 pending_list = &(*txn_req)->txn->pending_get_datas;
579 req_list = &(*txn_req)->txn->get_data_reqs;
580 MGMTD_TXN_DBG("Deleting GETDATA Req: %p for Txn: %p",
581 *txn_req, (*txn_req)->txn);
582 if ((*txn_req)->req.get_data->reply)
583 XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
584 (*txn_req)->req.get_data->reply);
585 XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
586 break;
587 case MGMTD_TXN_COMMITCFG_TIMEOUT:
588 case MGMTD_TXN_CLEANUP:
589 break;
590 }
591
592 if ((*txn_req)->pending_be_proc && pending_list) {
593 mgmt_txn_reqs_del(pending_list, *txn_req);
594 MGMTD_TXN_DBG("Removed Req: %p from pending-list (left:%d)",
595 *txn_req, (int)mgmt_txn_reqs_count(pending_list));
596 } else if (req_list) {
597 mgmt_txn_reqs_del(req_list, *txn_req);
598 MGMTD_TXN_DBG("Removed Req: %p from request-list (left:%d)",
599 *txn_req, (int)mgmt_txn_reqs_count(req_list));
600 }
601
602 (*txn_req)->pending_be_proc = false;
603 MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
604 XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
605 *txn_req = NULL;
606 }
607
608 static void mgmt_txn_process_set_cfg(struct event *thread)
609 {
610 struct mgmt_txn_ctx *txn;
611 struct mgmt_txn_req *txn_req;
612 struct mgmt_ds_ctx *ds_ctx;
613 struct nb_config *nb_config;
614 char err_buf[1024];
615 bool error;
616 int num_processed = 0;
617 size_t left;
618 struct mgmt_commit_stats *cmt_stats;
619 int ret = 0;
620
621 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
622 assert(txn);
623 cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);
624
625 MGMTD_TXN_DBG(
626 "Processing %d SET_CONFIG requests for Txn:%p Session:0x%llx",
627 (int)mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn,
628 (unsigned long long)txn->session_id);
629
630 FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
631 error = false;
632 assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
633 ds_ctx = txn_req->req.set_cfg->ds_ctx;
634 if (!ds_ctx) {
635 mgmt_fe_send_set_cfg_reply(
636 txn->session_id, txn->txn_id,
637 txn_req->req.set_cfg->ds_id, txn_req->req_id,
638 MGMTD_INTERNAL_ERROR, "No such datastore!",
639 txn_req->req.set_cfg->implicit_commit);
640 error = true;
641 goto mgmt_txn_process_set_cfg_done;
642 }
643
644 nb_config = mgmt_ds_get_nb_config(ds_ctx);
645 if (!nb_config) {
646 mgmt_fe_send_set_cfg_reply(
647 txn->session_id, txn->txn_id,
648 txn_req->req.set_cfg->ds_id, txn_req->req_id,
649 MGMTD_INTERNAL_ERROR,
650 "Unable to retrieve DS Config Tree!",
651 txn_req->req.set_cfg->implicit_commit);
652 error = true;
653 goto mgmt_txn_process_set_cfg_done;
654 }
655
656 error = false;
657 nb_candidate_edit_config_changes(
658 nb_config, txn_req->req.set_cfg->cfg_changes,
659 (size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
660 NULL, 0, err_buf, sizeof(err_buf), &error);
661 if (error) {
662 mgmt_fe_send_set_cfg_reply(
663 txn->session_id, txn->txn_id,
664 txn_req->req.set_cfg->ds_id, txn_req->req_id,
665 MGMTD_INTERNAL_ERROR, err_buf,
666 txn_req->req.set_cfg->implicit_commit);
667 goto mgmt_txn_process_set_cfg_done;
668 }
669
670 if (txn_req->req.set_cfg->implicit_commit) {
671 assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
672 assert(txn_req->req.set_cfg->dst_ds_ctx);
673
674 ret = mgmt_ds_write_lock(
675 txn_req->req.set_cfg->dst_ds_ctx);
676 if (ret != 0) {
677 MGMTD_TXN_ERR(
678 "Failed to lock the DS %u for txn: %p session 0x%llx, errstr %s!",
679 txn_req->req.set_cfg->dst_ds_id, txn,
680 (unsigned long long)txn->session_id,
681 strerror(ret));
682 mgmt_txn_send_commit_cfg_reply(
683 txn, MGMTD_DS_LOCK_FAILED,
684 "Lock running DS before implicit commit failed!");
685 goto mgmt_txn_process_set_cfg_done;
686 }
687
688 mgmt_txn_send_commit_config_req(
689 txn->txn_id, txn_req->req_id,
690 txn_req->req.set_cfg->ds_id,
691 txn_req->req.set_cfg->ds_ctx,
692 txn_req->req.set_cfg->dst_ds_id,
693 txn_req->req.set_cfg->dst_ds_ctx, false,
694 false, true);
695
696 if (mm->perf_stats_en)
697 gettimeofday(&cmt_stats->last_start, NULL);
698 cmt_stats->commit_cnt++;
699 } else if (mgmt_fe_send_set_cfg_reply(
700 txn->session_id, txn->txn_id,
701 txn_req->req.set_cfg->ds_id,
702 txn_req->req_id, MGMTD_SUCCESS, NULL, false)
703 != 0) {
704 MGMTD_TXN_ERR(
705 "Failed to send SET_CONFIG_REPLY for txn %p session 0x%llx",
706 txn, (unsigned long long)txn->session_id);
707 error = true;
708 }
709
710 mgmt_txn_process_set_cfg_done:
711
712 /*
713 * Note: The following will remove it from the list as well.
714 */
715 mgmt_txn_req_free(&txn_req);
716
717 num_processed++;
718 if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
719 break;
720 }
721
722 left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
723 if (left) {
724 MGMTD_TXN_DBG(
725 "Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
726 num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
727 (int)left);
728 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
729 }
730 }
731
732 static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
733 enum mgmt_result result,
734 const char *error_if_any)
735 {
736 int ret = 0;
737 bool success, create_cmt_info_rec;
738
739 if (!txn->commit_cfg_req)
740 return -1;
741
742 success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
743
744 if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
745 && mgmt_fe_send_commit_cfg_reply(
746 txn->session_id, txn->txn_id,
747 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
748 txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
749 txn->commit_cfg_req->req_id,
750 txn->commit_cfg_req->req.commit_cfg.validate_only,
751 result, error_if_any)
752 != 0) {
753 MGMTD_TXN_ERR(
754 "Failed to send COMMIT-CONFIG-REPLY for Txn %p Sessn 0x%llx",
755 txn, (unsigned long long)txn->session_id);
756 }
757
758 if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
759 && mgmt_fe_send_set_cfg_reply(
760 txn->session_id, txn->txn_id,
761 txn->commit_cfg_req->req.commit_cfg.src_ds_id,
762 txn->commit_cfg_req->req_id,
763 success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
764 error_if_any, true)
765 != 0) {
766 MGMTD_TXN_ERR(
767 "Failed to send SET-CONFIG-REPLY for Txn %p Sessn 0x%llx",
768 txn, (unsigned long long)txn->session_id);
769 }
770
771 if (success) {
772 /* Stop the commit-timeout timer */
773 EVENT_OFF(txn->comm_cfg_timeout);
774
775 create_cmt_info_rec =
776 (result != MGMTD_NO_CFG_CHANGES &&
777 !txn->commit_cfg_req->req.commit_cfg.rollback);
778
779 /*
780 * Successful commit: Merge Src DS into Dst DS if and only if
781 * this was not a validate-only or abort request.
782 */
783 if ((txn->session_id
784 && !txn->commit_cfg_req->req.commit_cfg.validate_only
785 && !txn->commit_cfg_req->req.commit_cfg.abort)
786 || txn->commit_cfg_req->req.commit_cfg.rollback) {
787 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
788 .src_ds_ctx,
789 txn->commit_cfg_req->req.commit_cfg
790 .dst_ds_ctx,
791 create_cmt_info_rec);
792 }
793
794 /*
795 * Restore the Src DS from the Dst DS only in case of a commit abort
796 * request.
797 */
798 if (txn->session_id
799 && txn->commit_cfg_req->req.commit_cfg.abort)
800 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
801 .dst_ds_ctx,
802 txn->commit_cfg_req->req.commit_cfg
803 .src_ds_ctx,
804 false);
805 } else {
806 /*
807 * The commit has failed. For implicit commit requests, restore
808 * the contents of the candidate DS.
809 */
810 if (txn->commit_cfg_req->req.commit_cfg.implicit)
811 mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
812 .dst_ds_ctx,
813 txn->commit_cfg_req->req.commit_cfg
814 .src_ds_ctx,
815 false);
816 }
817
818 if (txn->commit_cfg_req->req.commit_cfg.rollback) {
819 ret = mgmt_ds_unlock(
820 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
821 if (ret != 0)
822 MGMTD_TXN_ERR(
823 "Failed to unlock the dst DS during rollback : %s",
824 strerror(ret));
825
826 /*
827 * Resume processing the rollback command.
828 */
829 mgmt_history_rollback_complete(success);
830 }
831
832 if (txn->commit_cfg_req->req.commit_cfg.implicit)
833 if (mgmt_ds_unlock(
834 txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx)
835 != 0)
836 MGMTD_TXN_ERR(
837 "Failed to unlock the dst DS during implicit : %s",
838 strerror(ret));
839
840 txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
841 mgmt_txn_req_free(&txn->commit_cfg_req);
842
843 /*
844 * The CONFIG Transaction should be destroyed from the Frontend-adapter.
845 * But in case the transaction is not triggered from a front-end session,
846 * we need to clean it up ourselves.
847 */
848 if (!txn->session_id)
849 mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
850
851 return 0;
852 }
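/*
 * Datastore copy sketch for the reply handling above (per the comments in
 * that function, mgmt_ds_copy_dss() copies from its first argument into its
 * second):
 *
 *   successful commit (not validate-only/abort):  src_ds_ctx -> dst_ds_ctx
 *   abort or failed implicit commit:              dst_ds_ctx -> src_ds_ctx
 */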
853
854 static void
855 mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
856 struct mgmt_txn_be_cfg_batch *cfg_btch,
857 struct mgmt_txn_batches_head *src_list,
858 struct mgmt_txn_batches_head *dst_list,
859 bool update_commit_phase,
860 enum mgmt_commit_phase to_phase)
861 {
862 mgmt_txn_batches_del(src_list, cfg_btch);
863
864 if (update_commit_phase) {
865 MGMTD_TXN_DBG("Move Txn-Id %p Batch-Id %p from '%s' --> '%s'",
866 cfg_btch->txn, cfg_btch,
867 mgmt_commit_phase2str(cfg_btch->comm_phase),
868 mgmt_txn_commit_phase_str(cfg_btch->txn, false));
869 cfg_btch->comm_phase = to_phase;
870 }
871
872 mgmt_txn_batches_add_tail(dst_list, cfg_btch);
873 }
874
875 static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
876 struct mgmt_commit_cfg_req *cmtcfg_req,
877 struct mgmt_txn_batches_head *src_list,
878 struct mgmt_txn_batches_head *dst_list,
879 bool update_commit_phase,
880 enum mgmt_commit_phase to_phase)
881 {
882 struct mgmt_txn_be_cfg_batch *cfg_btch;
883
884 FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, cfg_btch) {
885 mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, cfg_btch, src_list,
886 dst_list, update_commit_phase,
887 to_phase);
888 }
889 }
890
891 static int
892 mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
893 struct mgmt_commit_cfg_req *cmtcfg_req)
894 {
895 struct mgmt_txn_batches_head *curr_list, *next_list;
896 enum mgmt_be_client_id id;
897
898 MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
899 mgmt_txn_commit_phase_str(txn, true),
900 mgmt_txn_commit_phase_str(txn, false));
901
902 /*
903 * Check if all clients have moved to the next phase or not.
904 */
905 FOREACH_MGMTD_BE_CLIENT_ID (id) {
906 if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed &&
907 mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
908 /*
909 * There's at least one client that hasn't moved to
910 * the next phase.
911 *
912 * TODO: Need to re-think this design for the case where the
913 * set of validators for a given YANG data item is
914 * different from the set of notifiers for the same.
915 */
916 return -1;
917 }
918 }
919
920 MGMTD_TXN_DBG("Move entire Txn-Id %p from '%s' to '%s'", txn,
921 mgmt_txn_commit_phase_str(txn, true),
922 mgmt_txn_commit_phase_str(txn, false));
923
924 /*
925 * If we are here, it means all the clients have moved to the next phase.
926 * So we can move the whole commit to next phase.
927 */
928 cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
929 cmtcfg_req->next_phase++;
930 MGMTD_TXN_DBG(
931 "Move back all config batches for Txn %p from next to current branch",
932 txn);
933 FOREACH_MGMTD_BE_CLIENT_ID (id) {
934 curr_list = &cmtcfg_req->curr_batches[id];
935 next_list = &cmtcfg_req->next_batches[id];
936 mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
937 curr_list, false, 0);
938 }
939
940 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
941
942 return 0;
943 }
944
945 static int
946 mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
947 struct mgmt_be_client_adapter *adapter)
948 {
949 struct mgmt_commit_cfg_req *cmtcfg_req;
950 struct mgmt_txn_batches_head *curr_list, *next_list;
951
952 if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
953 return -1;
954
955 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
956
957 MGMTD_TXN_DBG(
958 "Move Txn-Id %p for '%s' Phase(current: '%s' next:'%s')", txn,
959 adapter->name, mgmt_txn_commit_phase_str(txn, true),
960 mgmt_txn_commit_phase_str(txn, false));
961
962 MGMTD_TXN_DBG(
963 "Move all config batches for '%s' from current to next list",
964 adapter->name);
965 curr_list = &cmtcfg_req->curr_batches[adapter->id];
966 next_list = &cmtcfg_req->next_batches[adapter->id];
967 mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
968 cmtcfg_req->next_phase);
969
970 MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
971 mgmt_txn_commit_phase_str(txn, true),
972 mgmt_txn_commit_phase_str(txn, false));
973
974 /*
975 * Check if all clients have moved to the next phase or not.
976 */
977 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
978
979 return 0;
980 }
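/*
 * Two-step phase-advance sketch: when one backend adapter finishes the
 * current phase its batches move from 'curr_batches' to 'next_batches', and
 * only once every subscribed adapter's 'curr_batches' list is empty does the
 * whole commit advance, roughly:
 *
 *   mgmt_move_be_commit_to_next_phase(txn, adapter);          // per adapter
 *     -> mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req); // whole commit
 */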
981
982 static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
983 struct nb_config_cbs *changes)
984 {
985 struct nb_config_cb *cb, *nxt;
986 struct nb_config_change *chg;
987 struct mgmt_txn_be_cfg_batch *cfg_btch;
988 struct mgmt_be_client_subscr_info subscr_info;
989 char *xpath = NULL, *value = NULL;
990 char err_buf[1024];
991 enum mgmt_be_client_id id;
992 struct mgmt_be_client_adapter *adapter;
993 struct mgmt_commit_cfg_req *cmtcfg_req;
994 bool found_validator;
995 int num_chgs = 0;
996 int xpath_len, value_len;
997
998 cmtcfg_req = &txn_req->req.commit_cfg;
999
1000 RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
1001 chg = (struct nb_config_change *)cb;
1002
1003 /*
1004 * Could have directly pointed to xpath in nb_node.
1005 * But we don't want to mess with it now.
1006 * xpath = chg->cb.nb_node->xpath;
1007 */
1008 xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
1009 if (!xpath) {
1010 (void)mgmt_txn_send_commit_cfg_reply(
1011 txn_req->txn, MGMTD_INTERNAL_ERROR,
1012 "Internal error! Could not get Xpath from Ds node!");
1013 goto mgmt_txn_create_config_batches_failed;
1014 }
1015
1016 value = (char *)lyd_get_value(chg->cb.dnode);
1017 if (!value)
1018 value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
1019
1020 MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
1021 value ? value : "NIL");
1022
1023 if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info)
1024 != 0) {
1025 snprintf(err_buf, sizeof(err_buf),
1026 "No backend module found for XPATH: '%s",
1027 xpath);
1028 (void)mgmt_txn_send_commit_cfg_reply(
1029 txn_req->txn, MGMTD_INTERNAL_ERROR, err_buf);
1030 goto mgmt_txn_create_config_batches_failed;
1031 }
1032
1033 xpath_len = strlen(xpath) + 1;
1034 value_len = strlen(value) + 1;
1035 found_validator = false;
1036 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1037 if (!subscr_info.xpath_subscr[id].validate_config
1038 && !subscr_info.xpath_subscr[id].notify_config)
1039 continue;
1040
1041 adapter = mgmt_be_get_adapter_by_id(id);
1042 if (!adapter)
1043 continue;
1044
1045 cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
1046 if (!cfg_btch
1047 || (cfg_btch->num_cfg_data
1048 == MGMTD_MAX_CFG_CHANGES_IN_BATCH)
1049 || (cfg_btch->buf_space_left
1050 < (xpath_len + value_len))) {
1051 /* Allocate a new config batch */
1052 cfg_btch = mgmt_txn_cfg_batch_alloc(
1053 txn_req->txn, id, adapter);
1054 }
1055
1056 cfg_btch->buf_space_left -= (xpath_len + value_len);
1057 memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
1058 &subscr_info.xpath_subscr[id],
1059 sizeof(cfg_btch->xp_subscr[0]));
1060
1061 mgmt_yang_cfg_data_req_init(
1062 &cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
1063 cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
1064 &cfg_btch->cfg_data[cfg_btch->num_cfg_data];
1065
1066 if (chg->cb.operation == NB_OP_DESTROY)
1067 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1068 .req_type =
1069 MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
1070 else
1071 cfg_btch->cfg_data[cfg_btch->num_cfg_data]
1072 .req_type =
1073 MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
1074
1075 mgmt_yang_data_init(
1076 &cfg_btch->data[cfg_btch->num_cfg_data]);
1077 cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
1078 &cfg_btch->data[cfg_btch->num_cfg_data];
1079 cfg_btch->data[cfg_btch->num_cfg_data].xpath = xpath;
1080 xpath = NULL;
1081
1082 mgmt_yang_data_value_init(
1083 &cfg_btch->value[cfg_btch->num_cfg_data]);
1084 cfg_btch->data[cfg_btch->num_cfg_data].value =
1085 &cfg_btch->value[cfg_btch->num_cfg_data];
1086 cfg_btch->value[cfg_btch->num_cfg_data].value_case =
1087 MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1088 cfg_btch->value[cfg_btch->num_cfg_data]
1089 .encoded_str_val = value;
1090 value = NULL;
1091
1092 if (subscr_info.xpath_subscr[id].validate_config)
1093 found_validator = true;
1094
1095 cmtcfg_req->subscr_info.xpath_subscr[id].subscribed |=
1096 subscr_info.xpath_subscr[id].subscribed;
1097 MGMTD_TXN_DBG(
1098 " -- %s, {V:%d, N:%d}, Batch: %p, Item:%d",
1099 adapter->name,
1100 subscr_info.xpath_subscr[id].validate_config,
1101 subscr_info.xpath_subscr[id].notify_config,
1102 cfg_btch, (int)cfg_btch->num_cfg_data);
1103
1104 cfg_btch->num_cfg_data++;
1105 num_chgs++;
1106 }
1107
1108 if (!found_validator) {
1109 snprintf(err_buf, sizeof(err_buf),
1110 "No validator module found for XPATH: '%s",
1111 xpath);
1112 MGMTD_TXN_ERR("***** %s", err_buf);
1113 }
1114 }
1115
1116 cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
1117 if (!num_chgs) {
1118 (void)mgmt_txn_send_commit_cfg_reply(
1119 txn_req->txn, MGMTD_NO_CFG_CHANGES,
1120 "No changes found to commit!");
1121 goto mgmt_txn_create_config_batches_failed;
1122 }
1123
1124 cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
1125 return 0;
1126
1127 mgmt_txn_create_config_batches_failed:
1128
1129 if (xpath)
1130 free(xpath);
1131
1132 return -1;
1133 }
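/*
 * Batch packing sketch: inside the loop above a new per-client batch is
 * allocated whenever the current one is full or low on buffer space, roughly:
 *
 *   if (!cfg_btch
 *       || cfg_btch->num_cfg_data == MGMTD_MAX_CFG_CHANGES_IN_BATCH
 *       || cfg_btch->buf_space_left < (xpath_len + value_len))
 *           cfg_btch = mgmt_txn_cfg_batch_alloc(txn_req->txn, id, adapter);
 */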
1134
1135 static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
1136 {
1137 struct nb_context nb_ctx;
1138 struct nb_config *nb_config;
1139 struct nb_config_cbs changes;
1140 struct nb_config_cbs *cfg_chgs = NULL;
1141 int ret;
1142 bool del_cfg_chgs = false;
1143
1144 ret = 0;
1145 memset(&nb_ctx, 0, sizeof(nb_ctx));
1146 memset(&changes, 0, sizeof(changes));
1147 if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
1148 cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
1149 del_cfg_chgs = true;
1150 goto mgmt_txn_prep_config_validation_done;
1151 }
1152
1153 if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
1154 != MGMTD_DS_CANDIDATE) {
1155 (void)mgmt_txn_send_commit_cfg_reply(
1156 txn, MGMTD_INVALID_PARAM,
1157 "Source DS cannot be any other than CANDIDATE!");
1158 ret = -1;
1159 goto mgmt_txn_prepare_config_done;
1160 }
1161
1162 if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
1163 != MGMTD_DS_RUNNING) {
1164 (void)mgmt_txn_send_commit_cfg_reply(
1165 txn, MGMTD_INVALID_PARAM,
1166 "Destination DS cannot be any other than RUNNING!");
1167 ret = -1;
1168 goto mgmt_txn_prepare_config_done;
1169 }
1170
1171 if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
1172 (void)mgmt_txn_send_commit_cfg_reply(
1173 txn, MGMTD_INVALID_PARAM, "No such source datastore!");
1174 ret = -1;
1175 goto mgmt_txn_prepare_config_done;
1176 }
1177
1178 if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
1179 (void)mgmt_txn_send_commit_cfg_reply(
1180 txn, MGMTD_INVALID_PARAM,
1181 "No such destination datastore!");
1182 ret = -1;
1183 goto mgmt_txn_prepare_config_done;
1184 }
1185
1186 if (txn->commit_cfg_req->req.commit_cfg.abort) {
1187 /*
1188 * This is a commit abort request. Return success.
1189 * That should trigger a restore of Candidate datastore to
1190 * Running.
1191 */
1192 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
1193 NULL);
1194 goto mgmt_txn_prepare_config_done;
1195 }
1196
1197 nb_config = mgmt_ds_get_nb_config(
1198 txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
1199 if (!nb_config) {
1200 (void)mgmt_txn_send_commit_cfg_reply(
1201 txn, MGMTD_INTERNAL_ERROR,
1202 "Unable to retrieve Commit DS Config Tree!");
1203 ret = -1;
1204 goto mgmt_txn_prepare_config_done;
1205 }
1206
1207 /*
1208 * Check for diffs from scratch buffer. If found empty
1209 * get the diff from Candidate DS itself.
1210 */
1211 cfg_chgs = &nb_config->cfg_chgs;
1212 if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
1213 /*
1214 * This could be the case when the config is directly
1215 * loaded onto the candidate DS from a file. Get the
1216 * diff from a full comparison of the candidate and
1217 * running DSs.
1218 */
1219 nb_config_diff(
1220 mgmt_ds_get_nb_config(txn->commit_cfg_req->req
1221 .commit_cfg.dst_ds_ctx),
1222 nb_config, &changes);
1223 cfg_chgs = &changes;
1224 del_cfg_chgs = true;
1225 }
1226
1227 if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
1228 /*
1229 * This means there are no changes to commit at all,
1230 * whatever the source of the changes in config.
1231 */
1232 (void)mgmt_txn_send_commit_cfg_reply(
1233 txn, MGMTD_NO_CFG_CHANGES,
1234 "No changes found to be committed!");
1235 ret = -1;
1236 goto mgmt_txn_prepare_config_done;
1237 }
1238
1239 #ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
1240 if (mm->perf_stats_en)
1241 gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
1242 ->validate_start,
1243 NULL);
1244 /*
1245 * Validate YANG contents of the source DS and get the diff
1246 * between source and destination DS contents.
1247 */
1248 char err_buf[1024] = {0};
1249 nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
1250 nb_ctx.user = (void *)txn;
1251
1252 ret = nb_candidate_validate_yang(nb_config, false, err_buf,
1253 sizeof(err_buf) - 1);
1254 if (ret != NB_OK) {
1255 if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
1256 strlcpy(err_buf, "Validation failed", sizeof(err_buf));
1257 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
1258 err_buf);
1259 ret = -1;
1260 goto mgmt_txn_prepare_config_done;
1261 }
1262 /*
1263 * Perform application level validations locally on the MGMTD
1264 * process by calling application specific validation routines
1265 * loaded onto MGMTD process using libraries.
1266 */
1267 ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
1268 sizeof(err_buf) - 1);
1269 if (ret != NB_OK) {
1270 if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
1271 strlcpy(err_buf, "Validation failed", sizeof(err_buf));
1272 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
1273 err_buf);
1274 ret = -1;
1275 goto mgmt_txn_prepare_config_done;
1276 }
1277
1278 if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
1279 /*
1280 * This was a validate-only COMMIT request; return success.
1281 */
1282 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
1283 NULL);
1284 goto mgmt_txn_prepare_config_done;
1285 }
1286 #endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */
1287
1288 mgmt_txn_prep_config_validation_done:
1289
1290 if (mm->perf_stats_en)
1291 gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
1292 ->prep_cfg_start,
1293 NULL);
1294
1295 /*
1296 * Iterate over the diffs and create ordered batches of config
1297 * commands to be validated.
1298 */
1299 ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
1300 if (ret != 0) {
1301 ret = -1;
1302 goto mgmt_txn_prepare_config_done;
1303 }
1304
1305 /* Move to the Transaction Create Phase */
1306 txn->commit_cfg_req->req.commit_cfg.curr_phase =
1307 MGMTD_COMMIT_PHASE_TXN_CREATE;
1308 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
1309
1310 /*
1311 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
1312 * backend.
1313 */
1314 mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
1315 mgmt_txn_prepare_config_done:
1316
1317 if (cfg_chgs && del_cfg_chgs)
1318 nb_config_diff_del_changes(cfg_chgs);
1319
1320 return ret;
1321 }
1322
1323 static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
1324 {
1325 enum mgmt_be_client_id id;
1326 struct mgmt_be_client_adapter *adapter;
1327 struct mgmt_commit_cfg_req *cmtcfg_req;
1328 struct mgmt_txn_be_cfg_batch *cfg_btch;
1329
1330 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1331
1332 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1333 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1334 if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed) {
1335 adapter = mgmt_be_get_adapter_by_id(id);
1336 if (mgmt_be_create_txn(adapter, txn->txn_id)
1337 != 0) {
1338 (void)mgmt_txn_send_commit_cfg_reply(
1339 txn, MGMTD_INTERNAL_ERROR,
1340 "Could not send TXN_CREATE to backend adapter");
1341 return -1;
1342 }
1343
1344 FOREACH_TXN_CFG_BATCH_IN_LIST (
1345 &txn->commit_cfg_req->req.commit_cfg
1346 .curr_batches[id],
1347 cfg_btch)
1348 cfg_btch->comm_phase =
1349 MGMTD_COMMIT_PHASE_TXN_CREATE;
1350 }
1351 }
1352
1353 txn->commit_cfg_req->req.commit_cfg.next_phase =
1354 MGMTD_COMMIT_PHASE_SEND_CFG;
1355
1356 /*
1357 * Don't move the commit to the next phase yet. Wait for the TXN_REPLY to
1358 * come back.
1359 */
1360
1361 MGMTD_TXN_DBG(
1362 "Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')", txn,
1363 (unsigned long long)txn->session_id,
1364 mgmt_txn_commit_phase_str(txn, true),
1365 mgmt_txn_commit_phase_str(txn, false));
1366
1367 return 0;
1368 }
1369
1370 static int
1371 mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
1372 struct mgmt_be_client_adapter *adapter)
1373 {
1374 struct mgmt_commit_cfg_req *cmtcfg_req;
1375 struct mgmt_txn_be_cfg_batch *cfg_btch;
1376 struct mgmt_be_cfgreq cfg_req = {0};
1377 size_t num_batches, indx;
1378
1379 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1380
1381 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1382 assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed);
1383
1384 indx = 0;
1385 num_batches =
1386 mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
1387 FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
1388 cfg_btch) {
1389 assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
1390
1391 cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
1392 cfg_req.num_reqs = cfg_btch->num_cfg_data;
1393 indx++;
1394 if (mgmt_be_send_cfg_data_create_req(
1395 adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
1396 indx == num_batches ? true : false)
1397 != 0) {
1398 (void)mgmt_txn_send_commit_cfg_reply(
1399 txn, MGMTD_INTERNAL_ERROR,
1400 "Internal Error! Could not send config data to backend!");
1401 MGMTD_TXN_ERR(
1402 "Could not send CFGDATA_CREATE for Txn %p Batch %p to client '%s",
1403 txn, cfg_btch, adapter->name);
1404 return -1;
1405 }
1406
1407 cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
1408 mgmt_move_txn_cfg_batch_to_next(
1409 cmtcfg_req, cfg_btch,
1410 &cmtcfg_req->curr_batches[adapter->id],
1411 &cmtcfg_req->next_batches[adapter->id], true,
1412 MGMTD_COMMIT_PHASE_SEND_CFG);
1413 }
1414
1415 /*
1416 * This could be the last Backend Client to send CFGDATA_CREATE_REQ to.
1417 * Try moving the commit to the next phase.
1418 */
1419 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
1420
1421 return 0;
1422 }
1423
1424 static int
1425 mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
1426 struct mgmt_be_client_adapter *adapter)
1427 {
1428 struct mgmt_commit_cfg_req *cmtcfg_req;
1429 struct mgmt_txn_be_cfg_batch *cfg_btch;
1430
1431 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1432
1433 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1434 if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed) {
1435 adapter = mgmt_be_get_adapter_by_id(adapter->id);
1436 (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
1437
1438 FOREACH_TXN_CFG_BATCH_IN_LIST (
1439 &txn->commit_cfg_req->req.commit_cfg
1440 .curr_batches[adapter->id],
1441 cfg_btch)
1442 cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
1443 }
1444
1445 return 0;
1446 }
1447
1448 static void mgmt_txn_cfg_commit_timedout(struct event *thread)
1449 {
1450 struct mgmt_txn_ctx *txn;
1451
1452 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1453 assert(txn);
1454
1455 assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
1456
1457 if (!txn->commit_cfg_req)
1458 return;
1459
1460 MGMTD_TXN_ERR(
1461 "Backend operations for Config Txn %p has timedout! Aborting commit!!",
1462 txn);
1463
1464 /*
1465 * Send a COMMIT_CONFIG_REPLY with failure.
1466 * NOTE: The transaction cleanup will be triggered from Front-end
1467 * adapter.
1468 */
1469 mgmt_txn_send_commit_cfg_reply(
1470 txn, MGMTD_INTERNAL_ERROR,
1471 "Operation on the backend timed-out. Aborting commit!");
1472 }
1473
1474 /*
1475 * Send CFG_APPLY_REQs to all the backend clients.
1476 *
1477 * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
1478 * for all backend clients have been generated. Please see
1479 * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
1480 * for details.
1481 */
1482 static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
1483 {
1484 enum mgmt_be_client_id id;
1485 struct mgmt_be_client_adapter *adapter;
1486 struct mgmt_commit_cfg_req *cmtcfg_req;
1487 struct mgmt_txn_batches_head *btch_list;
1488 struct mgmt_txn_be_cfg_batch *cfg_btch;
1489
1490 assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
1491
1492 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1493 if (cmtcfg_req->validate_only) {
1494 /*
1495 * If this was a validate-only COMMIT request, return success.
1496 */
1497 (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
1498 NULL);
1499 return 0;
1500 }
1501
1502 FOREACH_MGMTD_BE_CLIENT_ID (id) {
1503 if (cmtcfg_req->subscr_info.xpath_subscr[id].notify_config) {
1504 adapter = mgmt_be_get_adapter_by_id(id);
1505 if (!adapter)
1506 return -1;
1507
1508 btch_list = &cmtcfg_req->curr_batches[id];
1509 if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
1510 != 0) {
1511 (void)mgmt_txn_send_commit_cfg_reply(
1512 txn, MGMTD_INTERNAL_ERROR,
1513 "Could not send CFG_APPLY_REQ to backend adapter");
1514 return -1;
1515 }
1516 cmtcfg_req->cmt_stats->last_num_apply_reqs++;
1517
1518 UNSET_FLAG(adapter->flags,
1519 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
1520
1521 FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
1522 cfg_btch->comm_phase =
1523 MGMTD_COMMIT_PHASE_APPLY_CFG;
1524 }
1525 }
1526
1527 txn->commit_cfg_req->req.commit_cfg.next_phase =
1528 MGMTD_COMMIT_PHASE_TXN_DELETE;
1529
1530 /*
1531 * Don't move the commit to the next phase yet. Wait for all the
1532 * CFG_APPLY replies to come back.
1533 */
1534
1535 return 0;
1536 }
1537
1538 static void mgmt_txn_process_commit_cfg(struct event *thread)
1539 {
1540 struct mgmt_txn_ctx *txn;
1541 struct mgmt_commit_cfg_req *cmtcfg_req;
1542
1543 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1544 assert(txn);
1545
1546 MGMTD_TXN_DBG(
1547 "Processing COMMIT_CONFIG for Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')",
1548 txn, (unsigned long long)txn->session_id,
1549 mgmt_txn_commit_phase_str(txn, true),
1550 mgmt_txn_commit_phase_str(txn, false));
1551
1552 assert(txn->commit_cfg_req);
1553 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
1554 switch (cmtcfg_req->curr_phase) {
1555 case MGMTD_COMMIT_PHASE_PREPARE_CFG:
1556 mgmt_txn_prepare_config(txn);
1557 break;
1558 case MGMTD_COMMIT_PHASE_TXN_CREATE:
1559 if (mm->perf_stats_en)
1560 gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
1561 NULL);
1562 /*
1563 * Send TXN_CREATE_REQ to all Backends now.
1564 */
1565 mgmt_txn_send_be_txn_create(txn);
1566 break;
1567 case MGMTD_COMMIT_PHASE_SEND_CFG:
1568 if (mm->perf_stats_en)
1569 gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
1570 NULL);
1571 /*
1572 * All CFGDATA_CREATE_REQs should have been sent to the
1573 * Backends by now.
1574 */
1575 #ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
1576 assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
1577 MGMTD_TXN_DBG(
1578 "Txn:%p Session:0x%llx, trigger sending CFG_VALIDATE_REQ to all backend clients",
1579 txn, (unsigned long long)txn->session_id);
1580 #else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
1581 assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
1582 MGMTD_TXN_DBG(
1583 "Txn:%p Session:0x%llx, trigger sending CFG_APPLY_REQ to all backend clients",
1584 txn, (unsigned long long)txn->session_id);
1585 #endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
1586 break;
1587 case MGMTD_COMMIT_PHASE_APPLY_CFG:
1588 if (mm->perf_stats_en)
1589 gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
1590 NULL);
1591 /*
1592 * We should have received successful CFG_VALIDATE_REPLY from
1593 * all concerned Backend Clients by now. Send out the
1594 * CFG_APPLY_REQs now.
1595 */
1596 mgmt_txn_send_be_cfg_apply(txn);
1597 break;
1598 case MGMTD_COMMIT_PHASE_TXN_DELETE:
1599 if (mm->perf_stats_en)
1600 gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
1601 NULL);
1602 /*
1603 * We would have sent TXN_DELETE_REQ to all backends by now.
1604 * Send a successful CONFIG_COMMIT_REPLY back to front-end.
1605 * NOTE: This should also trigger DS merge/unlock and Txn
1606 * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
1607 * more details.
1608 */
1609 EVENT_OFF(txn->comm_cfg_timeout);
1610 mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
1611 break;
1612 case MGMTD_COMMIT_PHASE_MAX:
1613 break;
1614 }
1615
1616 MGMTD_TXN_DBG(
1617 "Txn:%p Session:0x%llx, Phase updated to (Current:'%s', Next: '%s')",
1618 txn, (unsigned long long)txn->session_id,
1619 mgmt_txn_commit_phase_str(txn, true),
1620 mgmt_txn_commit_phase_str(txn, false));
1621 }
1622
1623 static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
1624 {
1625 size_t indx;
1626
1627 for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
1628 get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
1629 }
1630
1631 static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
1632 {
1633 int indx;
1634
1635 for (indx = 0; indx < get_reply->num_reply; indx++) {
1636 if (get_reply->reply_xpathp[indx]) {
1637 free(get_reply->reply_xpathp[indx]);
1638 get_reply->reply_xpathp[indx] = 0;
1639 }
1640 if (get_reply->reply_data[indx].xpath) {
1641 zlog_debug("%s free xpath %p", __func__,
1642 get_reply->reply_data[indx].xpath);
1643 free(get_reply->reply_data[indx].xpath);
1644 get_reply->reply_data[indx].xpath = 0;
1645 }
1646 }
1647
1648 get_reply->num_reply = 0;
1649 memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
1650 memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
1651 memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
1652
1653 memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
1654
1655 mgmt_init_get_data_reply(get_reply);
1656 }
1657
1658 static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
1659 {
1660 if (get_data->reply)
1661 mgmt_reset_get_data_reply(get_data->reply);
1662 }
1663
1664 static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
1665 struct mgmt_get_data_req *get_req)
1666 {
1667 struct mgmt_get_data_reply *get_reply;
1668 Mgmtd__YangDataReply *data_reply;
1669
1670 get_reply = get_req->reply;
1671 if (!get_reply)
1672 return;
1673
1674 data_reply = &get_reply->data_reply;
1675 mgmt_yang_data_reply_init(data_reply);
1676 data_reply->n_data = get_reply->num_reply;
1677 data_reply->data = get_reply->reply_datap;
1678 data_reply->next_indx =
1679 (!get_reply->last_batch ? get_req->total_reply : -1);
1680
1681 MGMTD_TXN_DBG("Sending %d Get-Config/Data replies (next-idx:%lld)",
1682 (int) data_reply->n_data,
1683 (long long)data_reply->next_indx);
1684
1685 switch (txn_req->req_event) {
1686 case MGMTD_TXN_PROC_GETCFG:
1687 if (mgmt_fe_send_get_cfg_reply(
1688 txn_req->txn->session_id, txn_req->txn->txn_id,
1689 get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
1690 data_reply, NULL)
1691 != 0) {
1692 MGMTD_TXN_ERR(
1693 "Failed to send GET-CONFIG-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
1694 txn_req->txn,
1695 (unsigned long long)txn_req->txn->session_id,
1696 (unsigned long long)txn_req->req_id);
1697 }
1698 break;
1699 case MGMTD_TXN_PROC_GETDATA:
1700 if (mgmt_fe_send_get_data_reply(
1701 txn_req->txn->session_id, txn_req->txn->txn_id,
1702 get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
1703 data_reply, NULL)
1704 != 0) {
1705 MGMTD_TXN_ERR(
1706 "Failed to send GET-DATA-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
1707 txn_req->txn,
1708 (unsigned long long)txn_req->txn->session_id,
1709 (unsigned long long)txn_req->req_id);
1710 }
1711 break;
1712 case MGMTD_TXN_PROC_SETCFG:
1713 case MGMTD_TXN_PROC_COMMITCFG:
1714 case MGMTD_TXN_COMMITCFG_TIMEOUT:
1715 case MGMTD_TXN_CLEANUP:
1716 MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
1717 txn_req->req_event);
1718 break;
1719 }
1720
1721 /*
1722 * Reset reply buffer for next reply.
1723 */
1724 mgmt_reset_get_data_reply_buf(get_req);
1725 }
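/*
 * Reply batching sketch: replies are flushed to the frontend in chunks of up
 * to MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH entries; 'next_indx' carries the
 * running 'total_reply' count while more data is pending and -1 on the last
 * batch, i.e.
 *
 *   data_reply->next_indx =
 *           (!get_reply->last_batch ? get_req->total_reply : -1);
 */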
1726
1727 static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
1728 char *xpath,
1729 struct lyd_node *node,
1730 struct nb_node *nb_node,
1731 void *ctx)
1732 {
1733 struct mgmt_txn_req *txn_req;
1734 struct mgmt_get_data_req *get_req;
1735 struct mgmt_get_data_reply *get_reply;
1736 Mgmtd__YangData *data;
1737 Mgmtd__YangDataValue *data_value;
1738
1739 txn_req = (struct mgmt_txn_req *)ctx;
1740 if (!txn_req)
1741 goto mgmtd_ignore_get_cfg_reply_data;
1742
1743 if (!(node->schema->nodetype & LYD_NODE_TERM))
1744 goto mgmtd_ignore_get_cfg_reply_data;
1745
1746 assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
1747 || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
1748
1749 get_req = txn_req->req.get_data;
1750 assert(get_req);
1751 get_reply = get_req->reply;
1752 data = &get_reply->reply_data[get_reply->num_reply];
1753 data_value = &get_reply->reply_value[get_reply->num_reply];
1754
1755 mgmt_yang_data_init(data);
1756 data->xpath = xpath;
1757 mgmt_yang_data_value_init(data_value);
1758 data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
1759 data_value->encoded_str_val = (char *)lyd_get_value(node);
1760 data->value = data_value;
1761
1762 get_reply->num_reply++;
1763 get_req->total_reply++;
1764 MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
1765 data->xpath, data_value->encoded_str_val);
1766
1767 if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
1768 mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
1769
1770 return;
1771
1772 mgmtd_ignore_get_cfg_reply_data:
1773 if (xpath)
1774 free(xpath);
1775 }
1776
1777 static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
1778 struct mgmt_txn_req *txn_req,
1779 struct mgmt_ds_ctx *ds_ctx)
1780 {
1781 int indx;
1782 struct mgmt_get_data_req *get_data;
1783 struct mgmt_get_data_reply *get_reply;
1784
1785 get_data = txn_req->req.get_data;
1786
1787 if (!get_data->reply) {
1788 get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
1789 sizeof(struct mgmt_get_data_reply));
1790 if (!get_data->reply) {
1791 mgmt_fe_send_get_cfg_reply(
1792 txn->session_id, txn->txn_id,
1793 get_data->ds_id, txn_req->req_id,
1794 MGMTD_INTERNAL_ERROR, NULL,
1795 "Internal error: Unable to allocate reply buffers!");
1796 goto mgmt_txn_get_config_failed;
1797 }
1798 }
1799
1800 /*
1801 * Read data contents from the DS and respond back directly.
1802 * No need to go to backend for getting data.
1803 */
1804 get_reply = get_data->reply;
1805 for (indx = 0; indx < get_data->num_xpaths; indx++) {
1806 MGMTD_TXN_DBG("Trying to get all data under '%s'",
1807 get_data->xpaths[indx]);
1808 mgmt_init_get_data_reply(get_reply);
1809 if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
1810 mgmt_txn_iter_and_send_get_cfg_reply,
1811 (void *)txn_req, true)
1812 == -1) {
1813 MGMTD_TXN_DBG("Invalid Xpath '%s",
1814 get_data->xpaths[indx]);
1815 mgmt_fe_send_get_cfg_reply(
1816 txn->session_id, txn->txn_id,
1817 get_data->ds_id, txn_req->req_id,
1818 MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
1819 goto mgmt_txn_get_config_failed;
1820 }
1821 MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
1822 get_reply->num_reply, get_data->xpaths[indx]);
1823 get_reply->last_batch = true;
1824 mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
1825 }
1826
1827 mgmt_txn_get_config_failed:
1828
1829 /*
1830 * Delete the txn request. This also removes it from the
1831 * request list.
1832 */
1833 mgmt_txn_req_free(&txn_req);
1834
1835 return 0;
1836 }
1837
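/*
 * Timer callback that drains the GET-CONFIG request list of a
 * transaction, handling at most MGMTD_TXN_MAX_NUM_GETCFG_PROC
 * requests per run and re-scheduling itself if any remain.
 */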
1838 static void mgmt_txn_process_get_cfg(struct event *thread)
1839 {
1840 struct mgmt_txn_ctx *txn;
1841 struct mgmt_txn_req *txn_req;
1842 struct mgmt_ds_ctx *ds_ctx;
1843 int num_processed = 0;
1844 bool error;
1845
1846 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1847 assert(txn);
1848
1849 MGMTD_TXN_DBG(
1850 "Processing %d GET_CONFIG requests for Txn:%p Session:0x%llx",
1851 (int)mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn,
1852 (unsigned long long)txn->session_id);
1853
1854 FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
1855 error = false;
1856 assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
1857 ds_ctx = txn_req->req.get_data->ds_ctx;
1858 if (!ds_ctx) {
1859 mgmt_fe_send_get_cfg_reply(
1860 txn->session_id, txn->txn_id,
1861 txn_req->req.get_data->ds_id, txn_req->req_id,
1862 MGMTD_INTERNAL_ERROR, NULL,
1863 "No such datastore!");
1864 error = true;
1865 goto mgmt_txn_process_get_cfg_done;
1866 }
1867
1868 if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
1869 MGMTD_TXN_ERR(
1870 "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
1871 txn_req->req.get_data->ds_id, txn,
1872 (unsigned long long)txn->session_id,
1873 (unsigned long long)txn_req->req_id);
1874 error = true;
1875 }
1876
1877 mgmt_txn_process_get_cfg_done:
1878
1879 if (error) {
1880 /*
1881 * Delete the txn request.
1882 * Note: The following will remove it from the list
1883 * as well.
1884 */
1885 mgmt_txn_req_free(&txn_req);
1886 }
1887
1888 /*
1889 * Otherwise the request has already been deleted or moved to
1890 * the corresponding pending list; no need to delete it here.
1891 */
1892 num_processed++;
1893 if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
1894 break;
1895 }
1896
1897 if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
1898 MGMTD_TXN_DBG(
1899 "Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
1900 num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
1901 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
1902 }
1903 }
1904
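/*
 * Timer callback that drains the GET-DATA request list of a
 * transaction. Requests against a config datastore are served from
 * the datastore itself; operational-datastore requests are rejected
 * until backend GET support is added (see the TODO below).
 */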
1905 static void mgmt_txn_process_get_data(struct event *thread)
1906 {
1907 struct mgmt_txn_ctx *txn;
1908 struct mgmt_txn_req *txn_req;
1909 struct mgmt_ds_ctx *ds_ctx;
1910 int num_processed = 0;
1911 bool error;
1912
1913 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
1914 assert(txn);
1915
1916 MGMTD_TXN_DBG(
1917 "Processing %d GET_DATA requests for Txn:%p Session:0x%llx",
1918 (int)mgmt_txn_reqs_count(&txn->get_data_reqs), txn,
1919 (unsigned long long)txn->session_id);
1920
1921 FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
1922 error = false;
1923 assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
1924 ds_ctx = txn_req->req.get_data->ds_ctx;
1925 if (!ds_ctx) {
1926 mgmt_fe_send_get_data_reply(
1927 txn->session_id, txn->txn_id,
1928 txn_req->req.get_data->ds_id, txn_req->req_id,
1929 MGMTD_INTERNAL_ERROR, NULL,
1930 "No such datastore!");
1931 error = true;
1932 goto mgmt_txn_process_get_data_done;
1933 }
1934
1935 if (mgmt_ds_is_config(ds_ctx)) {
1936 if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
1937 != 0) {
1938 MGMTD_TXN_ERR(
1939 "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
1940 txn_req->req.get_data->ds_id, txn,
1941 (unsigned long long)txn->session_id,
1942 (unsigned long long)txn_req->req_id);
1943 error = true;
1944 }
1945 } else {
1946 /*
1947 * TODO: Trigger GET procedures for the backend.
1948 * For now, return an error.
1949 */
1950 mgmt_fe_send_get_data_reply(
1951 txn->session_id, txn->txn_id,
1952 txn_req->req.get_data->ds_id, txn_req->req_id,
1953 MGMTD_INTERNAL_ERROR, NULL,
1954 "GET-DATA on Oper DS is not supported yet!");
1955 error = true;
1956 }
1957
1958 mgmt_txn_process_get_data_done:
1959
1960 if (error) {
1961 /*
1962 * Delete the txn request.
1963 * Note: The following will remove it from the list
1964 * as well.
1965 */
1966 mgmt_txn_req_free(&txn_req);
1967 }
1968
1969 /*
1970 * Otherwise the request has already been deleted or moved to
1971 * the corresponding pending list; no need to delete it here.
1972 */
1973 num_processed++;
1974 if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
1975 break;
1976 }
1977
1978 if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
1979 MGMTD_TXN_DBG(
1980 "Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
1981 num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
1982 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
1983 }
1984 }
1985
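/*
 * Find an existing transaction of the given type created by a
 * particular frontend session, if any.
 */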
1986 static struct mgmt_txn_ctx *
1987 mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
1988 enum mgmt_txn_type type)
1989 {
1990 struct mgmt_txn_ctx *txn;
1991
1992 FOREACH_TXN_IN_LIST (cm, txn) {
1993 if (txn->session_id == session_id && txn->type == type)
1994 return txn;
1995 }
1996
1997 return NULL;
1998 }
1999
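/*
 * Create (or reuse) a transaction for a frontend session. Only one
 * CONFIG transaction may exist at a time; if one is already in
 * progress it is returned only when it belongs to the requesting
 * session. A newly created transaction is added to the global list
 * and hash table and takes an initial lock reference.
 */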
2000 static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
2001 enum mgmt_txn_type type)
2002 {
2003 struct mgmt_txn_ctx *txn = NULL;
2004
2005 /*
2006 * For a CONFIG transaction, check whether one has already
2007 * been created.
2008 */
2009 if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
2010 if (mgmt_config_txn_in_progress() == session_id)
2011 txn = mgmt_txn_mm->cfg_txn;
2012 goto mgmt_create_txn_done;
2013 }
2014
2015 txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
2016 type);
2017 if (!txn) {
2018 txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
2019 assert(txn);
2020
2021 txn->session_id = session_id;
2022 txn->type = type;
2023 mgmt_txn_badapters_init(&txn->be_adapters);
2024 mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
2025 mgmt_txn_reqs_init(&txn->set_cfg_reqs);
2026 mgmt_txn_reqs_init(&txn->get_cfg_reqs);
2027 mgmt_txn_reqs_init(&txn->get_data_reqs);
2028 mgmt_txn_reqs_init(&txn->pending_get_datas);
2029 txn->commit_cfg_req = NULL;
2030 txn->refcount = 0;
2031 if (!mgmt_txn_mm->next_txn_id)
2032 mgmt_txn_mm->next_txn_id++;
2033 txn->txn_id = mgmt_txn_mm->next_txn_id++;
2034 hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
2035
2036 MGMTD_TXN_DBG("Added new '%s' MGMTD Transaction '%p'",
2037 mgmt_txn_type2str(type), txn);
2038
2039 if (type == MGMTD_TXN_TYPE_CONFIG)
2040 mgmt_txn_mm->cfg_txn = txn;
2041
2042 MGMTD_TXN_LOCK(txn);
2043 }
2044
2045 mgmt_create_txn_done:
2046 return txn;
2047 }
2048
2049 static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
2050 {
2051 MGMTD_TXN_UNLOCK(txn);
2052 }
2053
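/*
 * Hash-table helpers: transactions are hashed and compared by their
 * 64-bit txn_id so they can be looked up via mgmt_txn_id2ctx().
 */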
2054 static unsigned int mgmt_txn_hash_key(const void *data)
2055 {
2056 const struct mgmt_txn_ctx *txn = data;
2057
2058 return jhash2((uint32_t *) &txn->txn_id,
2059 sizeof(txn->txn_id) / sizeof(uint32_t), 0);
2060 }
2061
2062 static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
2063 {
2064 const struct mgmt_txn_ctx *txn1 = d1;
2065 const struct mgmt_txn_ctx *txn2 = d2;
2066
2067 return (txn1->txn_id == txn2->txn_id);
2068 }
2069
2070 static void mgmt_txn_hash_free(void *data)
2071 {
2072 struct mgmt_txn_ctx *txn = data;
2073
2074 mgmt_txn_delete(&txn);
2075 }
2076
2077 static void mgmt_txn_hash_init(void)
2078 {
2079 if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
2080 return;
2081
2082 mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
2083 mgmt_txn_hash_cmp,
2084 "MGMT Transactions");
2085 }
2086
2087 static void mgmt_txn_hash_destroy(void)
2088 {
2089 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2090 return;
2091
2092 hash_clean(mgmt_txn_mm->txn_hash,
2093 mgmt_txn_hash_free);
2094 hash_free(mgmt_txn_mm->txn_hash);
2095 mgmt_txn_mm->txn_hash = NULL;
2096 }
2097
2098 static inline struct mgmt_txn_ctx *
2099 mgmt_txn_id2ctx(uint64_t txn_id)
2100 {
2101 struct mgmt_txn_ctx key = {0};
2102 struct mgmt_txn_ctx *txn;
2103
2104 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2105 return NULL;
2106
2107 key.txn_id = txn_id;
2108 txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
2109
2110 return txn;
2111 }
2112
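/*
 * Reference counting: mgmt_txn_lock() bumps the refcount, while
 * mgmt_txn_unlock() drops it and, on the last reference, cancels any
 * pending events, removes the transaction from the hash table and the
 * list, and frees it.
 */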
2113 static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
2114 int line)
2115 {
2116 txn->refcount++;
2117 MGMTD_TXN_DBG("%s:%d --> Lock %s Txn %p, Count: %d", file, line,
2118 mgmt_txn_type2str(txn->type), txn, txn->refcount);
2119 }
2120
2121 static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
2122 int line)
2123 {
2124 assert(*txn && (*txn)->refcount);
2125
2126 (*txn)->refcount--;
2127 MGMTD_TXN_DBG("%s:%d --> Unlock %s Txn %p, Count: %d", file, line,
2128 mgmt_txn_type2str((*txn)->type), *txn,
2129 (*txn)->refcount);
2130 if (!(*txn)->refcount) {
2131 if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
2132 if (mgmt_txn_mm->cfg_txn == *txn)
2133 mgmt_txn_mm->cfg_txn = NULL;
2134 EVENT_OFF((*txn)->proc_get_cfg);
2135 EVENT_OFF((*txn)->proc_get_data);
2136 EVENT_OFF((*txn)->proc_comm_cfg);
2137 EVENT_OFF((*txn)->comm_cfg_timeout);
2138 hash_release(mgmt_txn_mm->txn_hash, *txn);
2139 mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
2140
2141 MGMTD_TXN_DBG("Deleted %s Txn %p for Sessn: 0x%llx",
2142 mgmt_txn_type2str((*txn)->type), *txn,
2143 (unsigned long long)(*txn)->session_id);
2144
2145 XFREE(MTYPE_MGMTD_TXN, *txn);
2146 }
2147
2148 *txn = NULL;
2149 }
2150
2151 static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
2152 {
2153 /* TODO: Any other cleanup applicable */
2154
2155 mgmt_txn_delete(txn);
2156 }
2157
2158 static void
2159 mgmt_txn_cleanup_all_txns(void)
2160 {
2161 struct mgmt_txn_ctx *txn;
2162
2163 if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
2164 return;
2165
2166 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
2167 mgmt_txn_cleanup_txn(&txn);
2168 }
2169
2170 static void mgmt_txn_cleanup(struct event *thread)
2171 {
2172 struct mgmt_txn_ctx *txn;
2173
2174 txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
2175 assert(txn);
2176
2177 mgmt_txn_cleanup_txn(&txn);
2178 }
2179
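/*
 * (Re-)schedule processing of a transaction event. Most events are
 * armed as short timers (MGMTD_TXN_PROC_DELAY_USEC) on the main event
 * loop; the commit timeout uses MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC
 * and cleanup uses MGMTD_TXN_CLEANUP_DELAY_USEC.
 */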
2180 static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
2181 enum mgmt_txn_event event)
2182 {
2183 struct timeval tv = {.tv_sec = 0,
2184 .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};
2185
2186 assert(mgmt_txn_mm && mgmt_txn_tm);
2187
2188 switch (event) {
2189 case MGMTD_TXN_PROC_SETCFG:
2190 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
2191 txn, &tv, &txn->proc_set_cfg);
2192 break;
2193 case MGMTD_TXN_PROC_COMMITCFG:
2194 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
2195 txn, &tv, &txn->proc_comm_cfg);
2196 break;
2197 case MGMTD_TXN_PROC_GETCFG:
2198 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
2199 txn, &tv, &txn->proc_get_cfg);
2200 break;
2201 case MGMTD_TXN_PROC_GETDATA:
2202 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
2203 txn, &tv, &txn->proc_get_data);
2204 break;
2205 case MGMTD_TXN_COMMITCFG_TIMEOUT:
2206 event_add_timer_msec(mgmt_txn_tm,
2207 mgmt_txn_cfg_commit_timedout, txn,
2208 MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
2209 &txn->comm_cfg_timeout);
2210 break;
2211 case MGMTD_TXN_CLEANUP:
2212 tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
2213 event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
2214 &txn->clnup);
2215 }
2216 }
2217
2218 int mgmt_txn_init(struct mgmt_master *mm, struct event_loop *tm)
2219 {
2220 if (mgmt_txn_mm || mgmt_txn_tm)
2221 assert(!"MGMTD TXN: Call txn_init() only once");
2222
2223 mgmt_txn_mm = mm;
2224 mgmt_txn_tm = tm;
2225 mgmt_txns_init(&mm->txn_list);
2226 mgmt_txn_hash_init();
2227 assert(!mm->cfg_txn);
2228 mm->cfg_txn = NULL;
2229
2230 return 0;
2231 }
2232
2233 void mgmt_txn_destroy(void)
2234 {
2235 mgmt_txn_cleanup_all_txns();
2236 mgmt_txn_hash_destroy();
2237 }
2238
2239 uint64_t mgmt_config_txn_in_progress(void)
2240 {
2241 if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
2242 return mgmt_txn_mm->cfg_txn->session_id;
2243
2244 return MGMTD_SESSION_ID_NONE;
2245 }
2246
2247 uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
2248 {
2249 struct mgmt_txn_ctx *txn;
2250
2251 txn = mgmt_txn_create_new(session_id, type);
2252 return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
2253 }
2254
2255 bool mgmt_txn_id_is_valid(uint64_t txn_id)
2256 {
2257 return mgmt_txn_id2ctx(txn_id) ? true : false;
2258 }
2259
2260 void mgmt_destroy_txn(uint64_t *txn_id)
2261 {
2262 struct mgmt_txn_ctx *txn;
2263
2264 txn = mgmt_txn_id2ctx(*txn_id);
2265 if (!txn)
2266 return;
2267
2268 mgmt_txn_delete(&txn);
2269 *txn_id = MGMTD_TXN_ID_NONE;
2270 }
2271
2272 enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
2273 {
2274 struct mgmt_txn_ctx *txn;
2275
2276 txn = mgmt_txn_id2ctx(txn_id);
2277 if (!txn)
2278 return MGMTD_TXN_TYPE_NONE;
2279
2280 return txn->type;
2281 }
2282
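/*
 * Queue a SET-CONFIG request on a transaction. The frontend config
 * data requests are translated into northbound config changes
 * (create/modify/destroy) and a SETCFG processing event is scheduled.
 * Only a single SETCFG-REQ is accepted when an implicit commit was
 * requested.
 */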
2283 int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
2284 Mgmtd__DatastoreId ds_id,
2285 struct mgmt_ds_ctx *ds_ctx,
2286 Mgmtd__YangCfgDataReq **cfg_req,
2287 size_t num_req, bool implicit_commit,
2288 Mgmtd__DatastoreId dst_ds_id,
2289 struct mgmt_ds_ctx *dst_ds_ctx)
2290 {
2291 struct mgmt_txn_ctx *txn;
2292 struct mgmt_txn_req *txn_req;
2293 size_t indx;
2294 uint16_t *num_chgs;
2295 struct nb_cfg_change *cfg_chg;
2296
2297 txn = mgmt_txn_id2ctx(txn_id);
2298 if (!txn)
2299 return -1;
2300
2301 if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
2302 MGMTD_TXN_ERR(
2303 "For implicit commit config only one SETCFG-REQ can be allowed!");
2304 return -1;
2305 }
2306
2307 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
2308 txn_req->req.set_cfg->ds_id = ds_id;
2309 txn_req->req.set_cfg->ds_ctx = ds_ctx;
2310 num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
2311 for (indx = 0; indx < num_req; indx++) {
2312 cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
2313
2314 if (cfg_req[indx]->req_type
2315 == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
2316 cfg_chg->operation = NB_OP_DESTROY;
2317 else if (cfg_req[indx]->req_type
2318 == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
2319 cfg_chg->operation =
2320 mgmt_ds_find_data_node_by_xpath(
2321 ds_ctx, cfg_req[indx]->data->xpath)
2322 ? NB_OP_MODIFY
2323 : NB_OP_CREATE;
2324 else
2325 continue;
2326
2327 MGMTD_TXN_DBG(
2328 "XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
2329 (cfg_req[indx]->data->value
2330 && cfg_req[indx]
2331 ->data->value
2332 ->encoded_str_val
2333 ? cfg_req[indx]->data->value->encoded_str_val
2334 : "NULL"));
2335 strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
2336 sizeof(cfg_chg->xpath));
2337 cfg_chg->value = (cfg_req[indx]->data->value
2338 && cfg_req[indx]
2339 ->data->value
2340 ->encoded_str_val
2341 ? strdup(cfg_req[indx]
2342 ->data->value
2343 ->encoded_str_val)
2344 : NULL);
2345 if (cfg_chg->value)
2346 MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
2347 cfg_chg->value, cfg_chg->value);
2348
2349 (*num_chgs)++;
2350 }
2351 txn_req->req.set_cfg->implicit_commit = implicit_commit;
2352 txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
2353 txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
2354 txn_req->req.set_cfg->setcfg_stats =
2355 mgmt_fe_get_session_setcfg_stats(txn->session_id);
2356 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
2357
2358 return 0;
2359 }
2360
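/*
 * Queue a COMMIT-CONFIG request on a transaction and kick off the
 * commit state machine. Only one commit may be outstanding on a
 * transaction at any time.
 */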
2361 int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
2362 Mgmtd__DatastoreId src_ds_id,
2363 struct mgmt_ds_ctx *src_ds_ctx,
2364 Mgmtd__DatastoreId dst_ds_id,
2365 struct mgmt_ds_ctx *dst_ds_ctx,
2366 bool validate_only, bool abort,
2367 bool implicit)
2368 {
2369 struct mgmt_txn_ctx *txn;
2370 struct mgmt_txn_req *txn_req;
2371
2372 txn = mgmt_txn_id2ctx(txn_id);
2373 if (!txn)
2374 return -1;
2375
2376 if (txn->commit_cfg_req) {
2377 MGMTD_TXN_ERR(
2378 "A commit is already in-progress for Txn %p, session 0x%llx. Cannot start another!",
2379 txn, (unsigned long long)txn->session_id);
2380 return -1;
2381 }
2382
2383 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
2384 txn_req->req.commit_cfg.src_ds_id = src_ds_id;
2385 txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
2386 txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
2387 txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
2388 txn_req->req.commit_cfg.validate_only = validate_only;
2389 txn_req->req.commit_cfg.abort = abort;
2390 txn_req->req.commit_cfg.implicit = implicit;
2391 txn_req->req.commit_cfg.cmt_stats =
2392 mgmt_fe_get_session_commit_stats(txn->session_id);
2393
2394 /*
2395 * Trigger a COMMIT-CONFIG process.
2396 */
2397 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
2398 return 0;
2399 }
2400
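/*
 * Handle a backend adapter connect/disconnect. On connect, any
 * configuration relevant to the client is pushed to it through a
 * freshly created CONFIG transaction (or the adapter is simply marked
 * config-synced if there is nothing to push). On disconnect, any
 * in-progress commit involving that client is failed.
 */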
2401 int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
2402 bool connect)
2403 {
2404 struct mgmt_txn_ctx *txn;
2405 struct mgmt_txn_req *txn_req;
2406 struct mgmt_commit_cfg_req *cmtcfg_req;
2407 static struct mgmt_commit_stats dummy_stats;
2408 struct nb_config_cbs *adapter_cfgs = NULL;
2409
2410 memset(&dummy_stats, 0, sizeof(dummy_stats));
2411 if (connect) {
2412 /* Get config for this single backend client */
2413 mgmt_be_get_adapter_config(adapter, mm->running_ds,
2414 &adapter_cfgs);
2415
2416 if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
2417 SET_FLAG(adapter->flags,
2418 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
2419 return 0;
2420 }
2421
2422 /*
2423 * Create a CONFIG transaction to push the config changes
2424 * provided to the backend client.
2425 */
2426 txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
2427 if (!txn) {
2428 MGMTD_TXN_ERR(
2429 "Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
2430 adapter->name);
2431 return -1;
2432 }
2433
2434 MGMTD_TXN_DBG("Created initial txn %" PRIu64
2435 " for BE connection %s",
2436 txn->txn_id, adapter->name);
2437 /*
2438 * Set the changeset for the transaction to commit and trigger the
2439 * commit request.
2440 */
2441 txn_req =
2442 mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
2443 txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
2444 txn_req->req.commit_cfg.src_ds_ctx = 0;
2445 txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
2446 txn_req->req.commit_cfg.dst_ds_ctx = 0;
2447 txn_req->req.commit_cfg.validate_only = false;
2448 txn_req->req.commit_cfg.abort = false;
2449 txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
2450 txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;
2451
2452 /*
2453 * Trigger a COMMIT-CONFIG process.
2454 */
2455 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
2456
2457 } else {
2458 /*
2459 * Check if any transaction currently in progress involves
2460 * this backend client. If so, report that the transaction
2461 * has failed.
2462 */
2463 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
2464 if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
2465 cmtcfg_req = txn->commit_cfg_req
2466 ? &txn->commit_cfg_req
2467 ->req.commit_cfg
2468 : NULL;
2469 if (cmtcfg_req
2470 && cmtcfg_req->subscr_info
2471 .xpath_subscr[adapter->id]
2472 .subscribed) {
2473 mgmt_txn_send_commit_cfg_reply(
2474 txn, MGMTD_INTERNAL_ERROR,
2475 "Backend daemon disconnected while processing commit!");
2476 }
2477 }
2478 }
2479 }
2480
2481 return 0;
2482 }
2483
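/*
 * Handle a TXN_CREATE/TXN_DELETE reply from a backend client. A
 * successful create moves straight to sending CFGDATA_CREATE requests
 * to that backend; a failed create aborts the commit with an error
 * reply to the frontend.
 */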
2484 int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
2485 bool success,
2486 struct mgmt_be_client_adapter *adapter)
2487 {
2488 struct mgmt_txn_ctx *txn;
2489 struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
2490
2491 txn = mgmt_txn_id2ctx(txn_id);
2492 if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
2493 return -1;
2494
2495 if (!create && !txn->commit_cfg_req)
2496 return 0;
2497
2498 assert(txn->commit_cfg_req);
2499 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
2500 if (create) {
2501 if (success) {
2502 /*
2503 * Done with TXN_CREATE. Move the backend client to
2504 * next phase.
2505 */
2506 assert(cmtcfg_req->curr_phase
2507 == MGMTD_COMMIT_PHASE_TXN_CREATE);
2508
2509 /*
2510 * Send CFGDATA_CREATE-REQs to the backend immediately.
2511 */
2512 mgmt_txn_send_be_cfg_data(txn, adapter);
2513 } else {
2514 mgmt_txn_send_commit_cfg_reply(
2515 txn, MGMTD_INTERNAL_ERROR,
2516 "Internal error! Failed to initiate transaction at backend!");
2517 }
2518 } else {
2519 /*
2520 * Done with TXN_DELETE. The move to the next phase is currently disabled here.
2521 */
2522 if (false)
2523 mgmt_move_be_commit_to_next_phase(txn, adapter);
2524 }
2525
2526 return 0;
2527 }
2528
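/*
 * Handle a CFGDATA_CREATE reply from a backend client. On success the
 * batch advances to the APPLY_CFG phase; on failure the whole commit
 * is failed with the reported error.
 */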
2529 int mgmt_txn_notify_be_cfgdata_reply(
2530 uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
2531 struct mgmt_be_client_adapter *adapter)
2532 {
2533 struct mgmt_txn_ctx *txn;
2534 struct mgmt_txn_be_cfg_batch *cfg_btch;
2535 struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
2536
2537 txn = mgmt_txn_id2ctx(txn_id);
2538 if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
2539 return -1;
2540
2541 if (!txn->commit_cfg_req)
2542 return -1;
2543 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
2544
2545 cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
2546 if (!cfg_btch || cfg_btch->txn != txn)
2547 return -1;
2548
2549 if (!success) {
2550 MGMTD_TXN_ERR(
2551 "CFGDATA_CREATE_REQ sent to '%s' failed for Txn %p, Batch %p, Err: %s",
2552 adapter->name, txn, cfg_btch,
2553 error_if_any ? error_if_any : "None");
2554 mgmt_txn_send_commit_cfg_reply(
2555 txn, MGMTD_INTERNAL_ERROR,
2556 error_if_any ? error_if_any :
2557 "Internal error! Failed to download config data to backend!");
2558 return 0;
2559 }
2560
2561 MGMTD_TXN_DBG(
2562 "CFGDATA_CREATE_REQ sent to '%s' was successful for Txn %p, Batch %p, Err: %s",
2563 adapter->name, txn, cfg_btch,
2564 error_if_any ? error_if_any : "None");
2565 mgmt_move_txn_cfg_batch_to_next(
2566 cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
2567 &cmtcfg_req->next_batches[adapter->id], true,
2568 MGMTD_COMMIT_PHASE_APPLY_CFG);
2569
2570 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
2571
2572 return 0;
2573 }
2574
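/*
 * Handle a CFG_APPLY reply from a backend client. Successfully
 * applied batches advance to the TXN_DELETE phase; once all batches
 * for the client are done a TXN_DELETE is sent to it. A failure
 * aborts the commit with the reported error.
 */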
2575 int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
2576 uint64_t batch_ids[],
2577 size_t num_batch_ids, char *error_if_any,
2578 struct mgmt_be_client_adapter *adapter)
2579 {
2580 struct mgmt_txn_ctx *txn;
2581 struct mgmt_txn_be_cfg_batch *cfg_btch;
2582 struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
2583 size_t indx;
2584
2585 txn = mgmt_txn_id2ctx(txn_id);
2586 if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
2587 || !txn->commit_cfg_req)
2588 return -1;
2589
2590 cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
2591
2592 if (!success) {
2593 MGMTD_TXN_ERR(
2594 "CFGDATA_APPLY_REQ sent to '%s' failed for Txn %p, Batches [0x%llx - 0x%llx], Err: %s",
2595 adapter->name, txn, (unsigned long long)batch_ids[0],
2596 (unsigned long long)batch_ids[num_batch_ids - 1],
2597 error_if_any ? error_if_any : "None");
2598 mgmt_txn_send_commit_cfg_reply(
2599 txn, MGMTD_INTERNAL_ERROR,
2600 error_if_any ? error_if_any :
2601 "Internal error! Failed to apply config data on backend!");
2602 return 0;
2603 }
2604
2605 for (indx = 0; indx < num_batch_ids; indx++) {
2606 cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
2607 if (cfg_btch->txn != txn)
2608 return -1;
2609 mgmt_move_txn_cfg_batch_to_next(
2610 cmtcfg_req, cfg_btch,
2611 &cmtcfg_req->curr_batches[adapter->id],
2612 &cmtcfg_req->next_batches[adapter->id], true,
2613 MGMTD_COMMIT_PHASE_TXN_DELETE);
2614 }
2615
2616 if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
2617 /*
2618 * All configuration for the specific backend has been applied.
2619 * Send TXN-DELETE to wrap up the transaction for this backend.
2620 */
2621 SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
2622 mgmt_txn_send_be_txn_delete(txn, adapter);
2623 }
2624
2625 mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
2626 if (mm->perf_stats_en)
2627 gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
2628
2629 return 0;
2630 }
2631
2632 int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
2633 enum mgmt_result result,
2634 const char *error_if_any)
2635 {
2636 struct mgmt_txn_ctx *txn;
2637
2638 txn = mgmt_txn_id2ctx(txn_id);
2639 if (!txn)
2640 return -1;
2641
2642 if (!txn->commit_cfg_req) {
2643 MGMTD_TXN_ERR(
2644 "NO commit in-progress for Txn %p, session 0x%llx!",
2645 txn, (unsigned long long)txn->session_id);
2646 return -1;
2647 }
2648
2649 return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
2650 }
2651
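/*
 * Queue a GET-CONFIG request on a transaction. Up to
 * MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH xpaths are copied from the
 * frontend request and a GETCFG processing event is scheduled.
 */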
2652 int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
2653 Mgmtd__DatastoreId ds_id,
2654 struct mgmt_ds_ctx *ds_ctx,
2655 Mgmtd__YangGetDataReq **data_req,
2656 size_t num_reqs)
2657 {
2658 struct mgmt_txn_ctx *txn;
2659 struct mgmt_txn_req *txn_req;
2660 size_t indx;
2661
2662 txn = mgmt_txn_id2ctx(txn_id);
2663 if (!txn)
2664 return -1;
2665
2666 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
2667 txn_req->req.get_data->ds_id = ds_id;
2668 txn_req->req.get_data->ds_ctx = ds_ctx;
2669 for (indx = 0;
2670 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2671 indx++) {
2672 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2673 txn_req->req.get_data->xpaths[indx] =
2674 strdup(data_req[indx]->data->xpath);
2675 txn_req->req.get_data->num_xpaths++;
2676 }
2677
2678 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
2679
2680 return 0;
2681 }
2682
2683 int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
2684 Mgmtd__DatastoreId ds_id,
2685 struct mgmt_ds_ctx *ds_ctx,
2686 Mgmtd__YangGetDataReq **data_req,
2687 size_t num_reqs)
2688 {
2689 struct mgmt_txn_ctx *txn;
2690 struct mgmt_txn_req *txn_req;
2691 size_t indx;
2692
2693 txn = mgmt_txn_id2ctx(txn_id);
2694 if (!txn)
2695 return -1;
2696
2697 txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
2698 txn_req->req.get_data->ds_id = ds_id;
2699 txn_req->req.get_data->ds_ctx = ds_ctx;
2700 for (indx = 0;
2701 indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
2702 indx++) {
2703 MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
2704 txn_req->req.get_data->xpaths[indx] =
2705 strdup(data_req[indx]->data->xpath);
2706 txn_req->req.get_data->num_xpaths++;
2707 }
2708
2709 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
2710
2711 return 0;
2712 }
2713
2714 void mgmt_txn_status_write(struct vty *vty)
2715 {
2716 struct mgmt_txn_ctx *txn;
2717
2718 vty_out(vty, "MGMTD Transactions\n");
2719
2720 FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
2721 vty_out(vty, " Txn: \t\t\t%p\n", txn);
2722 vty_out(vty, " Txn-Id: \t\t\t%llu\n",
2723 (unsigned long long)txn->txn_id);
2724 vty_out(vty, " Session-Id: \t\t%llu\n",
2725 (unsigned long long)txn->session_id);
2726 vty_out(vty, " Type: \t\t\t%s\n",
2727 mgmt_txn_type2str(txn->type));
2728 vty_out(vty, " Ref-Count: \t\t\t%d\n", txn->refcount);
2729 }
2730 vty_out(vty, " Total: %d\n",
2731 (int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
2732 }
2733
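/*
 * Trigger application of a rollback: compute the configuration diff
 * between the two datastores and commit it through a dedicated CONFIG
 * transaction flagged as a rollback. Returns -1 if there is nothing
 * to apply or the transaction cannot be created.
 */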
2734 int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
2735 struct mgmt_ds_ctx *dst_ds_ctx)
2736 {
2737 static struct nb_config_cbs changes;
2738 struct nb_config_cbs *cfg_chgs = NULL;
2739 struct mgmt_txn_ctx *txn;
2740 struct mgmt_txn_req *txn_req;
2741 static struct mgmt_commit_stats dummy_stats;
2742
2743 memset(&changes, 0, sizeof(changes));
2744 memset(&dummy_stats, 0, sizeof(dummy_stats));
2745 /*
2746 * This could be the case when the config is directly
2747 * loaded onto the candidate DS from a file. Get the
2748 * diff from a full comparison of the candidate and
2749 * running DSs.
2750 */
2751 nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
2752 mgmt_ds_get_nb_config(src_ds_ctx), &changes);
2753 cfg_chgs = &changes;
2754
2755 if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
2756 /*
2757 * This means there are no changes to commit, whatever
2758 * the source of the config changes may be.
2759 */
2760 return -1;
2761 }
2762
2763 /*
2764 * Create a CONFIG transaction to push the config changes
2765 * provided to the backend client.
2766 */
2767 txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
2768 if (!txn) {
2769 MGMTD_TXN_ERR(
2770 "Failed to create CONFIG Transaction for downloading CONFIGs");
2771 return -1;
2772 }
2773
2774 MGMTD_TXN_DBG("Created rollback txn %" PRIu64, txn->txn_id);
2775
2776 /*
2777 * Set the changeset for the transaction to commit and trigger the commit
2778 * request.
2779 */
2780 txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
2781 txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
2782 txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
2783 txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
2784 txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
2785 txn_req->req.commit_cfg.validate_only = false;
2786 txn_req->req.commit_cfg.abort = false;
2787 txn_req->req.commit_cfg.rollback = true;
2788 txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
2789 txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;
2790
2791 /*
2792 * Trigger a COMMIT-CONFIG process.
2793 */
2794 mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
2795 return 0;
2796 }