git.proxmox.com Git - mirror_frr.git/commitdiff
Merge pull request #13074 from donaldsharp/hash_clean_and_free
author Donatas Abraitis <donatas@opensourcerouting.org>
Thu, 23 Mar 2023 12:08:29 +0000 (14:08 +0200)
committer GitHub <noreply@github.com>
Thu, 23 Mar 2023 12:08:29 +0000 (14:08 +0200)
*: Add a hash_clean_and_free() function
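
The hash_clean_and_free() helper named in the title does not appear in the
hunks excerpted below. As a rough, hypothetical sketch (behaviour and
signature inferred from the name and from FRR's existing hash_clean() and
hash_free() helpers, not taken from this diff), it wraps the usual
clean-then-free-then-NULL pattern:

    /* Hypothetical sketch; not the actual lib/hash.c code from this PR. */
    static inline void hash_clean_and_free(struct hash **hash,
                                           void (*free_fn)(void *))
    {
            if (!*hash)
                    return;

            hash_clean(*hash, free_fn); /* free every element via free_fn */
            hash_free(*hash);           /* free the table itself */
            *hash = NULL;               /* spare callers the manual reset */
    }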

107 files changed:
.clang-format
Makefile.am
bgpd/bgp_route.c
bgpd/bgp_routemap.c
bgpd/bgp_routemap_nb_config.c
configure.ac
debian/frr.install
doc/developer/building-frr-for-centos6.rst
doc/developer/building-frr-for-centos7.rst
doc/developer/building-frr-for-centos8.rst
doc/developer/building-frr-for-debian8.rst
doc/developer/building-frr-for-debian9.rst
doc/developer/building-frr-for-fedora.rst
doc/developer/building-frr-for-freebsd10.rst
doc/developer/building-frr-for-freebsd11.rst
doc/developer/building-frr-for-freebsd9.rst
doc/developer/building-frr-for-netbsd6.rst
doc/developer/building-frr-for-netbsd7.rst
doc/developer/building-frr-for-openbsd6.rst
doc/developer/building-frr-for-opensuse.rst
doc/developer/building-frr-for-ubuntu1404.rst
doc/developer/building-frr-for-ubuntu1604.rst
doc/user/index.rst
doc/user/mgmtd.rst [new file with mode: 0644]
doc/user/routemap.rst
doc/user/subdir.am
docker/alpine/Dockerfile
docker/centos-7/Dockerfile
docker/centos-8/Dockerfile
docker/debian/Dockerfile
docker/ubuntu18-ci/Dockerfile
docker/ubuntu20-ci/Dockerfile
lib/command.h
lib/lib_vty.c
lib/mgmt.proto [new file with mode: 0644]
lib/mgmt_be_client.c [new file with mode: 0644]
lib/mgmt_be_client.h [new file with mode: 0644]
lib/mgmt_fe_client.c [new file with mode: 0644]
lib/mgmt_fe_client.h [new file with mode: 0644]
lib/mgmt_msg.c [new file with mode: 0644]
lib/mgmt_msg.h [new file with mode: 0644]
lib/mgmt_pb.h [new file with mode: 0644]
lib/northbound.c
lib/northbound.h
lib/northbound_cli.c
lib/northbound_confd.c
lib/northbound_grpc.cpp
lib/northbound_sysrepo.c
lib/subdir.am
lib/vty.c
lib/vty.h
lib/yang.c
mgmtd/.gitignore [new file with mode: 0644]
mgmtd/Makefile [new file with mode: 0644]
mgmtd/mgmt.c [new file with mode: 0644]
mgmtd/mgmt.h [new file with mode: 0644]
mgmtd/mgmt_be_adapter.c [new file with mode: 0644]
mgmtd/mgmt_be_adapter.h [new file with mode: 0644]
mgmtd/mgmt_be_server.c [new file with mode: 0644]
mgmtd/mgmt_be_server.h [new file with mode: 0644]
mgmtd/mgmt_defines.h [new file with mode: 0644]
mgmtd/mgmt_ds.c [new file with mode: 0644]
mgmtd/mgmt_ds.h [new file with mode: 0644]
mgmtd/mgmt_fe_adapter.c [new file with mode: 0644]
mgmtd/mgmt_fe_adapter.h [new file with mode: 0644]
mgmtd/mgmt_fe_server.c [new file with mode: 0644]
mgmtd/mgmt_fe_server.h [new file with mode: 0644]
mgmtd/mgmt_history.c [new file with mode: 0644]
mgmtd/mgmt_history.h [new file with mode: 0644]
mgmtd/mgmt_main.c [new file with mode: 0644]
mgmtd/mgmt_memory.c [new file with mode: 0644]
mgmtd/mgmt_memory.h [new file with mode: 0644]
mgmtd/mgmt_txn.c [new file with mode: 0644]
mgmtd/mgmt_txn.h [new file with mode: 0644]
mgmtd/mgmt_vty.c [new file with mode: 0644]
mgmtd/mgmt_vty.c.safe [new file with mode: 0644]
mgmtd/subdir.am [new file with mode: 0644]
pkgsrc/mgmtd.sh.in [new file with mode: 0644]
python/xref2vtysh.py
qpb/subdir.am
redhat/frr.spec.in
staticd/static_main.c
staticd/static_routes.c
staticd/static_routes.h
staticd/static_vty.c
tests/topotests/all_protocol_startup/test_all_protocol_startup.py
tests/topotests/bfd_topo2/test_bfd_topo2.py
tests/topotests/config_timing/test_config_timing.py
tests/topotests/lib/common_config.py
tests/topotests/lib/topogen.py
tests/topotests/lib/topotest.py
tests/topotests/mgmt_tests/test_yang_mgmt.py [new file with mode: 0644]
tests/topotests/mgmt_tests/yang_mgmt.json [new file with mode: 0644]
tests/topotests/msdp_mesh_topo1/test_msdp_mesh_topo1.py
tests/topotests/msdp_topo1/test_msdp_topo1.py
tests/topotests/ospf6_topo2/test_ospf6_topo2.py
tests/topotests/static_simple/r1/mgmtd.conf [new file with mode: 0644]
tests/topotests/static_simple/r1/staticd.conf [new file with mode: 0644]
tests/topotests/static_simple/r1/zebra.conf [new file with mode: 0644]
tests/topotests/static_simple/test_static_simple.py [new file with mode: 0644]
tools/etc/frr/daemons
tools/frr.in
tools/frrcommon.sh.in
vtysh/vtysh.c
vtysh/vtysh.h
yang/frr-bgp-route-map.yang
zebra/zebra_vxlan.c

index b01157b05183992cf06a51aa05450947758c3087..1b18323348976628c99ae7494f4fa3b8e3b756f0 100644 (file)
@@ -52,6 +52,11 @@ ForEachMacros:
   - FOR_ALL_INTERFACES
   - FOR_ALL_INTERFACES_ADDRESSES
   - JSON_FOREACH
+  - FOREACH_BE_TXN_BATCH_IN_LIST
+  - FOREACH_BE_APPLY_BATCH_IN_LIST
+  - FOREACH_BE_TXN_IN_LIST
+  - FOREACH_SESSION_IN_LIST
+  - FOREACH_MGMTD_BE_CLIENT_ID
   # libyang
   - LY_FOR_KEYS
   - LY_LIST_FOR
@@ -76,3 +81,12 @@ ForEachMacros:
   - FOREACH_SAFI
   # ospfd
   - LSDB_LOOP
+  # mgmtd
+  - FOREACH_CMT_REC
+  - FOREACH_TXN_CFG_BATCH_IN_LIST
+  - FOREACH_TXN_REQ_IN_LIST
+  - FOREACH_TXN_IN_LIST
+  - FOREACH_MGMTD_DB_ID
+  - FOREACH_ADAPTER_IN_LIST
+  - FOREACH_SESSION_IN_LIST
+  - FOREACH_SESSION_IN_LIST_SAFE
index 44d2ab8e72b80d5533430df61f62ebb726519528..e70e65d333ef750e3547c8016a4610c03faddaf2 100644 (file)
@@ -155,6 +155,24 @@ $(AUTOMAKE_DUMMY)install-moduleLTLIBRARIES: install-libLTLIBRARIES
 $(AUTOMAKE_DUMMY)install-binPROGRAMS: install-libLTLIBRARIES
 $(AUTOMAKE_DUMMY)install-sbinPROGRAMS: install-libLTLIBRARIES
 
+# Include default rules to compile protobuf message sources
+SUFFIXES += .proto .pb-c.c .pb-c.h
+
+# Rules
+
+AM_V_PROTOC_C = $(am__v_PROTOC_C_$(V))
+am__v_PROTOC_C_ = $(am__v_PROTOC_C_$(AM_DEFAULT_VERBOSITY))
+am__v_PROTOC_C_0 = @echo "  PROTOC_C" $@;
+am__v_PROTOC_C_1 =
+
+.proto.pb-c.c:
+       $(AM_V_PROTOC_C)$(PROTOC_C) -I$(top_srcdir) --c_out=$(top_builddir) $^
+       $(AM_V_GEN)$(SED) -i -e '1i\
+       #include "config.h"' $@
+
+.pb-c.c.pb-c.h:
+       @echo "  GEN     " $@
+
 include doc/subdir.am
 include doc/user/subdir.am
 include doc/manpages/subdir.am
@@ -169,6 +187,8 @@ include fpm/subdir.am
 include grpc/subdir.am
 include tools/subdir.am
 
+include mgmtd/subdir.am
+
 include bgpd/subdir.am
 include bgpd/rfp-example/librfp/subdir.am
 include bgpd/rfp-example/rfptest/subdir.am
@@ -207,6 +227,7 @@ rc_SCRIPTS = \
        pkgsrc/ripd.sh \
        pkgsrc/ripngd.sh \
        pkgsrc/zebra.sh \
+       pkgsrc/mgmtd.sh \
        # end
 endif
 
@@ -244,6 +265,7 @@ EXTRA_DIST += \
        snapcraft/helpers \
        snapcraft/snap \
        babeld/Makefile \
+       mgmtd/Makefile \
        bgpd/Makefile \
        bgpd/rfp-example/librfp/Makefile \
        bgpd/rfp-example/rfptest/Makefile \
@@ -321,7 +343,7 @@ redistclean:
        $(MAKE) distclean CONFIG_CLEAN_FILES="$(filter-out $(EXTRA_DIST), $(CONFIG_CLEAN_FILES))"
 
 indent:
-       tools/indent.py `find sharpd bgpd eigrpd include isisd lib nhrpd ospf6d ospfd pimd qpb ripd vtysh zebra -name '*.[ch]' | grep -v include/linux`
+       tools/indent.py `find sharpd bgpd mgmtd eigrpd include isisd lib nhrpd ospf6d ospfd pimd qpb ripd vtysh zebra -name '*.[ch]' | grep -v include/linux`
 
 if HAVE_GCOV
 
index 069aa5b9234760232b8a84b80f8c50fed4b7d73f..14ce4e6bc9ba74e6f7a9e9d853c945a2c4c733dc 100644 (file)
@@ -316,10 +316,15 @@ static int bgp_dest_set_defer_flag(struct bgp_dest *dest, bool delete)
                return 0;
 
        if (CHECK_FLAG(dest->flags, BGP_NODE_PROCESS_SCHEDULED)) {
-               if (BGP_DEBUG(update, UPDATE_OUT))
+               if (BGP_DEBUG(update, UPDATE_OUT)) {
+                       table = bgp_dest_table(dest);
+                       if (table)
+                               bgp = table->bgp;
+
                        zlog_debug(
-                               "Route %pBD is in workqueue and being processed, not deferred.",
-                               dest);
+                               "Route %pBD(%s) is in workqueue and being processed, not deferred.",
+                               dest, bgp ? bgp->name_pretty : "(Unknown)");
+               }
 
                return 0;
        }
@@ -367,8 +372,8 @@ static int bgp_dest_set_defer_flag(struct bgp_dest *dest, bool delete)
                                bgp->gr_info[afi][safi].gr_deferred++;
                        SET_FLAG(dest->flags, BGP_NODE_SELECT_DEFER);
                        if (BGP_DEBUG(update, UPDATE_OUT))
-                               zlog_debug("DEFER route %pBD, dest %p", dest,
-                                          dest);
+                               zlog_debug("DEFER route %pBD(%s), dest %p",
+                                          dest, bgp->name_pretty, dest);
                        return 0;
                }
        }
@@ -2755,8 +2760,10 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
                                bgp_path_info_reap(dest, pi);
 
                        if (debug)
-                               zlog_debug("%s: pi %p in holddown", __func__,
-                                          pi);
+                               zlog_debug(
+                                       "%s: %pBD(%s) pi from %s in holddown",
+                                       __func__, dest, bgp->name_pretty,
+                                       pi->peer->host);
 
                        continue;
                }
@@ -2767,8 +2774,10 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
 
                                if (debug)
                                        zlog_debug(
-                                               "%s: pi %p non self peer %s not estab state",
-                                               __func__, pi, pi->peer->host);
+                                               "%s: %pBD(%s) non self peer %s not estab state",
+                                               __func__, dest,
+                                               bgp->name_pretty,
+                                               pi->peer->host);
 
                                continue;
                        }
@@ -2777,7 +2786,9 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
                    && (!CHECK_FLAG(pi->flags, BGP_PATH_DMED_SELECTED))) {
                        bgp_path_info_unset_flag(dest, pi, BGP_PATH_DMED_CHECK);
                        if (debug)
-                               zlog_debug("%s: pi %p dmed", __func__, pi);
+                               zlog_debug("%s: %pBD(%s) pi %s dmed", __func__,
+                                          dest, bgp->name_pretty,
+                                          pi->peer->host);
                        continue;
                }
 
@@ -2840,8 +2851,9 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
                        if (!bgp_path_info_nexthop_cmp(pi, new_select)) {
                                if (debug)
                                        zlog_debug(
-                                               "%pBD: %s has the same nexthop as the bestpath, skip it",
-                                               dest, path_buf);
+                                               "%pBD(%s): %s has the same nexthop as the bestpath, skip it",
+                                               dest, bgp->name_pretty,
+                                               path_buf);
                                continue;
                        }
 
@@ -2852,8 +2864,9 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest,
                        if (paths_eq) {
                                if (debug)
                                        zlog_debug(
-                                               "%pBD: %s is equivalent to the bestpath, add to the multipath list",
-                                               dest, path_buf);
+                                               "%pBD(%s): %s is equivalent to the bestpath, add to the multipath list",
+                                               dest, bgp->name_pretty,
+                                               path_buf);
                                bgp_mp_list_add(&mp_list, pi);
                        }
                }
@@ -3103,8 +3116,8 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
                        debug = bgp_debug_bestpath(dest);
                if (debug)
                        zlog_debug(
-                               "%s: bgp delete in progress, ignoring event, p=%pBD",
-                               __func__, dest);
+                               "%s: bgp delete in progress, ignoring event, p=%pBD(%s)",
+                               __func__, dest, bgp->name_pretty);
                return;
        }
        /* Is it end of initial update? (after startup) */
@@ -3127,7 +3140,7 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
 
        debug = bgp_debug_bestpath(dest);
        if (debug)
-               zlog_debug("%s: p=%pBDi(%s) afi=%s, safi=%s start", __func__,
+               zlog_debug("%s: p=%pBD(%s) afi=%s, safi=%s start", __func__,
                           dest, bgp->name_pretty, afi2str(afi),
                           safi2str(safi));
 
@@ -3136,7 +3149,8 @@ static void bgp_process_main_one(struct bgp *bgp, struct bgp_dest *dest,
         */
        if (CHECK_FLAG(dest->flags, BGP_NODE_SELECT_DEFER)) {
                if (BGP_DEBUG(update, UPDATE_OUT))
-                       zlog_debug("SELECT_DEFER flag set for route %p", dest);
+                       zlog_debug("SELECT_DEFER flag set for route %p(%s)",
+                                  dest, bgp->name_pretty);
                return;
        }
 
@@ -7456,6 +7470,10 @@ static void bgp_aggregate_install(
                        aggregate, atomic_aggregate, p);
 
                if (!attr) {
+                       aspath_free(aspath);
+                       community_free(&community);
+                       ecommunity_free(&ecommunity);
+                       lcommunity_free(&lcommunity);
                        bgp_dest_unlock_node(dest);
                        bgp_aggregate_delete(bgp, p, afi, safi, aggregate);
                        if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
index b7bef75123b29760eca3fd36988dc542271d8db9..fa9d23874c8ecf1a9160234b23d7284434011225 100644 (file)
@@ -5512,7 +5512,7 @@ DEFUN_YANG (set_ip_nexthop_unchanged,
 
 DEFUN_YANG (set_distance,
            set_distance_cmd,
-           "set distance (0-255)",
+           "set distance (1-255)",
            SET_STR
            "BGP Administrative Distance to use\n"
            "Distance value\n")
@@ -5531,7 +5531,7 @@ DEFUN_YANG (set_distance,
 
 DEFUN_YANG (no_set_distance,
            no_set_distance_cmd,
-           "no set distance [(0-255)]",
+           "no set distance [(1-255)]",
            NO_STR SET_STR
            "BGP Administrative Distance to use\n"
            "Distance value\n")
index 142bded6c536895664502969966d306f7f11a5a6..4db8dba2ccf346ec665cf400c824d0d435fa5104 100644 (file)
@@ -2628,8 +2628,18 @@ int
 lib_route_map_entry_set_action_rmap_set_action_aggregator_aggregator_asn_modify(
        struct nb_cb_modify_args *args)
 {
+       const char *asn;
+       enum match_type match;
+
        switch (args->event) {
        case NB_EV_VALIDATE:
+               asn = yang_dnode_get_string(args->dnode, NULL);
+               if (!asn)
+                       return NB_ERR_VALIDATION;
+               match = asn_str2asn_match(asn);
+               if (match == exact_match)
+                       return NB_OK;
+               return NB_ERR_VALIDATION;
        case NB_EV_PREPARE:
        case NB_EV_ABORT:
        case NB_EV_APPLY:
index 495c89c0d6104ae21ca8b9f32379468d1a283eae..4b07536337395f479c32497cd27affb0d1157515 100644 (file)
@@ -616,6 +616,10 @@ AC_ARG_ENABLE([zebra],
   AS_HELP_STRING([--disable-zebra], [do not build zebra daemon]))
 AC_ARG_ENABLE([bgpd],
   AS_HELP_STRING([--disable-bgpd], [do not build bgpd]))
+AC_ARG_ENABLE([mgmtd],
+  AS_HELP_STRING([--disable-mgmtd], [do not build mgmtd]))
+AC_ARG_ENABLE([mgmtd_local_validations],
+  AS_HELP_STRING([--enable-mgmtd-local-validations], [dev: unimplemented local validation]))
 AC_ARG_ENABLE([ripd],
   AS_HELP_STRING([--disable-ripd], [do not build ripd]))
 AC_ARG_ENABLE([ripngd],
@@ -890,10 +894,6 @@ if test "$enable_oldvpn_commands" = "yes"; then
    AC_DEFINE([KEEP_OLD_VPN_COMMANDS], [1], [Define for compiling with old vpn commands])
 fi
 
-#
-# End of logic for protobuf support.
-#
-
 AC_MSG_CHECKING([if zebra should be configurable to send Route Advertisements])
 if test "$enable_rtadv" != "no"; then
   AC_MSG_RESULT([yes])
@@ -1338,21 +1338,21 @@ dnl ##########################################################################
 # Logic for protobuf support.
 #
 PROTO3=false
-if test "$enable_protobuf" = "yes"; then
-  # Check for protoc & protoc-c
-
-  # protoc is not required, it's only for a "be nice" helper target
-  AC_CHECK_PROGS([PROTOC], [protoc], [/bin/false])
+# Enable Protobuf by default at all times.
+# Check for protoc & protoc-c
+# protoc is not required, it's only for a "be nice" helper target
+AC_CHECK_PROGS([PROTOC], [protoc], [/bin/false])
 
-  AC_CHECK_PROGS([PROTOC_C], [protoc-c], [/bin/false])
-  if test "$PROTOC_C" = "/bin/false"; then
-    AC_MSG_FAILURE([protobuf requested but protoc-c not found.  Install protobuf-c.])
-  fi
+AC_CHECK_PROGS([PROTOC_C], [protoc-c], [/bin/false])
+if test "$PROTOC_C" = "/bin/false"; then
+  AC_MSG_FAILURE([protobuf requested but protoc-c not found.  Install protobuf-c.])
+fi
 
-  PKG_CHECK_MODULES([PROTOBUF_C], [libprotobuf-c >= 0.14],, [
-    AC_MSG_FAILURE([protobuf requested but libprotobuf-c not found.  Install protobuf-c.])
-  ])
+PKG_CHECK_MODULES([PROTOBUF_C], [libprotobuf-c >= 1.1.0],, [
+  AC_MSG_FAILURE([minimum version (1.1.0) of libprotobuf-c not found.  Install minimum required version of protobuf-c.])
+])
 
+if test "$enable_protobuf3" = "yes"; then
   PROTO3=true
   AC_CHECK_HEADER([google/protobuf-c/protobuf-c.h],
                   [AC_CHECK_DECLS(PROTOBUF_C_LABEL_NONE,
@@ -1360,11 +1360,14 @@ if test "$enable_protobuf" = "yes"; then
                                             [1], [Have Protobuf version 3]),
                                   [PROTO3=false],
                                   [#include <google/protobuf-c/protobuf-c.h>])],
-                  [PROTO3=false && AC_MSG_FAILURE([protobuf requested but protobuf-c.h not found.  Install protobuf-c.])])
-
-  AC_DEFINE([HAVE_PROTOBUF], [1], [protobuf])
+                  [PROTO3=false && AC_MSG_FAILURE([protobuf3 requested but protobuf-c.h not found.  Install protobuf-c.])])
 fi
 
+AC_DEFINE([HAVE_PROTOBUF], [1], [protobuf])
+#
+# End of logic for protobuf support.
+#
+
 
 dnl ---------------------
 dnl Integrated VTY option
@@ -1728,6 +1731,16 @@ AS_IF([test "$enable_bgpd" != "no"], [
   AC_DEFINE([HAVE_BGPD], [1], [bgpd])
 ])
 
+AS_IF([test "$enable_mgmtd" != "no"], [
+
+  AC_DEFINE([HAVE_MGMTD], [1], [mgmtd])
+
+  # Enable MGMTD local validations
+  AS_IF([test "$enable_mgmtd_local_validations" == "yes"], [
+    AC_DEFINE([MGMTD_LOCAL_VALIDATIONS_ENABLED], [1], [Enable mgmtd local validations.])
+  ])
+])
+
 AS_IF([test "$enable_ripd" != "no"], [
   AC_DEFINE([HAVE_RIPD], [1], [ripd])
 ])
@@ -2658,6 +2671,8 @@ AC_DEFINE_UNQUOTED([LDPD_SOCKET], ["$frr_statedir%s%s/ldpd.sock"], [ldpd control
 AC_DEFINE_UNQUOTED([ZEBRA_SERV_PATH], ["$frr_statedir%s%s/zserv.api"], [zebra api socket])
 AC_DEFINE_UNQUOTED([BFDD_CONTROL_SOCKET], ["$frr_statedir%s%s/bfdd.sock"], [bfdd control socket])
 AC_DEFINE_UNQUOTED([OSPFD_GR_STATE], ["$frr_statedir%s/ospfd-gr.json"], [ospfd GR state information])
+AC_DEFINE_UNQUOTED([MGMTD_FE_SERVER_PATH], ["$frr_statedir/mgmtd_fe.sock"], [mgmtd frontend server socket])
+AC_DEFINE_UNQUOTED([MGMTD_BE_SERVER_PATH], ["$frr_statedir/mgmtd_be.sock"], [mgmtd backend server socket])
 AC_DEFINE_UNQUOTED([OSPF6D_GR_STATE], ["$frr_statedir/ospf6d-gr.json"], [ospf6d GR state information])
 AC_DEFINE_UNQUOTED([ISISD_RESTART], ["$frr_statedir%s/isid-restart.json"], [isisd restart information])
 AC_DEFINE_UNQUOTED([OSPF6_AUTH_SEQ_NUM_FILE], ["$frr_statedir/ospf6d-at-seq-no.dat"], [ospf6d AT Sequence number information])
@@ -2716,7 +2731,7 @@ AM_CONDITIONAL([RPKI], [test "$RPKI" = "true"])
 AM_CONDITIONAL([SNMP], [test "$SNMP_METHOD" = "agentx"])
 AM_CONDITIONAL([IRDP], [$IRDP])
 AM_CONDITIONAL([FPM], [test "$enable_fpm" = "yes"])
-AM_CONDITIONAL([HAVE_PROTOBUF], [test "$enable_protobuf" = "yes"])
+AM_CONDITIONAL([HAVE_PROTOBUF], [test "$enable_protobuf" != "no"])
 AM_CONDITIONAL([HAVE_PROTOBUF3], [$PROTO3])
 
 dnl PCEP plugin
@@ -2733,6 +2748,7 @@ dnl daemons
 AM_CONDITIONAL([VTYSH], [test "$VTYSH" = "vtysh"])
 AM_CONDITIONAL([ZEBRA], [test "$enable_zebra" != "no"])
 AM_CONDITIONAL([BGPD], [test "$enable_bgpd" != "no"])
+AM_CONDITIONAL([MGMTD], [test "$enable_mgmtd" != "no"])
 AM_CONDITIONAL([RIPD], [test "$enable_ripd" != "no"])
 AM_CONDITIONAL([OSPFD], [test "$enable_ospfd" != "no"])
 AM_CONDITIONAL([LDPD], [test "$enable_ldpd" != "no"])
@@ -2770,7 +2786,7 @@ AC_CONFIG_FILES([
          alpine/APKBUILD
          snapcraft/snapcraft.yaml
          lib/version.h
-         tests/lib/cli/test_cli.refout
+         tests/lib/cli/test_cli.refout pkgsrc/mgmtd.sh
          pkgsrc/bgpd.sh pkgsrc/ospf6d.sh pkgsrc/ospfd.sh
          pkgsrc/ripd.sh pkgsrc/ripngd.sh pkgsrc/zebra.sh
          pkgsrc/eigrpd.sh])
index 044b48498458079cb46b7422844f7ea4229b30e6..02912d448dee5b49f3296c4cdffb730809ec6e5d 100644 (file)
@@ -8,6 +8,7 @@ usr/bin/vtysh
 usr/lib/*/frr/libfrr.*
 usr/lib/*/frr/libfrrcares.*
 usr/lib/*/frr/libfrrospfapiclient.*
+usr/lib/*/frr/libmgmt_be_nb.*
 usr/lib/*/frr/modules/bgpd_bmp.so
 usr/lib/*/frr/modules/dplane_fpm_nl.so
 usr/lib/*/frr/modules/zebra_cumulus_mlag.so
index 7a7af42119cdef460465ee5d4f63e6f9be7e83c5..233d089f79571cc781232b860df196874122c95d 100644 (file)
@@ -46,7 +46,7 @@ Add packages:
    sudo yum install git autoconf automake libtool make \
       readline-devel texinfo net-snmp-devel groff pkgconfig \
       json-c-devel pam-devel flex epel-release c-ares-devel libcap-devel \
-      elfutils-libelf-devel
+      elfutils-libelf-devel protobuf-c-devel
 
 Install newer version of bison (CentOS 6 package source is too old) from CentOS
 7:
index c40b5de594dfbaeef83afef5570ece0aef715032..e6da83019405c793d09c0273436f50428fdacbbd 100644 (file)
@@ -22,7 +22,7 @@ Add packages:
       readline-devel texinfo net-snmp-devel groff pkgconfig \
       json-c-devel pam-devel bison flex pytest c-ares-devel \
       python-devel python-sphinx libcap-devel \
-      elfutils-libelf-devel libunwind-devel
+      elfutils-libelf-devel libunwind-devel protobuf-c-devel
 
 .. include:: building-libunwind-note.rst
 
index 659752f6df98d379d9bccfcd0a36f21ddab1c18d..6d18e7be93a35b2ef9d7357245ec74afc790de42 100644 (file)
@@ -15,7 +15,8 @@ Add packages:
       automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
       groff pkgconfig json-c-devel pam-devel bison flex python2-pytest \
       c-ares-devel python2-devel libcap-devel \
-      elfutils-libelf-devel libunwind-devel
+      elfutils-libelf-devel libunwind-devel \
+      protobuf-c-devel
 
 .. include:: building-libunwind-note.rst
 
index 5e58854ed7ab1b6e4bffa26b080f5245461e434f..7071cb660de8d156df7b6256025a33c3eec22f28 100644 (file)
@@ -18,7 +18,7 @@ Add packages:
    sudo apt-get install git autoconf automake libtool make \
       libreadline-dev texinfo libjson-c-dev pkg-config bison flex python3-pip \
       libc-ares-dev python3-dev python3-sphinx build-essential \
-      libsnmp-dev libcap-dev libelf-dev
+      libsnmp-dev libcap-dev libelf-dev libprotobuf-c-dev protobuf-c-compiler
 
 Install newer pytest (>3.0) from pip
 
index b2fdef9990415d8312b8661fa14b65ec42129faa..1b2f1b933a4255fe63b07350cbef873474a97365 100644 (file)
@@ -11,7 +11,8 @@ Add packages:
    sudo apt-get install git autoconf automake libtool make \
      libreadline-dev texinfo libjson-c-dev pkg-config bison flex \
      libc-ares-dev python3-dev python3-pytest python3-sphinx build-essential \
-     libsnmp-dev libcap-dev libelf-dev libunwind-dev
+     libsnmp-dev libcap-dev libelf-dev libunwind-dev \
+     libprotobuf-c-dev protobuf-c-compiler
 
 .. include:: building-libunwind-note.rst
 
index aa10f1118dabc18860a82c299fe4b745c9abd3e2..35a24b2f4343b238cd74adf582a14ae7ca59d7e7 100644 (file)
@@ -15,7 +15,7 @@ Installing Dependencies
      readline-devel texinfo net-snmp-devel groff pkgconfig json-c-devel \
      pam-devel python3-pytest bison flex c-ares-devel python3-devel \
      python3-sphinx perl-core patch libcap-devel \
-     elfutils-libelf-devel libunwind-devel
+     elfutils-libelf-devel libunwind-devel protobuf-c-devel
 
 .. include:: building-libunwind-note.rst
 
index 5e70b81d43d7fc79f9f9386aacd7d5a3793e0a25..707f1e703364888b2cac4cad76f88ae98f3bcd6d 100644 (file)
@@ -17,7 +17,8 @@ is first package install and asked)
 ::
 
     pkg install git autoconf automake libtool gmake json-c pkgconf \
-        bison flex py36-pytest c-ares python3.6 py36-sphinx libunwind
+        bison flex py36-pytest c-ares python3.6 py36-sphinx libunwind \
+        protobuf-c
 
 .. include:: building-libunwind-note.rst
 
index 808207b8314869ea41fc889fcf0976113bdc34af..af0b72b16def36f7471e21ba4970510548635cf2 100644 (file)
@@ -17,7 +17,8 @@ is first package install and asked)
 .. code-block:: shell
 
    pkg install git autoconf automake libtool gmake json-c pkgconf \
-      bison flex py36-pytest c-ares python3.6 py36-sphinx texinfo libunwind
+      bison flex py36-pytest c-ares python3.6 py36-sphinx texinfo libunwind \
+      protobuf-c
 
 .. include:: building-libunwind-note.rst
 
index 1e97749795257dea96d94f133ca6b907107b76e4..30332875a01672ca244e9d17880e13f5a59fd55a 100644 (file)
@@ -18,7 +18,7 @@ is first package install and asked)
 
     pkg install -y git autoconf automake libtool gmake \
         pkgconf texinfo json-c bison flex py36-pytest c-ares \
-        python3 py36-sphinx libexecinfo
+        python3 py36-sphinx libexecinfo protobuf-c
 
 Make sure there is no /usr/bin/flex preinstalled (and use the newly
 installed in /usr/local/bin): (FreeBSD frequently provides a older flex
index a78f8b3c2f206f450a4c9753484912b56e036fd3..8958862fea7e36deebdc52b9101f01993ef50ec7 100644 (file)
@@ -23,7 +23,8 @@ Add packages:
 ::
 
     sudo pkg_add git autoconf automake libtool gmake openssl \
-       pkg-config json-c py36-test python36 py36-sphinx
+       pkg-config json-c py36-test python36 py36-sphinx \
+       protobuf-c
 
 Install SSL Root Certificates (for git https access):
 
index a52ece19a18b78d6b2f74ab41fc9a62bc91dce0e..e751ba338c11f27633513208803437eb005eb03b 100644 (file)
@@ -14,7 +14,8 @@ Install required packages
 ::
 
     sudo pkgin install git autoconf automake libtool gmake openssl \
-       pkg-config json-c python36 py36-test py36-sphinx
+       pkg-config json-c python36 py36-test py36-sphinx \
+       protobuf-c
 
 Install SSL Root Certificates (for git https access):
 
index 88446685e06784479b3adc4f1472b47d789fe261..00bc2e5f091d42ad4f49928e2421aa7dfa99b88d 100644 (file)
@@ -16,7 +16,7 @@ Add packages:
 
     pkg_add clang libcares python3
     pkg_add git autoconf-2.69p2 automake-1.15.1 libtool bison
-    pkg_add gmake json-c py-test py-sphinx libexecinfo
+    pkg_add gmake json-c py-test py-sphinx libexecinfo protobuf-c
 
 Select Python2.7 as default (required for pytest)
 
index 38346fe881158d41104a515b4dc32efc4223f18a..3ff445bcd0621e5430b950e443c8c5712b1bc2f3 100644 (file)
@@ -14,7 +14,7 @@ Installing Dependencies
      readline-devel texinfo net-snmp-devel groff pkgconfig libjson-c-devel\
      pam-devel python3-pytest bison flex c-ares-devel python3-devel\
      python3-Sphinx perl patch libcap-devel libyang-devel \
-     libelf-devel libunwind-devel
+     libelf-devel libunwind-devel protobuf-c
 
 .. include:: building-libunwind-note.rst
 
index 2711e92b6ff1ae15c778d61ba23d8c73d2237486..cc6c3c03f3838848ba5138ad0f2b4e27a4577ffd 100644 (file)
@@ -18,6 +18,13 @@ Installing Dependencies
 
 .. include:: building-libyang.rst
 
+Protobuf
+^^^^^^^^
+
+.. code-block:: console
+
+   sudo apt-get install protobuf-c-compiler libprotobuf-c-dev
+
 Building & Installing FRR
 -------------------------
 
index d79545c859b00262238b73e6940c88101fc138eb..e5c2389f399c67487d430957a82561f6357a389b 100644 (file)
@@ -15,10 +15,17 @@ Installing Dependencies
       pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
       libc-ares-dev python3-dev python-ipaddress python3-sphinx \
       install-info build-essential libsnmp-dev perl libcap-dev \
-      libelf-dev
+      libelf-dev libprotobuf-c-dev protobuf-c-compiler
 
 .. include:: building-libyang.rst
 
+Protobuf
+^^^^^^^^
+
+.. code-block:: console
+
+   sudo apt-get install protobuf-c-compiler libprotobuf-c-dev
+
 Building & Installing FRR
 -------------------------
 
index 8b9b7e5af7a4c6d5193d957fa5df77b4f0e717a5..c02d10deda7b07b5d86ed87a14525aa5d1d6c779 100644 (file)
@@ -66,6 +66,7 @@ Protocols
    vrrp
    bmp
    watchfrr
+   mgmtd
 
 ########
 Appendix
diff --git a/doc/user/mgmtd.rst b/doc/user/mgmtd.rst
new file mode 100644 (file)
index 0000000..6ea7e78
--- /dev/null
@@ -0,0 +1,375 @@
+.. _mgmtd:
+
+*************************
+MGMTd (Management Daemon)
+*************************
+
+The FRR Management Daemon (referred to as MGMTd from here on) is a new
+centralized entity representing the FRR Management Plane. It can take
+management requests from any kind of UI/Frontend entity (e.g. CLI, Netconf,
+Restconf, gRPC etc.) over a new unified and common Frontend interface, and
+can maintain configuration data for, or retrieve operational data from, any
+number of FRR managed entities/components that have been integrated
+with the new FRR Centralised Management Framework.
+
+To organize the management data owned by the FRR Management plane, all
+management data is modeled against a pre-defined set of YANG schemas, and
+data is stored and retrieved in YANG format only.
+
+The MGMTd also acts as a separate computational entity, offloading from the
+individual component daemons much of the management-related computational
+overhead involved in maintaining management data and processing management
+requests (which can otherwise be a significant burden on the individual
+components, affecting the performance of their other functionality).
+
+Lastly, the MGMTd works in tandem with one (or more) MGMT Frontend
+Clients and a number of MGMT Backend Clients to realize the entirety
+of the FRR Management plane. Some of the advantages of this new framework
+are:
+
+ 1. Consolidation and management of all Management data by a single entity.
+ 2. Better control over configuration validation, commit and rollback.
+ 3. Faster collection of configuration data (without needing to involve
+    individual component daemons).
+ 4. Offloading of the computational burden of parsing and validating newly
+    provisioned configuration data away from individual component daemons.
+ 5. Improved performance of individual component daemons when loading large
+    configurations or retrieving large operational datasets.
+
+The new FRR Management Daemon consists of the following sub-components:
+ - MGMT Frontend Interface
+ - MGMT Backend Interface
+ - MGMT Transaction Engine
+
+.. _mgmt_fe:
+
+MGMT Frontend Interface
+=======================
+
+The MGMT Frontend Interface is a set of message-based APIs that lets
+any UI/Frontend client interact with the MGMT daemon to request a
+set of management operations on a specific datastore/database.
+Following is a list of databases/datastores supported by the MGMT
+Frontend Interface and MGMTd (a summary of how data flows between them
+follows the list):
+
+ - Candidate Database:
+
+  - Consists of configuration data items only.
+  - Data can be edited anytime using SET_CONFIG API.
+  - Data can be retrieved anytime using GET_CONFIG/GET_DATA API.
+
+ - Running Database:
+
+  - Consists of configuration data items only.
+  - Data cannot be edited using SET_CONFIG API.
+  - Data can only be modified using the COMMIT_CONFIG API, after which
+    uncommitted data from the Candidate database is first validated and
+    applied to the individual Backend component(s). Only on successful
+    validation and apply on all individual components will the new data be
+    copied over to the Running database.
+  - Data can be retrieved anytime using GET_CONFIG/GET_DATA API.
+
+ - Startup Database:
+
+  - Consists of configuration data items only.
+  - This is a copy of Running database that is stored in persistent
+    storage and is used to load configurations on Running database during
+    MGMT daemon startup.
+  - Data cannot be edited/retrieved directly via Frontend interface.
+
+ - Operational Database:
+
+  - Consists of non-configurational data items.
+  - Data is not stored on the MGMT daemon. Rather, it needs to be fetched
+    in real-time from the corresponding Backend component (if present).
+  - Data can be retrieved anytime using GET_DATA API.
+
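+A simplified view of how configuration data moves between these
+datastores/databases, summarizing the descriptions above::
+
+   SET_CONFIG/DELETE_CONFIG --> Candidate --COMMIT_CONFIG--> Running
+   Running --(persisted)--> Startup --(loaded at MGMTd startup)--> Running
+   GET_DATA (operational)  --> fetched in real-time from Backend components
+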
+Frontend Clients connected to MGMTd via the Frontend Interface can themselves
+have multiple connections from one (or more) of their own remote clients. The
+MGMT Frontend Interface supports representing each of the remote clients of a
+given Frontend client (e.g. Netconf clients on a single Netconf server) as an
+individual Frontend Client Session. So a single connection from a single
+Frontend Client can create more than one Frontend Client session.
+
+Following are some of the management operations supported:
+ - INIT_SESSION/CLOSE_SESSION: Create/Destroy a session. All other
+   operations are supported only in the context of a specific session.
+ - LOCK_DB/UNLOCK_DB: Lock/Unlock Management datastores/databases.
+ - GET_CONFIG/GET_DATA: Retrieve configurational/operational data from a
+   specific datastore/database.
+ - SET_CONFIG/DELETE_CONFIG: Add/Modify/Delete specific data in a specific
+   datastore/database.
+ - COMMIT_CONFIG: Validate and/or apply the uncommitted set of configurations
+   from one configuration database to another. Currently configurations can
+   only be committed from the Candidate to the Running database, not vice
+   versa.
+
+The exact set of message-based APIs is represented as Google Protobuf
+messages and can be found in the following file distributed with the FRR
+codebase:
+
+.. code-block:: frr
+
+   lib/mgmt.proto
+
+The MGMT daemon implements a MGMT Frontend Server that opens a UNIX
+socket-based IPC channel on the following path to listen for incoming
+connections from all possible Frontend clients:
+
+.. code-block:: frr
+
+   /var/run/frr/mgmtd_fe.sock
+
+Each connection received from a Frontend client is managed and tracked
+as a MGMT Frontend adapter by the MGMT Frontend Adapter sub-component
+implemented by MGMTd.
+
+To facilitate faster development/integration of Frontend clients with the
+MGMT Frontend Interface, a C-based library has been developed. The API
+specification of this library can be found at:
+
+.. code-block:: frr
+
+   lib/mgmt_fe_client.h
+
+Following is a list of message types supported on the MGMT Frontend Interface
+(an example exchange is sketched after the list):
+ - SESSION_REQ<Client-Connection-Id, Destroy>
+ - SESSION_REPLY<Client-Connection-Id, Destroy, Session-Id>
+ - LOCK_DB_REQ <Session-Id, Database-Id>
+ - LOCK_DB_REPLY <Session-Id, Database-Id>
+ - UNLOCK_DB_REQ <Session-Id, Database-Id>
+ - UNLOCK_DB_REPLY <Session-Id, Database-Id>
+ - GET_CONFIG_REQ <Session-Id, Database-Id, Base-Yang-Xpath>
+ - GET_CONFIG_REPLY <Session-Id, Database-Id, Base-Yang-Xpath, Yang-Data-Set>
+ - SET_CONFIG_REQ <Session-Id, Database-Id, Base-Yang-Xpath, Delete, ...>
+ - SET_CONFIG_REPLY <Session-Id, Database-id, Base-Yang-Xpath, ..., Status>
+ - COMMIT_CONFIG_REQ <Session-Id, Source-Db-Id, Dest-Db-Id>
+ - COMMIT_CONFIG_REPLY <Session-Id, Source-Db-id, Dest-Db-Id, Status>
+ - GET_DATA_REQ <Session-Id, Database-Id, Base-Yang-Xpath>
+ - GET_DATA_REPLY <Session-Id, Database-id, Base-Yang-Xpath, Yang-Data-Set>
+ - REGISTER_NOTIFY_REQ <Session-Id, Database-Id, Base-Yang-Xpath>
+ - DATA_NOTIFY_REQ <Database-Id, Base-Yang-Xpath, Yang-Data-Set>
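+
+As an illustration (message names only, fields elided; the exact exchange
+depends on the Frontend client), a configuration change made from a single
+Frontend Client session typically involves a sequence along these lines::
+
+   Frontend client                    MGMTd
+   SESSION_REQ            --->        (create a new session)
+                          <---        SESSION_REPLY
+   LOCK_DB_REQ            --->        (lock the Candidate database)
+                          <---        LOCK_DB_REPLY
+   SET_CONFIG_REQ         --->        (edit the Candidate database)
+                          <---        SET_CONFIG_REPLY
+   COMMIT_CONFIG_REQ      --->        (apply Candidate to Running)
+                          <---        COMMIT_CONFIG_REPLY
+   UNLOCK_DB_REQ          --->        (release the lock)
+                          <---        UNLOCK_DB_REPLY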
+
+Please refer to the MGMT Frontend Client Developers Reference and Guide
+(coming soon) for more details.
+
+MGMTD Backend Interface
+=======================
+
+The MGMT Backend Interface is a set of message-based APIs that can be used by
+individual component daemons like BGPd, Staticd and Zebra to connect with
+MGMTd and utilize the new FRR Management Framework, letting any Frontend
+client retrieve operational data from, or manipulate configuration data owned
+by, the individual daemon component.
+
+Like the MGMT Frontend Interface, the MGMT Backend Interface is also
+composed of the following:
+
+ - MGMT Backend Server (running on MGMT daemon)
+ - MGMT Backend Adapter (running on MGMT daemon)
+ - MGMT Backend client (running on Backend component daemons)
+
+The MGMT Backend Client and MGMT Backend Adapter sub-components communicate
+using a specific set of message-based APIs.
+
+The exact set of message-based APIs is represented as Google Protobuf
+messages and can be found in the following file distributed with the FRR
+codebase:
+
+.. code-block:: frr
+
+   lib/mgmt.proto
+
+The MGMT daemon implements a MGMT Backend Server that opens a UNIX
+socket-based IPC channel on the following path to listen for incoming
+connections from all possible Backend clients:
+
+.. code-block:: frr
+
+   /var/run/frr/mgmtd_be.sock
+
+Each connection received from a Backend client is managed and tracked
+as a MGMT Backend adapter by the MGMT Backend Adapter sub-component
+implemented by MGMTd.
+
+To facilitate faster development/integration of Backend clients with
+MGMTd, a C-based library has been developed. The API specification
+of this library can be found at:
+
+.. code-block:: frr
+
+   lib/mgmt_be_client.h
+
+Following is a list of message types supported on the MGMT Backend Interface
+(an example exchange is sketched after the list):
+
+ - SUBSCRIBE_REQ <Req-Id, Base-Yang-Xpath, Filter-Type>
+ - SUBSCRIBE_REPLY <Req-Id, Status>
+ - TXN_REQ <Txn-Id, Create>
+ - TXN_REPLY <Txn-Id, Status>
+ - CREATE_CFGDATA_REQ <Txn-Id, Req-Id, Batch-Id, ConfigDataContents>
+ - CREATE_CFGDATA_ERROR <Txn-Id, Req-Id, Batch-Id, Status>
+ - VALIDATE_CFGDATA_REQ <Txn-Id, Batch-Id>
+ - VALIDATE_CFGDATA_REPLY <Txn-Id, Batch-Id, Status, ErrorInfo>
+ - APPLY_CFGDATA_REQ <Txn-Id, Batch-Id>
+ - APPLY_CFGDATA_REPLY <Txn-Id, Batch-Id, Status, ErrorInfo>
+ - GET_OPERDATA_REQ <Txn-Id, Base-Yang-Xpath, Filter-Type>
+ - GET_OPERDATA_REPLY <Txn-Id, OperDataContents>
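+
+As an illustration (message names only, fields elided), pushing a committed
+configuration change down to a single Backend client typically involves an
+exchange along these lines; the client subscribes once up front, after which
+the transaction-related requests flow from MGMTd to the client::
+
+   MGMTd                              Backend client
+                          <---        SUBSCRIBE_REQ (once, at registration)
+   SUBSCRIBE_REPLY        --->
+   TXN_REQ (create)       --->        (start a configuration transaction)
+                          <---        TXN_REPLY
+   CREATE_CFGDATA_REQ     --->        (send batches of configuration data)
+                          <---        CREATE_CFGDATA_ERROR (only on failure)
+   VALIDATE_CFGDATA_REQ   --->
+                          <---        VALIDATE_CFGDATA_REPLY
+   APPLY_CFGDATA_REQ      --->
+                          <---        APPLY_CFGDATA_REPLY
+   TXN_REQ (delete)       --->        (close the transaction)
+                          <---        TXN_REPLY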
+
+Please refer to the MGMT Backend Client Developers Reference and Guide
+(coming soon) for more details.
+
+MGMTD Transaction Engine
+========================
+
+The MGMT Transaction sub-component is the main brain of the MGMT daemon: it
+takes management requests from one (or more) Frontend Clients, translates
+them into transactions and drives them to completion in coordination with
+the one (or more) Backend client daemons involved in the request.
+
+A transaction can be seen as a set of management procedures executed over
+the Backend Interface with one (or more) individual Backend component
+daemons, as a result of some management request initiated from a specific
+Frontend client session. This group of operations on the Backend Interface,
+involving one (or more) individual components, should be executed without
+taking any further management requests from other Frontend client sessions.
+To maintain this kind of atomic behavior, a lock needs to be acquired
+(sometimes implicitly if not explicitly) by the corresponding Frontend client
+session on the various datastores/databases involved in the management request
+being executed. The same datastores/databases need to be unlocked when all
+the procedures have been executed and the transaction is closed.
+
+Following are some of the transaction types supported by MGMT:
+
+ - Configuration Transactions
+
+  - Used to execute management operations like SET_CONFIG and COMMIT_CONFIG
+    that involve writing/over-writing the contents of Candidate and Running
+    databases.
+  - Only one can be created and be in progress at any given time.
+  - While a configuration transaction initiated by a specific Frontend Client
+    session is still in progress, all subsequent SET_CONFIG and COMMIT_CONFIG
+    operations from other Frontend Client sessions will be rejected and
+    responded to with failure.
+  - Requires acquiring write-lock on Candidate (and later Running) databases.
+
+ - Show Transactions
+
+  - Used to execute management operations like GET_CONFIG and GET_DATA
+    that involve only reading the contents of Candidate and Running
+    databases (and sometimes real-time retrieval of operational data
+    from individual component daemons).
+  - Multiple instances of this transaction type can be created and be in
+    progress at any given time.
+  - A show transaction can be initiated by any Frontend Client session even
+    while a configuration transaction is in progress.
+  - Requires acquiring a read-lock on the Candidate and/or Running databases.
+  - NOTE: Currently GET_DATA on the Operational database is NOT supported;
+    support will be added in the future.
+
+MGMTD Configuration Rollback and Commit History
+===============================================
+
+The MGMT daemon maintains up to the last 10 configuration commit buffers
+and can roll back the contents of the Running Database to any of the
+commit-ids maintained in those buffers.
+
+Once the number of commit buffers exceeds 10, the oldest commit buffer is
+deleted to make space for the latest commit. Also, on rollback to a specific
+commit-id, the buffers of all later commits are deleted from the commit
+record.
+
+Configuration rollback is currently only possible via the VTYSH shell and
+not through the MGMT Frontend interface.
+
+MGMT Configuration commands
+===========================
+
+.. clicmd:: mgmt set-config XPATH VALUE
+
+    This command issues a SET_CONFIG request over the MGMT Frontend Interface
+    for the specified xpath with the specified value. It is intended for
+    testing purposes only, but can be used to set configuration data from the
+    CLI using SET_CONFIG operations.
+
+.. clicmd:: mgmt delete-config XPATH
+
+    This command issues a SET_CONFIG request (with the delete option) over the
+    MGMT Frontend Interface to delete the YANG data node at the given xpath,
+    unless it is a key-leaf node (in which case it is not deleted).
+
+.. clicmd:: mgmt load-config FILE <merge|replace>
+
+    This command loads configuration in JSON format from the filepath specified,
+    and merges or replaces the Candidate DB as per the option specified.
+
+.. clicmd:: mgmt save-config <candidate|running> FILE
+
+    This command dumps the specified DB into the given file in JSON format.
+    This command is not supported for the Operational DB.
+
+.. clicmd:: mgmt commit abort
+
+    This command aborts any configuration present in the Candidate DB that has
+    not yet been applied to the Running DB.
+
+.. clicmd:: mgmt commit apply
+
+    This command commits any uncommitted changes in the Candidate DB to the
+    Running DB. It also dumps a copy of the tree in JSON format into
+    frr_startup.json.
+
+.. clicmd:: mgmt commit check
+
+    This command validates the configuration but does not apply it to the
+    Running DB.
+
+.. clicmd:: mgmt rollback commit-id WORD
+
+    This command rolls back the Running Database contents to the state
+    corresponding to the commit-id specified.
+
+.. clicmd:: mgmt rollback last WORD
+
+    This command rolls back the last specified number of recent commits.
+
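+As an illustration, a configuration change could be entered and applied from
+the VTYSH shell using the commands above (the xpath and value here are purely
+illustrative placeholders, not taken from this change):
+
+.. code-block:: frr
+
+   mgmt set-config /frr-interface:lib/interface[name='eth0']/description uplink
+   mgmt commit check
+   mgmt commit apply
+   show mgmt commit-history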
+
+MGMT Show commands
+==================
+
+.. clicmd:: show mgmt backend-adapter all
+
+    This command shows the backend adapter information and the clients/daemons
+    connected to the adapters.
+
+.. clicmd:: show mgmt backend-yang-xpath-registry
+
+    This command shows which Backend adapters are registered for which YANG
+    data subtree(s).
+
+.. clicmd:: show mgmt frontend-adapter all [detail]
+
+    This command shows the frontend adapter information and the clients
+    connected to the adapters.
+
+.. clicmd:: show mgmt transaction all
+
+    Shows the list of transactions along with information about each
+    transaction.
+
+.. clicmd:: show mgmt get-config [candidate|running] XPATH
+
+    This command uses the GET_CONFIG operation over the MGMT Frontend
+    interface and returns the xpaths and values of the nodes of the subtree
+    pointed to by the given XPATH.
+
+.. clicmd:: show mgmt get-data [candidate|operation|running] XPATH
+
+    This command uses the GET_DATA operation over the MGMT Frontend interface
+    and returns the xpaths and values of the nodes of the subtree pointed to
+    by the given XPATH. Currently only 'candidate' and 'running' are
+    supported ('operational' will be supported in the future).
+
+.. clicmd:: show mgmt database-contents [candidate|operation|running] [xpath WORD] [file WORD] json|xml
+
+    This command dumps the subtree pointed to by the xpath in JSON or XML
+    format. If no filepath is given, the tree is printed on the shell.
+
+.. clicmd:: show mgmt commit-history
+
+    This command dumps details of up to the last 10 commits handled by MGMTd.
index c205122b0b98f5ea0fd8929eec5bc692c2745d03..9e4d7a611aced3268fad9dff21a664ca47889a02 100644 (file)
@@ -294,9 +294,9 @@ Route Map Set Command
 
    Subtract the BGP local preference from an existing `local_pref`.
 
-.. clicmd:: set distance DISTANCE
+.. clicmd:: set distance (1-255)
 
-   Set the Administrative distance to DISTANCE to use for the route.
+   Set the Administrative distance to use for the route.
    This is only locally significant and will not be dispersed to peers.
 
 .. clicmd:: set weight WEIGHT
index b8c5c70d9c12aa1faf8461392a16ae55c301e66d..4879f7f7ef3c27264d2d6000b1caa8c03c8b9025 100644 (file)
@@ -54,6 +54,7 @@ user_RSTFILES = \
        doc/user/flowspec.rst \
        doc/user/watchfrr.rst \
        doc/user/wecmp_linkbw.rst \
+       doc/user/mgmtd.rst \
        # end
 
 EXTRA_DIST += \
index afc6be2312a1e0987139c8a20987c19df4e3696d..54621a49fdb05717af9be9ab7c5269765f9d4176 100644 (file)
@@ -25,6 +25,7 @@ RUN source /src/alpine/APKBUILD.in \
                gzip \
                py-pip \
                rtrlib \
+               protobuf-c-dev \
        && pip install pytest
 RUN mkdir -p /pkgs/apk
 COPY . /src
index 2d1ee9efa49887f217e6b24354a19868f26a46ed..8739cee09baf406ac6c1f2711288d398c20aa9c7 100644 (file)
@@ -5,6 +5,7 @@ RUN yum install -y rpm-build autoconf automake libtool make \
         readline-devel texinfo net-snmp-devel groff pkgconfig \
         json-c-devel pam-devel bison flex pytest c-ares-devel \
         python3-devel python3-sphinx libcap-devel systemd-devel \
+        protobuf-c-devel \
         https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-7-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el7.x86_64.rpm \
         https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-7-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el7.x86_64.rpm \
         https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-00146/CentOS-7-x86_64-Packages/librtr-0.8.0-1.el7.x86_64.rpm \
index df772b83184ea96e3a08589f947c2712f8af498f..88a7d6a0076cba76f725312884ab0e43d1b21d5e 100644 (file)
@@ -9,6 +9,7 @@ RUN dnf install --enablerepo=powertools -y rpm-build git autoconf pcre-devel \
         automake libtool make readline-devel texinfo net-snmp-devel pkgconfig \
         groff pkgconfig json-c-devel pam-devel bison flex python3-pytest \
         c-ares-devel python3-devel python3-sphinx libcap-devel platform-python-devel \
+        protobuf-c-devel \
         https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \
         https://ci1.netdef.org/artifact/LIBYANG-LIBYANGV2/shared/build-2/CentOS-8-x86_64-Packages/libyang2-devel-2.0.0.10.g2eb910e4-1.el8.x86_64.rpm \
         https://ci1.netdef.org/artifact/RPKI-RTRLIB/shared/build-00146/CentOS-7-x86_64-Packages/librtr-0.8.0-1.el7.x86_64.rpm \
index 7476e5fe3e7b5239f29b03055d473d78b0cf00be..d136538c7d0814177dbd57b534b3316b5ebb76a5 100644 (file)
@@ -7,7 +7,7 @@ ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
 RUN apt-get update && \
     apt-get install -y libpcre3-dev apt-transport-https ca-certificates curl wget logrotate \
     libc-ares2 libjson-c3 vim procps libreadline7 gnupg2 lsb-release apt-utils \
-    tini && rm -rf /var/lib/apt/lists/*
+    libprotobuf-c-dev protobuf-c-compiler tini && rm -rf /var/lib/apt/lists/*
 
 RUN curl -s https://deb.frrouting.org/frr/keys.asc | apt-key add -
 RUN echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) frr-stable | tee -a /etc/apt/sources.list.d/frr.list
index 07a5a2f7e0a53ef91d7ccac785304abdfd55d111..dab86067398861e5fce5324a0e21e27aa8c792e9 100644 (file)
@@ -9,7 +9,7 @@ RUN apt update && \
       pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
       libc-ares-dev python3-dev python3-sphinx \
       install-info build-essential libsnmp-dev perl libcap-dev \
-      libelf-dev \
+      libelf-dev libprotobuf-c-dev protobuf-c-compiler \
       sudo gdb iputils-ping time \
       python-pip net-tools iproute2 && \
       python3 -m pip install wheel && \
index b9fe385c3a9eb8b0fc90658a46d14d56f101c7a5..7e49910a72947e97dfb56aadd8cc2a92c0df5dca 100644 (file)
@@ -9,7 +9,7 @@ RUN apt update && \
       pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
       libc-ares-dev python3-dev python3-sphinx \
       install-info build-essential libsnmp-dev perl \
-      libcap-dev python2 libelf-dev \
+      libcap-dev python2 libelf-dev libprotobuf-c-dev protobuf-c-compiler \
       sudo gdb curl iputils-ping time \
       lua5.3 liblua5.3-dev \
       net-tools iproute2 && \
index 6538e565880923d89efb6200a535c1077a2d55d4..8856f9f09f2adf1cf4821c0b0f63fb48ae304904 100644 (file)
@@ -429,6 +429,11 @@ struct cmd_node {
 #define SHARP_STR "Sharp Routing Protocol\n"
 #define OSPF_GR_STR                                                            \
        "OSPF non-stop forwarding (NSF) also known as OSPF Graceful Restart\n"
+#define MGMTD_STR "Management Daemon (MGMTD) information\n"
+#define MGMTD_BE_ADAPTER_STR "MGMTD Backend Adapter information\n"
+#define MGMTD_FE_ADAPTER_STR "MGMTD Frontend Adapter information\n"
+#define MGMTD_TXN_STR "MGMTD Transaction information\n"
+#define MGMTD_DS_STR "MGMTD Datastore information\n"
 
 #define CMD_VNI_RANGE "(1-16777215)"
 #define CONF_BACKUP_EXT ".sav"
index 2c8f2e90473d3ac83039a91aa74d21a75dfdae78..c13d88a1e8216f877ba072eace5b499aef9a91ae 100644 (file)
@@ -242,6 +242,17 @@ DEFUN_NOSH(end_config, end_config_cmd, "XFRR_end_configuration",
        ret = nb_cli_pending_commit_check(vty);
 
        zlog_info("Configuration Read in Took: %s", readin_time_str);
+       zlog_debug("%s: VTY:%p, pending SET-CFG: %u", __func__, vty,
+                  (uint32_t)vty->mgmt_num_pending_setcfg);
+
+       /*
+        * If (and only if) we have sent any CLI config commands to the MGMTd
+        * FE interface using vty_mgmt_send_config_data() without an implicit
+        * commit, we need to send an explicit COMMIT-REQ now to apply all
+        * those commands at once.
+        */
+       if (vty->mgmt_num_pending_setcfg && vty_mgmt_fe_enabled())
+               vty_mgmt_send_commit_config(vty, false, false);
 
        if (callback.end_config)
                (*callback.end_config)();
diff --git a/lib/mgmt.proto b/lib/mgmt.proto
new file mode 100644 (file)
index 0000000..8a11ff0
--- /dev/null
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: ISC
+//
+// mgmt.proto
+//
+// @copyright Copyright (C) 2021  Vmware, Inc.
+//
+// @author Pushpasis Sarkar <spushpasis@vmware.com>
+//
+
+syntax = "proto2";
+
+//
+// Protobuf definitions pertaining to the MGMTD component.
+//
+
+package mgmtd;
+
+//
+// Common Sub-Messages
+//
+
+message YangDataXPath {
+  required string xpath = 1;
+}
+
+message YangDataValue {
+  oneof value {
+    //
+    // NOTE: For now let's use stringized value ONLY.
+    // We will enhance it later to pass native-format
+    // if needed.
+    //
+    // bool bool_val = 2;
+    // double double_val = 3;
+    // float float_val = 4;
+    // string string_val = 5;
+    // bytes bytes_val = 6;
+    // int32 int32_val = 7;
+    // int64 int64_val = 8;
+    // uint32 uint32_val = 9;
+    // uint64 uint64_val = 10;
+    // int32 int8_val = 11;
+    // uint32 uint8_val = 12;
+    // int32 int16_val = 13;
+    // uint32 uint16_val = 14;
+    string encoded_str_val = 100;
+  }
+}
+
+message YangData {
+  required string xpath = 1;
+  optional YangDataValue value = 2;
+}
+
+enum CfgDataReqType {
+  REQ_TYPE_NONE = 0;
+  SET_DATA = 1;
+  DELETE_DATA = 2;
+}
+
+message YangCfgDataReq {
+  required YangData data = 1;
+  required CfgDataReqType req_type = 2;
+}
+
+message YangGetDataReq {
+  required YangData data = 1;
+  required int64 next_indx = 2;
+}
+
+//
+// Backend Interface Messages
+//
+message BeSubscribeReq {
+  required string client_name = 1;
+  required bool subscribe_xpaths = 2;
+  repeated string xpath_reg = 3;
+}
+
+message BeSubscribeReply {
+  required bool success = 1;
+}
+
+message BeTxnReq {
+  required uint64 txn_id = 1;
+  required bool create = 2;
+}
+
+message BeTxnReply {
+  required uint64 txn_id = 1;
+  required bool create = 2;
+  required bool success = 3;
+}
+
+message BeCfgDataCreateReq {
+  required uint64 txn_id = 1;
+  required uint64 batch_id = 2;
+  repeated YangCfgDataReq data_req = 3;
+  required bool end_of_data = 4;
+}
+
+message BeCfgDataCreateReply {
+  required uint64 txn_id = 1;
+  required uint64 batch_id = 2;
+  required bool success = 3;
+  optional string error_if_any = 4;
+}
+
+message BeCfgDataApplyReq {
+  required uint64 txn_id = 1;
+}
+
+message BeCfgDataApplyReply {
+  required uint64 txn_id = 1;
+  repeated uint64 batch_ids = 2;
+  required bool success = 3;
+  optional string error_if_any = 4;
+}
+
+message BeOperDataGetReq {
+  required uint64 txn_id = 1;
+  required uint64 batch_id = 2;
+  repeated YangGetDataReq data = 3;
+}
+
+message YangDataReply {
+  repeated YangData data = 1;
+  required int64 next_indx = 2;
+}
+
+message BeOperDataGetReply {
+  required uint64 txn_id = 1;
+  required uint64 batch_id = 2;
+  required bool success = 3;
+  optional string error = 4;
+  optional YangDataReply data = 5;
+}
+
+message BeOperDataNotify {
+  required YangDataReply data = 5;
+}
+
+message BeConfigCmdReq {
+  required string cmd = 1;
+}
+
+message BeConfigCmdReply {
+  required bool success = 1;
+  required string error_if_any = 2;
+}
+
+message BeShowCmdReq {
+  required string cmd = 1;
+}
+
+message BeShowCmdReply {
+  required bool success = 1;
+  required string cmd_ouput = 2;
+}
+
+//
+// Any message on the MGMTD Backend Interface.
+//
+message BeMessage {
+  oneof message {
+    BeSubscribeReq subscr_req = 2;
+    BeSubscribeReply subscr_reply = 3;
+    BeTxnReq txn_req = 4;
+    BeTxnReply txn_reply = 5;
+    BeCfgDataCreateReq cfg_data_req = 6;
+    BeCfgDataCreateReply cfg_data_reply = 7;
+    BeCfgDataApplyReq cfg_apply_req = 8;
+    BeCfgDataApplyReply cfg_apply_reply = 9;
+    BeOperDataGetReq get_req = 10;
+    BeOperDataGetReply get_reply = 11;
+    BeOperDataNotify notify_data = 12;
+    BeConfigCmdReq cfg_cmd_req = 13;
+    BeConfigCmdReply cfg_cmd_reply = 14;
+    BeShowCmdReq show_cmd_req = 15;
+    BeShowCmdReply show_cmd_reply = 16;
+  }
+}
+
+
+//
+// Frontend Interface Messages
+//
+
+message FeRegisterReq {
+  required string client_name = 1;
+}
+
+message FeSessionReq {
+  required bool create = 1;
+  oneof id {
+    uint64 client_conn_id = 2; // Applicable for create request only
+    uint64 session_id = 3; // Applicable for delete request only
+  }
+}
+
+message FeSessionReply {
+  required bool create = 1;
+  required bool success = 2;
+  optional uint64 client_conn_id = 3; // Applicable for create request only
+  required uint64 session_id = 4;
+}
+
+enum DatastoreId {
+  DS_NONE = 0;
+  RUNNING_DS = 1;
+  CANDIDATE_DS = 2;
+  OPERATIONAL_DS = 3;
+  STARTUP_DS = 4;
+}
+
+message FeLockDsReq {
+  required uint64 session_id = 1;
+  required uint64 req_id = 2;
+  required DatastoreId ds_id = 3;
+  required bool lock = 4;
+}
+
+message FeLockDsReply {
+  required uint64 session_id = 1;
+  required uint64 req_id = 2;
+  required DatastoreId ds_id = 3;
+  required bool lock = 4;
+  required bool success = 5;
+  optional string error_if_any = 6;
+}
+
+message FeSetConfigReq {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required uint64 req_id = 3;
+  repeated YangCfgDataReq data = 4;
+  required bool implicit_commit = 5;
+  required DatastoreId commit_ds_id = 6;
+}
+
+message FeSetConfigReply {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required uint64 req_id = 3;
+  required bool success = 4;
+  optional string error_if_any = 5;
+}
+
+message FeCommitConfigReq {
+  required uint64 session_id = 1;
+  required DatastoreId src_ds_id = 2;
+  required DatastoreId dst_ds_id = 3;
+  required uint64 req_id = 4;
+  required bool validate_only = 5;
+  required bool abort = 6;
+}
+
+message FeCommitConfigReply {
+  required uint64 session_id = 1;
+  required DatastoreId src_ds_id = 2;
+  required DatastoreId dst_ds_id = 3;
+  required uint64 req_id = 4;
+  required bool validate_only = 5;
+  required bool success = 6;
+  required bool abort = 7;
+  optional string error_if_any = 8;
+}
+
+message FeGetConfigReq {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required uint64 req_id = 3;
+  repeated YangGetDataReq data = 4;
+}
+
+message FeGetConfigReply {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required uint64 req_id = 3;
+  required bool success = 4;
+  optional string error_if_any = 5;
+  optional YangDataReply data = 6;
+}
+
+message FeGetDataReq {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required uint64 req_id = 3;
+  repeated YangGetDataReq data = 4;
+}
+
+message FeGetDataReply {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required uint64 req_id = 3;
+  required bool success = 4;
+  optional string error_if_any = 5;
+  optional YangDataReply data = 6;
+}
+
+message FeNotifyDataReq {
+  repeated YangData data = 1;
+}
+
+message FeRegisterNotifyReq {
+  required uint64 session_id = 1;
+  required DatastoreId ds_id = 2;
+  required bool register_req = 3;
+  required uint64 req_id = 4;
+  repeated YangDataXPath data_xpath = 5;
+}
+
+message FeMessage {
+  oneof message {
+    FeRegisterReq register_req = 2;
+    FeSessionReq session_req = 3;
+    FeSessionReply session_reply = 4;
+    FeLockDsReq lockds_req = 5;
+    FeLockDsReply lockds_reply = 6;
+    FeSetConfigReq setcfg_req = 7;
+    FeSetConfigReply setcfg_reply = 8;
+    FeCommitConfigReq commcfg_req = 9;
+    FeCommitConfigReply commcfg_reply = 10;
+    FeGetConfigReq getcfg_req = 11;
+    FeGetConfigReply getcfg_reply = 12;
+    FeGetDataReq getdata_req = 13;
+    FeGetDataReply getdata_reply = 14;
+    FeNotifyDataReq notify_data_req = 15;
+    FeRegisterNotifyReq regnotify_req = 16;
+  }
+}
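These definitions are compiled by protobuf-c into C structs and pack/unpack helpers (mgmtd__be_message__init(), mgmtd__be_message__pack(), and so on) that the client libraries added below build on. As a minimal sketch, not taken from this patch, a backend client could encode a subscribe request roughly as follows; the wrapper function name and the caller-supplied buffer are illustrative only:

    #include "mgmt_pb.h" /* wraps the header generated from lib/mgmt.proto */

    /* Illustrative only: pack a BeSubscribeReq into buf, return packed length. */
    static size_t example_pack_subscr_req(const char *client_name, uint8_t *buf,
                                          size_t buf_len)
    {
            Mgmtd__BeSubscribeReq subscr_req;
            Mgmtd__BeMessage be_msg;
            size_t len;

            mgmtd__be_subscribe_req__init(&subscr_req);
            subscr_req.client_name = (char *)client_name;
            subscr_req.subscribe_xpaths = false;
            subscr_req.n_xpath_reg = 0;

            mgmtd__be_message__init(&be_msg);
            be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ;
            be_msg.subscr_req = &subscr_req;

            len = mgmtd__be_message__get_packed_size(&be_msg);
            if (len > buf_len)
                    return 0;
            return mgmtd__be_message__pack(&be_msg, buf);
    }

The unpack path is symmetric: mgmtd__be_message__unpack() and mgmtd__be_message__free_unpacked(), as used in mgmt_be_client_process_msg() below.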
diff --git a/lib/mgmt_be_client.c b/lib/mgmt_be_client.c
new file mode 100644 (file)
index 0000000..bb45024
--- /dev/null
@@ -0,0 +1,1223 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Library API interfaces
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmt_be_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "network.h"
+#include "stream.h"
+#include "sockopt.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BE_CLIENT_DBG(fmt, ...)                                         \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BE_CLIENT_ERR(fmt, ...)                                         \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BE_CLIENT_DBG(fmt, ...)                                         \
+       do {                                                                   \
+               if (mgmt_debug_be_client)                                     \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);         \
+       } while (0)
+#define MGMTD_BE_CLIENT_ERR(fmt, ...)                                         \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_BATCH,
+                   "MGMTD backend transaction batch data");
+DEFINE_MTYPE_STATIC(LIB, MGMTD_BE_TXN, "MGMTD backend transaction data");
+
+enum mgmt_be_txn_event {
+       MGMTD_BE_TXN_PROC_SETCFG = 1,
+       MGMTD_BE_TXN_PROC_GETCFG,
+       MGMTD_BE_TXN_PROC_GETDATA
+};
+
+struct mgmt_be_set_cfg_req {
+       struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       uint16_t num_cfg_changes;
+};
+
+struct mgmt_be_get_data_req {
+       char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+       uint16_t num_xpaths;
+};
+
+struct mgmt_be_txn_req {
+       enum mgmt_be_txn_event event;
+       union {
+               struct mgmt_be_set_cfg_req set_cfg;
+               struct mgmt_be_get_data_req get_data;
+       } req;
+};
+
+PREDECL_LIST(mgmt_be_batches);
+struct mgmt_be_batch_ctx {
+       /* Batch-Id as assigned by MGMTD */
+       uint64_t batch_id;
+
+       struct mgmt_be_txn_req txn_req;
+
+       uint32_t flags;
+
+       struct mgmt_be_batches_item list_linkage;
+};
+#define MGMTD_BE_BATCH_FLAGS_CFG_PREPARED (1U << 0)
+#define MGMTD_BE_TXN_FLAGS_CFG_APPLIED (1U << 1)
+DECLARE_LIST(mgmt_be_batches, struct mgmt_be_batch_ctx, list_linkage);
+
+struct mgmt_be_client_ctx;
+
+PREDECL_LIST(mgmt_be_txns);
+struct mgmt_be_txn_ctx {
+       /* Txn-Id as assigned by MGMTD */
+       uint64_t txn_id;
+       uint32_t flags;
+
+       struct mgmt_be_client_txn_ctx client_data;
+       struct mgmt_be_client_ctx *client_ctx;
+
+       /* List of batches belonging to this transaction */
+       struct mgmt_be_batches_head cfg_batches;
+       struct mgmt_be_batches_head apply_cfgs;
+
+       struct mgmt_be_txns_item list_linkage;
+
+       struct nb_transaction *nb_txn;
+       uint32_t nb_txn_id;
+};
+#define MGMTD_BE_TXN_FLAGS_CFGPREP_FAILED (1U << 1)
+
+DECLARE_LIST(mgmt_be_txns, struct mgmt_be_txn_ctx, list_linkage);
+
+#define FOREACH_BE_TXN_BATCH_IN_LIST(txn, batch)                               \
+       frr_each_safe (mgmt_be_batches, &(txn)->cfg_batches, (batch))
+
+#define FOREACH_BE_APPLY_BATCH_IN_LIST(txn, batch)                             \
+       frr_each_safe (mgmt_be_batches, &(txn)->apply_cfgs, (batch))
+
+struct mgmt_be_client_ctx {
+       int conn_fd;
+       struct thread_master *tm;
+       struct thread *conn_retry_tmr;
+       struct thread *conn_read_ev;
+       struct thread *conn_write_ev;
+       struct thread *conn_writes_on;
+       struct thread *msg_proc_ev;
+       uint32_t flags;
+
+       struct mgmt_msg_state mstate;
+
+       struct nb_config *candidate_config;
+       struct nb_config *running_config;
+
+       unsigned long num_batch_find;
+       unsigned long avg_batch_find_tm;
+       unsigned long num_edit_nb_cfg;
+       unsigned long avg_edit_nb_cfg_tm;
+       unsigned long num_prep_nb_cfg;
+       unsigned long avg_prep_nb_cfg_tm;
+       unsigned long num_apply_nb_cfg;
+       unsigned long avg_apply_nb_cfg_tm;
+
+       struct mgmt_be_txns_head txn_head;
+       struct mgmt_be_client_params client_params;
+};
+
+#define MGMTD_BE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
+
+#define FOREACH_BE_TXN_IN_LIST(client_ctx, txn)                                \
+       frr_each_safe (mgmt_be_txns, &(client_ctx)->txn_head, (txn))
+
+static bool mgmt_debug_be_client;
+
+static struct mgmt_be_client_ctx mgmt_be_client_ctx = {
+       .conn_fd = -1,
+};
+
+const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
+#ifdef HAVE_STATICD
+       [MGMTD_BE_CLIENT_ID_STATICD] = "staticd",
+#endif
+       [MGMTD_BE_CLIENT_ID_MAX] = "Unknown/Invalid",
+};
+
+/* Forward declarations */
+static void
+mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,
+                                enum mgmt_be_event event);
+static void
+mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
+                                     unsigned long intvl_secs);
+static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
+                                     Mgmtd__BeMessage *be_msg);
+
+static void
+mgmt_be_server_disconnect(struct mgmt_be_client_ctx *client_ctx,
+                            bool reconnect)
+{
+       /* Notify client through registered callback (if any) */
+       if (client_ctx->client_params.client_connect_notify)
+               (void)(*client_ctx->client_params.client_connect_notify)(
+                       (uintptr_t)client_ctx,
+                       client_ctx->client_params.user_data, false);
+
+       if (client_ctx->conn_fd != -1) {
+               close(client_ctx->conn_fd);
+               client_ctx->conn_fd = -1;
+       }
+
+       if (reconnect)
+               mgmt_be_client_schedule_conn_retry(
+                       client_ctx,
+                       client_ctx->client_params.conn_retry_intvl_sec);
+}
+
+static struct mgmt_be_batch_ctx *
+mgmt_be_find_batch_by_id(struct mgmt_be_txn_ctx *txn,
+                           uint64_t batch_id)
+{
+       struct mgmt_be_batch_ctx *batch = NULL;
+
+       FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+               if (batch->batch_id == batch_id)
+                       return batch;
+       }
+
+       return NULL;
+}
+
+static struct mgmt_be_batch_ctx *
+mgmt_be_batch_create(struct mgmt_be_txn_ctx *txn, uint64_t batch_id)
+{
+       struct mgmt_be_batch_ctx *batch = NULL;
+
+       batch = mgmt_be_find_batch_by_id(txn, batch_id);
+       if (!batch) {
+               batch = XCALLOC(MTYPE_MGMTD_BE_BATCH,
+                               sizeof(struct mgmt_be_batch_ctx));
+               assert(batch);
+
+               batch->batch_id = batch_id;
+               mgmt_be_batches_add_tail(&txn->cfg_batches, batch);
+
+               MGMTD_BE_CLIENT_DBG("Added new batch 0x%llx to transaction",
+                                   (unsigned long long)batch_id);
+       }
+
+       return batch;
+}
+
+static void mgmt_be_batch_delete(struct mgmt_be_txn_ctx *txn,
+                                   struct mgmt_be_batch_ctx **batch)
+{
+       uint16_t indx;
+
+       if (!batch)
+               return;
+
+       mgmt_be_batches_del(&txn->cfg_batches, *batch);
+       if ((*batch)->txn_req.event == MGMTD_BE_TXN_PROC_SETCFG) {
+               for (indx = 0; indx < MGMTD_MAX_CFG_CHANGES_IN_BATCH; indx++) {
+                       if ((*batch)->txn_req.req.set_cfg.cfg_changes[indx]
+                                   .value) {
+                               free((char *)(*batch)
+                                            ->txn_req.req.set_cfg
+                                            .cfg_changes[indx]
+                                            .value);
+                       }
+               }
+       }
+
+       XFREE(MTYPE_MGMTD_BE_BATCH, *batch);
+       *batch = NULL;
+}
+
+static void mgmt_be_cleanup_all_batches(struct mgmt_be_txn_ctx *txn)
+{
+       struct mgmt_be_batch_ctx *batch = NULL;
+
+       FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+               mgmt_be_batch_delete(txn, &batch);
+       }
+
+       FOREACH_BE_APPLY_BATCH_IN_LIST (txn, batch) {
+               mgmt_be_batch_delete(txn, &batch);
+       }
+}
+
+static struct mgmt_be_txn_ctx *
+mgmt_be_find_txn_by_id(struct mgmt_be_client_ctx *client_ctx,
+                          uint64_t txn_id)
+{
+       struct mgmt_be_txn_ctx *txn = NULL;
+
+       FOREACH_BE_TXN_IN_LIST (client_ctx, txn) {
+               if (txn->txn_id == txn_id)
+                       return txn;
+       }
+
+       return NULL;
+}
+
+static struct mgmt_be_txn_ctx *
+mgmt_be_txn_create(struct mgmt_be_client_ctx *client_ctx,
+                      uint64_t txn_id)
+{
+       struct mgmt_be_txn_ctx *txn = NULL;
+
+       txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+       if (!txn) {
+               txn = XCALLOC(MTYPE_MGMTD_BE_TXN,
+                              sizeof(struct mgmt_be_txn_ctx));
+               assert(txn);
+
+               txn->txn_id = txn_id;
+               txn->client_ctx = client_ctx;
+               mgmt_be_batches_init(&txn->cfg_batches);
+               mgmt_be_batches_init(&txn->apply_cfgs);
+               mgmt_be_txns_add_tail(&client_ctx->txn_head, txn);
+
+               MGMTD_BE_CLIENT_DBG("Added new transaction 0x%llx",
+                                   (unsigned long long)txn_id);
+       }
+
+       return txn;
+}
+
+static void mgmt_be_txn_delete(struct mgmt_be_client_ctx *client_ctx,
+                                  struct mgmt_be_txn_ctx **txn)
+{
+       char err_msg[] = "MGMT Transaction Delete";
+
+       if (!txn)
+               return;
+
+       /*
+        * Remove the transaction from the list of transactions
+        * so that future lookups with the same transaction id
+        * do not return this one.
+        */
+       mgmt_be_txns_del(&client_ctx->txn_head, *txn);
+
+       /*
+        * Time to delete the transaction which should also
+        * take care of cleaning up all batches created via
+        * CFGDATA_CREATE_REQs. But first notify the client
+        * about the transaction delete.
+        */
+       if (client_ctx->client_params.txn_notify)
+               (void)(*client_ctx->client_params.txn_notify)(
+                       (uintptr_t)client_ctx,
+                       client_ctx->client_params.user_data,
+                       &(*txn)->client_data, true);
+
+       mgmt_be_cleanup_all_batches(*txn);
+       if ((*txn)->nb_txn)
+               nb_candidate_commit_abort((*txn)->nb_txn, err_msg,
+                                       sizeof(err_msg));
+       XFREE(MTYPE_MGMTD_BE_TXN, *txn);
+
+       *txn = NULL;
+}
+
+static void
+mgmt_be_cleanup_all_txns(struct mgmt_be_client_ctx *client_ctx)
+{
+       struct mgmt_be_txn_ctx *txn = NULL;
+
+       FOREACH_BE_TXN_IN_LIST (client_ctx, txn) {
+               mgmt_be_txn_delete(client_ctx, &txn);
+       }
+}
+
+static int mgmt_be_send_txn_reply(struct mgmt_be_client_ctx *client_ctx,
+                                     uint64_t txn_id, bool create,
+                                     bool success)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeTxnReply txn_reply;
+
+       mgmtd__be_txn_reply__init(&txn_reply);
+       txn_reply.create = create;
+       txn_reply.txn_id = txn_id;
+       txn_reply.success = success;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY;
+       be_msg.txn_reply = &txn_reply;
+
+       MGMTD_BE_CLIENT_DBG(
+               "Sending TXN_REPLY message to MGMTD for txn 0x%llx",
+               (unsigned long long)txn_id);
+
+       return mgmt_be_client_send_msg(client_ctx, &be_msg);
+}
+
+static int mgmt_be_process_txn_req(struct mgmt_be_client_ctx *client_ctx,
+                                      uint64_t txn_id, bool create)
+{
+       struct mgmt_be_txn_ctx *txn;
+
+       txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+       if (create) {
+               if (txn) {
+                       /*
+                        * Transaction with same txn-id already exists.
+                        * Should not happen under any circumstances.
+                        */
+                       MGMTD_BE_CLIENT_ERR(
+                               "Transaction 0x%llx already exists!!!",
+                               (unsigned long long)txn_id);
+                       mgmt_be_send_txn_reply(client_ctx, txn_id, create,
+                                                  false);
+               }
+
+               MGMTD_BE_CLIENT_DBG("Created new transaction 0x%llx",
+                                    (unsigned long long)txn_id);
+               txn = mgmt_be_txn_create(client_ctx, txn_id);
+
+               if (client_ctx->client_params.txn_notify)
+                       (void)(*client_ctx->client_params.txn_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               &txn->client_data, false);
+       } else {
+               if (!txn) {
+                       /*
+                        * Transaction with the same txn-id does not exist.
+                        * Return success anyway.
+                        */
+                       MGMTD_BE_CLIENT_DBG(
+                               "Transaction to delete 0x%llx does NOT exist!!!",
+                               (unsigned long long)txn_id);
+               } else {
+                       MGMTD_BE_CLIENT_DBG("Delete transaction 0x%llx",
+                                            (unsigned long long)txn_id);
+                       mgmt_be_txn_delete(client_ctx, &txn);
+               }
+       }
+
+       mgmt_be_send_txn_reply(client_ctx, txn_id, create, true);
+
+       return 0;
+}
+
+static int
+mgmt_be_send_cfgdata_create_reply(struct mgmt_be_client_ctx *client_ctx,
+                                    uint64_t txn_id, uint64_t batch_id,
+                                    bool success, const char *error_if_any)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeCfgDataCreateReply cfgdata_reply;
+
+       mgmtd__be_cfg_data_create_reply__init(&cfgdata_reply);
+       cfgdata_reply.txn_id = (uint64_t)txn_id;
+       cfgdata_reply.batch_id = (uint64_t)batch_id;
+       cfgdata_reply.success = success;
+       if (error_if_any)
+               cfgdata_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY;
+       be_msg.cfg_data_reply = &cfgdata_reply;
+
+       MGMTD_BE_CLIENT_DBG(
+               "Sending CFGDATA_CREATE_REPLY message to MGMTD for txn 0x%llx batch 0x%llx",
+               (unsigned long long)txn_id, (unsigned long long)batch_id);
+
+       return mgmt_be_client_send_msg(client_ctx, &be_msg);
+}
+
+static void mgmt_be_txn_cfg_abort(struct mgmt_be_txn_ctx *txn)
+{
+       char errmsg[BUFSIZ] = {0};
+
+       assert(txn && txn->client_ctx);
+       if (txn->nb_txn) {
+               MGMTD_BE_CLIENT_ERR(
+                       "Aborting configurations after prep for Txn 0x%llx",
+                       (unsigned long long)txn->txn_id);
+               nb_candidate_commit_abort(txn->nb_txn, errmsg, sizeof(errmsg));
+               txn->nb_txn = NULL;
+       }
+
+       /*
+        * revert candidate back to running
+        *
+        * This is one txn ctx but the candidate_config is per client ctx, how
+        * does that work?
+        */
+       MGMTD_BE_CLIENT_DBG(
+               "Reset candidate configurations after abort of Txn 0x%llx",
+               (unsigned long long)txn->txn_id);
+       nb_config_replace(txn->client_ctx->candidate_config,
+                         txn->client_ctx->running_config, true);
+}
+
+static int mgmt_be_txn_cfg_prepare(struct mgmt_be_txn_ctx *txn)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+       struct mgmt_be_txn_req *txn_req = NULL;
+       struct nb_context nb_ctx = {0};
+       struct timeval edit_nb_cfg_start;
+       struct timeval edit_nb_cfg_end;
+       unsigned long edit_nb_cfg_tm;
+       struct timeval prep_nb_cfg_start;
+       struct timeval prep_nb_cfg_end;
+       unsigned long prep_nb_cfg_tm;
+       struct mgmt_be_batch_ctx *batch;
+       bool error;
+       char err_buf[BUFSIZ];
+       size_t num_processed;
+       bool debug_be = mgmt_debug_be_client;
+       int err;
+
+       assert(txn && txn->client_ctx);
+       client_ctx = txn->client_ctx;
+
+       num_processed = 0;
+       FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+               txn_req = &batch->txn_req;
+               error = false;
+               nb_ctx.client = NB_CLIENT_CLI;
+               nb_ctx.user = (void *)client_ctx->client_params.user_data;
+
+               if (!txn->nb_txn) {
+                       /*
+                        * This happens when the current backend client is only
+                        * interested in consuming the config items but is not
+                        * interested in validating them.
+                        */
+                       error = false;
+                       if (debug_be)
+                               gettimeofday(&edit_nb_cfg_start, NULL);
+                       nb_candidate_edit_config_changes(
+                               client_ctx->candidate_config,
+                               txn_req->req.set_cfg.cfg_changes,
+                               (size_t)txn_req->req.set_cfg.num_cfg_changes,
+                               NULL, NULL, 0, err_buf, sizeof(err_buf),
+                               &error);
+                       if (error) {
+                               err_buf[sizeof(err_buf) - 1] = 0;
+                               MGMTD_BE_CLIENT_ERR(
+                                       "Failed to update configs for Txn %llx Batch %llx to Candidate! Err: '%s'",
+                                       (unsigned long long)txn->txn_id,
+                                       (unsigned long long)batch->batch_id,
+                                       err_buf);
+                               return -1;
+                       }
+                       if (debug_be) {
+                               gettimeofday(&edit_nb_cfg_end, NULL);
+                               edit_nb_cfg_tm = timeval_elapsed(
+                                       edit_nb_cfg_end, edit_nb_cfg_start);
+                               client_ctx->avg_edit_nb_cfg_tm =
+                                       ((client_ctx->avg_edit_nb_cfg_tm
+                                         * client_ctx->num_edit_nb_cfg)
+                                        + edit_nb_cfg_tm)
+                                       / (client_ctx->num_edit_nb_cfg + 1);
+                       }
+                       client_ctx->num_edit_nb_cfg++;
+               }
+
+               num_processed++;
+       }
+
+       if (!num_processed)
+               return 0;
+
+       /*
+        * Now prepare all the batches we have accumulated in one go.
+        */
+       nb_ctx.client = NB_CLIENT_CLI;
+       nb_ctx.user = (void *)client_ctx->client_params.user_data;
+       if (debug_be)
+               gettimeofday(&prep_nb_cfg_start, NULL);
+       err = nb_candidate_commit_prepare(nb_ctx, client_ctx->candidate_config,
+                                         "MGMTD Backend Txn", &txn->nb_txn,
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+                                         true, true,
+#else
+                                         false, true,
+#endif
+                                         err_buf, sizeof(err_buf) - 1);
+       if (err != NB_OK) {
+               err_buf[sizeof(err_buf) - 1] = 0;
+               if (err == NB_ERR_VALIDATION)
+                       MGMTD_BE_CLIENT_ERR(
+                               "Failed to validate configs for Txn %llx %u Batches! Err: '%s'",
+                               (unsigned long long)txn->txn_id,
+                               (uint32_t)num_processed, err_buf);
+               else
+                       MGMTD_BE_CLIENT_ERR(
+                               "Failed to prepare configs for Txn %llx, %u Batches! Err: '%s'",
+                               (unsigned long long)txn->txn_id,
+                               (uint32_t)num_processed, err_buf);
+               error = true;
+               SET_FLAG(txn->flags, MGMTD_BE_TXN_FLAGS_CFGPREP_FAILED);
+       } else
+               MGMTD_BE_CLIENT_DBG(
+                       "Prepared configs for Txn %llx, %u batches successfully!",
+                       (unsigned long long)txn->txn_id,
+                       (uint32_t)num_processed);
+       if (debug_be) {
+               gettimeofday(&prep_nb_cfg_end, NULL);
+               prep_nb_cfg_tm =
+                       timeval_elapsed(prep_nb_cfg_end, prep_nb_cfg_start);
+               client_ctx->avg_prep_nb_cfg_tm =
+                       ((client_ctx->avg_prep_nb_cfg_tm
+                         * client_ctx->num_prep_nb_cfg)
+                        + prep_nb_cfg_tm)
+                       / (client_ctx->num_prep_nb_cfg + 1);
+       }
+       client_ctx->num_prep_nb_cfg++;
+
+       FOREACH_BE_TXN_BATCH_IN_LIST (txn, batch) {
+               mgmt_be_send_cfgdata_create_reply(
+                       client_ctx, txn->txn_id, batch->batch_id,
+                       error ? false : true, error ? err_buf : NULL);
+               if (!error) {
+                       SET_FLAG(batch->flags,
+                                MGMTD_BE_BATCH_FLAGS_CFG_PREPARED);
+                       mgmt_be_batches_del(&txn->cfg_batches, batch);
+                       mgmt_be_batches_add_tail(&txn->apply_cfgs, batch);
+               }
+       }
+
+       if (debug_be)
+               MGMTD_BE_CLIENT_DBG(
+                       "Avg-nb-edit-duration %lu uSec, nb-prep-duration %lu (avg: %lu) uSec, batch size %u",
+                       client_ctx->avg_edit_nb_cfg_tm, prep_nb_cfg_tm,
+                       client_ctx->avg_prep_nb_cfg_tm, (uint32_t)num_processed);
+
+       if (error)
+               mgmt_be_txn_cfg_abort(txn);
+
+       return 0;
+}
+
+/*
+ * Stash the changes from each CFG_DATA_REQ into its batch; all the batches
+ * are then prepared together in one go once the last request arrives.
+ */
+static int
+mgmt_be_update_setcfg_in_batch(struct mgmt_be_client_ctx *client_ctx,
+                                 struct mgmt_be_txn_ctx *txn,
+                                 uint64_t batch_id,
+                                 Mgmtd__YangCfgDataReq * cfg_req[],
+                                 int num_req)
+{
+       struct mgmt_be_batch_ctx *batch = NULL;
+       struct mgmt_be_txn_req *txn_req = NULL;
+       int index;
+       struct nb_cfg_change *cfg_chg;
+
+       batch = mgmt_be_batch_create(txn, batch_id);
+       if (!batch) {
+               MGMTD_BE_CLIENT_ERR("Batch create failed!");
+               return -1;
+       }
+
+       txn_req = &batch->txn_req;
+       txn_req->event = MGMTD_BE_TXN_PROC_SETCFG;
+       MGMTD_BE_CLIENT_DBG(
+               "Created Set-Config request for batch 0x%llx, txn id 0x%llx, cfg-items:%d",
+               (unsigned long long)batch_id, (unsigned long long)txn->txn_id,
+               num_req);
+
+       txn_req->req.set_cfg.num_cfg_changes = num_req;
+       for (index = 0; index < num_req; index++) {
+               cfg_chg = &txn_req->req.set_cfg.cfg_changes[index];
+
+               if (cfg_req[index]->req_type
+                   == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
+                       cfg_chg->operation = NB_OP_DESTROY;
+               else
+                       cfg_chg->operation = NB_OP_CREATE;
+
+               strlcpy(cfg_chg->xpath, cfg_req[index]->data->xpath,
+                       sizeof(cfg_chg->xpath));
+               cfg_chg->value = (cfg_req[index]->data->value
+                                                 && cfg_req[index]
+                                                            ->data->value
+                                                            ->encoded_str_val
+                                         ? strdup(cfg_req[index]
+                                                          ->data->value
+                                                          ->encoded_str_val)
+                                         : NULL);
+               if (cfg_chg->value
+                   && !strncmp(cfg_chg->value, MGMTD_BE_CONTAINER_NODE_VAL,
+                               strlen(MGMTD_BE_CONTAINER_NODE_VAL))) {
+                       free((char *)cfg_chg->value);
+                       cfg_chg->value = NULL;
+               }
+       }
+
+       return 0;
+}
+
+static int
+mgmt_be_process_cfgdata_req(struct mgmt_be_client_ctx *client_ctx,
+                              uint64_t txn_id, uint64_t batch_id,
+                              Mgmtd__YangCfgDataReq * cfg_req[], int num_req,
+                              bool end_of_data)
+{
+       struct mgmt_be_txn_ctx *txn;
+
+       txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+       if (!txn) {
+               MGMTD_BE_CLIENT_ERR(
+                       "Invalid txn-id 0x%llx provided from MGMTD server",
+                       (unsigned long long)txn_id);
+               mgmt_be_send_cfgdata_create_reply(
+                       client_ctx, txn_id, batch_id, false,
+                       "Transaction context not created yet");
+       } else {
+               mgmt_be_update_setcfg_in_batch(client_ctx, txn, batch_id,
+                                                 cfg_req, num_req);
+       }
+
+       if (txn && end_of_data) {
+               MGMTD_BE_CLIENT_DBG("Triggering CFG_PREPARE_REQ processing");
+               mgmt_be_txn_cfg_prepare(txn);
+       }
+
+       return 0;
+}
+
+static int mgmt_be_send_apply_reply(struct mgmt_be_client_ctx *client_ctx,
+                                      uint64_t txn_id, uint64_t batch_ids[],
+                                      size_t num_batch_ids, bool success,
+                                      const char *error_if_any)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeCfgDataApplyReply apply_reply;
+
+       mgmtd__be_cfg_data_apply_reply__init(&apply_reply);
+       apply_reply.success = success;
+       apply_reply.txn_id = txn_id;
+       apply_reply.batch_ids = (uint64_t *)batch_ids;
+       apply_reply.n_batch_ids = num_batch_ids;
+
+       if (error_if_any)
+               apply_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY;
+       be_msg.cfg_apply_reply = &apply_reply;
+
+       MGMTD_BE_CLIENT_DBG(
+               "Sending CFG_APPLY_REPLY message to MGMTD for txn 0x%llx, %d batches [0x%llx - 0x%llx]",
+               (unsigned long long)txn_id, (int)num_batch_ids,
+               success && num_batch_ids ?
+                       (unsigned long long)batch_ids[0] : 0,
+               success && num_batch_ids ?
+               (unsigned long long)batch_ids[num_batch_ids - 1] : 0);
+
+       return mgmt_be_client_send_msg(client_ctx, &be_msg);
+}
+
+static int mgmt_be_txn_proc_cfgapply(struct mgmt_be_txn_ctx *txn)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+       struct timeval apply_nb_cfg_start;
+       struct timeval apply_nb_cfg_end;
+       unsigned long apply_nb_cfg_tm;
+       struct mgmt_be_batch_ctx *batch;
+       char err_buf[BUFSIZ];
+       size_t num_processed;
+       static uint64_t batch_ids[MGMTD_BE_MAX_BATCH_IDS_IN_REQ];
+       bool debug_be = mgmt_debug_be_client;
+
+       assert(txn && txn->client_ctx);
+       client_ctx = txn->client_ctx;
+
+       assert(txn->nb_txn);
+       num_processed = 0;
+
+       /*
+        * Now apply all the batches we have prepared in one go.
+        */
+       if (debug_be)
+               gettimeofday(&apply_nb_cfg_start, NULL);
+       (void)nb_candidate_commit_apply(txn->nb_txn, true, &txn->nb_txn_id,
+                                       err_buf, sizeof(err_buf) - 1);
+       if (debug_be) {
+               gettimeofday(&apply_nb_cfg_end, NULL);
+               apply_nb_cfg_tm =
+                       timeval_elapsed(apply_nb_cfg_end, apply_nb_cfg_start);
+               client_ctx->avg_apply_nb_cfg_tm =
+                       ((client_ctx->avg_apply_nb_cfg_tm
+                         * client_ctx->num_apply_nb_cfg)
+                        + apply_nb_cfg_tm)
+                       / (client_ctx->num_apply_nb_cfg + 1);
+       }
+       client_ctx->num_apply_nb_cfg++;
+       txn->nb_txn = NULL;
+
+       /*
+        * Send back CFG_APPLY_REPLY for all batches applied.
+        */
+       FOREACH_BE_APPLY_BATCH_IN_LIST (txn, batch) {
+               /*
+                * No need to delete the batch yet. Will be deleted during
+                * transaction cleanup on receiving TXN_DELETE_REQ.
+                */
+               SET_FLAG(batch->flags, MGMTD_BE_TXN_FLAGS_CFG_APPLIED);
+               mgmt_be_batches_del(&txn->apply_cfgs, batch);
+               mgmt_be_batches_add_tail(&txn->cfg_batches, batch);
+
+               batch_ids[num_processed] = batch->batch_id;
+               num_processed++;
+               if (num_processed == MGMTD_BE_MAX_BATCH_IDS_IN_REQ) {
+                       mgmt_be_send_apply_reply(client_ctx, txn->txn_id,
+                                                   batch_ids, num_processed,
+                                                   true, NULL);
+                       num_processed = 0;
+               }
+       }
+
+       mgmt_be_send_apply_reply(client_ctx, txn->txn_id, batch_ids,
+                                   num_processed, true, NULL);
+
+       if (debug_be)
+               MGMTD_BE_CLIENT_DBG("Nb-apply-duration %lu (avg: %lu) uSec",
+                                    apply_nb_cfg_tm,
+                                    client_ctx->avg_apply_nb_cfg_tm);
+
+       return 0;
+}
+
+static int
+mgmt_be_process_cfg_apply(struct mgmt_be_client_ctx *client_ctx,
+                            uint64_t txn_id)
+{
+       struct mgmt_be_txn_ctx *txn;
+
+       txn = mgmt_be_find_txn_by_id(client_ctx, txn_id);
+       if (!txn) {
+               mgmt_be_send_apply_reply(client_ctx, txn_id, NULL, 0, false,
+                                           "Transaction not created yet!");
+               return -1;
+       }
+
+       MGMTD_BE_CLIENT_DBG("Trigger CFG_APPLY_REQ processing");
+       mgmt_be_txn_proc_cfgapply(txn);
+
+       return 0;
+}
+
+static int
+mgmt_be_client_handle_msg(struct mgmt_be_client_ctx *client_ctx,
+                            Mgmtd__BeMessage *be_msg)
+{
+       /*
+        * protobuf-c adds a max-size sentinel enum value whose internal name
+        * changes between versions; cast to an int to avoid unhandled-enum
+        * warnings.
+        */
+       switch ((int)be_msg->message_case) {
+       case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
+               MGMTD_BE_CLIENT_DBG("Subscribe Reply Msg from mgmt, status %u",
+                                    be_msg->subscr_reply->success);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+               mgmt_be_process_txn_req(client_ctx,
+                                           be_msg->txn_req->txn_id,
+                                           be_msg->txn_req->create);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+               mgmt_be_process_cfgdata_req(
+                       client_ctx, be_msg->cfg_data_req->txn_id,
+                       be_msg->cfg_data_req->batch_id,
+                       be_msg->cfg_data_req->data_req,
+                       be_msg->cfg_data_req->n_data_req,
+                       be_msg->cfg_data_req->end_of_data);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+               mgmt_be_process_cfg_apply(
+                       client_ctx, (uint64_t)be_msg->cfg_apply_req->txn_id);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+               /*
+                * TODO: Add handling code in future.
+                */
+               break;
+       /*
+        * NOTE: The following messages are only ever sent from backend
+        * clients to MGMTD and/or need not be handled here.
+        */
+       case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA:
+       case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
+       default:
+               /*
+                * A 'default' case is added, contrary to the FRR coding
+                * guidelines, to avoid build failures on certain build
+                * systems (courtesy of the protobuf-c package).
+                */
+               break;
+       }
+
+       return 0;
+}
+
+static void mgmt_be_client_process_msg(void *user_ctx, uint8_t *data,
+                                      size_t len)
+{
+       struct mgmt_be_client_ctx *client_ctx = user_ctx;
+       Mgmtd__BeMessage *be_msg;
+
+       be_msg = mgmtd__be_message__unpack(NULL, len, data);
+       if (!be_msg) {
+               MGMTD_BE_CLIENT_DBG("Failed to decode %zu bytes from server",
+                                   len);
+               return;
+       }
+       MGMTD_BE_CLIENT_DBG(
+               "Decoded %zu bytes of message(msg: %u/%u) from server", len,
+               be_msg->message_case, be_msg->message_case);
+       (void)mgmt_be_client_handle_msg(client_ctx, be_msg);
+       mgmtd__be_message__free_unpacked(be_msg, NULL);
+}
+
+static void mgmt_be_client_proc_msgbufs(struct thread *thread)
+{
+       struct mgmt_be_client_ctx *client_ctx = THREAD_ARG(thread);
+
+       if (mgmt_msg_procbufs(&client_ctx->mstate, mgmt_be_client_process_msg,
+                             client_ctx, mgmt_debug_be_client))
+               mgmt_be_client_register_event(client_ctx, MGMTD_BE_PROC_MSG);
+}
+
+static void mgmt_be_client_read(struct thread *thread)
+{
+       struct mgmt_be_client_ctx *client_ctx = THREAD_ARG(thread);
+       enum mgmt_msg_rsched rv;
+
+       rv = mgmt_msg_read(&client_ctx->mstate, client_ctx->conn_fd,
+                          mgmt_debug_be_client);
+       if (rv == MSR_DISCONNECT) {
+               mgmt_be_server_disconnect(client_ctx, true);
+               return;
+       }
+       if (rv == MSR_SCHED_BOTH)
+               mgmt_be_client_register_event(client_ctx, MGMTD_BE_PROC_MSG);
+       mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_READ);
+}
+
+static inline void
+mgmt_be_client_sched_msg_write(struct mgmt_be_client_ctx *client_ctx)
+{
+       if (!CHECK_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF))
+               mgmt_be_client_register_event(client_ctx,
+                                                MGMTD_BE_CONN_WRITE);
+}
+
+static inline void
+mgmt_be_client_writes_on(struct mgmt_be_client_ctx *client_ctx)
+{
+       MGMTD_BE_CLIENT_DBG("Resume writing msgs");
+       UNSET_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF);
+       mgmt_be_client_sched_msg_write(client_ctx);
+}
+
+static inline void
+mgmt_be_client_writes_off(struct mgmt_be_client_ctx *client_ctx)
+{
+       SET_FLAG(client_ctx->flags, MGMTD_BE_CLIENT_FLAGS_WRITES_OFF);
+       MGMTD_BE_CLIENT_DBG("Paused writing msgs");
+}
+
+static int mgmt_be_client_send_msg(struct mgmt_be_client_ctx *client_ctx,
+                                  Mgmtd__BeMessage *be_msg)
+{
+       if (client_ctx->conn_fd == -1) {
+               MGMTD_BE_CLIENT_DBG("can't send message on closed connection");
+               return -1;
+       }
+
+       int rv = mgmt_msg_send_msg(
+               &client_ctx->mstate, be_msg,
+               mgmtd__be_message__get_packed_size(be_msg),
+               (size_t(*)(void *, void *))mgmtd__be_message__pack,
+               mgmt_debug_be_client);
+       mgmt_be_client_sched_msg_write(client_ctx);
+       return rv;
+}
+
+static void mgmt_be_client_write(struct thread *thread)
+{
+       struct mgmt_be_client_ctx *client_ctx = THREAD_ARG(thread);
+       enum mgmt_msg_wsched rv;
+
+       rv = mgmt_msg_write(&client_ctx->mstate, client_ctx->conn_fd,
+                           mgmt_debug_be_client);
+       if (rv == MSW_SCHED_STREAM)
+               mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_WRITE);
+       else if (rv == MSW_DISCONNECT)
+               mgmt_be_server_disconnect(client_ctx, true);
+       else if (rv == MSW_SCHED_WRITES_OFF) {
+               mgmt_be_client_writes_off(client_ctx);
+               mgmt_be_client_register_event(client_ctx,
+                                             MGMTD_BE_CONN_WRITES_ON);
+       } else
+               assert(rv == MSW_SCHED_NONE);
+}
+
+static void mgmt_be_client_resume_writes(struct thread *thread)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_be_client_ctx *)THREAD_ARG(thread);
+       assert(client_ctx && client_ctx->conn_fd != -1);
+
+       mgmt_be_client_writes_on(client_ctx);
+}
+
+static int mgmt_be_send_subscr_req(struct mgmt_be_client_ctx *client_ctx,
+                                  bool subscr_xpaths, uint16_t num_reg_xpaths,
+                                  char **reg_xpaths)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeSubscribeReq subscr_req;
+
+       mgmtd__be_subscribe_req__init(&subscr_req);
+       subscr_req.client_name = client_ctx->client_params.name;
+       subscr_req.n_xpath_reg = num_reg_xpaths;
+       if (num_reg_xpaths)
+               subscr_req.xpath_reg = reg_xpaths;
+       else
+               subscr_req.xpath_reg = NULL;
+       subscr_req.subscribe_xpaths = subscr_xpaths;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ;
+       be_msg.subscr_req = &subscr_req;
+
+       return mgmt_be_client_send_msg(client_ctx, &be_msg);
+}
+
+static void mgmt_be_server_connect(struct mgmt_be_client_ctx *client_ctx)
+{
+       const char *dbgtag = mgmt_debug_be_client ? "BE-client" : NULL;
+
+       assert(client_ctx->conn_fd == -1);
+       client_ctx->conn_fd = mgmt_msg_connect(
+               MGMTD_BE_SERVER_PATH, MGMTD_SOCKET_BE_SEND_BUF_SIZE,
+               MGMTD_SOCKET_BE_RECV_BUF_SIZE, dbgtag);
+
+       /* Send SUBSCRIBE_REQ message */
+       if (client_ctx->conn_fd == -1 ||
+           mgmt_be_send_subscr_req(client_ctx, false, 0, NULL) != 0) {
+               mgmt_be_server_disconnect(client_ctx, true);
+               return;
+       }
+
+       /* Start reading from the socket */
+       mgmt_be_client_register_event(client_ctx, MGMTD_BE_CONN_READ);
+
+       /* Notify client through registered callback (if any) */
+       if (client_ctx->client_params.client_connect_notify)
+               (void)(*client_ctx->client_params.client_connect_notify)(
+                       (uintptr_t)client_ctx,
+                       client_ctx->client_params.user_data, true);
+}
+
+static void mgmt_be_client_conn_timeout(struct thread *thread)
+{
+       mgmt_be_server_connect(THREAD_ARG(thread));
+}
+
+static void
+mgmt_be_client_register_event(struct mgmt_be_client_ctx *client_ctx,
+                                enum mgmt_be_event event)
+{
+       struct timeval tv = {0};
+
+       switch (event) {
+       case MGMTD_BE_CONN_READ:
+               thread_add_read(client_ctx->tm, mgmt_be_client_read,
+                               client_ctx, client_ctx->conn_fd,
+                               &client_ctx->conn_read_ev);
+               assert(client_ctx->conn_read_ev);
+               break;
+       case MGMTD_BE_CONN_WRITE:
+               thread_add_write(client_ctx->tm, mgmt_be_client_write,
+                                client_ctx, client_ctx->conn_fd,
+                                &client_ctx->conn_write_ev);
+               assert(client_ctx->conn_write_ev);
+               break;
+       case MGMTD_BE_PROC_MSG:
+               tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
+               thread_add_timer_tv(client_ctx->tm, mgmt_be_client_proc_msgbufs,
+                                   client_ctx, &tv, &client_ctx->msg_proc_ev);
+               assert(client_ctx->msg_proc_ev);
+               break;
+       case MGMTD_BE_CONN_WRITES_ON:
+               thread_add_timer_msec(client_ctx->tm,
+                                     mgmt_be_client_resume_writes, client_ctx,
+                                     MGMTD_BE_MSG_WRITE_DELAY_MSEC,
+                                     &client_ctx->conn_writes_on);
+               assert(client_ctx->conn_writes_on);
+               break;
+       case MGMTD_BE_SERVER:
+       case MGMTD_BE_CONN_INIT:
+       case MGMTD_BE_SCHED_CFG_PREPARE:
+       case MGMTD_BE_RESCHED_CFG_PREPARE:
+       case MGMTD_BE_SCHED_CFG_APPLY:
+       case MGMTD_BE_RESCHED_CFG_APPLY:
+               assert(!"mgmt_be_client_register_event() called incorrectly");
+               break;
+       }
+}
+
+static void
+mgmt_be_client_schedule_conn_retry(struct mgmt_be_client_ctx *client_ctx,
+                                     unsigned long intvl_secs)
+{
+       MGMTD_BE_CLIENT_DBG(
+               "Scheduling MGMTD Backend server connection retry after %lu seconds",
+               intvl_secs);
+       thread_add_timer(client_ctx->tm, mgmt_be_client_conn_timeout,
+                        (void *)client_ctx, intvl_secs,
+                        &client_ctx->conn_retry_tmr);
+}
+
+extern struct nb_config *running_config;
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ */
+uintptr_t mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
+                                   struct thread_master *master_thread)
+{
+       assert(master_thread && params && strlen(params->name)
+              && !mgmt_be_client_ctx.tm);
+
+       mgmt_be_client_ctx.tm = master_thread;
+
+       if (!running_config)
+               assert(!"MGMTD Be Client lib_init() after frr_init() only!");
+       mgmt_be_client_ctx.running_config = running_config;
+       mgmt_be_client_ctx.candidate_config = nb_config_new(NULL);
+
+       memcpy(&mgmt_be_client_ctx.client_params, params,
+              sizeof(mgmt_be_client_ctx.client_params));
+       if (!mgmt_be_client_ctx.client_params.conn_retry_intvl_sec)
+               mgmt_be_client_ctx.client_params.conn_retry_intvl_sec =
+                       MGMTD_BE_DEFAULT_CONN_RETRY_INTVL_SEC;
+
+       mgmt_be_txns_init(&mgmt_be_client_ctx.txn_head);
+       mgmt_msg_init(&mgmt_be_client_ctx.mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
+                     MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
+                     "BE-client");
+
+       /* Start trying to connect to MGMTD backend server immediately */
+       mgmt_be_client_schedule_conn_retry(&mgmt_be_client_ctx, 1);
+
+       MGMTD_BE_CLIENT_DBG("Initialized client '%s'", params->name);
+
+       return (uintptr_t)&mgmt_be_client_ctx;
+}
+
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ */
+enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
+                                               char *reg_yang_xpaths[],
+                                               int num_reg_xpaths)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_be_send_subscr_req(client_ctx, true, num_reg_xpaths,
+                                      reg_yang_xpaths)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Unsubscribe with MGMTD for one or more YANG subtree(s).
+ */
+enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
+                                                 char *reg_yang_xpaths[],
+                                                 int num_reg_xpaths)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+
+       if (mgmt_be_send_subscr_req(client_ctx, false, num_reg_xpaths,
+                                      reg_yang_xpaths)
+           < 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ */
+enum mgmt_result mgmt_be_send_yang_notify(uintptr_t lib_hndl,
+                                            Mgmtd__YangData * data_elems[],
+                                            int num_elems)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Destroy library and cleanup everything.
+ */
+void mgmt_be_client_lib_destroy(uintptr_t lib_hndl)
+{
+       struct mgmt_be_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_be_client_ctx *)lib_hndl;
+       assert(client_ctx);
+
+       MGMTD_BE_CLIENT_DBG("Destroying MGMTD Backend Client '%s'",
+                           client_ctx->client_params.name);
+
+       mgmt_be_server_disconnect(client_ctx, false);
+
+       mgmt_msg_destroy(&client_ctx->mstate);
+
+       THREAD_OFF(client_ctx->conn_retry_tmr);
+       THREAD_OFF(client_ctx->conn_read_ev);
+       THREAD_OFF(client_ctx->conn_write_ev);
+       THREAD_OFF(client_ctx->conn_writes_on);
+       THREAD_OFF(client_ctx->msg_proc_ev);
+       mgmt_be_cleanup_all_txns(client_ctx);
+       mgmt_be_txns_fini(&client_ctx->txn_head);
+}
diff --git a/lib/mgmt_be_client.h b/lib/mgmt_be_client.h
new file mode 100644 (file)
index 0000000..66bc62f
--- /dev/null
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Library API interfaces
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_BE_CLIENT_H_
+#define _FRR_MGMTD_BE_CLIENT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "northbound.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt_defines.h"
+
+/***************************************************************
+ * Client IDs
+ ***************************************************************/
+
+/*
+ * Add enum value for each supported component, wrap with
+ * #ifdef HAVE_COMPONENT
+ */
+enum mgmt_be_client_id {
+       MGMTD_BE_CLIENT_ID_MIN = 0,
+       MGMTD_BE_CLIENT_ID_INIT = -1,
+#ifdef HAVE_STATICD
+       MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+       MGMTD_BE_CLIENT_ID_MAX
+};
+
+#define FOREACH_MGMTD_BE_CLIENT_ID(id)                 \
+       for ((id) = MGMTD_BE_CLIENT_ID_MIN;             \
+            (id) < MGMTD_BE_CLIENT_ID_MAX; (id)++)
+
+/***************************************************************
+ * Constants
+ ***************************************************************/
+
+#define MGMTD_BE_CLIENT_ERROR_STRING_MAX_LEN 32
+
+#define MGMTD_BE_DEFAULT_CONN_RETRY_INTVL_SEC 5
+
+#define MGMTD_BE_MSG_PROC_DELAY_USEC 10
+#define MGMTD_BE_MAX_NUM_MSG_PROC 500
+
+#define MGMTD_BE_MSG_WRITE_DELAY_MSEC 1
+#define MGMTD_BE_MAX_NUM_MSG_WRITE 1000
+
+#define GMGD_BE_MAX_NUM_REQ_ITEMS 64
+
+#define MGMTD_BE_MSG_MAX_LEN 16384
+
+#define MGMTD_SOCKET_BE_SEND_BUF_SIZE 65535
+#define MGMTD_SOCKET_BE_RECV_BUF_SIZE MGMTD_SOCKET_BE_SEND_BUF_SIZE
+
+#define MGMTD_MAX_CFG_CHANGES_IN_BATCH                         \
+       ((10 * MGMTD_BE_MSG_MAX_LEN) /                          \
+        (MGMTD_MAX_XPATH_LEN + MGMTD_MAX_YANG_VALUE_LEN))
+
+/*
+ * Only about 80% of MGMTD_BE_MSG_MAX_LEN should be used for config data,
+ * since the google protobuf encoding adds overhead to each message sent.
+ */
+#define MGMTD_BE_CFGDATA_PACKING_EFFICIENCY 0.8
+#define MGMTD_BE_CFGDATA_MAX_MSG_LEN                                        \
+       (MGMTD_BE_MSG_MAX_LEN * MGMTD_BE_CFGDATA_PACKING_EFFICIENCY)
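+/* With MGMTD_BE_MSG_MAX_LEN of 16384 this works out to roughly 13107 bytes. */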
+
+#define MGMTD_BE_MAX_BATCH_IDS_IN_REQ                                       \
+       ((MGMTD_BE_MSG_MAX_LEN - 128) / sizeof(uint64_t))
+
+#define MGMTD_BE_CONTAINER_NODE_VAL "<<container>>"
+
+/***************************************************************
+ * Data-structures
+ ***************************************************************/
+
+#define MGMTD_BE_MAX_CLIENTS_PER_XPATH_REG 32
+
+struct mgmt_be_client_txn_ctx {
+       uintptr_t *user_ctx;
+};
+
+/*
+ * All the client-specific information this library needs to
+ * initialize itself, set up a connection with the MGMTD backend
+ * interface and carry out all required procedures appropriately.
+ *
+ * Backend clients need to initialize an instance of this structure
+ * with appropriate data and pass it while calling the API
+ * to initialize the library (see mgmt_be_client_lib_init for
+ * more details, and the usage sketch after this header).
+ */
+struct mgmt_be_client_params {
+       char name[MGMTD_CLIENT_NAME_MAX_LEN];
+       uintptr_t user_data;
+       unsigned long conn_retry_intvl_sec;
+
+       void (*client_connect_notify)(uintptr_t lib_hndl,
+                                     uintptr_t usr_data,
+                                     bool connected);
+
+       void (*client_subscribe_notify)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct nb_yang_xpath **xpath,
+               enum mgmt_result subscribe_result[], int num_paths);
+
+       void (*txn_notify)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct mgmt_be_client_txn_ctx *txn_ctx, bool destroyed);
+
+       enum mgmt_result (*data_validate)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct mgmt_be_client_txn_ctx *txn_ctx,
+               struct nb_yang_xpath *xpath, struct nb_yang_value *data,
+               bool delete, char *error_if_any);
+
+       enum mgmt_result (*data_apply)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct mgmt_be_client_txn_ctx *txn_ctx,
+               struct nb_yang_xpath *xpath, struct nb_yang_value *data,
+               bool delete);
+
+       enum mgmt_result (*get_data_elem)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct mgmt_be_client_txn_ctx *txn_ctx,
+               struct nb_yang_xpath *xpath, struct nb_yang_xpath_elem *elem);
+
+       enum mgmt_result (*get_data)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct mgmt_be_client_txn_ctx *txn_ctx,
+               struct nb_yang_xpath *xpath, bool keys_only,
+               struct nb_yang_xpath_elem **elems, int *num_elems,
+               int *next_key);
+
+       enum mgmt_result (*get_next_data)(
+               uintptr_t lib_hndl, uintptr_t usr_data,
+               struct mgmt_be_client_txn_ctx *txn_ctx,
+               struct nb_yang_xpath *xpath, bool keys_only,
+               struct nb_yang_xpath_elem **elems, int *num_elems);
+};
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1];
+
+static inline const char *mgmt_be_client_id2name(enum mgmt_be_client_id id)
+{
+       if (id > MGMTD_BE_CLIENT_ID_MAX)
+               id = MGMTD_BE_CLIENT_ID_MAX;
+       return mgmt_be_client_names[id];
+}
+
+static inline enum mgmt_be_client_id
+mgmt_be_client_name2id(const char *name)
+{
+       enum mgmt_be_client_id id;
+
+       FOREACH_MGMTD_BE_CLIENT_ID (id) {
+               if (!strncmp(mgmt_be_client_names[id], name,
+                            MGMTD_CLIENT_NAME_MAX_LEN))
+                       return id;
+       }
+
+       return MGMTD_BE_CLIENT_ID_MAX;
+}
+
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ *
+ * params
+ *    Backend client parameters.
+ *
+ * master_thread
+ *    Thread master.
+ *
+ * Returns:
+ *    Backend client lib handler (i.e., the address of mgmt_be_client_ctx)
+ */
+extern uintptr_t
+mgmt_be_client_lib_init(struct mgmt_be_client_params *params,
+                          struct thread_master *master_thread);
+
+/*
+ * Subscribe with MGMTD for one or more YANG subtree(s).
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * reg_yang_xpaths
+ *    YANG xpath(s) that need to be subscribed to.
+ *
+ * num_xpaths
+ *    Number of xpaths
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_be_subscribe_yang_data(uintptr_t lib_hndl,
+                                                      char **reg_yang_xpaths,
+                                                      int num_xpaths);
+
+/*
+ * Send one or more YANG notifications to MGMTD daemon.
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * data_elems
+ *    YANG data elements from the data tree.
+ *
+ * num_elems
+ *    Number of data elements.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_be_send_yang_notify(uintptr_t lib_hndl, Mgmtd__YangData **data_elems,
+                           int num_elems);
+
+/*
+ * Unsubscribe with MGMTD for one or more YANG subtree(s).
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * reg_yang_xpaths
+ *    YANG xpath(s) that need to be unsubscribed from.
+ *
+ * num_reg_xpaths
+ *    Number of subscribed xpaths
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+enum mgmt_result mgmt_be_unsubscribe_yang_data(uintptr_t lib_hndl,
+                                                 char **reg_yang_xpaths,
+                                                 int num_reg_xpaths);
+
+/*
+ * Destroy library and cleanup everything.
+ */
+extern void mgmt_be_client_lib_destroy(uintptr_t lib_hndl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMTD_BE_CLIENT_H_ */
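
For orientation, the following is a minimal, hypothetical sketch of how a backend daemon might consume the API declared above. It is not part of this commit: the example_* names and the subscribed xpath are illustrative assumptions, and mgmt_be_client_params is assumed to be populated per the structure defined earlier in this header.

/* Hypothetical usage sketch of lib/mgmt_be_client.h; example_* names
 * and the xpath are assumptions for illustration only. */
#include <zebra.h>
#include "thread.h"
#include "log.h"
#include "mgmt_be_client.h"

void example_backend_attach(struct mgmt_be_client_params *params,
                            struct thread_master *tm)
{
        uintptr_t lib_hndl;
        char *xpaths[] = {"/frr-interface:lib"}; /* example subtree */

        /* Initialize the library; it schedules its own connect retries. */
        lib_hndl = mgmt_be_client_lib_init(params, tm);

        /* Register interest in one YANG subtree. */
        if (mgmt_be_subscribe_yang_data(lib_hndl, xpaths, 1) != MGMTD_SUCCESS)
                zlog_err("example: YANG subscribe failed");
}

/* On daemon shutdown, tear the library down again. */
void example_backend_detach(uintptr_t lib_hndl)
{
        mgmt_be_client_lib_destroy(lib_hndl);
}
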
diff --git a/lib/mgmt_fe_client.c b/lib/mgmt_fe_client.c
new file mode 100644 (file)
index 0000000..3624922
--- /dev/null
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Library api interfaces
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "memory.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "network.h"
+#include "stream.h"
+#include "sockopt.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_CLIENT_DBG(fmt, ...)                                        \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...)                                        \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_CLIENT_DBG(fmt, ...)                                        \
+       do {                                                                 \
+               if (mgmt_debug_fe_client)                                    \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);     \
+       } while (0)
+#define MGMTD_FE_CLIENT_ERR(fmt, ...)                                        \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+struct mgmt_fe_client_ctx;
+
+PREDECL_LIST(mgmt_sessions);
+
+struct mgmt_fe_client_session {
+       uint64_t client_id;
+       uint64_t session_id;
+       struct mgmt_fe_client_ctx *client_ctx;
+       uintptr_t user_ctx;
+
+       struct mgmt_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_sessions, struct mgmt_fe_client_session, list_linkage);
+
+DEFINE_MTYPE_STATIC(LIB, MGMTD_FE_SESSION, "MGMTD Frontend session");
+
+struct mgmt_fe_client_ctx {
+       int conn_fd;
+       struct thread_master *tm;
+       struct thread *conn_retry_tmr;
+       struct thread *conn_read_ev;
+       struct thread *conn_write_ev;
+       struct thread *conn_writes_on;
+       struct thread *msg_proc_ev;
+       uint32_t flags;
+
+       struct mgmt_msg_state mstate;
+
+       struct mgmt_fe_client_params client_params;
+
+       struct mgmt_sessions_head client_sessions;
+};
+
+#define MGMTD_FE_CLIENT_FLAGS_WRITES_OFF (1U << 0)
+
+#define FOREACH_SESSION_IN_LIST(client_ctx, session)                           \
+       frr_each_safe (mgmt_sessions, &(client_ctx)->client_sessions, (session))
+
+static bool mgmt_debug_fe_client;
+
+static struct mgmt_fe_client_ctx mgmt_fe_client_ctx = {
+       .conn_fd = -1,
+};
+
+/* Forward declarations */
+static void
+mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
+                                 enum mgmt_fe_event event);
+static void mgmt_fe_client_schedule_conn_retry(
+       struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs);
+
+static struct mgmt_fe_client_session *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_ctx *client_ctx,
+                                     uint64_t client_id)
+{
+       struct mgmt_fe_client_session *session;
+
+       FOREACH_SESSION_IN_LIST (client_ctx, session) {
+               if (session->client_id == client_id) {
+                       MGMTD_FE_CLIENT_DBG(
+                               "Found session %p for client-id %llu.", session,
+                               (unsigned long long)client_id);
+                       return session;
+               }
+       }
+
+       return NULL;
+}
+
+static struct mgmt_fe_client_session *
+mgmt_fe_find_session_by_session_id(struct mgmt_fe_client_ctx *client_ctx,
+                                    uint64_t session_id)
+{
+       struct mgmt_fe_client_session *session;
+
+       FOREACH_SESSION_IN_LIST (client_ctx, session) {
+               if (session->session_id == session_id) {
+                       MGMTD_FE_CLIENT_DBG(
+                               "Found session %p for session-id %llu.", session,
+                               (unsigned long long)session_id);
+                       return session;
+               }
+       }
+
+       return NULL;
+}
+
+static void
+mgmt_fe_server_disconnect(struct mgmt_fe_client_ctx *client_ctx,
+                             bool reconnect)
+{
+       if (client_ctx->conn_fd != -1) {
+               close(client_ctx->conn_fd);
+               client_ctx->conn_fd = -1;
+       }
+
+       if (reconnect)
+               mgmt_fe_client_schedule_conn_retry(
+                       client_ctx,
+                       client_ctx->client_params.conn_retry_intvl_sec);
+}
+
+static inline void
+mgmt_fe_client_sched_msg_write(struct mgmt_fe_client_ctx *client_ctx)
+{
+       if (!CHECK_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF))
+               mgmt_fe_client_register_event(client_ctx,
+                                                 MGMTD_FE_CONN_WRITE);
+}
+
+static inline void
+mgmt_fe_client_writes_on(struct mgmt_fe_client_ctx *client_ctx)
+{
+       MGMTD_FE_CLIENT_DBG("Resume writing msgs");
+       UNSET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
+       mgmt_fe_client_sched_msg_write(client_ctx);
+}
+
+static inline void
+mgmt_fe_client_writes_off(struct mgmt_fe_client_ctx *client_ctx)
+{
+       SET_FLAG(client_ctx->flags, MGMTD_FE_CLIENT_FLAGS_WRITES_OFF);
+       MGMTD_FE_CLIENT_DBG("Paused writing msgs");
+}
+
+static int mgmt_fe_client_send_msg(struct mgmt_fe_client_ctx *client_ctx,
+                                  Mgmtd__FeMessage *fe_msg)
+{
+       /* users currently expect this to fail here */
+       if (client_ctx->conn_fd == -1) {
+               MGMTD_FE_CLIENT_DBG("can't send message on closed connection");
+               return -1;
+       }
+
+       int rv = mgmt_msg_send_msg(
+               &client_ctx->mstate, fe_msg,
+               mgmtd__fe_message__get_packed_size(fe_msg),
+               (size_t(*)(void *, void *))mgmtd__fe_message__pack,
+               mgmt_debug_fe_client);
+       mgmt_fe_client_sched_msg_write(client_ctx);
+       return rv;
+}
+
+static void mgmt_fe_client_write(struct thread *thread)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       enum mgmt_msg_wsched rv;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+       rv = mgmt_msg_write(&client_ctx->mstate, client_ctx->conn_fd,
+                           mgmt_debug_fe_client);
+       if (rv == MSW_SCHED_STREAM)
+               mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_WRITE);
+       else if (rv == MSW_DISCONNECT)
+               mgmt_fe_server_disconnect(client_ctx, true);
+       else if (rv == MSW_SCHED_WRITES_OFF) {
+               mgmt_fe_client_writes_off(client_ctx);
+               mgmt_fe_client_register_event(client_ctx,
+                                             MGMTD_FE_CONN_WRITES_ON);
+       } else
+               assert(rv == MSW_SCHED_NONE);
+}
+
+static void mgmt_fe_client_resume_writes(struct thread *thread)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+       assert(client_ctx && client_ctx->conn_fd != -1);
+
+       mgmt_fe_client_writes_on(client_ctx);
+}
+
+static int
+mgmt_fe_send_register_req(struct mgmt_fe_client_ctx *client_ctx)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeRegisterReq rgstr_req;
+
+       mgmtd__fe_register_req__init(&rgstr_req);
+       rgstr_req.client_name = client_ctx->client_params.name;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ;
+       fe_msg.register_req = &rgstr_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending REGISTER_REQ message to MGMTD Frontend server");
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_session_req(struct mgmt_fe_client_ctx *client_ctx,
+                            struct mgmt_fe_client_session *session,
+                            bool create)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeSessionReq sess_req;
+
+       mgmtd__fe_session_req__init(&sess_req);
+       sess_req.create = create;
+       if (create) {
+               sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID;
+               sess_req.client_conn_id = session->client_id;
+       } else {
+               sess_req.id_case = MGMTD__FE_SESSION_REQ__ID_SESSION_ID;
+               sess_req.session_id = session->session_id;
+       }
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ;
+       fe_msg.session_req = &sess_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending SESSION_REQ message for %s session %llu to MGMTD Frontend server",
+               create ? "creating" : "destroying",
+               (unsigned long long)session->client_id);
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_lockds_req(struct mgmt_fe_client_ctx *client_ctx,
+                           struct mgmt_fe_client_session *session, bool lock,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id)
+{
+       (void)req_id;
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeLockDsReq lockds_req;
+
+       mgmtd__fe_lock_ds_req__init(&lockds_req);
+       lockds_req.session_id = session->session_id;
+       lockds_req.req_id = req_id;
+       lockds_req.ds_id = ds_id;
+       lockds_req.lock = lock;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ;
+       fe_msg.lockds_req = &lockds_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending %sLOCK_REQ message for Ds:%d session %llu to MGMTD Frontend server",
+               lock ? "" : "UN", ds_id, (unsigned long long)session->client_id);
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_setcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+                           struct mgmt_fe_client_session *session,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                           Mgmtd__YangCfgDataReq **data_req, int num_data_reqs,
+                           bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
+{
+       (void)req_id;
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeSetConfigReq setcfg_req;
+
+       mgmtd__fe_set_config_req__init(&setcfg_req);
+       setcfg_req.session_id = session->session_id;
+       setcfg_req.ds_id = ds_id;
+       setcfg_req.req_id = req_id;
+       setcfg_req.data = data_req;
+       setcfg_req.n_data = (size_t)num_data_reqs;
+       setcfg_req.implicit_commit = implicit_commit;
+       setcfg_req.commit_ds_id = dst_ds_id;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ;
+       fe_msg.setcfg_req = &setcfg_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending SET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+               ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_commitcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+                              struct mgmt_fe_client_session *session,
+                              uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+                              Mgmtd__DatastoreId dest_ds_id, bool validate_only,
+                              bool abort)
+{
+       (void)req_id;
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeCommitConfigReq commitcfg_req;
+
+       mgmtd__fe_commit_config_req__init(&commitcfg_req);
+       commitcfg_req.session_id = session->session_id;
+       commitcfg_req.src_ds_id = src_ds_id;
+       commitcfg_req.dst_ds_id = dest_ds_id;
+       commitcfg_req.req_id = req_id;
+       commitcfg_req.validate_only = validate_only;
+       commitcfg_req.abort = abort;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ;
+       fe_msg.commcfg_req = &commitcfg_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending COMMIT_CONFIG_REQ message for Src-Ds:%d, Dst-Ds:%d session %llu to MGMTD Frontend server",
+               src_ds_id, dest_ds_id, (unsigned long long)session->client_id);
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_getcfg_req(struct mgmt_fe_client_ctx *client_ctx,
+                           struct mgmt_fe_client_session *session,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                           Mgmtd__YangGetDataReq * data_req[],
+                           int num_data_reqs)
+{
+       (void)req_id;
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeGetConfigReq getcfg_req;
+
+       mgmtd__fe_get_config_req__init(&getcfg_req);
+       getcfg_req.session_id = session->session_id;
+       getcfg_req.ds_id = ds_id;
+       getcfg_req.req_id = req_id;
+       getcfg_req.data = data_req;
+       getcfg_req.n_data = (size_t)num_data_reqs;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ;
+       fe_msg.getcfg_req = &getcfg_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending GET_CONFIG_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+               ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_send_getdata_req(struct mgmt_fe_client_ctx *client_ctx,
+                            struct mgmt_fe_client_session *session,
+                            uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                            Mgmtd__YangGetDataReq * data_req[],
+                            int num_data_reqs)
+{
+       (void)req_id;
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeGetDataReq getdata_req;
+
+       mgmtd__fe_get_data_req__init(&getdata_req);
+       getdata_req.session_id = session->session_id;
+       getdata_req.ds_id = ds_id;
+       getdata_req.req_id = req_id;
+       getdata_req.data = data_req;
+       getdata_req.n_data = (size_t)num_data_reqs;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ;
+       fe_msg.getdata_req = &getdata_req;
+
+       MGMTD_FE_CLIENT_DBG(
+               "Sending GET_DATA_REQ message for Ds:%d session %llu (#xpaths:%d) to MGMTD Frontend server",
+               ds_id, (unsigned long long)session->client_id, num_data_reqs);
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int mgmt_fe_send_regnotify_req(
+       struct mgmt_fe_client_ctx *client_ctx,
+       struct mgmt_fe_client_session *session, uint64_t req_id,
+       Mgmtd__DatastoreId ds_id, bool register_req,
+       Mgmtd__YangDataXPath * data_req[], int num_data_reqs)
+{
+       (void)req_id;
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeRegisterNotifyReq regntfy_req;
+
+       mgmtd__fe_register_notify_req__init(&regntfy_req);
+       regntfy_req.session_id = session->session_id;
+       regntfy_req.ds_id = ds_id;
+       regntfy_req.register_req = register_req;
+       regntfy_req.data_xpath = data_req;
+       regntfy_req.n_data_xpath = (size_t)num_data_reqs;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ;
+       fe_msg.regnotify_req = &regntfy_req;
+
+       return mgmt_fe_client_send_msg(client_ctx, &fe_msg);
+}
+
+static int
+mgmt_fe_client_handle_msg(struct mgmt_fe_client_ctx *client_ctx,
+                             Mgmtd__FeMessage *fe_msg)
+{
+       struct mgmt_fe_client_session *session = NULL;
+
+       /*
+        * protobuf-c adds a max-size enumerator whose internal name changes
+        * from version to version; cast to an int to avoid unhandled-enum
+        * warnings
+        */
+       switch ((int)fe_msg->message_case) {
+       case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+               if (fe_msg->session_reply->create
+                   && fe_msg->session_reply->has_client_conn_id) {
+                       MGMTD_FE_CLIENT_DBG(
+                               "Got Session Create Reply Msg for client-id %llu with session-id: %llu.",
+                               (unsigned long long)
+                                       fe_msg->session_reply->client_conn_id,
+                               (unsigned long long)
+                                       fe_msg->session_reply->session_id);
+
+                       session = mgmt_fe_find_session_by_client_id(
+                               client_ctx,
+                               fe_msg->session_reply->client_conn_id);
+
+                       if (session && fe_msg->session_reply->success) {
+                               MGMTD_FE_CLIENT_DBG(
+                                       "Session Create for client-id %llu successful.",
+                                       (unsigned long long)fe_msg
+                                               ->session_reply->client_conn_id);
+                               session->session_id =
+                                       fe_msg->session_reply->session_id;
+                       } else {
+                               MGMTD_FE_CLIENT_ERR(
+                                       "Session Create for client-id %llu failed.",
+                                       (unsigned long long)fe_msg
+                                               ->session_reply->client_conn_id);
+                       }
+               } else if (!fe_msg->session_reply->create) {
+                       MGMTD_FE_CLIENT_DBG(
+                               "Got Session Destroy Reply Msg for session-id %llu",
+                               (unsigned long long)
+                                       fe_msg->session_reply->session_id);
+
+                       session = mgmt_fe_find_session_by_session_id(
+                               client_ctx, fe_msg->session_reply->session_id);
+               }
+
+               if (session && session->client_ctx
+                   && session->client_ctx->client_params
+                              .client_session_notify)
+                       (*session->client_ctx->client_params
+                                 .client_session_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               session->client_id,
+                               fe_msg->session_reply->create,
+                               fe_msg->session_reply->success,
+                               (uintptr_t)session, session->user_ctx);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+               MGMTD_FE_CLIENT_DBG(
+                       "Got LockDs Reply Msg for session-id %llu",
+                       (unsigned long long)
+                               fe_msg->lockds_reply->session_id);
+               session = mgmt_fe_find_session_by_session_id(
+                       client_ctx, fe_msg->lockds_reply->session_id);
+
+               if (session && session->client_ctx
+                   && session->client_ctx->client_params
+                              .lock_ds_notify)
+                       (*session->client_ctx->client_params
+                                 .lock_ds_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               session->client_id, (uintptr_t)session,
+                               session->user_ctx,
+                               fe_msg->lockds_reply->req_id,
+                               fe_msg->lockds_reply->lock,
+                               fe_msg->lockds_reply->success,
+                               fe_msg->lockds_reply->ds_id,
+                               fe_msg->lockds_reply->error_if_any);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+               MGMTD_FE_CLIENT_DBG(
+                       "Got Set Config Reply Msg for session-id %llu",
+                       (unsigned long long)
+                               fe_msg->setcfg_reply->session_id);
+
+               session = mgmt_fe_find_session_by_session_id(
+                       client_ctx, fe_msg->setcfg_reply->session_id);
+
+               if (session && session->client_ctx
+                   && session->client_ctx->client_params
+                              .set_config_notify)
+                       (*session->client_ctx->client_params
+                                 .set_config_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               session->client_id, (uintptr_t)session,
+                               session->user_ctx,
+                               fe_msg->setcfg_reply->req_id,
+                               fe_msg->setcfg_reply->success,
+                               fe_msg->setcfg_reply->ds_id,
+                               fe_msg->setcfg_reply->error_if_any);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+               MGMTD_FE_CLIENT_DBG(
+                       "Got Commit Config Reply Msg for session-id %llu",
+                       (unsigned long long)
+                               fe_msg->commcfg_reply->session_id);
+
+               session = mgmt_fe_find_session_by_session_id(
+                       client_ctx, fe_msg->commcfg_reply->session_id);
+
+               if (session && session->client_ctx
+                   && session->client_ctx->client_params
+                              .commit_config_notify)
+                       (*session->client_ctx->client_params
+                                 .commit_config_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               session->client_id, (uintptr_t)session,
+                               session->user_ctx,
+                               fe_msg->commcfg_reply->req_id,
+                               fe_msg->commcfg_reply->success,
+                               fe_msg->commcfg_reply->src_ds_id,
+                               fe_msg->commcfg_reply->dst_ds_id,
+                               fe_msg->commcfg_reply->validate_only,
+                               fe_msg->commcfg_reply->error_if_any);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
+               MGMTD_FE_CLIENT_DBG(
+                       "Got Get Config Reply Msg for session-id %llu",
+                       (unsigned long long)
+                               fe_msg->getcfg_reply->session_id);
+
+               session = mgmt_fe_find_session_by_session_id(
+                       client_ctx, fe_msg->getcfg_reply->session_id);
+
+               if (session && session->client_ctx
+                   && session->client_ctx->client_params
+                              .get_data_notify)
+                       (*session->client_ctx->client_params
+                                 .get_data_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               session->client_id, (uintptr_t)session,
+                               session->user_ctx,
+                               fe_msg->getcfg_reply->req_id,
+                               fe_msg->getcfg_reply->success,
+                               fe_msg->getcfg_reply->ds_id,
+                               fe_msg->getcfg_reply->data
+                                       ? fe_msg->getcfg_reply->data->data
+                                       : NULL,
+                               fe_msg->getcfg_reply->data
+                                       ? fe_msg->getcfg_reply->data->n_data
+                                       : 0,
+                               fe_msg->getcfg_reply->data
+                                       ? fe_msg->getcfg_reply->data
+                                                 ->next_indx
+                                       : 0,
+                               fe_msg->getcfg_reply->error_if_any);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+               MGMTD_FE_CLIENT_DBG(
+                       "Got Get Data Reply Msg for session-id %llu",
+                       (unsigned long long)
+                               fe_msg->getdata_reply->session_id);
+
+               session = mgmt_fe_find_session_by_session_id(
+                       client_ctx, fe_msg->getdata_reply->session_id);
+
+               if (session && session->client_ctx
+                   && session->client_ctx->client_params
+                              .get_data_notify)
+                       (*session->client_ctx->client_params
+                                 .get_data_notify)(
+                               (uintptr_t)client_ctx,
+                               client_ctx->client_params.user_data,
+                               session->client_id, (uintptr_t)session,
+                               session->user_ctx,
+                               fe_msg->getdata_reply->req_id,
+                               fe_msg->getdata_reply->success,
+                               fe_msg->getdata_reply->ds_id,
+                               fe_msg->getdata_reply->data
+                                       ? fe_msg->getdata_reply->data->data
+                                       : NULL,
+                               fe_msg->getdata_reply->data
+                                       ? fe_msg->getdata_reply->data
+                                                 ->n_data
+                                       : 0,
+                               fe_msg->getdata_reply->data
+                                       ? fe_msg->getdata_reply->data
+                                                 ->next_indx
+                                       : 0,
+                               fe_msg->getdata_reply->error_if_any);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+               /*
+                * TODO: Add handling code in future.
+                */
+               break;
+       /*
+        * NOTE: The following messages are only ever sent from Frontend
+        * clients to MGMTD and need not be handled here.
+        */
+       case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+       default:
+               /*
+                * A 'default' case is added, contrary to the FRR code
+                * guidelines, to avoid build failures on certain build
+                * systems (courtesy of the protobuf-c package).
+                */
+               break;
+       }
+
+       return 0;
+}
+
+static void mgmt_fe_client_process_msg(void *user_ctx, uint8_t *data,
+                                      size_t len)
+{
+       struct mgmt_fe_client_ctx *client_ctx = user_ctx;
+       Mgmtd__FeMessage *fe_msg;
+
+       fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+       if (!fe_msg) {
+               MGMTD_FE_CLIENT_DBG("Failed to decode %zu bytes from server.",
+                                   len);
+               return;
+       }
+       MGMTD_FE_CLIENT_DBG(
+               "Decoded %zu bytes of message(msg: %u/%u) from server", len,
+               fe_msg->message_case, fe_msg->message_case);
+       (void)mgmt_fe_client_handle_msg(client_ctx, fe_msg);
+       mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+}
+
+static void mgmt_fe_client_proc_msgbufs(struct thread *thread)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+       if (mgmt_msg_procbufs(&client_ctx->mstate, mgmt_fe_client_process_msg,
+                             client_ctx, mgmt_debug_fe_client))
+               mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
+}
+
+static void mgmt_fe_client_read(struct thread *thread)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       enum mgmt_msg_rsched rv;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)THREAD_ARG(thread);
+
+       rv = mgmt_msg_read(&client_ctx->mstate, client_ctx->conn_fd,
+                          mgmt_debug_fe_client);
+       if (rv == MSR_DISCONNECT) {
+               mgmt_fe_server_disconnect(client_ctx, true);
+               return;
+       }
+       if (rv == MSR_SCHED_BOTH)
+               mgmt_fe_client_register_event(client_ctx, MGMTD_FE_PROC_MSG);
+       mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
+}
+
+static void mgmt_fe_server_connect(struct mgmt_fe_client_ctx *client_ctx)
+{
+       const char *dbgtag = mgmt_debug_fe_client ? "FE-client" : NULL;
+
+       assert(client_ctx->conn_fd == -1);
+       client_ctx->conn_fd = mgmt_msg_connect(
+               MGMTD_FE_SERVER_PATH, MGMTD_SOCKET_FE_SEND_BUF_SIZE,
+               MGMTD_SOCKET_FE_RECV_BUF_SIZE, dbgtag);
+
+       /* Send REGISTER_REQ message */
+       if (client_ctx->conn_fd == -1 ||
+           mgmt_fe_send_register_req(client_ctx) != 0) {
+               mgmt_fe_server_disconnect(client_ctx, true);
+               return;
+       }
+
+       /* Start reading from the socket */
+       mgmt_fe_client_register_event(client_ctx, MGMTD_FE_CONN_READ);
+
+       /* Notify client through registered callback (if any) */
+       if (client_ctx->client_params.client_connect_notify)
+               (void)(*client_ctx->client_params.client_connect_notify)(
+                       (uintptr_t)client_ctx,
+                       client_ctx->client_params.user_data, true);
+}
+
+
+static void mgmt_fe_client_conn_timeout(struct thread *thread)
+{
+       mgmt_fe_server_connect(THREAD_ARG(thread));
+}
+
+static void
+mgmt_fe_client_register_event(struct mgmt_fe_client_ctx *client_ctx,
+                                 enum mgmt_fe_event event)
+{
+       struct timeval tv = {0};
+
+       switch (event) {
+       case MGMTD_FE_CONN_READ:
+               thread_add_read(client_ctx->tm, mgmt_fe_client_read,
+                               client_ctx, client_ctx->conn_fd,
+                               &client_ctx->conn_read_ev);
+               assert(client_ctx->conn_read_ev);
+               break;
+       case MGMTD_FE_CONN_WRITE:
+               thread_add_write(client_ctx->tm, mgmt_fe_client_write,
+                                client_ctx, client_ctx->conn_fd,
+                                &client_ctx->conn_write_ev);
+               assert(client_ctx->conn_write_ev);
+               break;
+       case MGMTD_FE_PROC_MSG:
+               tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
+               thread_add_timer_tv(client_ctx->tm,
+                                   mgmt_fe_client_proc_msgbufs, client_ctx,
+                                   &tv, &client_ctx->msg_proc_ev);
+               assert(client_ctx->msg_proc_ev);
+               break;
+       case MGMTD_FE_CONN_WRITES_ON:
+               thread_add_timer_msec(
+                       client_ctx->tm, mgmt_fe_client_resume_writes,
+                       client_ctx, MGMTD_FE_MSG_WRITE_DELAY_MSEC,
+                       &client_ctx->conn_writes_on);
+               assert(client_ctx->conn_writes_on);
+               break;
+       case MGMTD_FE_SERVER:
+               assert(!"mgmt_fe_client_ctx_post_event called incorrectly");
+               break;
+       }
+}
+
+static void mgmt_fe_client_schedule_conn_retry(
+       struct mgmt_fe_client_ctx *client_ctx, unsigned long intvl_secs)
+{
+       MGMTD_FE_CLIENT_DBG(
+               "Scheduling MGMTD Frontend server connection retry after %lu seconds",
+               intvl_secs);
+       thread_add_timer(client_ctx->tm, mgmt_fe_client_conn_timeout,
+                        (void *)client_ctx, intvl_secs,
+                        &client_ctx->conn_retry_tmr);
+}
+
+/*
+ * Initialize library and try connecting with MGMTD.
+ */
+uintptr_t mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
+                                    struct thread_master *master_thread)
+{
+       assert(master_thread && params && strlen(params->name)
+              && !mgmt_fe_client_ctx.tm);
+
+       mgmt_fe_client_ctx.tm = master_thread;
+       memcpy(&mgmt_fe_client_ctx.client_params, params,
+              sizeof(mgmt_fe_client_ctx.client_params));
+       if (!mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec)
+               mgmt_fe_client_ctx.client_params.conn_retry_intvl_sec =
+                       MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC;
+
+       mgmt_msg_init(&mgmt_fe_client_ctx.mstate, MGMTD_FE_MAX_NUM_MSG_PROC,
+                     MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
+                     "FE-client");
+
+       mgmt_sessions_init(&mgmt_fe_client_ctx.client_sessions);
+
+       /* Start trying to connect to MGMTD frontend server immediately */
+       mgmt_fe_client_schedule_conn_retry(&mgmt_fe_client_ctx, 1);
+
+       MGMTD_FE_CLIENT_DBG("Initialized client '%s'", params->name);
+
+       return (uintptr_t)&mgmt_fe_client_ctx;
+}
+
+/*
+ * Create a new Session for a Frontend Client connection.
+ */
+enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+                                                  uint64_t client_id,
+                                                  uintptr_t user_ctx)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+                       sizeof(struct mgmt_fe_client_session));
+       assert(session);
+       session->user_ctx = user_ctx;
+       session->client_id = client_id;
+       session->client_ctx = client_ctx;
+       session->session_id = 0;
+
+       if (mgmt_fe_send_session_req(client_ctx, session, true) != 0) {
+               XFREE(MTYPE_MGMTD_FE_SESSION, session);
+               return MGMTD_INTERNAL_ERROR;
+       }
+       mgmt_sessions_add_tail(&client_ctx->client_sessions, session);
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Delete an existing Session for a Frontend Client connection.
+ */
+enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+                                               uint64_t client_id)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = mgmt_fe_find_session_by_client_id(client_ctx, client_id);
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (session->session_id &&
+           mgmt_fe_send_session_req(client_ctx, session, false) != 0)
+               MGMTD_FE_CLIENT_ERR(
+                       "Failed to send session destroy request for the session-id %lu",
+                       (unsigned long)session->session_id);
+
+       mgmt_sessions_del(&client_ctx->client_sessions, session);
+       XFREE(MTYPE_MGMTD_FE_SESSION, session);
+
+       return MGMTD_SUCCESS;
+}
+
+static void mgmt_fe_destroy_client_sessions(uintptr_t lib_hndl)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return;
+
+       FOREACH_SESSION_IN_LIST (client_ctx, session)
+               mgmt_fe_destroy_client_session(lib_hndl, session->client_id);
+}
+
+/*
+ * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
+ */
+enum mgmt_result mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id,
+                                    uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                                    bool lock_ds)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = (struct mgmt_fe_client_session *)session_id;
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_fe_send_lockds_req(client_ctx, session, lock_ds, req_id,
+                                       ds_id)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
+ */
+enum mgmt_result
+mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                           Mgmtd__YangCfgDataReq **config_req, int num_reqs,
+                           bool implicit_commit, Mgmtd__DatastoreId dst_ds_id)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = (struct mgmt_fe_client_session *)session_id;
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_fe_send_setcfg_req(client_ctx, session, req_id, ds_id,
+                                       config_req, num_reqs, implicit_commit,
+                                       dst_ds_id)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Send COMMIT_CONFIG_REQ to MGMTD to commit one or more config data item(s).
+ */
+enum mgmt_result mgmt_fe_commit_config_data(uintptr_t lib_hndl,
+                                               uintptr_t session_id,
+                                               uint64_t req_id,
+                                               Mgmtd__DatastoreId src_ds_id,
+                                               Mgmtd__DatastoreId dst_ds_id,
+                                               bool validate_only, bool abort)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = (struct mgmt_fe_client_session *)session_id;
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_fe_send_commitcfg_req(client_ctx, session, req_id, src_ds_id,
+                                          dst_ds_id, validate_only, abort)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ */
+enum mgmt_result
+mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                           Mgmtd__YangGetDataReq * data_req[], int num_reqs)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = (struct mgmt_fe_client_session *)session_id;
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_fe_send_getcfg_req(client_ctx, session, req_id, ds_id,
+                                       data_req, num_reqs)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Send GET_DATA_REQ to MGMTD for one or more config data item(s).
+ */
+enum mgmt_result mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id,
+                                     uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                                     Mgmtd__YangGetDataReq * data_req[],
+                                     int num_reqs)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = (struct mgmt_fe_client_session *)session_id;
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_fe_send_getdata_req(client_ctx, session, req_id, ds_id,
+                                        data_req, num_reqs)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
+ */
+enum mgmt_result
+mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
+                                uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                                bool register_req,
+                                Mgmtd__YangDataXPath * data_req[],
+                                int num_reqs)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+       struct mgmt_fe_client_session *session;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       if (!client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       session = (struct mgmt_fe_client_session *)session_id;
+       if (!session || session->client_ctx != client_ctx)
+               return MGMTD_INVALID_PARAM;
+
+       if (mgmt_fe_send_regnotify_req(client_ctx, session, req_id, ds_id,
+                                          register_req, data_req, num_reqs)
+           != 0)
+               return MGMTD_INTERNAL_ERROR;
+
+       return MGMTD_SUCCESS;
+}
+
+/*
+ * Destroy library and cleanup everything.
+ */
+void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl)
+{
+       struct mgmt_fe_client_ctx *client_ctx;
+
+       client_ctx = (struct mgmt_fe_client_ctx *)lib_hndl;
+       assert(client_ctx);
+
+       MGMTD_FE_CLIENT_DBG("Destroying MGMTD Frontend Client '%s'",
+                             client_ctx->client_params.name);
+
+       mgmt_fe_server_disconnect(client_ctx, false);
+
+       mgmt_fe_destroy_client_sessions(lib_hndl);
+
+       THREAD_OFF(client_ctx->conn_retry_tmr);
+       THREAD_OFF(client_ctx->conn_read_ev);
+       THREAD_OFF(client_ctx->conn_write_ev);
+       THREAD_OFF(client_ctx->conn_writes_on);
+       THREAD_OFF(client_ctx->msg_proc_ev);
+       mgmt_msg_destroy(&client_ctx->mstate);
+}
diff --git a/lib/mgmt_fe_client.h b/lib/mgmt_fe_client.h
new file mode 100644 (file)
index 0000000..ac29b8f
--- /dev/null
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Library api interfaces
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_CLIENT_H_
+#define _FRR_MGMTD_FE_CLIENT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mgmt_pb.h"
+#include "thread.h"
+#include "mgmtd/mgmt_defines.h"
+
+/***************************************************************
+ * Macros
+ ***************************************************************/
+
+/*
+ * The server socket on which the MGMTD daemon listens for Frontend
+ * Client connections.
+ */
+
+#define MGMTD_FE_CLIENT_ERROR_STRING_MAX_LEN 32
+
+#define MGMTD_FE_DEFAULT_CONN_RETRY_INTVL_SEC 5
+
+#define MGMTD_FE_MSG_PROC_DELAY_USEC 10
+#define MGMTD_FE_MAX_NUM_MSG_PROC 500
+
+#define MGMTD_FE_MSG_WRITE_DELAY_MSEC 1
+#define MGMTD_FE_MAX_NUM_MSG_WRITE 100
+
+#define GMGD_FE_MAX_NUM_REQ_ITEMS 64
+
+#define MGMTD_FE_MSG_MAX_LEN 9000
+
+#define MGMTD_SOCKET_FE_SEND_BUF_SIZE 65535
+#define MGMTD_SOCKET_FE_RECV_BUF_SIZE MGMTD_SOCKET_FE_SEND_BUF_SIZE
+
+/***************************************************************
+ * Data-structures
+ ***************************************************************/
+
+#define MGMTD_SESSION_ID_NONE 0
+
+#define MGMTD_CLIENT_ID_NONE 0
+
+#define MGMTD_DS_NONE MGMTD__DATASTORE_ID__DS_NONE
+#define MGMTD_DS_RUNNING MGMTD__DATASTORE_ID__RUNNING_DS
+#define MGMTD_DS_CANDIDATE MGMTD__DATASTORE_ID__CANDIDATE_DS
+#define MGMTD_DS_OPERATIONAL MGMTD__DATASTORE_ID__OPERATIONAL_DS
+#define MGMTD_DS_MAX_ID MGMTD_DS_OPERATIONAL + 1
+
+/*
+ * All the client specific information this library needs to
+ * initialize itself, setup connection with MGMTD FrontEnd interface
+ * and carry on all required procedures appropriately.
+ *
+ * FrontEnd clients need to initialise a instance of this structure
+ * with appropriate data and pass it while calling the API
+ * to initialize the library (See mgmt_fe_client_lib_init for
+ * more details).
+ */
+struct mgmt_fe_client_params {
+       char name[MGMTD_CLIENT_NAME_MAX_LEN];
+       uintptr_t user_data;
+       unsigned long conn_retry_intvl_sec;
+
+       void (*client_connect_notify)(uintptr_t lib_hndl,
+                                     uintptr_t user_data,
+                                     bool connected);
+
+       void (*client_session_notify)(uintptr_t lib_hndl,
+                                     uintptr_t user_data,
+                                     uint64_t client_id,
+                                     bool create, bool success,
+                                     uintptr_t session_id,
+                                     uintptr_t user_session_ctx);
+
+       void (*lock_ds_notify)(uintptr_t lib_hndl, uintptr_t user_data,
+                              uint64_t client_id, uintptr_t session_id,
+                              uintptr_t user_session_ctx, uint64_t req_id,
+                              bool lock_ds, bool success,
+                              Mgmtd__DatastoreId ds_id, char *errmsg_if_any);
+
+       void (*set_config_notify)(uintptr_t lib_hndl, uintptr_t user_data,
+                                 uint64_t client_id, uintptr_t session_id,
+                                 uintptr_t user_session_ctx, uint64_t req_id,
+                                 bool success, Mgmtd__DatastoreId ds_id,
+                                 char *errmsg_if_any);
+
+       void (*commit_config_notify)(
+               uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
+               uintptr_t session_id, uintptr_t user_session_ctx,
+               uint64_t req_id, bool success, Mgmtd__DatastoreId src_ds_id,
+               Mgmtd__DatastoreId dst_ds_id, bool validate_only,
+               char *errmsg_if_any);
+
+       enum mgmt_result (*get_data_notify)(
+               uintptr_t lib_hndl, uintptr_t user_data, uint64_t client_id,
+               uintptr_t session_id, uintptr_t user_session_ctx,
+               uint64_t req_id, bool success, Mgmtd__DatastoreId ds_id,
+               Mgmtd__YangData **yang_data, size_t num_data, int next_key,
+               char *errmsg_if_any);
+
+       enum mgmt_result (*data_notify)(
+               uint64_t client_id, uint64_t session_id, uintptr_t user_data,
+               uint64_t req_id, Mgmtd__DatastoreId ds_id,
+               Mgmtd__YangData **yang_data, size_t num_data);
+};
+
+/***************************************************************
+ * API prototypes
+ ***************************************************************/
+
+/*
+ * Initialize library and try connecting with MGMTD FrontEnd interface.
+ *
+ * params
+ *    Frontend client parameters.
+ *
+ * master_thread
+ *    Thread master.
+ *
+ * Returns:
+ *    Frontend client lib handler (nothing but address of mgmt_fe_client_ctx)
+ */
+extern uintptr_t
+mgmt_fe_client_lib_init(struct mgmt_fe_client_params *params,
+                           struct thread_master *master_thread);
+
+/*
+ * Create a new Session for a Frontend Client connection.
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * client_id
+ *    Unique identifier of client.
+ *
+ * user_ctx
+ *    Client context.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_fe_create_client_session(uintptr_t lib_hndl,
+                                                         uint64_t client_id,
+                                                         uintptr_t user_ctx);
+
+/*
+ * Delete an existing Session for a Frontend Client connection.
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * client_id
+ *    Unique identifier of client.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result mgmt_fe_destroy_client_session(uintptr_t lib_hndl,
+                                                      uint64_t client_id);
+
+/*
+ * Send UN/LOCK_DS_REQ to MGMTD for a specific Datastore DS.
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * session_id
+ *    Client session ID.
+ *
+ * req_id
+ *    Client request ID.
+ *
+ * ds_id
+ *    Datastore ID (Running/Candidate/Oper/Startup)
+ *
+ * lock_ds
+ *    TRUE for lock request, FALSE for unlock request.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_lock_ds(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
+                   Mgmtd__DatastoreId ds_id, bool lock_ds);
+
+/*
+ * Send SET_CONFIG_REQ to MGMTD for one or more config data(s).
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * session_id
+ *    Client session ID.
+ *
+ * req_id
+ *    Client request ID.
+ *
+ * ds_id
+ *    Datastore ID (Running/Candidate/Oper/Startup)
+ *
+ * config_req
+ *    Details regarding the SET_CONFIG_REQ.
+ *
+ * num_req
+ *    Number of config requests.
+ *
+ * implicit_commit
+ *    TRUE for implicit commit, FALSE otherwise.
+ *
+ * dst_ds_id
+ *    Destination Datastore ID where data needs to be set.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_set_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                           Mgmtd__YangCfgDataReq **config_req, int num_req,
+                           bool implicit_commit, Mgmtd__DatastoreId dst_ds_id);
+
+/*
+ * Send COMMIT_CONFIG_REQ to MGMTD to commit config data from one
+ * datastore to another.
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * session_id
+ *    Client session ID.
+ *
+ * req_id
+ *    Client request ID.
+ *
+ * src_ds_id
+ *    Source datastore ID from where data needs to be committed from.
+ *
+ * dst_ds_id
+ *    Destination datastore ID where data needs to be committed to.
+ *
+ * validate_only
+ *    TRUE if data needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ *    TRUE if the Src DS needs to be restored back to the Dest DS, FALSE otherwise.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_commit_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+                              uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+                              Mgmtd__DatastoreId dst_ds_id, bool validate_only,
+                              bool abort);
+
+/*
+ * Send GET_CONFIG_REQ to MGMTD for one or more config data item(s).
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * session_id
+ *    Client session ID.
+ *
+ * req_id
+ *    Client request ID.
+ *
+ * ds_id
+ *    Datastore ID (Running/Candidate)
+ *
+ * data_req
+ *    Get config requested.
+ *
+ * num_req
+ *    Number of get config requests.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_get_config_data(uintptr_t lib_hndl, uintptr_t session_id,
+                           uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                           Mgmtd__YangGetDataReq **data_req, int num_reqs);
+
+/*
+ * Send GET_DATA_REQ to MGMTD for one or more data item(s).
+ *
+ * Similar to the get-config request, but supports fetching data from the
+ * operational datastore, i.e. directly from the backend clients.
+ */
+extern enum mgmt_result
+mgmt_fe_get_data(uintptr_t lib_hndl, uintptr_t session_id, uint64_t req_id,
+                    Mgmtd__DatastoreId ds_id, Mgmtd__YangGetDataReq **data_req,
+                    int num_reqs);
+
+/*
+ * Send NOTIFY_REGISTER_REQ to MGMTD daemon.
+ *
+ * lib_hndl
+ *    Client library handler.
+ *
+ * session_id
+ *    Client session ID.
+ *
+ * req_id
+ *    Client request ID.
+ *
+ * ds_id
+ *    Datastore ID.
+ *
+ * register_req
+ *    TRUE if registering, FALSE otherwise.
+ *
+ * data_req
+ *    Details of the YANG notification data.
+ *
+ * num_reqs
+ *    Number of data requests.
+ *
+ * Returns:
+ *    MGMTD_SUCCESS on success, MGMTD_* otherwise.
+ */
+extern enum mgmt_result
+mgmt_fe_register_yang_notify(uintptr_t lib_hndl, uintptr_t session_id,
+                                uint64_t req_id, Mgmtd__DatastoreId ds_id,
+                                bool register_req,
+                                Mgmtd__YangDataXPath **data_req, int num_reqs);
+
+/*
+ * Destroy library and cleanup everything.
+ */
+extern void mgmt_fe_client_lib_destroy(uintptr_t lib_hndl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FRR_MGMTD_FE_CLIENT_H_ */
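
To tie the frontend API together, here is a minimal, hypothetical sketch of the expected call flow. Only the function, struct, enum and macro names come from this header; the callback body, the client-id and req-id values, and the choice of the candidate datastore are illustrative assumptions, not part of this commit.

/* Hypothetical usage sketch of lib/mgmt_fe_client.h; example_* names
 * and the concrete id values are assumptions for illustration only. */
#include <zebra.h>
#include "thread.h"
#include "log.h"
#include "mgmt_fe_client.h"

static uintptr_t example_lib_hndl;

/* MGMTD acks session creation asynchronously; the session handle
 * arrives here and can then be used for datastore operations. */
static void example_session_notify(uintptr_t lib_hndl, uintptr_t user_data,
                                   uint64_t client_id, bool create,
                                   bool success, uintptr_t session_id,
                                   uintptr_t user_session_ctx)
{
        if (!create || !success)
                return;

        /* Lock the candidate DS before pushing any configuration. */
        if (mgmt_fe_lock_ds(lib_hndl, session_id, 1 /* req_id */,
                            MGMTD_DS_CANDIDATE, true) != MGMTD_SUCCESS)
                zlog_err("example: LOCK_DS_REQ failed");
}

static struct mgmt_fe_client_params example_params = {
        .name = "example-frontend",
        .conn_retry_intvl_sec = 5,
        .client_session_notify = example_session_notify,
};

void example_frontend_start(struct thread_master *tm)
{
        example_lib_hndl = mgmt_fe_client_lib_init(&example_params, tm);

        /* Request a session; the reply lands in example_session_notify(). */
        if (mgmt_fe_create_client_session(example_lib_hndl, 1 /* client_id */,
                                          0) != MGMTD_SUCCESS)
                zlog_err("example: session create request failed");
}
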
diff --git a/lib/mgmt_msg.c b/lib/mgmt_msg.c
new file mode 100644 (file)
index 0000000..2fab03b
--- /dev/null
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * March 6 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+#include <zebra.h>
+#include "network.h"
+#include "sockopt.h"
+#include "stream.h"
+#include "thread.h"
+#include "mgmt_msg.h"
+
+
+#define MGMT_MSG_DBG(dbgtag, fmt, ...)                                         \
+       do {                                                                   \
+               if (dbgtag)                                                    \
+                       zlog_debug("%s: %s: " fmt, dbgtag, __func__,           \
+                                  ##__VA_ARGS__);                             \
+       } while (0)
+
+#define MGMT_MSG_ERR(ms, fmt, ...)                                             \
+       zlog_err("%s: %s: " fmt, ms->idtag, __func__, ##__VA_ARGS__)
+
+/**
+ * Read data from a socket into streams containing one or more full messages,
+ * each headed by a mgmt_msg_hdr and containing an API message (currently
+ * protobuf).
+ *
+ * Args:
+ *     ms: mgmt_msg_state for this process.
+ *     fd: socket/file to read data from.
+ *     debug: true to enable debug logging.
+ *
+ * Returns:
+ *     MSR_DISCONNECT - socket should be closed and connect retried.
+ *     MSR_SCHED_STREAM - this call should be rescheduled to run.
+ *     MSR_SCHED_BOTH - this call and the procmsg handler should both be
+ *     rescheduled to run.
+ */
+enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
+                                  bool debug)
+{
+       const char *dbgtag = debug ? ms->idtag : NULL;
+       size_t avail = STREAM_WRITEABLE(ms->ins);
+       struct mgmt_msg_hdr *mhdr = NULL;
+       size_t total = 0;
+       size_t mcount = 0;
+       ssize_t n, left;
+
+       assert(ms && fd != -1);
+
+       /*
+        * Read as much as we can into the stream.
+        */
+       while (avail > sizeof(struct mgmt_msg_hdr)) {
+               n = stream_read_try(ms->ins, fd, avail);
+               MGMT_MSG_DBG(dbgtag, "got %zd bytes", n);
+
+               /* -2 is normal nothing read, and to retry */
+               if (n == -2)
+                       break;
+               if (n <= 0) {
+                       if (n == 0)
+                               MGMT_MSG_ERR(ms, "got EOF/disconnect");
+                       else
+                               MGMT_MSG_ERR(ms,
+                                            "got error while reading: '%s'",
+                                            safe_strerror(errno));
+                       return MSR_DISCONNECT;
+               }
+               ms->nrxb += n;
+               avail -= n;
+       }
+
+       /*
+        * Check whether we have read one or more complete messages.
+        */
+       assert(stream_get_getp(ms->ins) == 0);
+       left = stream_get_endp(ms->ins);
+       while (left > (long)sizeof(struct mgmt_msg_hdr)) {
+               mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
+               if (mhdr->marker != MGMT_MSG_MARKER) {
+                       MGMT_MSG_DBG(dbgtag, "recv corrupt buffer, disconnect");
+                       return MSR_DISCONNECT;
+               }
+               if ((ssize_t)mhdr->len > left)
+                       break;
+
+               MGMT_MSG_DBG(dbgtag, "read full message len %u", mhdr->len);
+               total += mhdr->len;
+               left -= mhdr->len;
+               mcount++;
+       }
+
+       if (!mcount)
+               return MSR_SCHED_STREAM;
+
+       /*
+        * We have read at least one message into the stream, queue it up.
+        */
+       mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(ms->ins) + total);
+       stream_set_endp(ms->ins, total);
+       stream_fifo_push(&ms->inq, ms->ins);
+       ms->ins = stream_new(ms->max_msg_sz);
+       if (left) {
+               stream_put(ms->ins, mhdr, left);
+               stream_set_endp(ms->ins, left);
+       }
+
+       return MSR_SCHED_BOTH;
+}
+
+/**
+ * Process streams containing whole messages that have been pushed onto the
+ * FIFO. This should be called from an event/timer handler and should be
+ * reschedulable.
+ *
+ * Args:
+ *     ms: mgmt_msg_state for this process.
+ *     handle_msg: function to call for each received message.
+ *     user: opaque value passed through to handle_msg.
+ *     debug: true to enable debug logging.
+ *
+ * Returns:
+ *     true if there is more to process (so reschedule), otherwise false.
+ */
+bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
+                      void (*handle_msg)(void *user, uint8_t *msg,
+                                         size_t msglen),
+                      void *user, bool debug)
+{
+       const char *dbgtag = debug ? ms->idtag : NULL;
+       struct mgmt_msg_hdr *mhdr;
+       struct stream *work;
+       uint8_t *data;
+       size_t left, nproc;
+
+       MGMT_MSG_DBG(dbgtag, "Have %zu streams to process", ms->inq.count);
+
+       nproc = 0;
+       while (nproc < ms->max_read_buf) {
+               work = stream_fifo_pop(&ms->inq);
+               if (!work)
+                       break;
+
+               data = STREAM_DATA(work);
+               left = stream_get_endp(work);
+               MGMT_MSG_DBG(dbgtag, "Processing stream of len %zu", left);
+
+               for (; left > sizeof(struct mgmt_msg_hdr);
+                    left -= mhdr->len, data += mhdr->len) {
+                       mhdr = (struct mgmt_msg_hdr *)data;
+
+                       assert(mhdr->marker == MGMT_MSG_MARKER);
+                       assert(left >= mhdr->len);
+
+                       handle_msg(user, (uint8_t *)(mhdr + 1),
+                                  mhdr->len - sizeof(struct mgmt_msg_hdr));
+                       ms->nrxm++;
+                       nproc++;
+               }
+
+               if (work != ms->ins)
+                       stream_free(work); /* Free it up */
+               else
+                       stream_reset(work); /* Reset stream for next read */
+       }
+
+       /* return true if should reschedule b/c more to process. */
+       return stream_fifo_head(&ms->inq) != NULL;
+}
+
+/**
+ * Write data onto the socket, using streams that have been queued for
+ * sending by mgmt_msg_send_msg. This function should be reschedulable.
+ *
+ * Args:
+ *     ms: mgmt_msg_state for this process.
+ *     fd: socket/file to write data to.
+ *     debug: true to enable debug logging.
+ *
+ * Returns:
+ *     MSW_SCHED_NONE - do not reschedule anything.
+ *     MSW_SCHED_STREAM - this call should be rescheduled to run again.
+ *     MSW_SCHED_WRITES_OFF - writes should be disabled with a timer to
+ *         re-enable them a short time later.
+ *     MSW_DISCONNECT - socket should be closed and reconnect retried.
+ */
+enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
+                                   bool debug)
+{
+       const char *dbgtag = debug ? ms->idtag : NULL;
+       struct stream *s;
+       size_t nproc = 0;
+       ssize_t left;
+       ssize_t n;
+
+       if (ms->outs) {
+               MGMT_MSG_DBG(dbgtag,
+                            "found unqueued stream with %zu bytes, queueing",
+                            stream_get_endp(ms->outs));
+               stream_fifo_push(&ms->outq, ms->outs);
+               ms->outs = NULL;
+       }
+
+       for (s = stream_fifo_head(&ms->outq); s && nproc < ms->max_write_buf;
+            s = stream_fifo_head(&ms->outq)) {
+               left = STREAM_READABLE(s);
+               assert(left);
+
+               n = stream_flush(s, fd);
+               if (n <= 0) {
+                       if (n == 0)
+                               MGMT_MSG_ERR(ms,
+                                            "connection closed while writing");
+                       else if (ERRNO_IO_RETRY(errno)) {
+                               MGMT_MSG_DBG(
+                                       dbgtag,
+                                       "retry error while writing %zd bytes: %s (%d)",
+                                       left, safe_strerror(errno), errno);
+                               return MSW_SCHED_STREAM;
+                       } else
+                               MGMT_MSG_ERR(
+                                       ms,
+                                       "error while writing %zd bytes: %s (%d)",
+                                       left, safe_strerror(errno), errno);
+
+                       n = mgmt_msg_reset_writes(ms);
+                       MGMT_MSG_DBG(dbgtag, "drop and freed %zd streams", n);
+
+                       return MSW_DISCONNECT;
+               }
+
+               ms->ntxb += n;
+               if (n != left) {
+                       MGMT_MSG_DBG(dbgtag, "short stream write %zd of %zd", n,
+                                    left);
+                       stream_forward_getp(s, n);
+                       return MSW_SCHED_STREAM;
+               }
+
+               stream_free(stream_fifo_pop(&ms->outq));
+               MGMT_MSG_DBG(dbgtag, "wrote stream of %zd bytes", n);
+               nproc++;
+       }
+       if (s) {
+               MGMT_MSG_DBG(
+                       dbgtag,
+                       "reached %zu buffer writes, pausing with %zu streams left",
+                       ms->max_write_buf, ms->outq.count);
+               return MSW_SCHED_WRITES_OFF;
+       }
+       MGMT_MSG_DBG(dbgtag, "flushed all streams from output q");
+       return MSW_SCHED_NONE;
+}
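As a usage note, the caller's write event handler is expected to map these return values onto its own scheduling; a hypothetical handler (the function name and the scheduling left as comments are illustrative, not part of this commit):

    static void example_write_cb(struct mgmt_msg_state *ms, int fd)
    {
            switch (mgmt_msg_write(ms, fd, false)) {
            case MSW_SCHED_NONE:
                    break;  /* everything flushed, nothing to reschedule */
            case MSW_SCHED_STREAM:
                    /* reschedule example_write_cb when fd becomes writable */
                    break;
            case MSW_SCHED_WRITES_OFF:
                    /* turn write events off and start a short timer to
                     * re-enable them */
                    break;
            case MSW_DISCONNECT:
                    /* close fd and begin reconnecting */
                    break;
            }
    }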
+
+
+/**
+ * Send a message by enqueueing it to be written over the socket by
+ * mgmt_msg_write.
+ *
+ * Args:
+ *     ms: mgmt_msg_state for this process.
+ *     msg: the message to be sent.
+ *     len: length of the message.
+ *     packf: callback used to pack the message into the output stream.
+ *     debug: true to enable debug logging.
+ *
+ * Returns:
+ *      0 on success, otherwise -1 on failure. The only failure mode is a
+ *      message that exceeds the maximum message size configured on init.
+ */
+int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
+                     mgmt_msg_packf packf, bool debug)
+{
+       const char *dbgtag = debug ? ms->idtag : NULL;
+       struct mgmt_msg_hdr *mhdr;
+       struct stream *s;
+       uint8_t *dstbuf;
+       size_t endp, n;
+       size_t mlen = len + sizeof(*mhdr);
+
+       if (mlen > ms->max_msg_sz) {
+               MGMT_MSG_ERR(ms, "Message %zu > max size %zu, dropping", mlen,
+                            ms->max_msg_sz);
+               return -1;
+       }
+
+       if (!ms->outs) {
+               MGMT_MSG_DBG(dbgtag, "creating new stream for msg len %zu",
+                            len);
+               ms->outs = stream_new(ms->max_msg_sz);
+       } else if (STREAM_WRITEABLE(ms->outs) < mlen) {
+               MGMT_MSG_DBG(
+                       dbgtag,
+                       "enq existing stream len %zu and creating new stream for msg len %zu",
+                       STREAM_WRITEABLE(ms->outs), mlen);
+               stream_fifo_push(&ms->outq, ms->outs);
+               ms->outs = stream_new(ms->max_msg_sz);
+       } else {
+               MGMT_MSG_DBG(
+                       dbgtag,
+                       "using existing stream with avail %zu for msg len %zu",
+                       STREAM_WRITEABLE(ms->outs), mlen);
+       }
+       s = ms->outs;
+
+       /* We have a stream with space, pack the message into it. */
+       mhdr = (struct mgmt_msg_hdr *)(STREAM_DATA(s) + s->endp);
+       mhdr->marker = MGMT_MSG_MARKER;
+       mhdr->len = mlen;
+       stream_forward_endp(s, sizeof(*mhdr));
+       endp = stream_get_endp(s);
+       dstbuf = STREAM_DATA(s) + endp;
+       n = packf(msg, dstbuf);
+       stream_set_endp(s, endp + n);
+       ms->ntxm++;
+
+       return 0;
+}
+
+/**
+ * Create a unix domain stream socket and connect it to the given path, then
+ * set it non-blocking and configure its send and receive buffer sizes.
+ *
+ * Args:
+ *     path: path of unix domain socket to connect to.
+ *     sendbuf: size of socket send buffer.
+ *     recvbuf: size of socket receive buffer.
+ *     dbgtag: if non-NULL enable log debug, and use this tag.
+ *
+ * Returns:
+ *     socket fd or -1 on error.
+ */
+int mgmt_msg_connect(const char *path, size_t sendbuf, size_t recvbuf,
+                    const char *dbgtag)
+{
+       int ret, sock, len;
+       struct sockaddr_un addr;
+
+       MGMT_MSG_DBG(dbgtag, "connecting to server on %s", path);
+       sock = socket(AF_UNIX, SOCK_STREAM, 0);
+       if (sock < 0) {
+               MGMT_MSG_DBG(dbgtag, "socket failed: %s", safe_strerror(errno));
+               return -1;
+       }
+
+       memset(&addr, 0, sizeof(struct sockaddr_un));
+       addr.sun_family = AF_UNIX;
+       strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
+#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
+       len = addr.sun_len = SUN_LEN(&addr);
+#else
+       len = sizeof(addr.sun_family) + strlen(addr.sun_path);
+#endif /* HAVE_STRUCT_SOCKADDR_UN_SUN_LEN */
+       ret = connect(sock, (struct sockaddr *)&addr, len);
+       if (ret < 0) {
+               MGMT_MSG_DBG(dbgtag, "failed to connect on %s: %s", path,
+                            safe_strerror(errno));
+               close(sock);
+               return -1;
+       }
+
+       MGMT_MSG_DBG(dbgtag, "connected to server on %s", path);
+       set_nonblocking(sock);
+       setsockopt_so_sendbuf(sock, sendbuf);
+       setsockopt_so_recvbuf(sock, recvbuf);
+       return sock;
+}
+
+/**
+ * Reset the sending queue by dequeueing all streams and freeing them.
+ *
+ * Args:
+ *     ms: mgmt_msg_state for this process.
+ *
+ * Returns:
+ *      Number of streams that were freed.
+ *
+ */
+size_t mgmt_msg_reset_writes(struct mgmt_msg_state *ms)
+{
+       struct stream *s;
+       size_t nproc = 0;
+
+       for (s = stream_fifo_pop(&ms->outq); s;
+            s = stream_fifo_pop(&ms->outq), nproc++)
+               stream_free(s);
+
+       return nproc;
+}
+
+void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
+                  size_t max_write_buf, size_t max_msg_sz, const char *idtag)
+{
+       memset(ms, 0, sizeof(*ms));
+       ms->ins = stream_new(max_msg_sz);
+       stream_fifo_init(&ms->inq);
+       stream_fifo_init(&ms->outq);
+       ms->max_read_buf = max_read_buf;
+       ms->max_write_buf = max_write_buf;
+       ms->max_msg_sz = max_msg_sz;
+       ms->idtag = strdup(idtag);
+}
+
+void mgmt_msg_destroy(struct mgmt_msg_state *ms)
+{
+       mgmt_msg_reset_writes(ms);
+       if (ms->ins)
+               stream_free(ms->ins);
+       free(ms->idtag);
+}
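Putting the pieces of this file together, a client typically connects with mgmt_msg_connect(), initializes its state with mgmt_msg_init(), and then drives the read path from an event handler roughly like the hypothetical sketch below (handle_one_msg, example_read_cb and the scheduling comments are illustrative only):

    static void handle_one_msg(void *user, uint8_t *msg, size_t msglen)
    {
            /* decode and dispatch the protobuf payload here */
    }

    static void example_read_cb(struct mgmt_msg_state *ms, int fd)
    {
            switch (mgmt_msg_read(ms, fd, false)) {
            case MSR_DISCONNECT:
                    /* close fd and schedule a reconnect */
                    break;
            case MSR_SCHED_STREAM:
                    /* reschedule example_read_cb for more data on fd */
                    break;
            case MSR_SCHED_BOTH:
                    /* reschedule the read and also drain the queued streams
                     * (normally done from a separate rescheduled event) */
                    while (mgmt_msg_procbufs(ms, handle_one_msg, NULL, false))
                            ;
                    break;
            }
    }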
diff --git a/lib/mgmt_msg.h b/lib/mgmt_msg.h
new file mode 100644 (file)
index 0000000..8548751
--- /dev/null
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * March 6 2023, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+#ifndef _MGMT_MSG_H
+#define _MGMT_MSG_H
+
+#include "stream.h"
+#include "thread.h"
+
+#define MGMT_MSG_MARKER (0x4D724B21u) /* ASCII - "MrK!" */
+
+struct mgmt_msg_state {
+       struct stream *ins;
+       struct stream *outs;
+       struct stream_fifo inq;
+       struct stream_fifo outq;
+       uint64_t nrxm;          /* number of received messages */
+       uint64_t nrxb;          /* number of received bytes */
+       uint64_t ntxm;          /* number of sent messages */
+       uint64_t ntxb;          /* number of sent bytes */
+       size_t max_read_buf;    /* should replace with max time value */
+       size_t max_write_buf;   /* should replace with max time value */
+       size_t max_msg_sz;
+       char *idtag; /* identifying tag for messages */
+};
+
+struct mgmt_msg_hdr {
+       uint32_t marker;
+       uint32_t len;
+};
+
+enum mgmt_msg_rsched {
+       MSR_SCHED_BOTH,   /* schedule both queue and read */
+       MSR_SCHED_STREAM, /* schedule read */
+       MSR_DISCONNECT,   /* disconnect and start reconnecting */
+};
+
+enum mgmt_msg_wsched {
+       MSW_SCHED_NONE,       /* no scheduling required */
+       MSW_SCHED_STREAM,     /* schedule writing */
+       MSW_SCHED_WRITES_OFF, /* toggle writes off */
+       MSW_DISCONNECT,       /* disconnect and start reconnecting */
+};
+
+static inline uint8_t *msg_payload(struct mgmt_msg_hdr *mhdr)
+{
+       return (uint8_t *)(mhdr + 1);
+}
+
+typedef size_t (*mgmt_msg_packf)(void *msg, void *data);
+
+extern int mgmt_msg_connect(const char *path, size_t sendbuf, size_t recvbuf,
+                           const char *dbgtag);
+extern void mgmt_msg_destroy(struct mgmt_msg_state *ms);
+extern void mgmt_msg_init(struct mgmt_msg_state *ms, size_t max_read_buf,
+                         size_t max_write_buf, size_t max_msg_sz,
+                         const char *idtag);
+extern bool mgmt_msg_procbufs(struct mgmt_msg_state *ms,
+                             void (*handle_msg)(void *user, uint8_t *msg,
+                                                size_t msglen),
+                             void *user, bool debug);
+extern enum mgmt_msg_rsched mgmt_msg_read(struct mgmt_msg_state *ms, int fd,
+                                         bool debug);
+extern size_t mgmt_msg_reset_writes(struct mgmt_msg_state *ms);
+extern int mgmt_msg_send_msg(struct mgmt_msg_state *ms, void *msg, size_t len,
+                            size_t (*packf)(void *msg, void *buf), bool debug);
+extern enum mgmt_msg_wsched mgmt_msg_write(struct mgmt_msg_state *ms, int fd,
+                                          bool debug);
+
+#endif /* _MGMT_MSG_H */
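mgmt_msg_send_msg() leaves the wire encoding to the caller through the mgmt_msg_packf callback, which must write the message into the supplied buffer and return the number of bytes written. A minimal hypothetical packer for a fixed-size struct is sketched below (the mymsg type is invented for illustration; real users pack protobuf messages here):

    struct mymsg {
            uint32_t kind;
            uint32_t value;
    };

    static size_t mymsg_pack(void *msg, void *buf)
    {
            /* copy the fixed-size struct straight into the output stream */
            memcpy(buf, msg, sizeof(struct mymsg));
            return sizeof(struct mymsg);
    }

    /* later, with an initialized struct mgmt_msg_state *ms: */
    struct mymsg m = {.kind = 1, .value = 42};

    if (mgmt_msg_send_msg(ms, &m, sizeof(m), mymsg_pack, false) < 0)
            zlog_err("message larger than max_msg_sz, dropped");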
diff --git a/lib/mgmt_pb.h b/lib/mgmt_pb.h
new file mode 100644 (file)
index 0000000..08bb748
--- /dev/null
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD protobuf main header file
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_PB_H_
+#define _FRR_MGMTD_PB_H_
+
+#include "lib/mgmt.pb-c.h"
+
+#define mgmt_yang_data_xpath_init(ptr) mgmtd__yang_data_xpath__init(ptr)
+
+#define mgmt_yang_data_value_init(ptr) mgmtd__yang_data_value__init(ptr)
+
+#define mgmt_yang_data_init(ptr) mgmtd__yang_data__init(ptr)
+
+#define mgmt_yang_data_reply_init(ptr) mgmtd__yang_data_reply__init(ptr)
+
+#define mgmt_yang_cfg_data_req_init(ptr) mgmtd__yang_cfg_data_req__init(ptr)
+
+#define mgmt_yang_get_data_req_init(ptr) mgmtd__yang_get_data_req__init(ptr)
+
+#endif /* _FRR_MGMTD_PB_H_ */
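These macros simply forward to the protobuf-c generated initializers from lib/mgmt.pb-c.h; for example (field assignments are omitted because mgmt.proto is not shown in this hunk):

    Mgmtd__YangDataXPath xpath_msg;

    /* equivalent to mgmtd__yang_data_xpath__init(&xpath_msg) */
    mgmt_yang_data_xpath_init(&xpath_msg);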
index b208e45d6220e0cfe963e55d52f759ee780d0940..1259294a22285bfd70690f556b4f8d9cacd2672d 100644 (file)
@@ -93,7 +93,9 @@ static int nb_node_new_cb(const struct lysc_node *snode, void *arg)
 {
        struct nb_node *nb_node;
        struct lysc_node *sparent, *sparent_list;
+       struct frr_yang_module_info *module;
 
+       module = (struct frr_yang_module_info *)arg;
        nb_node = XCALLOC(MTYPE_NB_NODE, sizeof(*nb_node));
        yang_snode_get_path(snode, YANG_PATH_DATA, nb_node->xpath,
                            sizeof(nb_node->xpath));
@@ -128,6 +130,9 @@ static int nb_node_new_cb(const struct lysc_node *snode, void *arg)
        assert(snode->priv == NULL);
        ((struct lysc_node *)snode)->priv = nb_node;
 
+       if (module && module->ignore_cbs)
+               SET_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS);
+
        return YANG_ITER_CONTINUE;
 }
 
@@ -230,6 +235,9 @@ static unsigned int nb_node_validate_cbs(const struct nb_node *nb_node)
 {
        unsigned int error = 0;
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return error;
+
        error += nb_node_validate_cb(nb_node, NB_OP_CREATE,
                                     !!nb_node->cbs.create, false);
        error += nb_node_validate_cb(nb_node, NB_OP_MODIFY,
@@ -297,6 +305,8 @@ struct nb_config *nb_config_new(struct lyd_node *dnode)
                config->dnode = yang_dnode_new(ly_native_ctx, true);
        config->version = 0;
 
+       RB_INIT(nb_config_cbs, &config->cfg_chgs);
+
        return config;
 }
 
@@ -304,6 +314,7 @@ void nb_config_free(struct nb_config *config)
 {
        if (config->dnode)
                yang_dnode_free(config->dnode);
+       nb_config_diff_del_changes(&config->cfg_chgs);
        XFREE(MTYPE_NB_CONFIG, config);
 }
 
@@ -315,6 +326,8 @@ struct nb_config *nb_config_dup(const struct nb_config *config)
        dup->dnode = yang_dnode_dup(config->dnode);
        dup->version = config->version;
 
+       RB_INIT(nb_config_cbs, &dup->cfg_chgs);
+
        return dup;
 }
 
@@ -405,7 +418,7 @@ static void nb_config_diff_add_change(struct nb_config_cbs *changes,
        RB_INSERT(nb_config_cbs, changes, &change->cb);
 }
 
-static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
+void nb_config_diff_del_changes(struct nb_config_cbs *changes)
 {
        while (!RB_EMPTY(nb_config_cbs, changes)) {
                struct nb_config_change *change;
@@ -422,8 +435,8 @@ static void nb_config_diff_del_changes(struct nb_config_cbs *changes)
  * configurations. Given a new subtree, calculate all new YANG data nodes,
  * excluding default leafs and leaf-lists. This is a recursive function.
  */
-static void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
-                                  struct nb_config_cbs *changes)
+void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
+                           struct nb_config_cbs *changes)
 {
        enum nb_operation operation;
        struct lyd_node *child;
@@ -525,10 +538,16 @@ static inline void nb_config_diff_dnode_log(const char *context,
 }
 #endif
 
-/* Calculate the delta between two different configurations. */
-static void nb_config_diff(const struct nb_config *config1,
-                          const struct nb_config *config2,
-                          struct nb_config_cbs *changes)
+/*
+ * Calculate the delta between two different configurations.
+ *
+ * NOTE: 'config1' is the reference DB, while 'config2' is
+ * the DB being compared against 'config1'. Typically 'config1'
+ * should be the Running DB and 'config2' is the Candidate DB.
+ */
+void nb_config_diff(const struct nb_config *config1,
+                   const struct nb_config *config2,
+                   struct nb_config_cbs *changes)
 {
        struct lyd_node *diff = NULL;
        const struct lyd_node *root, *dnode;
@@ -734,6 +753,169 @@ int nb_candidate_edit(struct nb_config *candidate,
        return NB_OK;
 }
 
+static void nb_update_candidate_changes(struct nb_config *candidate,
+                                       struct nb_cfg_change *change,
+                                       uint32_t *seq)
+{
+       enum nb_operation oper = change->operation;
+       char *xpath = change->xpath;
+       struct lyd_node *root = NULL;
+       struct lyd_node *dnode;
+       struct nb_config_cbs *cfg_chgs = &candidate->cfg_chgs;
+       int op;
+
+       switch (oper) {
+       case NB_OP_CREATE:
+       case NB_OP_MODIFY:
+               root = yang_dnode_get(candidate->dnode, xpath);
+               break;
+       case NB_OP_DESTROY:
+               root = yang_dnode_get(running_config->dnode, xpath);
+               break;
+       case NB_OP_MOVE:
+       case NB_OP_PRE_VALIDATE:
+       case NB_OP_APPLY_FINISH:
+       case NB_OP_GET_ELEM:
+       case NB_OP_GET_NEXT:
+       case NB_OP_GET_KEYS:
+       case NB_OP_LOOKUP_ENTRY:
+       case NB_OP_RPC:
+               break;
+       default:
+               assert(!"non-enum value, invalid");
+       }
+
+       if (!root)
+               return;
+
+       LYD_TREE_DFS_BEGIN (root, dnode) {
+               op = nb_lyd_diff_get_op(dnode);
+               switch (op) {
+               case 'c':
+                       nb_config_diff_created(dnode, seq, cfg_chgs);
+                       LYD_TREE_DFS_continue = 1;
+                       break;
+               case 'd':
+                       nb_config_diff_deleted(dnode, seq, cfg_chgs);
+                       LYD_TREE_DFS_continue = 1;
+                       break;
+               case 'r':
+                       nb_config_diff_add_change(cfg_chgs, NB_OP_MODIFY, seq,
+                                                 dnode);
+                       break;
+               default:
+                       break;
+               }
+               LYD_TREE_DFS_END(root, dnode);
+       }
+}
+
+static bool nb_is_operation_allowed(struct nb_node *nb_node,
+                                   struct nb_cfg_change *change)
+{
+       enum nb_operation oper = change->operation;
+
+       if (lysc_is_key(nb_node->snode)) {
+               if (oper == NB_OP_MODIFY || oper == NB_OP_DESTROY)
+                       return false;
+       }
+       return true;
+}
+
+void nb_candidate_edit_config_changes(
+       struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
+       size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
+       int xpath_index, char *err_buf, int err_bufsize, bool *error)
+{
+       uint32_t seq = 0;
+
+       if (error)
+               *error = false;
+
+       if (xpath_base == NULL)
+               xpath_base = "";
+
+       /* Edit candidate configuration. */
+       for (size_t i = 0; i < num_cfg_changes; i++) {
+               struct nb_cfg_change *change = &cfg_changes[i];
+               struct nb_node *nb_node;
+               char xpath[XPATH_MAXLEN];
+               struct yang_data *data;
+               int ret;
+
+               /* Handle relative XPaths. */
+               memset(xpath, 0, sizeof(xpath));
+               if (xpath_index > 0 &&
+                   (xpath_base[0] == '.' || change->xpath[0] == '.'))
+                       strlcpy(xpath, curr_xpath, sizeof(xpath));
+               if (xpath_base[0]) {
+                       if (xpath_base[0] == '.')
+                               strlcat(xpath, xpath_base + 1, sizeof(xpath));
+                       else
+                               strlcat(xpath, xpath_base, sizeof(xpath));
+               }
+               if (change->xpath[0] == '.')
+                       strlcat(xpath, change->xpath + 1, sizeof(xpath));
+               else
+                       strlcpy(xpath, change->xpath, sizeof(xpath));
+
+               /* Find the northbound node associated to the data path. */
+               nb_node = nb_node_find(xpath);
+               if (!nb_node) {
+                       flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
+                                 "%s: unknown data path: %s", __func__, xpath);
+                       if (error)
+                               *error = true;
+                       continue;
+               }
+               /* Reject the edit if the node is a list key; keys cannot be
+                * modified or destroyed.
+                */
+               if (!nb_is_operation_allowed(nb_node, change)) {
+                       zlog_err("xpath %s points to a key node", xpath);
+                       if (error)
+                               *error = true;
+                       break;
+               }
+
+               /* If the value is not set, get the default if it exists. */
+               if (change->value == NULL)
+                       change->value = yang_snode_get_default(nb_node->snode);
+               data = yang_data_new(xpath, change->value);
+
+               /*
+                * Ignore "not found" errors when editing the candidate
+                * configuration.
+                */
+               ret = nb_candidate_edit(candidate_config, nb_node,
+                                       change->operation, xpath, NULL, data);
+               yang_data_free(data);
+               if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
+                       flog_warn(
+                               EC_LIB_NB_CANDIDATE_EDIT_ERROR,
+                               "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
+                               __func__, nb_operation_name(change->operation),
+                               xpath);
+                       if (error)
+                               *error = true;
+                       continue;
+               }
+               nb_update_candidate_changes(candidate_config, change, &seq);
+       }
+
+       if (error && *error) {
+               char buf[BUFSIZ];
+
+               /*
+                * Failure to edit the candidate configuration should never
+                * happen in practice, unless there's a bug in the code. When
+                * that happens, report the error to the caller via err_buf.
+                */
+               snprintf(err_buf, err_bufsize,
+                        "%% Failed to edit configuration.\n\n%s",
+                        yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
+       }
+}
+
 bool nb_candidate_needs_update(const struct nb_config *candidate)
 {
        if (candidate->version < running_config->version)
@@ -761,12 +943,13 @@ int nb_candidate_update(struct nb_config *candidate)
  * WARNING: lyd_validate() can change the configuration as part of the
  * validation process.
  */
-static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
-                                     size_t errmsg_len)
+int nb_candidate_validate_yang(struct nb_config *candidate, bool no_state,
+                              char *errmsg, size_t errmsg_len)
 {
        if (lyd_validate_all(&candidate->dnode, ly_native_ctx,
-                            LYD_VALIDATE_NO_STATE, NULL)
-           != 0) {
+                            no_state ? LYD_VALIDATE_NO_STATE
+                                     : LYD_VALIDATE_PRESENT,
+                            NULL) != 0) {
                yang_print_errors(ly_native_ctx, errmsg, errmsg_len);
                return NB_ERR_VALIDATION;
        }
@@ -775,10 +958,10 @@ static int nb_candidate_validate_yang(struct nb_config *candidate, char *errmsg,
 }
 
 /* Perform code-level validation using the northbound callbacks. */
-static int nb_candidate_validate_code(struct nb_context *context,
-                                     struct nb_config *candidate,
-                                     struct nb_config_cbs *changes,
-                                     char *errmsg, size_t errmsg_len)
+int nb_candidate_validate_code(struct nb_context *context,
+                              struct nb_config *candidate,
+                              struct nb_config_cbs *changes, char *errmsg,
+                              size_t errmsg_len)
 {
        struct nb_config_cb *cb;
        struct lyd_node *root, *child;
@@ -816,6 +999,21 @@ static int nb_candidate_validate_code(struct nb_context *context,
        return NB_OK;
 }
 
+int nb_candidate_diff_and_validate_yang(struct nb_context *context,
+                                       struct nb_config *candidate,
+                                       struct nb_config_cbs *changes,
+                                       char *errmsg, size_t errmsg_len)
+{
+       if (nb_candidate_validate_yang(candidate, true, errmsg,
+                                      errmsg_len) != NB_OK)
+               return NB_ERR_VALIDATION;
+
+       RB_INIT(nb_config_cbs, changes);
+       nb_config_diff(running_config, candidate, changes);
+
+       return NB_OK;
+}
+
 int nb_candidate_validate(struct nb_context *context,
                          struct nb_config *candidate, char *errmsg,
                          size_t errmsg_len)
@@ -823,11 +1021,11 @@ int nb_candidate_validate(struct nb_context *context,
        struct nb_config_cbs changes;
        int ret;
 
-       if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len) != NB_OK)
-               return NB_ERR_VALIDATION;
+       ret = nb_candidate_diff_and_validate_yang(context, candidate, &changes,
+                                                 errmsg, errmsg_len);
+       if (ret != NB_OK)
+               return ret;
 
-       RB_INIT(nb_config_cbs, &changes);
-       nb_config_diff(running_config, candidate, &changes);
        ret = nb_candidate_validate_code(context, candidate, &changes, errmsg,
                                         errmsg_len);
        nb_config_diff_del_changes(&changes);
@@ -839,12 +1037,14 @@ int nb_candidate_commit_prepare(struct nb_context context,
                                struct nb_config *candidate,
                                const char *comment,
                                struct nb_transaction **transaction,
+                               bool skip_validate, bool ignore_zero_change,
                                char *errmsg, size_t errmsg_len)
 {
        struct nb_config_cbs changes;
 
-       if (nb_candidate_validate_yang(candidate, errmsg, errmsg_len)
-           != NB_OK) {
+       if (!skip_validate &&
+           nb_candidate_validate_yang(candidate, true, errmsg, errmsg_len) !=
+                   NB_OK) {
                flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
                          "%s: failed to validate candidate configuration",
                          __func__);
@@ -853,14 +1053,15 @@ int nb_candidate_commit_prepare(struct nb_context context,
 
        RB_INIT(nb_config_cbs, &changes);
        nb_config_diff(running_config, candidate, &changes);
-       if (RB_EMPTY(nb_config_cbs, &changes)) {
+       if (!ignore_zero_change && RB_EMPTY(nb_config_cbs, &changes)) {
                snprintf(
                        errmsg, errmsg_len,
                        "No changes to apply were found during preparation phase");
                return NB_ERR_NO_CHANGES;
        }
 
-       if (nb_candidate_validate_code(&context, candidate, &changes, errmsg,
+       if (!skip_validate &&
+           nb_candidate_validate_code(&context, candidate, &changes, errmsg,
                                       errmsg_len) != NB_OK) {
                flog_warn(EC_LIB_NB_CANDIDATE_INVALID,
                          "%s: failed to validate candidate configuration",
@@ -869,8 +1070,12 @@ int nb_candidate_commit_prepare(struct nb_context context,
                return NB_ERR_VALIDATION;
        }
 
-       *transaction = nb_transaction_new(context, candidate, &changes, comment,
-                                         errmsg, errmsg_len);
+       /*
+        * Re-use an existing transaction if provided. Else allocate a new one.
+        */
+       if (!*transaction)
+               *transaction = nb_transaction_new(context, candidate, &changes,
+                                                 comment, errmsg, errmsg_len);
        if (*transaction == NULL) {
                flog_warn(EC_LIB_NB_TRANSACTION_CREATION_FAILED,
                          "%s: failed to create transaction: %s", __func__,
@@ -921,7 +1126,8 @@ int nb_candidate_commit(struct nb_context context, struct nb_config *candidate,
        int ret;
 
        ret = nb_candidate_commit_prepare(context, candidate, comment,
-                                         &transaction, errmsg, errmsg_len);
+                                         &transaction, false, false, errmsg,
+                                         errmsg_len);
        /*
         * Apply the changes if the preparation phase succeeded. Otherwise abort
         * the transaction.
@@ -1015,6 +1221,8 @@ static int nb_callback_create(struct nb_context *context,
        bool unexpected_error = false;
        int ret;
 
+       assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
        nb_log_config_callback(event, NB_OP_CREATE, dnode);
 
        args.context = context;
@@ -1064,6 +1272,8 @@ static int nb_callback_modify(struct nb_context *context,
        bool unexpected_error = false;
        int ret;
 
+       assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
        nb_log_config_callback(event, NB_OP_MODIFY, dnode);
 
        args.context = context;
@@ -1113,6 +1323,8 @@ static int nb_callback_destroy(struct nb_context *context,
        bool unexpected_error = false;
        int ret;
 
+       assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
        nb_log_config_callback(event, NB_OP_DESTROY, dnode);
 
        args.context = context;
@@ -1156,6 +1368,8 @@ static int nb_callback_move(struct nb_context *context,
        bool unexpected_error = false;
        int ret;
 
+       assert(!CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS));
+
        nb_log_config_callback(event, NB_OP_MOVE, dnode);
 
        args.context = context;
@@ -1199,6 +1413,9 @@ static int nb_callback_pre_validate(struct nb_context *context,
        bool unexpected_error = false;
        int ret;
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return 0;
+
        nb_log_config_callback(NB_EV_VALIDATE, NB_OP_PRE_VALIDATE, dnode);
 
        args.dnode = dnode;
@@ -1230,6 +1447,9 @@ static void nb_callback_apply_finish(struct nb_context *context,
 {
        struct nb_cb_apply_finish_args args = {};
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return;
+
        nb_log_config_callback(NB_EV_APPLY, NB_OP_APPLY_FINISH, dnode);
 
        args.context = context;
@@ -1245,6 +1465,9 @@ struct yang_data *nb_callback_get_elem(const struct nb_node *nb_node,
 {
        struct nb_cb_get_elem_args args = {};
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return NULL;
+
        DEBUGD(&nb_dbg_cbs_state,
               "northbound callback (get_elem): xpath [%s] list_entry [%p]",
               xpath, list_entry);
@@ -1260,6 +1483,9 @@ const void *nb_callback_get_next(const struct nb_node *nb_node,
 {
        struct nb_cb_get_next_args args = {};
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return NULL;
+
        DEBUGD(&nb_dbg_cbs_state,
               "northbound callback (get_next): node [%s] parent_list_entry [%p] list_entry [%p]",
               nb_node->xpath, parent_list_entry, list_entry);
@@ -1274,6 +1500,9 @@ int nb_callback_get_keys(const struct nb_node *nb_node, const void *list_entry,
 {
        struct nb_cb_get_keys_args args = {};
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return 0;
+
        DEBUGD(&nb_dbg_cbs_state,
               "northbound callback (get_keys): node [%s] list_entry [%p]",
               nb_node->xpath, list_entry);
@@ -1289,6 +1518,9 @@ const void *nb_callback_lookup_entry(const struct nb_node *nb_node,
 {
        struct nb_cb_lookup_entry_args args = {};
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return NULL;
+
        DEBUGD(&nb_dbg_cbs_state,
               "northbound callback (lookup_entry): node [%s] parent_list_entry [%p]",
               nb_node->xpath, parent_list_entry);
@@ -1304,6 +1536,9 @@ int nb_callback_rpc(const struct nb_node *nb_node, const char *xpath,
 {
        struct nb_cb_rpc_args args = {};
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return 0;
+
        DEBUGD(&nb_dbg_cbs_rpc, "northbound RPC: %s", xpath);
 
        args.xpath = xpath;
@@ -1330,6 +1565,9 @@ static int nb_callback_configuration(struct nb_context *context,
        union nb_resource *resource;
        int ret = NB_ERR;
 
+       if (CHECK_FLAG(nb_node->flags, F_NB_NODE_IGNORE_CBS))
+               return NB_OK;
+
        if (event == NB_EV_VALIDATE)
                resource = NULL;
        else
@@ -1733,7 +1971,7 @@ static int nb_oper_data_iter_list(const struct nb_node *nb_node,
        /* Iterate over all list entries. */
        do {
                const struct lysc_node_leaf *skey;
-               struct yang_list_keys list_keys;
+               struct yang_list_keys list_keys = {};
                char xpath[XPATH_MAXLEN * 2];
                int ret;
 
@@ -2389,6 +2627,10 @@ const char *nb_client_name(enum nb_client client)
                return "gRPC";
        case NB_CLIENT_PCEP:
                return "Pcep";
+       case NB_CLIENT_MGMTD_SERVER:
+               return "MGMTD Server";
+       case NB_CLIENT_MGMTD_BE:
+               return "MGMT Backend";
        case NB_CLIENT_NONE:
                return "None";
        }
@@ -2398,6 +2640,10 @@ const char *nb_client_name(enum nb_client client)
 
 static void nb_load_callbacks(const struct frr_yang_module_info *module)
 {
+
+       if (module->ignore_cbs)
+               return;
+
        for (size_t i = 0; module->nodes[i].xpath; i++) {
                struct nb_node *nb_node;
                uint32_t priority;
@@ -2471,7 +2717,8 @@ void nb_init(struct thread_master *tm,
 
        /* Initialize the compiled nodes with northbound data */
        for (size_t i = 0; i < nmodules; i++) {
-               yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0, NULL);
+               yang_snodes_iterate(loaded[i]->info, nb_node_new_cb, 0,
+                                   (void *)modules[i]);
                nb_load_callbacks(modules[i]);
        }
 
index 152810b3a98221db90a2b10eb0a530fad06d876d..4b5028c87e24d80eff95c546cc550597bbd24d87 100644 (file)
@@ -22,6 +22,39 @@ extern "C" {
 struct vty;
 struct debug;
 
+struct nb_yang_xpath_tag {
+       uint32_t ns;
+       uint32_t id;
+};
+
+struct nb_yang_value {
+       struct lyd_value value;
+       LY_DATA_TYPE value_type;
+       uint8_t value_flags;
+};
+
+struct nb_yang_xpath_elem {
+       struct nb_yang_xpath_tag tag;
+       struct nb_yang_value val;
+};
+
+#define NB_MAX_NUM_KEYS UINT8_MAX
+#define NB_MAX_NUM_XPATH_TAGS UINT8_MAX
+
+struct nb_yang_xpath {
+       uint8_t length;
+       struct {
+               uint8_t num_keys;
+               struct nb_yang_xpath_elem keys[NB_MAX_NUM_KEYS];
+       } tags[NB_MAX_NUM_XPATH_TAGS];
+};
+
+#define NB_YANG_XPATH_KEY(__xpath, __indx1, __indx2)                           \
+       ((__xpath->length > __indx1) &&                                        \
+                        (__xpath->tags[__indx1].num_keys > __indx2)           \
+                ? &__xpath->tags[__indx1].keys[__indx2]                       \
+                : NULL)
+
 /* Northbound events. */
 enum nb_event {
        /*
@@ -68,6 +101,12 @@ enum nb_operation {
        NB_OP_RPC,
 };
 
+struct nb_cfg_change {
+       char xpath[XPATH_MAXLEN];
+       enum nb_operation operation;
+       const char *value;
+};
+
 union nb_resource {
        int fd;
        void *ptr;
@@ -558,6 +597,8 @@ struct nb_node {
 #define F_NB_NODE_CONFIG_ONLY 0x01
 /* The YANG list doesn't contain key leafs. */
 #define F_NB_NODE_KEYLESS_LIST 0x02
+/* Ignore callbacks for this node */
+#define F_NB_NODE_IGNORE_CBS 0x04
 
 /*
  * HACK: old gcc versions (< 5.x) have a bug that prevents C99 flexible arrays
@@ -570,6 +611,12 @@ struct frr_yang_module_info {
        /* YANG module name. */
        const char *name;
 
+       /*
+        * Ignore callbacks for this module. Set this to true to
+        * load the module without any callbacks.
+        */
+       bool ignore_cbs;
+
        /* Northbound callbacks. */
        const struct {
                /* Data path of this YANG node. */
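To illustrate the new flag: a module that only wants its YANG model loaded, with every callback skipped, can now be declared roughly as below (a hypothetical module, not from this commit; the sentinel entry with a NULL xpath follows the existing nodes[] convention that nb_load_callbacks() iterates over):

    static const struct frr_yang_module_info frr_example_info = {
            .name = "frr-example",   /* hypothetical module name */
            .ignore_cbs = true,      /* load the module, register no callbacks */
            .nodes = {
                    {
                            .xpath = NULL,   /* sentinel */
                    },
            },
    };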
@@ -613,6 +660,8 @@ enum nb_client {
        NB_CLIENT_SYSREPO,
        NB_CLIENT_GRPC,
        NB_CLIENT_PCEP,
+       NB_CLIENT_MGMTD_SERVER,
+       NB_CLIENT_MGMTD_BE,
 };
 
 /* Northbound context. */
@@ -624,12 +673,6 @@ struct nb_context {
        const void *user;
 };
 
-/* Northbound configuration. */
-struct nb_config {
-       struct lyd_node *dnode;
-       uint32_t version;
-};
-
 /* Northbound configuration callback. */
 struct nb_config_cb {
        RB_ENTRY(nb_config_cb) entry;
@@ -656,6 +699,13 @@ struct nb_transaction {
        struct nb_config_cbs changes;
 };
 
+/* Northbound configuration. */
+struct nb_config {
+       struct lyd_node *dnode;
+       uint32_t version;
+       struct nb_config_cbs cfg_chgs;
+};
+
 /* Callback function used by nb_oper_data_iterate(). */
 typedef int (*nb_oper_data_cb)(const struct lysc_node *snode,
                               struct yang_translator *translator,
@@ -825,6 +875,22 @@ extern int nb_candidate_edit(struct nb_config *candidate,
                             const struct yang_data *previous,
                             const struct yang_data *data);
 
+/*
+ * Add the "created" part of a configuration diff. Given a newly created
+ * subtree, calculate all new YANG data nodes, excluding default leafs and
+ * leaf-lists, and append the corresponding changes. This is a recursive
+ * function.
+ *
+ * dnode
+ *    Pointer to the root of the newly created libyang data subtree.
+ *
+ * seq
+ *    Returns sequence number assigned to the specific change.
+ *
+ * changes
+ *    Northbound config callback head the changes are appended to.
+ */
+extern void nb_config_diff_created(const struct lyd_node *dnode, uint32_t *seq,
+                                  struct nb_config_cbs *changes);
+
 /*
  * Check if a candidate configuration is outdated and needs to be updated.
  *
@@ -836,6 +902,140 @@ extern int nb_candidate_edit(struct nb_config *candidate,
  */
 extern bool nb_candidate_needs_update(const struct nb_config *candidate);
 
+/*
+ * Edit candidate configuration changes.
+ *
+ * candidate_config
+ *    Candidate configuration to edit.
+ *
+ * cfg_changes
+ *    Northbound config changes.
+ *
+ * num_cfg_changes
+ *    Number of config changes.
+ *
+ * xpath_base
+ *    Base xpath for config.
+ *
+ * curr_xpath
+ *    Current xpath for config.
+ *
+ * xpath_index
+ *    Index of xpath being processed.
+ *
+ * err_buf
+ *    Buffer to store human-readable error message in case of error.
+ *
+ * err_bufsize
+ *    Size of err_buf.
+ *
+ * error
+ *    TRUE on error, FALSE on success
+ */
+extern void nb_candidate_edit_config_changes(
+       struct nb_config *candidate_config, struct nb_cfg_change cfg_changes[],
+       size_t num_cfg_changes, const char *xpath_base, const char *curr_xpath,
+       int xpath_index, char *err_buf, int err_bufsize, bool *error);
+
+/*
+ * Delete candidate configuration changes.
+ *
+ * changes
+ *    Northbound config changes.
+ */
+extern void nb_config_diff_del_changes(struct nb_config_cbs *changes);
+
+/*
+ * Create candidate diff and validate on yang tree
+ *
+ * context
+ *    Context of the northbound transaction.
+ *
+ * candidate
+ *    Candidate DB configuration.
+ *
+ * changes
+ *    Northbound config changes.
+ *
+ * errmsg
+ *    Buffer to store human-readable error message in case of error.
+ *
+ * errmsg_len
+ *    Size of errmsg.
+ *
+ * Returns:
+ *    NB_OK on success, NB_ERR_VALIDATION otherwise
+ */
+extern int nb_candidate_diff_and_validate_yang(struct nb_context *context,
+                                              struct nb_config *candidate,
+                                              struct nb_config_cbs *changes,
+                                              char *errmsg, size_t errmsg_len);
+
+/*
+ * Calculate the delta between two different configurations.
+ *
+ * reference
+ *    Reference configuration (typically the running DB) that 'incremental'
+ *    is compared against.
+ *
+ * incremental
+ *    Configuration (typically the candidate DB) that is compared against
+ *    'reference'.
+ *
+ * changes
+ *    Will hold the final diff generated.
+ */
+extern void nb_config_diff(const struct nb_config *reference,
+                          const struct nb_config *incremental,
+                          struct nb_config_cbs *changes);
+
+/*
+ * Perform YANG syntactic and semantic validation.
+ *
+ * WARNING: lyd_validate() can change the configuration as part of the
+ * validation process.
+ *
+ * candidate
+ *    Candidate DB configuration.
+ *
+ * errmsg
+ *    Buffer to store human-readable error message in case of error.
+ *
+ * errmsg_len
+ *    Size of errmsg.
+ *
+ * Returns:
+ *    NB_OK on success, NB_ERR_VALIDATION otherwise
+ */
+extern int nb_candidate_validate_yang(struct nb_config *candidate,
+                                     bool no_state, char *errmsg,
+                                     size_t errmsg_len);
+
+/*
+ * Perform code-level validation using the northbound callbacks.
+ *
+ * context
+ *    Context of the northbound transaction.
+ *
+ * candidate
+ *    Candidate DB configuration.
+ *
+ * changes
+ *    Northbound config changes.
+ *
+ * errmsg
+ *    Buffer to store human-readable error message in case of error.
+ *
+ * errmsg_len
+ *    Size of errmsg.
+ *
+ * Returns:
+ *    NB_OK on success, NB_ERR_VALIDATION otherwise
+ */
+extern int nb_candidate_validate_code(struct nb_context *context,
+                                     struct nb_config *candidate,
+                                     struct nb_config_cbs *changes,
+                                     char *errmsg, size_t errmsg_len);
+
 /*
  * Update a candidate configuration by rebasing the changes on top of the latest
  * running configuration. Resolve conflicts automatically by giving preference
@@ -895,6 +1095,12 @@ extern int nb_candidate_validate(struct nb_context *context,
  *    nb_candidate_commit_abort() or committed using
  *    nb_candidate_commit_apply().
  *
+ * skip_validate
+ *    TRUE to skip commit validation, FALSE otherwise.
+ *
+ * ignore_zero_change
+ *    TRUE to continue preparing the transaction even when the diff contains
+ *    no changes, FALSE to fail with NB_ERR_NO_CHANGES in that case.
+ *
  * errmsg
  *    Buffer to store human-readable error message in case of error.
  *
@@ -915,7 +1121,9 @@ extern int nb_candidate_commit_prepare(struct nb_context context,
                                       struct nb_config *candidate,
                                       const char *comment,
                                       struct nb_transaction **transaction,
-                                      char *errmsg, size_t errmsg_len);
+                                      bool skip_validate,
+                                      bool ignore_zero_change, char *errmsg,
+                                      size_t errmsg_len);
 
 /*
  * Abort a previously created configuration transaction, releasing all resources
index fa5884fb7850c49d21a6966d2b65f2ec627a0cfc..281d9a4704eb959328be8c970d03ad3be95ba594 100644 (file)
@@ -120,7 +120,7 @@ static int nb_cli_schedule_command(struct vty *vty)
 void nb_cli_enqueue_change(struct vty *vty, const char *xpath,
                           enum nb_operation operation, const char *value)
 {
-       struct vty_cfg_change *change;
+       struct nb_cfg_change *change;
 
        if (vty->num_cfg_changes == VTY_MAXCFGCHANGES) {
                /* Not expected to happen. */
@@ -141,79 +141,21 @@ static int nb_cli_apply_changes_internal(struct vty *vty,
                                         bool clear_pending)
 {
        bool error = false;
-
-       if (xpath_base == NULL)
-               xpath_base = "";
+       char buf[BUFSIZ];
 
        VTY_CHECK_XPATH;
 
-       /* Edit candidate configuration. */
-       for (size_t i = 0; i < vty->num_cfg_changes; i++) {
-               struct vty_cfg_change *change = &vty->cfg_changes[i];
-               struct nb_node *nb_node;
-               char xpath[XPATH_MAXLEN];
-               struct yang_data *data;
-               int ret;
-
-               /* Handle relative XPaths. */
-               memset(xpath, 0, sizeof(xpath));
-               if (vty->xpath_index > 0
-                   && (xpath_base[0] == '.' || change->xpath[0] == '.'))
-                       strlcpy(xpath, VTY_CURR_XPATH, sizeof(xpath));
-               if (xpath_base[0]) {
-                       if (xpath_base[0] == '.')
-                               strlcat(xpath, xpath_base + 1, sizeof(xpath));
-                       else
-                               strlcat(xpath, xpath_base, sizeof(xpath));
-               }
-               if (change->xpath[0] == '.')
-                       strlcat(xpath, change->xpath + 1, sizeof(xpath));
-               else
-                       strlcpy(xpath, change->xpath, sizeof(xpath));
-
-               /* Find the northbound node associated to the data path. */
-               nb_node = nb_node_find(xpath);
-               if (!nb_node) {
-                       flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
-                                 "%s: unknown data path: %s", __func__, xpath);
-                       error = true;
-                       continue;
-               }
-
-               /* If the value is not set, get the default if it exists. */
-               if (change->value == NULL)
-                       change->value = yang_snode_get_default(nb_node->snode);
-               data = yang_data_new(xpath, change->value);
-
-               /*
-                * Ignore "not found" errors when editing the candidate
-                * configuration.
-                */
-               ret = nb_candidate_edit(vty->candidate_config, nb_node,
-                                       change->operation, xpath, NULL, data);
-               yang_data_free(data);
-               if (ret != NB_OK && ret != NB_ERR_NOT_FOUND) {
-                       flog_warn(
-                               EC_LIB_NB_CANDIDATE_EDIT_ERROR,
-                               "%s: failed to edit candidate configuration: operation [%s] xpath [%s]",
-                               __func__, nb_operation_name(change->operation),
-                               xpath);
-                       error = true;
-                       continue;
-               }
-       }
-
+       nb_candidate_edit_config_changes(
+               vty->candidate_config, vty->cfg_changes, vty->num_cfg_changes,
+               xpath_base, VTY_CURR_XPATH, vty->xpath_index, buf, sizeof(buf),
+               &error);
        if (error) {
-               char buf[BUFSIZ];
-
                /*
                 * Failure to edit the candidate configuration should never
                 * happen in practice, unless there's a bug in the code. When
                 * that happens, log the error but otherwise ignore it.
                 */
-               vty_out(vty, "%% Failed to edit configuration.\n\n");
-               vty_out(vty, "%s",
-                       yang_print_errors(ly_native_ctx, buf, sizeof(buf)));
+               vty_out(vty, "%s", buf);
        }
 
        /*
@@ -241,6 +183,8 @@ static int nb_cli_apply_changes_internal(struct vty *vty,
 int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
 {
        char xpath_base[XPATH_MAXLEN] = {};
+       bool implicit_commit;
+       int ret;
 
        /* Parse the base XPath format string. */
        if (xpath_base_fmt) {
@@ -250,6 +194,17 @@ int nb_cli_apply_changes(struct vty *vty, const char *xpath_base_fmt, ...)
                vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
                va_end(ap);
        }
+
+       if (vty_mgmt_fe_enabled()) {
+               VTY_CHECK_XPATH;
+
+               implicit_commit = vty_needs_implicit_commit(vty);
+               ret = vty_mgmt_send_config_data(vty);
+               if (ret >= 0 && !implicit_commit)
+                       vty->mgmt_num_pending_setcfg++;
+               return ret;
+       }
+
        return nb_cli_apply_changes_internal(vty, xpath_base, false);
 }
 
@@ -257,6 +212,8 @@ int nb_cli_apply_changes_clear_pending(struct vty *vty,
                                       const char *xpath_base_fmt, ...)
 {
        char xpath_base[XPATH_MAXLEN] = {};
+       bool implicit_commit;
+       int ret;
 
        /* Parse the base XPath format string. */
        if (xpath_base_fmt) {
@@ -266,6 +223,17 @@ int nb_cli_apply_changes_clear_pending(struct vty *vty,
                vsnprintf(xpath_base, sizeof(xpath_base), xpath_base_fmt, ap);
                va_end(ap);
        }
+
+       if (vty_mgmt_fe_enabled()) {
+               VTY_CHECK_XPATH;
+
+               implicit_commit = vty_needs_implicit_commit(vty);
+               ret = vty_mgmt_send_config_data(vty);
+               if (ret >= 0 && !implicit_commit)
+                       vty->mgmt_num_pending_setcfg++;
+               return ret;
+       }
+
        return nb_cli_apply_changes_internal(vty, xpath_base, true);
 }
 
index 2b57ff27070f69f129df0584a4fce11ddd606578..ee19568516f81017a00e84306a317a8feb169314 100644 (file)
@@ -312,7 +312,8 @@ static void frr_confd_cdb_read_cb_prepare(int fd, int *subp, int reslen)
        transaction = NULL;
        context.client = NB_CLIENT_CONFD;
        ret = nb_candidate_commit_prepare(context, candidate, NULL,
-                                         &transaction, errmsg, sizeof(errmsg));
+                                         &transaction, false, false, errmsg,
+                                         sizeof(errmsg));
        if (ret != NB_OK && ret != NB_ERR_NO_CHANGES) {
                enum confd_errcode errcode;
 
index 1459146eab1c82c46f15829b2b3eca424e9e0af8..274a0ca45a0deb6fb533a54a12375a6da8a6dea9 100644 (file)
@@ -825,7 +825,8 @@ HandleUnaryCommit(UnaryRpcState<frr::CommitRequest, frr::CommitResponse> *tag)
                grpc_debug("`-> Performing PREPARE");
                ret = nb_candidate_commit_prepare(
                        context, candidate->config, comment.c_str(),
-                       &candidate->transaction, errmsg, sizeof(errmsg));
+                       &candidate->transaction, false, false, errmsg,
+                       sizeof(errmsg));
                break;
        case frr::CommitRequest::ABORT:
                grpc_debug("`-> Performing ABORT");
index 096414ff2474c7846a0d3a3a7be30833e9229ace..337fb690d1d8f7c89aa48c13f5d801f754202685 100644 (file)
@@ -269,7 +269,8 @@ static int frr_sr_config_change_cb_prepare(sr_session_ctx_t *session,
         * required to apply them.
         */
        ret = nb_candidate_commit_prepare(context, candidate, NULL,
-                                         &transaction, errmsg, sizeof(errmsg));
+                                         &transaction, false, false, errmsg,
+                                         sizeof(errmsg));
        if (ret != NB_OK && ret != NB_ERR_NO_CHANGES)
                flog_warn(
                        EC_LIB_LIBSYSREPO,
index beef8675aaba28b5dc1755b0a9e0164f3fe3e549..d456629bbd9a97907aa4489a642031174ec928a2 100644 (file)
@@ -64,6 +64,9 @@ lib_libfrr_la_SOURCES = \
        lib/log_vty.c \
        lib/md5.c \
        lib/memory.c \
+       lib/mgmt_be_client.c \
+       lib/mgmt_fe_client.c \
+       lib/mgmt_msg.c \
        lib/mlag.c \
        lib/module.c \
        lib/mpls.c \
@@ -146,6 +149,23 @@ nodist_lib_libfrr_la_SOURCES = \
        yang/frr-module-translator.yang.c \
        # end
 
+# Add logic to build mgmt.proto
+lib_libfrr_la_LIBADD += $(PROTOBUF_C_LIBS)
+
+BUILT_SOURCES += \
+       lib/mgmt.pb-c.c \
+       lib/mgmt.pb-c.h \
+       # end
+
+CLEANFILES += \
+       lib/mgmt.pb-c.h \
+       lib/mgmt.pb-c.c \
+       # end
+
+lib_libfrr_la_SOURCES += \
+       lib/mgmt.pb-c.c \
+       #end
+
 if SQLITE3
 lib_libfrr_la_LIBADD += $(SQLITE3_LIBS)
 lib_libfrr_la_SOURCES += lib/db.c
@@ -222,6 +242,11 @@ pkginclude_HEADERS += \
        lib/log_vty.h \
        lib/md5.h \
        lib/memory.h \
+       lib/mgmt.pb-c.h \
+       lib/mgmt_be_client.h \
+       lib/mgmt_fe_client.h \
+       lib/mgmt_msg.h \
+       lib/mgmt_pb.h \
        lib/module.h \
        lib/monotime.h \
        lib/mpls.h \
index 786271abe89cd3122c154cb03d8ba2ac107d2ea5..3dcae0282f689ac15c2e15cd46c0f3473dcc9c91 100644 (file)
--- a/lib/vty.c
+++ b/lib/vty.c
@@ -65,6 +65,14 @@ enum vty_event {
 #endif /* VTYSH */
 };
 
+struct nb_config *vty_mgmt_candidate_config;
+
+static uintptr_t mgmt_lib_hndl;
+static bool mgmt_fe_connected;
+static bool mgmt_candidate_ds_wr_locked;
+static uint64_t mgmt_client_id_next;
+static uint64_t mgmt_last_req_id = UINT64_MAX;
+
 PREDECL_DLIST(vtyservs);
 
 struct vty_serv {
@@ -80,6 +88,7 @@ DECLARE_DLIST(vtyservs, struct vty_serv, itm);
 
 static void vty_event_serv(enum vty_event event, struct vty_serv *);
 static void vty_event(enum vty_event, struct vty *);
+static int vtysh_flush(struct vty *vty);
 
 /* Extern host structure from command.c */
 extern struct host host;
@@ -112,6 +121,36 @@ static char integrate_default[] = SYSCONFDIR INTEGRATE_DEFAULT_CONFIG;
 static bool do_log_commands;
 static bool do_log_commands_perm;
 
+void vty_mgmt_resume_response(struct vty *vty, bool success)
+{
+       uint8_t header[4] = {0, 0, 0, 0};
+       int ret = CMD_SUCCESS;
+
+       if (!vty->mgmt_req_pending) {
+               zlog_err(
+                       "vty response called without setting mgmt_req_pending");
+               return;
+       }
+
+       if (!success)
+               ret = CMD_WARNING_CONFIG_FAILED;
+
+       vty->mgmt_req_pending = false;
+       header[3] = ret;
+       buffer_put(vty->obuf, header, 4);
+
+       if (!vty->t_write && (vtysh_flush(vty) < 0))
+               /* Try to flush results; exit if a write
+                * error occurs.
+                */
+               return;
+
+       if (vty->status == VTY_CLOSE)
+               vty_close(vty);
+       else
+               vty_event(VTYSH_READ, vty);
+}
+
 void vty_frame(struct vty *vty, const char *format, ...)
 {
        va_list args;
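
vty_mgmt_resume_response() above is the second half of a deferred command: when a config command is handed off to MGMTD, vtysh_read() (changed further below) leaves vty->mgmt_req_pending set and returns without writing the usual 4-byte status header; the MGMTD notification callback later completes the exchange. A minimal sketch of such a callback, assuming only the functions visible in this file:

/*
 * Sketch of a MGMTD frontend result callback finishing a deferred vtysh
 * command; it mirrors the *_notified handlers added later in this file.
 */
static void example_mgmt_result_notified(uintptr_t session_ctx, bool success,
                                         const char *errmsg_if_any)
{
        struct vty *vty = (struct vty *)session_ctx;

        if (!success)
                vty_out(vty, "ERROR: request failed: %s\n",
                        errmsg_if_any ? errmsg_if_any : "Unknown");

        /* Unblocks vtysh by emitting the 4-byte result header. */
        vty_mgmt_resume_response(vty, success);
}
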
@@ -1586,6 +1625,16 @@ struct vty *vty_new(void)
        new->max = VTY_BUFSIZ;
        new->pass_fd = -1;
 
+       if (mgmt_lib_hndl) {
+               new->mgmt_client_id = mgmt_client_id_next++;
+               if (mgmt_fe_create_client_session(
+                           mgmt_lib_hndl, new->mgmt_client_id,
+                           (uintptr_t) new) != MGMTD_SUCCESS)
+                       zlog_err(
+                               "Failed to open a MGMTD Frontend session for VTY session %p!!",
+                               new);
+       }
+
        return new;
 }
 
@@ -2201,6 +2250,12 @@ static void vtysh_read(struct thread *thread)
                                if (ret == CMD_SUSPEND)
                                        break;
 
+                               /* With the new MGMTD infra we must hold the
+                                * response until the result arrives through
+                                * the MGMTD callback.
+                                */
+                               if (vty->mgmt_req_pending)
+                                       return;
+
                                /* warning: watchfrr hardcodes this result write
                                 */
                                header[3] = ret;
@@ -2257,6 +2312,12 @@ void vty_close(struct vty *vty)
        int i;
        bool was_stdio = false;
 
+       if (mgmt_lib_hndl) {
+               mgmt_fe_destroy_client_session(mgmt_lib_hndl,
+                                              vty->mgmt_client_id);
+               vty->mgmt_session_id = 0;
+       }
+
        /* Drop out of configure / transaction if needed. */
        vty_config_exit(vty);
 
@@ -2632,6 +2693,23 @@ int vty_config_enter(struct vty *vty, bool private_config, bool exclusive)
                return CMD_WARNING;
        }
 
+       if (vty_mgmt_fe_enabled()) {
+               if (!mgmt_candidate_ds_wr_locked) {
+                       if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE,
+                                                    true) != 0) {
+                               vty_out(vty, "Not able to lock candidate DS\n");
+                               return CMD_WARNING;
+                       }
+               } else {
+                       vty_out(vty,
+                               "Candidate DS already locked by different session\n");
+                       return CMD_WARNING;
+               }
+
+               vty->mgmt_locked_candidate_ds = true;
+               mgmt_candidate_ds_wr_locked = true;
+       }
+
        vty->node = CONFIG_NODE;
        vty->config = true;
        vty->private_config = private_config;
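
Entering configure mode now takes a write lock on the MGMTD candidate datastore, and vty_config_node_exit() (a later hunk in this file) releases it again. A small sketch of that lock round trip, assuming only the helpers this commit adds to lib/vty.h:

/* Sketch: lock the candidate DS when entering config, unlock on exit. */
static int example_candidate_lock(struct vty *vty, bool lock)
{
        if (!vty_mgmt_fe_enabled())
                return 0; /* not connected to mgmtd; nothing to lock */

        if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE, lock) != 0) {
                vty_out(vty, "Not able to %slock candidate DS\n",
                        lock ? "" : "un");
                return -1;
        }

        /* The result is delivered later via vty_mgmt_ds_lock_notified(). */
        return 0;
}
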
@@ -2643,7 +2721,14 @@ int vty_config_enter(struct vty *vty, bool private_config, bool exclusive)
                vty_out(vty,
                        "Warning: uncommitted changes will be discarded on exit.\n\n");
        } else {
-               vty->candidate_config = vty_shared_candidate_config;
+               /*
+                * NOTE: On the MGMTD daemon we point the VTY candidate DS to
+                * the global MGMTD candidate DS. Else we point to the VTY
+                * Shared Candidate Config.
+                */
+               vty->candidate_config = vty_mgmt_candidate_config
+                                               ? vty_mgmt_candidate_config
+                                               : vty_shared_candidate_config;
                if (frr_get_cli_mode() == FRR_CLI_TRANSACTIONAL)
                        vty->candidate_config_base =
                                nb_config_dup(running_config);
@@ -2676,6 +2761,18 @@ int vty_config_node_exit(struct vty *vty)
 {
        vty->xpath_index = 0;
 
+       if (vty_mgmt_fe_enabled() && mgmt_candidate_ds_wr_locked &&
+           vty->mgmt_locked_candidate_ds) {
+               if (vty_mgmt_send_lockds_req(vty, MGMTD_DS_CANDIDATE, false) !=
+                   0) {
+                       vty_out(vty, "Not able to unlock candidate DS\n");
+                       return CMD_WARNING;
+               }
+
+               vty->mgmt_locked_candidate_ds = false;
+               mgmt_candidate_ds_wr_locked = false;
+       }
+
        /* Perform any pending commits. */
        (void)nb_cli_pending_commit_check(vty);
 
@@ -3173,6 +3270,390 @@ void vty_init_vtysh(void)
        /* currently nothing to do, but likely to have future use */
 }
 
+static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
+                                     bool connected)
+{
+       zlog_err("%sGot %sconnected %s MGMTD Frontend Server",
+                !connected ? "ERROR: " : "", !connected ? "dis" : "",
+                !connected ? "from" : "to");
+
+       mgmt_fe_connected = connected;
+
+       /*
+        * TODO: Setup or teardown front-end sessions for existing
+        * VTY connections.
+        */
+}
+
+static void vty_mgmt_session_created(uintptr_t lib_hndl, uintptr_t usr_data,
+                                    uint64_t client_id, bool create,
+                                    bool success, uintptr_t session_id,
+                                    uintptr_t session_ctx)
+{
+       struct vty *vty;
+
+       vty = (struct vty *)session_ctx;
+
+       if (!success) {
+               zlog_err("%s session for client %llu failed!",
+                        create ? "Creating" : "Destroying",
+                        (unsigned long long)client_id);
+               return;
+       }
+
+       zlog_err("%s session for client %llu successfully!",
+                create ? "Created" : "Destroyed",
+                (unsigned long long)client_id);
+       if (create)
+               vty->mgmt_session_id = session_id;
+}
+
+static void vty_mgmt_ds_lock_notified(uintptr_t lib_hndl, uintptr_t usr_data,
+                                     uint64_t client_id, uintptr_t session_id,
+                                     uintptr_t session_ctx, uint64_t req_id,
+                                     bool lock_ds, bool success,
+                                     Mgmtd__DatastoreId ds_id,
+                                     char *errmsg_if_any)
+{
+       struct vty *vty;
+
+       vty = (struct vty *)session_ctx;
+
+       if (!success) {
+               zlog_err("%socking for DS %u failed! Err: '%s'",
+                        lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
+               vty_out(vty, "ERROR: %socking for DS %u failed! Err: '%s'\n",
+                       lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
+       } else {
+               zlog_err("%socked DS %u successfully!", lock_ds ? "L" : "Unl",
+                        ds_id);
+       }
+
+       vty_mgmt_resume_response(vty, success);
+}
+
+static void vty_mgmt_set_config_result_notified(
+       uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+       uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+       bool success, Mgmtd__DatastoreId ds_id, char *errmsg_if_any)
+{
+       struct vty *vty;
+
+       vty = (struct vty *)session_ctx;
+
+       if (!success) {
+               zlog_err(
+                       "SET_CONFIG request for client 0x%llx failed! Error: '%s'",
+                       (unsigned long long)client_id,
+                       errmsg_if_any ? errmsg_if_any : "Unknown");
+               vty_out(vty, "ERROR: SET_CONFIG request failed! Error: %s\n",
+                       errmsg_if_any ? errmsg_if_any : "Unknown");
+       } else {
+               zlog_err(
+                       "SET_CONFIG request for client 0x%llx req-id %llu was successful!",
+                       (unsigned long long)client_id,
+                       (unsigned long long)req_id);
+       }
+
+       vty_mgmt_resume_response(vty, success);
+}
+
+static void vty_mgmt_commit_config_result_notified(
+       uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+       uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+       bool success, Mgmtd__DatastoreId src_ds_id,
+       Mgmtd__DatastoreId dst_ds_id, bool validate_only, char *errmsg_if_any)
+{
+       struct vty *vty;
+
+       vty = (struct vty *)session_ctx;
+
+       if (!success) {
+               zlog_err(
+                       "COMMIT_CONFIG request for client 0x%llx failed! Error: '%s'",
+                       (unsigned long long)client_id,
+                       errmsg_if_any ? errmsg_if_any : "Unknown");
+               vty_out(vty, "ERROR: COMMIT_CONFIG request failed! Error: %s\n",
+                       errmsg_if_any ? errmsg_if_any : "Unknown");
+       } else {
+               zlog_err(
+                       "COMMIT_CONFIG request for client 0x%llx req-id %llu was successful!",
+                       (unsigned long long)client_id,
+                       (unsigned long long)req_id);
+               if (errmsg_if_any)
+                       vty_out(vty, "MGMTD: %s\n", errmsg_if_any);
+       }
+
+       vty_mgmt_resume_response(vty, success);
+}
+
+static enum mgmt_result vty_mgmt_get_data_result_notified(
+       uintptr_t lib_hndl, uintptr_t usr_data, uint64_t client_id,
+       uintptr_t session_id, uintptr_t session_ctx, uint64_t req_id,
+       bool success, Mgmtd__DatastoreId ds_id, Mgmtd__YangData **yang_data,
+       size_t num_data, int next_key, char *errmsg_if_any)
+{
+       struct vty *vty;
+       size_t indx;
+
+       vty = (struct vty *)session_ctx;
+
+       if (!success) {
+               zlog_err(
+                       "GET_DATA request for client 0x%llx failed! Error: '%s'",
+                       (unsigned long long)client_id,
+                       errmsg_if_any ? errmsg_if_any : "Unknown");
+               vty_out(vty, "ERROR: GET_DATA request failed! Error: %s\n",
+                       errmsg_if_any ? errmsg_if_any : "Unknown");
+               vty_mgmt_resume_response(vty, success);
+               return MGMTD_INTERNAL_ERROR;
+       }
+
+       zlog_debug(
+               "GET_DATA request for client 0x%llx req-id %llu was successful!",
+               (unsigned long long)client_id, (unsigned long long)req_id);
+
+       if (req_id != mgmt_last_req_id) {
+               mgmt_last_req_id = req_id;
+               vty_out(vty, "[\n");
+       }
+
+       for (indx = 0; indx < num_data; indx++) {
+               vty_out(vty, "  \"%s\": \"%s\"\n", yang_data[indx]->xpath,
+                       yang_data[indx]->value->encoded_str_val);
+       }
+       if (next_key < 0) {
+               vty_out(vty, "]\n");
+               vty_mgmt_resume_response(vty, success);
+       }
+
+       return MGMTD_SUCCESS;
+}
+
+static struct mgmt_fe_client_params client_params = {
+       .client_connect_notify = vty_mgmt_server_connected,
+       .client_session_notify = vty_mgmt_session_created,
+       .lock_ds_notify = vty_mgmt_ds_lock_notified,
+       .set_config_notify = vty_mgmt_set_config_result_notified,
+       .commit_config_notify = vty_mgmt_commit_config_result_notified,
+       .get_data_notify = vty_mgmt_get_data_result_notified,
+};
+
+void vty_init_mgmt_fe(void)
+{
+       if (!vty_master) {
+               zlog_err("Always call vty_init_mgmt_fe() after vty_init()!!");
+               return;
+       }
+
+       assert(!mgmt_lib_hndl);
+       snprintf(client_params.name, sizeof(client_params.name), "%s-%lld",
+                frr_get_progname(), (long long)getpid());
+       mgmt_lib_hndl = mgmt_fe_client_lib_init(&client_params, vty_master);
+       assert(mgmt_lib_hndl);
+}
+
+bool vty_mgmt_fe_enabled(void)
+{
+       return mgmt_lib_hndl && mgmt_fe_connected;
+}
+
+int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
+                            bool lock)
+{
+       enum mgmt_result ret;
+
+       if (mgmt_lib_hndl && vty->mgmt_session_id) {
+               vty->mgmt_req_id++;
+               ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id,
+                                     vty->mgmt_req_id, ds_id, lock);
+               if (ret != MGMTD_SUCCESS) {
+                       zlog_err(
+                               "Failed to send %sLOCK-DS-REQ to MGMTD for req-id %llu.",
+                               lock ? "" : "UN",
+                               (unsigned long long)vty->mgmt_req_id);
+                       vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!",
+                               lock ? "" : "UN");
+                       return -1;
+               }
+
+               vty->mgmt_req_pending = true;
+       }
+
+       return 0;
+}
+
+int vty_mgmt_send_config_data(struct vty *vty)
+{
+       Mgmtd__YangDataValue value[VTY_MAXCFGCHANGES];
+       Mgmtd__YangData cfg_data[VTY_MAXCFGCHANGES];
+       Mgmtd__YangCfgDataReq cfg_req[VTY_MAXCFGCHANGES];
+       Mgmtd__YangCfgDataReq *cfgreq[VTY_MAXCFGCHANGES] = {0};
+       size_t indx;
+       int cnt;
+       bool implicit_commit = false;
+
+       if (mgmt_lib_hndl && vty->mgmt_session_id) {
+               cnt = 0;
+               for (indx = 0; indx < vty->num_cfg_changes; indx++) {
+                       mgmt_yang_data_init(&cfg_data[cnt]);
+
+                       if (vty->cfg_changes[indx].value) {
+                               mgmt_yang_data_value_init(&value[cnt]);
+                               value[cnt].encoded_str_val =
+                                       (char *)vty->cfg_changes[indx].value;
+                               value[cnt].value_case =
+                                       MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+                               cfg_data[cnt].value = &value[cnt];
+                       }
+
+                       cfg_data[cnt].xpath = vty->cfg_changes[indx].xpath;
+
+                       mgmt_yang_cfg_data_req_init(&cfg_req[cnt]);
+                       cfg_req[cnt].data = &cfg_data[cnt];
+                       switch (vty->cfg_changes[indx].operation) {
+                       case NB_OP_DESTROY:
+                               cfg_req[cnt].req_type =
+                                       MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
+                               break;
+
+                       case NB_OP_CREATE:
+                       case NB_OP_MODIFY:
+                       case NB_OP_MOVE:
+                       case NB_OP_PRE_VALIDATE:
+                       case NB_OP_APPLY_FINISH:
+                               cfg_req[cnt].req_type =
+                                       MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+                               break;
+                       case NB_OP_GET_ELEM:
+                       case NB_OP_GET_NEXT:
+                       case NB_OP_GET_KEYS:
+                       case NB_OP_LOOKUP_ENTRY:
+                       case NB_OP_RPC:
+                               assert(!"Invalid type of operation");
+                               break;
+                       default:
+                               assert(!"non-enum value, invalid");
+                       }
+
+                       cfgreq[cnt] = &cfg_req[cnt];
+                       cnt++;
+               }
+
+               vty->mgmt_req_id++;
+               implicit_commit = vty_needs_implicit_commit(vty);
+               if (cnt && mgmt_fe_set_config_data(
+                                  mgmt_lib_hndl, vty->mgmt_session_id,
+                                  vty->mgmt_req_id, MGMTD_DS_CANDIDATE, cfgreq,
+                                  cnt, implicit_commit,
+                                  MGMTD_DS_RUNNING) != MGMTD_SUCCESS) {
+                       zlog_err("Failed to send %d Config Xpaths to MGMTD!!",
+                                (int)indx);
+                       return -1;
+               }
+
+               vty->mgmt_req_pending = true;
+       }
+
+       return 0;
+}
+
+int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only, bool abort)
+{
+       enum mgmt_result ret;
+
+       if (mgmt_lib_hndl && vty->mgmt_session_id) {
+               vty->mgmt_req_id++;
+               ret = mgmt_fe_commit_config_data(
+                       mgmt_lib_hndl, vty->mgmt_session_id, vty->mgmt_req_id,
+                       MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only,
+                       abort);
+               if (ret != MGMTD_SUCCESS) {
+                       zlog_err(
+                               "Failed to send COMMIT-REQ to MGMTD for req-id %llu.",
+                               (unsigned long long)vty->mgmt_req_id);
+                       vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!");
+                       return -1;
+               }
+
+               vty->mgmt_req_pending = true;
+               vty->mgmt_num_pending_setcfg = 0;
+       }
+
+       return 0;
+}
+
+int vty_mgmt_send_get_config(struct vty *vty, Mgmtd__DatastoreId datastore,
+                            const char **xpath_list, int num_req)
+{
+       enum mgmt_result ret;
+       Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
+       Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
+       Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
+       int i;
+
+       vty->mgmt_req_id++;
+
+       for (i = 0; i < num_req; i++) {
+               mgmt_yang_get_data_req_init(&get_req[i]);
+               mgmt_yang_data_init(&yang_data[i]);
+
+               yang_data[i].xpath = (char *)xpath_list[i];
+
+               get_req[i].data = &yang_data[i];
+               getreq[i] = &get_req[i];
+       }
+       ret = mgmt_fe_get_config_data(mgmt_lib_hndl, vty->mgmt_session_id,
+                                     vty->mgmt_req_id, datastore, getreq,
+                                     num_req);
+
+       if (ret != MGMTD_SUCCESS) {
+               zlog_err("Failed to send GET-CONFIG to MGMTD for req-id %llu.",
+                        (unsigned long long)vty->mgmt_req_id);
+               vty_out(vty, "Failed to send GET-CONFIG to MGMTD!");
+               return -1;
+       }
+
+       vty->mgmt_req_pending = true;
+
+       return 0;
+}
+
+int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
+                          const char **xpath_list, int num_req)
+{
+       enum mgmt_result ret;
+       Mgmtd__YangData yang_data[VTY_MAXCFGCHANGES];
+       Mgmtd__YangGetDataReq get_req[VTY_MAXCFGCHANGES];
+       Mgmtd__YangGetDataReq *getreq[VTY_MAXCFGCHANGES];
+       int i;
+
+       vty->mgmt_req_id++;
+
+       for (i = 0; i < num_req; i++) {
+               mgmt_yang_get_data_req_init(&get_req[i]);
+               mgmt_yang_data_init(&yang_data[i]);
+
+               yang_data[i].xpath = (char *)xpath_list[i];
+
+               get_req[i].data = &yang_data[i];
+               getreq[i] = &get_req[i];
+       }
+       ret = mgmt_fe_get_data(mgmt_lib_hndl, vty->mgmt_session_id,
+                              vty->mgmt_req_id, datastore, getreq, num_req);
+
+       if (ret != MGMTD_SUCCESS) {
+               zlog_err("Failed to send GET-DATA to MGMTD for req-id %llu.",
+                        (unsigned long long)vty->mgmt_req_id);
+               vty_out(vty, "Failed to send GET-DATA to MGMTD!");
+               return -1;
+       }
+
+       vty->mgmt_req_pending = true;
+
+       return 0;
+}
+
 /* Install vty's own commands like `who' command. */
 void vty_init(struct thread_master *master_thread, bool do_command_logging)
 {
@@ -3220,6 +3701,11 @@ void vty_terminate(void)
        struct vty *vty;
        struct vty_serv *vtyserv;
 
+       if (mgmt_lib_hndl) {
+               mgmt_fe_client_lib_destroy(mgmt_lib_hndl);
+               mgmt_lib_hndl = 0;
+       }
+
        memset(vty_cwd, 0x00, sizeof(vty_cwd));
 
        vty_reset();
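
Taken together, the vty.c changes give every daemon a thin client path to MGMTD: initialize the frontend right after the VTY library, push buffered CLI changes with vty_mgmt_send_config_data(), and follow with an explicit commit only when no implicit commit applies. A hedged sketch of that wiring (the real call sites presumably sit in lib/command.c and the northbound CLI code, which this excerpt does not show):

/* Sketch of daemon-side wiring; error handling trimmed. */
static void example_daemon_setup(struct thread_master *master)
{
        vty_init(master, false /* do_command_logging */);
        vty_init_mgmt_fe(); /* must come after vty_init() */
}

/* Sketch of forwarding buffered CLI changes to MGMTD. */
static int example_push_changes(struct vty *vty)
{
        if (vty_mgmt_send_config_data(vty) < 0)
                return -1;

        /*
         * Classic mode already requested an implicit commit above; in
         * transactional mode the explicit commit would normally be driven
         * by the user's `commit` command (simplified here).
         */
        if (!vty_needs_implicit_commit(vty))
                return vty_mgmt_send_commit_config(vty,
                                                   false /* validate_only */,
                                                   false /* abort */);
        return 0;
}
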
index 3cab9590f1f4aca7176cf6f0cfe53ef9ab1c0c7d..b302c14913628b149c497c2918bb5770317a5dd7 100644 (file)
--- a/lib/vty.h
+++ b/lib/vty.h
@@ -25,6 +25,8 @@
 #include "compiler.h"
 #include "northbound.h"
 #include "zlog_live.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -113,12 +115,18 @@ struct vty {
 
        /* Changes enqueued to be applied in the candidate configuration. */
        size_t num_cfg_changes;
-       struct vty_cfg_change cfg_changes[VTY_MAXCFGCHANGES];
+       struct nb_cfg_change cfg_changes[VTY_MAXCFGCHANGES];
 
        /* XPath of the current node */
        int xpath_index;
        char xpath[VTY_MAXDEPTH][XPATH_MAXLEN];
 
+       /*
+        * Keep track of how many SET_CFG requests has been sent so far that
+        * Keep track of how many SET_CFG requests have been sent so far
+        * that have not been committed yet.
+       size_t mgmt_num_pending_setcfg;
+
        /* In configure mode. */
        bool config;
 
@@ -134,6 +142,7 @@ struct vty {
        /* Dynamic transaction information. */
        bool pending_allowed;
        bool pending_commit;
+       bool no_implicit_commit;
        char *pending_cmds_buf;
        size_t pending_cmds_buflen;
        size_t pending_cmds_bufpos;
@@ -208,6 +217,12 @@ struct vty {
         * without any output. */
        size_t frame_pos;
        char frame[1024];
+
+       uintptr_t mgmt_session_id;
+       uint64_t mgmt_client_id;
+       uint64_t mgmt_req_id;
+       bool mgmt_req_pending;
+       bool mgmt_locked_candidate_ds;
 };
 
 static inline void vty_push_context(struct vty *vty, int node, uint64_t id)
@@ -319,6 +334,8 @@ struct vty_arg {
 #define IS_DIRECTORY_SEP(c) ((c) == DIRECTORY_SEP)
 #endif
 
+extern struct nb_config *vty_mgmt_candidate_config;
+
 /* Prototypes. */
 extern void vty_init(struct thread_master *, bool do_command_logging);
 extern void vty_init_vtysh(void);
@@ -370,6 +387,29 @@ extern void vty_stdio_suspend(void);
 extern void vty_stdio_resume(void);
 extern void vty_stdio_close(void);
 
+extern void vty_init_mgmt_fe(void);
+extern bool vty_mgmt_fe_enabled(void);
+extern int vty_mgmt_send_config_data(struct vty *vty);
+extern int vty_mgmt_send_commit_config(struct vty *vty, bool validate_only,
+                                      bool abort);
+extern int vty_mgmt_send_get_config(struct vty *vty,
+                                   Mgmtd__DatastoreId datastore,
+                                   const char **xpath_list, int num_req);
+extern int vty_mgmt_send_get_data(struct vty *vty, Mgmtd__DatastoreId datastore,
+                                 const char **xpath_list, int num_req);
+extern int vty_mgmt_send_lockds_req(struct vty *vty, Mgmtd__DatastoreId ds_id,
+                                   bool lock);
+extern void vty_mgmt_resume_response(struct vty *vty, bool success);
+
+static inline bool vty_needs_implicit_commit(struct vty *vty)
+{
+       return frr_get_cli_mode() == FRR_CLI_CLASSIC &&
+              !vty->pending_allowed && !vty->no_implicit_commit;
+}
+
 #ifdef __cplusplus
 }
 #endif
index 78738f7d4d5d404172df4c78d7248d3ceb650487..70a3251ab39db4280c8c3a904fac85d7febee71b 100644 (file)
@@ -395,7 +395,12 @@ struct lyd_node *yang_dnode_get(const struct lyd_node *dnode, const char *xpath)
                xpath += 2;
 
        if (lyd_find_xpath(dnode, xpath, &set)) {
-               assert(0); /* XXX replicates old libyang1 base code */
+               /*
+                * The assert below is commented out because it crashes mgmtd
+                * when a bad xpath is passed; fall through and return no data
+                * instead.
+                *
+                * assert(0);  XXX replicates old libyang1 base code
+                */
                goto exit;
        }
        if (set->count == 0)
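
With the assert gone, yang_dnode_get() now returns NULL for an xpath libyang cannot evaluate instead of aborting the daemon, so callers are expected to handle a NULL result. A minimal caller-side sketch:

/* Sketch: treat an unknown/bad xpath as "no data" rather than a crash. */
static int example_show_node(struct vty *vty, const struct lyd_node *tree,
                             const char *xpath)
{
        struct lyd_node *dnode;

        dnode = yang_dnode_get(tree, xpath);
        if (!dnode) {
                vty_out(vty, "%% No data found for xpath '%s'\n", xpath);
                return CMD_WARNING;
        }

        /* ... render dnode here ... */
        return CMD_SUCCESS;
}
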
diff --git a/mgmtd/.gitignore b/mgmtd/.gitignore
new file mode 100644 (file)
index 0000000..7ce107e
--- /dev/null
@@ -0,0 +1 @@
+mgmtd
diff --git a/mgmtd/Makefile b/mgmtd/Makefile
new file mode 100644 (file)
index 0000000..d69ec5f
--- /dev/null
@@ -0,0 +1,10 @@
+all: ALWAYS
+       @$(MAKE) -s -C .. mgmtd/mgmtd
+%: ALWAYS
+       @$(MAKE) -s -C .. mgmtd/$@
+
+Makefile:
+       #nothing
+ALWAYS:
+.PHONY: ALWAYS makefiles
+.SUFFIXES:
diff --git a/mgmtd/mgmt.c b/mgmtd/mgmt.c
new file mode 100644 (file)
index 0000000..e1acfde
--- /dev/null
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * FRR Management Daemon (MGMTD) program
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_memory.h"
+
+bool mgmt_debug_be;
+bool mgmt_debug_fe;
+bool mgmt_debug_ds;
+bool mgmt_debug_txn;
+
+/* MGMTD process wide configuration.  */
+static struct mgmt_master mgmt_master;
+
+/* MGMTD process wide configuration pointer to export.  */
+struct mgmt_master *mm;
+
+void mgmt_master_init(struct thread_master *master, const int buffer_size)
+{
+       memset(&mgmt_master, 0, sizeof(struct mgmt_master));
+
+       mm = &mgmt_master;
+       mm->master = master;
+       mm->terminating = false;
+       mm->socket_buffer = buffer_size;
+       mm->perf_stats_en = true;
+}
+
+void mgmt_init(void)
+{
+
+       /*
+        * Initialize the MGMTD frontend client hooks in the VTY library;
+        * must be called after vty_init().
+        */
+       vty_init_mgmt_fe();
+
+       /* Initialize datastores */
+       mgmt_ds_init(mm);
+
+       /* Initialize history */
+       mgmt_history_init();
+
+       /* Initialize MGMTD Transaction module */
+       mgmt_txn_init(mm, mm->master);
+
+       /* Initialize the MGMTD Backend Adapter Module */
+       mgmt_be_adapter_init(mm->master);
+
+       /* Initialize the MGMTD Frontend Adapter Module */
+       mgmt_fe_adapter_init(mm->master, mm);
+
+       /* Start the MGMTD Backend Server for clients to connect */
+       mgmt_be_server_init(mm->master);
+
+       /* Start the MGMTD Frontend Server for clients to connect */
+       mgmt_fe_server_init(mm->master);
+
+       /* MGMTD VTY commands installation. */
+       mgmt_vty_init();
+}
+
+void mgmt_terminate(void)
+{
+       mgmt_fe_server_destroy();
+       mgmt_fe_adapter_destroy();
+       mgmt_be_server_destroy();
+       mgmt_be_adapter_destroy();
+       mgmt_txn_destroy();
+       mgmt_history_destroy();
+       mgmt_ds_destroy();
+}
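
mgmt_main.c (added by this commit but not included in this excerpt) is expected to drive these entry points in the usual FRR daemon shape; a hedged sketch of the ordering:

/* Hedged sketch of mgmtd startup/shutdown ordering (see mgmt_main.c). */
static void example_mgmtd_start(struct thread_master *master)
{
        mgmt_master_init(master, MGMTD_SOCKET_BUF_SIZE);
        mgmt_init(); /* datastores, history, txn, adapters, servers, VTY */
}

static void example_mgmtd_stop(void)
{
        mgmt_terminate(); /* tears the same modules down again */
}
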
diff --git a/mgmtd/mgmt.h b/mgmtd/mgmt.h
new file mode 100644 (file)
index 0000000..2a9d947
--- /dev/null
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD message definition header.
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_H
+#define _FRR_MGMTD_H
+
+#include "vrf.h"
+#include "defaults.h"
+#include "stream.h"
+
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_VTY_PORT 2622
+#define MGMTD_SOCKET_BUF_SIZE 65535
+#define MGMTD_MAX_COMMIT_LIST 10
+
+extern bool mgmt_debug_be;
+extern bool mgmt_debug_fe;
+extern bool mgmt_debug_ds;
+extern bool mgmt_debug_txn;
+
+struct mgmt_txn_ctx;
+
+/*
+ * MGMTD master for system wide configurations and variables.
+ */
+struct mgmt_master {
+       struct thread_master *master;
+
+       /* How big should we set the socket buffer size */
+       uint32_t socket_buffer;
+
+       /* List of all transactions currently known to MGMTD */
+       struct mgmt_txns_head txn_list;
+
+       /* Map of transactions indexed by their ID */
+       struct hash *txn_hash;
+       uint64_t next_txn_id;
+
+       /* The single instance of config transaction allowed at any time */
+       struct mgmt_txn_ctx *cfg_txn;
+
+       /* Datastores */
+       struct mgmt_ds_ctx *running_ds;
+       struct mgmt_ds_ctx *candidate_ds;
+       struct mgmt_ds_ctx *oper_ds;
+
+       bool terminating;   /* set once a terminate signal has been seen */
+       bool perf_stats_en; /* to enable performance stats measurement */
+
+       /* List of commit infos */
+       struct mgmt_cmt_infos_head cmts; /* List of last 10 commits executed. */
+};
+
+extern struct mgmt_master *mm;
+
+/* Inline functions */
+static inline unsigned long timeval_elapsed(struct timeval a, struct timeval b)
+{
+       return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
+               + (a.tv_usec - b.tv_usec));
+}
+
+/*
+ * Remove trailing separator from a string.
+ *
+ * str
+ *    A null terminated string.
+ *
+ * sep
+ *    Trailing character that needs to be removed.
+ */
+static inline void mgmt_remove_trailing_separator(char *str, char sep)
+{
+       size_t len;
+
+       len = strlen(str);
+       if (len && str[len - 1] == sep)
+               str[len - 1] = '\0';
+}
+
+/* Prototypes. */
+extern void mgmt_terminate(void);
+extern void mgmt_reset(void);
+extern time_t mgmt_clock(void);
+
+extern int mgmt_config_write(struct vty *vty);
+
+extern void mgmt_master_init(struct thread_master *master,
+                            const int buffer_size);
+
+extern void mgmt_init(void);
+extern void mgmt_vty_init(void);
+
+static inline char *mgmt_realtime_to_string(struct timeval *tv, char *buf,
+                                           size_t sz)
+{
+       struct tm tm;
+       size_t n;
+
+       localtime_r((const time_t *)&tv->tv_sec, &tm);
+       n = strftime(buf, sz, "%Y-%m-%dT%H:%M:%S", &tm);
+       snprintf(&buf[n], sz - n, ",%06u000", (unsigned int)tv->tv_usec);
+       return buf;
+}
+
+#endif /* _FRR_MGMTD_H */
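
The inline helpers above back the commit-history and perf-stats bookkeeping; a small usage sketch (gettimeofday() and TIMER_SECOND_MICRO are assumed to come in via the FRR headers included at the top of this file):

/* Sketch: time an operation and render its start timestamp. */
static void example_perf_probe(void)
{
        struct timeval start, end;
        char ts[64];

        gettimeofday(&start, NULL);
        /* ... do some work ... */
        gettimeofday(&end, NULL);

        zlog_debug("took %lu usec, started at %s",
                   timeval_elapsed(end, start),
                   mgmt_realtime_to_string(&start, ts, sizeof(ts)));
}
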
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
new file mode 100644 (file)
index 0000000..2957b29
--- /dev/null
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "thread.h"
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmt_be_client.h"
+#include "mgmtd/mgmt_be_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BE_ADAPTER_DBG(fmt, ...)                                        \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BE_ADAPTER_ERR(fmt, ...)                                        \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BE_ADAPTER_DBG(fmt, ...)                                        \
+       do {                                                                  \
+               if (mgmt_debug_be)                                            \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);      \
+       } while (0)
+#define MGMTD_BE_ADAPTER_ERR(fmt, ...)                                        \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define FOREACH_ADAPTER_IN_LIST(adapter)                                       \
+       frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter))
+
+/*
+ * Static mapping of YANG XPath regular expressions and
+ * the corresponding interested backend clients.
+ * NOTE: This is a static mapping defined by all MGMTD
+ * backend client modules (for now, till we develop a
+ * more dynamic way of creating and updating this map).
+ * A running map is created by MGMTD in run-time to
+ * handle real-time mapping of YANG xpaths to one or
+ * more interested backend client adapters.
+ *
+ * Please see xpath_map_reg[] in lib/mgmt_be_client.c
+ * for the actual map
+ */
+struct mgmt_be_xpath_map_reg {
+       const char *xpath_regexp; /* Longest matching regular expression */
+       enum mgmt_be_client_id *be_clients; /* clients to notify */
+};
+
+struct mgmt_be_xpath_regexp_map {
+       const char *xpath_regexp;
+       struct mgmt_be_client_subscr_info be_subscrs;
+};
+
+struct mgmt_be_get_adapter_config_params {
+       struct mgmt_be_client_adapter *adapter;
+       struct nb_config_cbs *cfg_chgs;
+       uint32_t seq;
+};
+
+/*
+ * Static mapping of YANG XPath regular expressions and
+ * the corresponding interested backend clients.
+ * NOTE: This is a static mapping defined by all MGMTD
+ * backend client modules (for now, till we develop a
+ * more dynamic way of creating and updating this map).
+ * A running map is created by MGMTD in run-time to
+ * handle real-time mapping of YANG xpaths to one or
+ * more interested backend client adapters.
+ */
+static const struct mgmt_be_xpath_map_reg xpath_static_map_reg[] = {
+       {.xpath_regexp = "/frr-vrf:lib/*",
+        .be_clients =
+                (enum mgmt_be_client_id[]){
+#if HAVE_STATICD
+                MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+                        MGMTD_BE_CLIENT_ID_MAX}},
+       {.xpath_regexp = "/frr-interface:lib/*",
+        .be_clients =
+                (enum mgmt_be_client_id[]){
+#if HAVE_STATICD
+                MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+                        MGMTD_BE_CLIENT_ID_MAX}},
+       {.xpath_regexp =
+                "/frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/*",
+
+        .be_clients =
+                (enum mgmt_be_client_id[]){
+#if HAVE_STATICD
+                MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+                        MGMTD_BE_CLIENT_ID_MAX}},
+};
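
At run time this static table is folded into mgmt_xpath_map[] by mgmt_be_xpath_map_init() below and queried through mgmt_be_get_subscr_info_for_xpath(). A hedged sketch of such a lookup, assuming staticd is compiled in and using the subscription field names that appear later in this file:

/* Sketch: is a given backend client interested in a config xpath? */
static bool example_staticd_interested(const char *xpath)
{
        struct mgmt_be_client_subscr_info subscr;

        if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr) != 0)
                return false;

        /* e.g. "/frr-vrf:lib/vrf[name='red']" falls under "/frr-vrf:lib/*" */
        return subscr.xpath_subscr[MGMTD_BE_CLIENT_ID_STATICD].subscribed != 0;
}
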
+
+#define MGMTD_BE_MAX_NUM_XPATH_MAP 256
+static struct mgmt_be_xpath_regexp_map
+       mgmt_xpath_map[MGMTD_BE_MAX_NUM_XPATH_MAP];
+static int mgmt_num_xpath_maps;
+
+static struct thread_master *mgmt_be_adapter_tm;
+
+static struct mgmt_be_adapters_head mgmt_be_adapters;
+
+static struct mgmt_be_client_adapter
+       *mgmt_be_adapters_by_id[MGMTD_BE_CLIENT_ID_MAX];
+
+/* Forward declarations */
+static void
+mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
+                              enum mgmt_be_event event);
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_fd(int conn_fd)
+{
+       struct mgmt_be_client_adapter *adapter;
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               if (adapter->conn_fd == conn_fd)
+                       return adapter;
+       }
+
+       return NULL;
+}
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_name(const char *name)
+{
+       struct mgmt_be_client_adapter *adapter;
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               if (!strncmp(adapter->name, name, sizeof(adapter->name)))
+                       return adapter;
+       }
+
+       return NULL;
+}
+
+static void
+mgmt_be_cleanup_adapters(void)
+{
+       struct mgmt_be_client_adapter *adapter;
+
+       FOREACH_ADAPTER_IN_LIST (adapter)
+               mgmt_be_adapter_unlock(&adapter);
+}
+
+static void mgmt_be_xpath_map_init(void)
+{
+       int indx, num_xpath_maps;
+       uint16_t indx1;
+       enum mgmt_be_client_id id;
+
+       MGMTD_BE_ADAPTER_DBG("Init XPath Maps");
+
+       num_xpath_maps = (int)array_size(xpath_static_map_reg);
+       for (indx = 0; indx < num_xpath_maps; indx++) {
+               MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'",
+                                    xpath_static_map_reg[indx].xpath_regexp);
+               mgmt_xpath_map[indx].xpath_regexp =
+                       xpath_static_map_reg[indx].xpath_regexp;
+               for (indx1 = 0;; indx1++) {
+                       id = xpath_static_map_reg[indx].be_clients[indx1];
+                       if (id == MGMTD_BE_CLIENT_ID_MAX)
+                               break;
+                       MGMTD_BE_ADAPTER_DBG("   -- Client: %s Id: %u",
+                                            mgmt_be_client_id2name(id),
+                                            id);
+                       if (id < MGMTD_BE_CLIENT_ID_MAX) {
+                               mgmt_xpath_map[indx]
+                                       .be_subscrs.xpath_subscr[id]
+                                       .validate_config = 1;
+                               mgmt_xpath_map[indx]
+                                       .be_subscrs.xpath_subscr[id]
+                                       .notify_config = 1;
+                               mgmt_xpath_map[indx]
+                                       .be_subscrs.xpath_subscr[id]
+                                       .own_oper_data = 1;
+                       }
+               }
+       }
+
+       mgmt_num_xpath_maps = indx;
+       MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", mgmt_num_xpath_maps);
+}
+
+static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
+                                    const char *xpath)
+{
+       int match_len = 0, re_indx = 0, xp_indx = 0;
+       int rexp_len, xpath_len;
+       bool match = true, re_wild = false, xp_wild = false;
+       bool delim = false, enter_wild_match = false;
+       char wild_delim = 0;
+
+       rexp_len = strlen(xpath_regexp);
+       xpath_len = strlen(xpath);
+
+       /*
+        * Remove the trailing wildcard from the regexp and Xpath.
+        */
+       if (rexp_len && xpath_regexp[rexp_len-1] == '*')
+               rexp_len--;
+       if (xpath_len && xpath[xpath_len-1] == '*')
+               xpath_len--;
+
+       if (!rexp_len || !xpath_len)
+               return 0;
+
+       for (re_indx = 0, xp_indx = 0;
+            match && re_indx < rexp_len && xp_indx < xpath_len;) {
+               match = (xpath_regexp[re_indx] == xpath[xp_indx]);
+
+               /*
+                * Check if we need to enter wildcard matching.
+                */
+               if (!enter_wild_match && !match &&
+                       (xpath_regexp[re_indx] == '*'
+                        || xpath[xp_indx] == '*')) {
+                       /*
+                        * Found wildcard
+                        */
+                       enter_wild_match =
+                               (xpath_regexp[re_indx-1] == '/'
+                                || xpath_regexp[re_indx-1] == '\''
+                                || xpath[xp_indx-1] == '/'
+                                || xpath[xp_indx-1] == '\'');
+                       if (enter_wild_match) {
+                               if (xpath_regexp[re_indx] == '*') {
+                                       /*
+                                        * Begin RE wildcard match.
+                                        */
+                                       re_wild = true;
+                                       wild_delim = xpath_regexp[re_indx-1];
+                               } else if (xpath[xp_indx] == '*') {
+                                       /*
+                                        * Begin XP wildcard match.
+                                        */
+                                       xp_wild = true;
+                                       wild_delim = xpath[xp_indx-1];
+                               }
+                       }
+               }
+
+               /*
+                * Check if we need to exit wildcard matching.
+                */
+               if (enter_wild_match) {
+                       if (re_wild && xpath[xp_indx] == wild_delim) {
+                               /*
+                                * End RE wildcard matching.
+                                */
+                               re_wild = false;
+                               if (re_indx < rexp_len-1)
+                                       re_indx++;
+                               enter_wild_match = false;
+                       } else if (xp_wild
+                                  && xpath_regexp[re_indx] == wild_delim) {
+                               /*
+                                * End XP wildcard matching.
+                                */
+                               xp_wild = false;
+                               if (xp_indx < xpath_len-1)
+                                       xp_indx++;
+                               enter_wild_match = false;
+                       }
+               }
+
+               match = (xp_wild || re_wild
+                        || xpath_regexp[re_indx] == xpath[xp_indx]);
+
+               /*
+                * Check if we found a delimiter in both the Xpaths
+                */
+               if ((xpath_regexp[re_indx] == '/'
+                       && xpath[xp_indx] == '/')
+                       || (xpath_regexp[re_indx] == ']'
+                               && xpath[xp_indx] == ']')
+                       || (xpath_regexp[re_indx] == '['
+                               && xpath[xp_indx] == '[')) {
+                       /*
+                        * Increment the match count if we have a
+                        * new delimiter.
+                        */
+                       if (match && re_indx && xp_indx && !delim)
+                               match_len++;
+                       delim = true;
+               } else {
+                       delim = false;
+               }
+
+               /*
+                * Proceed to the next character in the RE/XP string as
+                * necessary.
+                */
+               if (!re_wild)
+                       re_indx++;
+               if (!xp_wild)
+                       xp_indx++;
+       }
+
+       /*
+        * If we finished matching and the last token was a full match
+        * increment the match count appropriately.
+        */
+       if (match && !delim &&
+               (xpath_regexp[re_indx] == '/'
+                || xpath_regexp[re_indx] == ']'))
+               match_len++;
+
+       return match_len;
+}
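
The matcher above is deliberately not a full regex engine: it walks both strings in lock-step, expands '*' between '/' or quote delimiters, and returns roughly the number of path components that matched, so the most specific registration wins when several xpath maps match. A small illustration, assuming the static map entries defined earlier in this file:

/* Illustration: a matching registration scores > 0, unrelated ones score 0. */
static void example_match_scores(void)
{
        const char *xpath = "/frr-vrf:lib/vrf[name='red']";

        int vrf_score = mgmt_be_eval_regexp_match("/frr-vrf:lib/*", xpath);
        int if_score = mgmt_be_eval_regexp_match("/frr-interface:lib/*", xpath);

        zlog_debug("vrf map score %d, interface map score %d (vrf wins)",
                   vrf_score, if_score);
}
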
+
+static void mgmt_be_adapter_disconnect(struct mgmt_be_client_adapter *adapter)
+{
+       if (adapter->conn_fd >= 0) {
+               close(adapter->conn_fd);
+               adapter->conn_fd = -1;
+       }
+
+       /*
+        * Notify about client disconnect for appropriate cleanup
+        */
+       mgmt_txn_notify_be_adapter_conn(adapter, false);
+
+       if (adapter->id < MGMTD_BE_CLIENT_ID_MAX) {
+               mgmt_be_adapters_by_id[adapter->id] = NULL;
+               adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+       }
+
+       mgmt_be_adapters_del(&mgmt_be_adapters, adapter);
+
+       mgmt_be_adapter_unlock(&adapter);
+}
+
+static void
+mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_be_client_adapter *old;
+
+       FOREACH_ADAPTER_IN_LIST (old) {
+               if (old != adapter
+                   && !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+                       /*
+                        * We have a Zombie lingering around
+                        */
+                       MGMTD_BE_ADAPTER_DBG(
+                               "Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+                               adapter->name, adapter->conn_fd, old->conn_fd);
+                       mgmt_be_adapter_disconnect(old);
+               }
+       }
+}
+
+static int
+mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
+                             Mgmtd__BeMessage *be_msg)
+{
+       /*
+        * protobuf-c adds a max size enum with an internal, and changing by
+        * version, name; cast to an int to avoid unhandled enum warnings
+        */
+       switch ((int)be_msg->message_case) {
+       case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
+               MGMTD_BE_ADAPTER_DBG(
+                       "Got Subscribe Req Msg from '%s' to %sregister %u xpaths",
+                       be_msg->subscr_req->client_name,
+                       !be_msg->subscr_req->subscribe_xpaths
+                                       && be_msg->subscr_req->n_xpath_reg
+                               ? "de"
+                               : "",
+                       (uint32_t)be_msg->subscr_req->n_xpath_reg);
+
+               if (strlen(be_msg->subscr_req->client_name)) {
+                       strlcpy(adapter->name, be_msg->subscr_req->client_name,
+                               sizeof(adapter->name));
+                       adapter->id = mgmt_be_client_name2id(adapter->name);
+                       if (adapter->id >= MGMTD_BE_CLIENT_ID_MAX) {
+                               MGMTD_BE_ADAPTER_ERR(
+                                       "Unable to resolve adapter '%s' to a valid ID. Disconnecting!",
+                                       adapter->name);
+                               mgmt_be_adapter_disconnect(adapter);
+                               /* adapter->id is invalid here; do not use it
+                                * to index mgmt_be_adapters_by_id[].
+                                */
+                               break;
+                       }
+                       mgmt_be_adapters_by_id[adapter->id] = adapter;
+                       mgmt_be_adapter_cleanup_old_conn(adapter);
+               }
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
+               MGMTD_BE_ADAPTER_DBG(
+                       "Got %s TXN_REPLY Msg for Txn-Id 0x%llx from '%s' with '%s'",
+                       be_msg->txn_reply->create ? "Create" : "Delete",
+                       (unsigned long long)be_msg->txn_reply->txn_id,
+                       adapter->name,
+                       be_msg->txn_reply->success ? "success" : "failure");
+               /*
+                * Forward the TXN_REPLY to txn module.
+                */
+               mgmt_txn_notify_be_txn_reply(
+                       be_msg->txn_reply->txn_id,
+                       be_msg->txn_reply->create,
+                       be_msg->txn_reply->success, adapter);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+               MGMTD_BE_ADAPTER_DBG(
+                       "Got CFGDATA_REPLY Msg from '%s' for Txn-Id 0x%llx Batch-Id 0x%llx with Err:'%s'",
+                       adapter->name,
+                       (unsigned long long)be_msg->cfg_data_reply->txn_id,
+                       (unsigned long long)be_msg->cfg_data_reply->batch_id,
+                       be_msg->cfg_data_reply->error_if_any
+                               ? be_msg->cfg_data_reply->error_if_any
+                               : "None");
+               /*
+                * Forward the CGFData-create reply to txn module.
+                */
+               mgmt_txn_notify_be_cfgdata_reply(
+                       be_msg->cfg_data_reply->txn_id,
+                       be_msg->cfg_data_reply->batch_id,
+                       be_msg->cfg_data_reply->success,
+                       be_msg->cfg_data_reply->error_if_any, adapter);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+               MGMTD_BE_ADAPTER_DBG(
+                       "Got %s CFG_APPLY_REPLY Msg from '%s' for Txn-Id 0x%llx for %d batches (Id 0x%llx-0x%llx),  Err:'%s'",
+                       be_msg->cfg_apply_reply->success ? "successful"
+                                                           : "failed",
+                       adapter->name,
+                       (unsigned long long)
+                               be_msg->cfg_apply_reply->txn_id,
+                       (int)be_msg->cfg_apply_reply->n_batch_ids,
+                       (unsigned long long)
+                               be_msg->cfg_apply_reply->batch_ids[0],
+                       (unsigned long long)be_msg->cfg_apply_reply
+                               ->batch_ids[be_msg->cfg_apply_reply
+                                                   ->n_batch_ids
+                                           - 1],
+                       be_msg->cfg_apply_reply->error_if_any
+                               ? be_msg->cfg_apply_reply->error_if_any
+                               : "None");
+               /*
+                * Forward the CGFData-apply reply to txn module.
+                */
+               mgmt_txn_notify_be_cfg_apply_reply(
+                       be_msg->cfg_apply_reply->txn_id,
+                       be_msg->cfg_apply_reply->success,
+                       (uint64_t *)be_msg->cfg_apply_reply->batch_ids,
+                       be_msg->cfg_apply_reply->n_batch_ids,
+                       be_msg->cfg_apply_reply->error_if_any, adapter);
+               break;
+       case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_NOTIFY_DATA:
+               /*
+                * TODO: Add handling code in future.
+                */
+               break;
+       /*
+        * NOTE: The following messages are always sent from MGMTD to
+        * Backend clients only and/or need not be handled on MGMTd.
+        */
+       case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
+       case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_CFG_CMD_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE_SHOW_CMD_REQ:
+       case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
+       default:
+               /*
+                * A 'default' case is being added contrary to the
+                * FRR code guidelines to take care of build
+                * failures on certain build systems (courtesy of
+                * the protobuf-c package).
+                */
+               break;
+       }
+
+       return 0;
+}
+
+static inline void
+mgmt_be_adapter_sched_msg_write(struct mgmt_be_client_adapter *adapter)
+{
+       if (!CHECK_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF))
+               mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE);
+}
+
+static inline void
+mgmt_be_adapter_writes_on(struct mgmt_be_client_adapter *adapter)
+{
+       MGMTD_BE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
+       UNSET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF);
+       mgmt_be_adapter_sched_msg_write(adapter);
+}
+
+static inline void
+mgmt_be_adapter_writes_off(struct mgmt_be_client_adapter *adapter)
+{
+       SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF);
+       MGMTD_BE_ADAPTER_DBG("Pause writing msgs for '%s'", adapter->name);
+}
+
+static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
+                                   Mgmtd__BeMessage *be_msg)
+{
+       if (adapter->conn_fd == -1) {
+               MGMTD_BE_ADAPTER_DBG("can't send message on closed connection");
+               return -1;
+       }
+
+       int rv = mgmt_msg_send_msg(
+               &adapter->mstate, be_msg,
+               mgmtd__be_message__get_packed_size(be_msg),
+               (size_t(*)(void *, void *))mgmtd__be_message__pack,
+               mgmt_debug_be);
+       mgmt_be_adapter_sched_msg_write(adapter);
+       return rv;
+}
+
+static int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+                                   uint64_t txn_id, bool create)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeTxnReq txn_req;
+
+       mgmtd__be_txn_req__init(&txn_req);
+       txn_req.create = create;
+       txn_req.txn_id = txn_id;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ;
+       be_msg.txn_req = &txn_req;
+
+       MGMTD_BE_ADAPTER_DBG(
+               "Sending TXN_REQ message to Backend client '%s' for Txn-Id %llx",
+               adapter->name, (unsigned long long)txn_id);
+
+       return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+static int
+mgmt_be_send_cfgdata_create_req(struct mgmt_be_client_adapter *adapter,
+                                  uint64_t txn_id, uint64_t batch_id,
+                                  Mgmtd__YangCfgDataReq **cfgdata_reqs,
+                                  size_t num_reqs, bool end_of_data)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeCfgDataCreateReq cfgdata_req;
+
+       mgmtd__be_cfg_data_create_req__init(&cfgdata_req);
+       cfgdata_req.batch_id = batch_id;
+       cfgdata_req.txn_id = txn_id;
+       cfgdata_req.data_req = cfgdata_reqs;
+       cfgdata_req.n_data_req = num_reqs;
+       cfgdata_req.end_of_data = end_of_data;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ;
+       be_msg.cfg_data_req = &cfgdata_req;
+
+       MGMTD_BE_ADAPTER_DBG(
+               "Sending CFGDATA_CREATE_REQ message to Backend client '%s' for Txn-Id %llx, Batch-Id: %llx",
+               adapter->name, (unsigned long long)txn_id,
+               (unsigned long long)batch_id);
+
+       return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+static int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+                                       uint64_t txn_id)
+{
+       Mgmtd__BeMessage be_msg;
+       Mgmtd__BeCfgDataApplyReq apply_req;
+
+       mgmtd__be_cfg_data_apply_req__init(&apply_req);
+       apply_req.txn_id = txn_id;
+
+       mgmtd__be_message__init(&be_msg);
+       be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ;
+       be_msg.cfg_apply_req = &apply_req;
+
+       MGMTD_BE_ADAPTER_DBG(
+               "Sending CFG_APPLY_REQ message to Backend client '%s' for Txn-Id 0x%llx",
+               adapter->name, (unsigned long long)txn_id);
+
+       return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+static void mgmt_be_adapter_process_msg(void *user_ctx, uint8_t *data,
+                                       size_t len)
+{
+       struct mgmt_be_client_adapter *adapter = user_ctx;
+       Mgmtd__BeMessage *be_msg;
+
+       be_msg = mgmtd__be_message__unpack(NULL, len, data);
+       if (!be_msg) {
+               MGMTD_BE_ADAPTER_DBG(
+                       "Failed to decode %zu bytes for adapter: %s", len,
+                       adapter->name);
+               return;
+       }
+       MGMTD_BE_ADAPTER_DBG("Decoded %zu bytes of message: %u for adapter: %s",
+                            len, be_msg->message_case, adapter->name);
+       (void)mgmt_be_adapter_handle_msg(adapter, be_msg);
+       mgmtd__be_message__free_unpacked(be_msg, NULL);
+}
+
+static void mgmt_be_adapter_proc_msgbufs(struct thread *thread)
+{
+       struct mgmt_be_client_adapter *adapter = THREAD_ARG(thread);
+
+       if (mgmt_msg_procbufs(&adapter->mstate, mgmt_be_adapter_process_msg,
+                             adapter, mgmt_debug_be))
+               mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG);
+}
+
+static void mgmt_be_adapter_read(struct thread *thread)
+{
+       struct mgmt_be_client_adapter *adapter;
+       enum mgmt_msg_rsched rv;
+
+       adapter = (struct mgmt_be_client_adapter *)THREAD_ARG(thread);
+
+       rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd, mgmt_debug_be);
+       if (rv == MSR_DISCONNECT) {
+               mgmt_be_adapter_disconnect(adapter);
+               return;
+       }
+       if (rv == MSR_SCHED_BOTH)
+               mgmt_be_adapter_register_event(adapter, MGMTD_BE_PROC_MSG);
+       mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
+}
+
+static void mgmt_be_adapter_write(struct thread *thread)
+{
+       struct mgmt_be_client_adapter *adapter = THREAD_ARG(thread);
+       enum mgmt_msg_wsched rv;
+
+       rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd, mgmt_debug_be);
+       if (rv == MSW_SCHED_STREAM)
+               mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_WRITE);
+       else if (rv == MSW_DISCONNECT)
+               mgmt_be_adapter_disconnect(adapter);
+       else if (rv == MSW_SCHED_WRITES_OFF) {
+               mgmt_be_adapter_writes_off(adapter);
+               mgmt_be_adapter_register_event(adapter,
+                                              MGMTD_BE_CONN_WRITES_ON);
+       } else
+               assert(rv == MSW_SCHED_NONE);
+}
+
+static void mgmt_be_adapter_resume_writes(struct thread *thread)
+{
+       struct mgmt_be_client_adapter *adapter;
+
+       adapter = (struct mgmt_be_client_adapter *)THREAD_ARG(thread);
+       assert(adapter && adapter->conn_fd >= 0);
+
+       mgmt_be_adapter_writes_on(adapter);
+}
+
+static void mgmt_be_iter_and_get_cfg(struct mgmt_ds_ctx *ds_ctx,
+                                       char *xpath, struct lyd_node *node,
+                                       struct nb_node *nb_node, void *ctx)
+{
+       struct mgmt_be_client_subscr_info subscr_info;
+       struct mgmt_be_get_adapter_config_params *parms;
+       struct mgmt_be_client_adapter *adapter;
+       struct nb_config_cbs *root;
+       uint32_t *seq;
+
+       if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info) != 0) {
+               MGMTD_BE_ADAPTER_ERR(
+                       "ERROR: Failed to get subscriber for '%s'", xpath);
+               return;
+       }
+
+       parms = (struct mgmt_be_get_adapter_config_params *)ctx;
+
+       adapter = parms->adapter;
+       if (!subscr_info.xpath_subscr[adapter->id].subscribed)
+               return;
+
+       root = parms->cfg_chgs;
+       seq = &parms->seq;
+       nb_config_diff_created(node, seq, root);
+}
+
+static void mgmt_be_adapter_conn_init(struct thread *thread)
+{
+       struct mgmt_be_client_adapter *adapter;
+
+       adapter = (struct mgmt_be_client_adapter *)THREAD_ARG(thread);
+       assert(adapter && adapter->conn_fd >= 0);
+
+       /*
+        * Check first if the current session can run a CONFIG
+        * transaction or not. Reschedule if a CONFIG transaction
+        * from another session is already in progress.
+        */
+       if (mgmt_config_txn_in_progress() != MGMTD_SESSION_ID_NONE) {
+               mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);
+               return;
+       }
+
+       /*
+        * Notify TXN module to create a CONFIG transaction and
+        * download the CONFIGs identified for this new client.
+        * If the TXN module fails to initiate the CONFIG transaction
+        * disconnect from the client forcing a reconnect later.
+        * That should also take care of destroying the adapter.
+        */
+       if (mgmt_txn_notify_be_adapter_conn(adapter, true) != 0) {
+               mgmt_be_adapter_disconnect(adapter);
+               adapter = NULL;
+       }
+}
+
+static void
+mgmt_be_adapter_register_event(struct mgmt_be_client_adapter *adapter,
+                               enum mgmt_be_event event)
+{
+       struct timeval tv = {0};
+
+       switch (event) {
+       case MGMTD_BE_CONN_INIT:
+               thread_add_timer_msec(mgmt_be_adapter_tm,
+                                     mgmt_be_adapter_conn_init, adapter,
+                                     MGMTD_BE_CONN_INIT_DELAY_MSEC,
+                                     &adapter->conn_init_ev);
+               assert(adapter->conn_init_ev);
+               break;
+       case MGMTD_BE_CONN_READ:
+               thread_add_read(mgmt_be_adapter_tm, mgmt_be_adapter_read,
+                               adapter, adapter->conn_fd, &adapter->conn_read_ev);
+               assert(adapter->conn_read_ev);
+               break;
+       case MGMTD_BE_CONN_WRITE:
+               if (adapter->conn_write_ev)
+                       MGMTD_BE_ADAPTER_DBG(
+                               "write ready notify already set for client %s",
+                               adapter->name);
+               else
+                       MGMTD_BE_ADAPTER_DBG(
+                               "scheduling write ready notify for client %s",
+                               adapter->name);
+               thread_add_write(mgmt_be_adapter_tm, mgmt_be_adapter_write,
+                                adapter, adapter->conn_fd, &adapter->conn_write_ev);
+               assert(adapter->conn_write_ev);
+               break;
+       case MGMTD_BE_PROC_MSG:
+               tv.tv_usec = MGMTD_BE_MSG_PROC_DELAY_USEC;
+               thread_add_timer_tv(mgmt_be_adapter_tm,
+                                   mgmt_be_adapter_proc_msgbufs, adapter, &tv,
+                                   &adapter->proc_msg_ev);
+               assert(adapter->proc_msg_ev);
+               break;
+       case MGMTD_BE_CONN_WRITES_ON:
+               thread_add_timer_msec(mgmt_be_adapter_tm,
+                                     mgmt_be_adapter_resume_writes, adapter,
+                                     MGMTD_BE_MSG_WRITE_DELAY_MSEC,
+                                     &adapter->conn_writes_on);
+               assert(adapter->conn_writes_on);
+               break;
+       case MGMTD_BE_SERVER:
+       case MGMTD_BE_SCHED_CFG_PREPARE:
+       case MGMTD_BE_RESCHED_CFG_PREPARE:
+       case MGMTD_BE_SCHED_CFG_APPLY:
+       case MGMTD_BE_RESCHED_CFG_APPLY:
+               assert(!"mgmt_be_adapter_register_event() called incorrectly");
+               break;
+       }
+}
+
+void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter)
+{
+       adapter->refcount++;
+}
+
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter)
+{
+       assert(*adapter && (*adapter)->refcount);
+
+       (*adapter)->refcount--;
+       if (!(*adapter)->refcount) {
+               mgmt_be_adapters_del(&mgmt_be_adapters, *adapter);
+               THREAD_OFF((*adapter)->conn_init_ev);
+               THREAD_OFF((*adapter)->conn_read_ev);
+               THREAD_OFF((*adapter)->conn_write_ev);
+               THREAD_OFF((*adapter)->conn_writes_on);
+               THREAD_OFF((*adapter)->proc_msg_ev);
+               mgmt_msg_destroy(&(*adapter)->mstate);
+               XFREE(MTYPE_MGMTD_BE_ADPATER, *adapter);
+       }
+
+       *adapter = NULL;
+}
+
+int mgmt_be_adapter_init(struct thread_master *tm)
+{
+       if (!mgmt_be_adapter_tm) {
+               mgmt_be_adapter_tm = tm;
+               memset(mgmt_xpath_map, 0, sizeof(mgmt_xpath_map));
+               mgmt_num_xpath_maps = 0;
+               memset(mgmt_be_adapters_by_id, 0,
+                      sizeof(mgmt_be_adapters_by_id));
+               mgmt_be_adapters_init(&mgmt_be_adapters);
+               mgmt_be_xpath_map_init();
+       }
+
+       return 0;
+}
+
+void mgmt_be_adapter_destroy(void)
+{
+       mgmt_be_cleanup_adapters();
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_create_adapter(int conn_fd, union sockunion *from)
+{
+       struct mgmt_be_client_adapter *adapter = NULL;
+
+       adapter = mgmt_be_find_adapter_by_fd(conn_fd);
+       if (!adapter) {
+               adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
+                               sizeof(struct mgmt_be_client_adapter));
+               assert(adapter);
+
+               adapter->conn_fd = conn_fd;
+               adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+               memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
+               snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+                        adapter->conn_fd);
+               mgmt_msg_init(&adapter->mstate, MGMTD_BE_MAX_NUM_MSG_PROC,
+                             MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN,
+                             "BE-adapter");
+               mgmt_be_adapter_lock(adapter);
+
+               mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_READ);
+               mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
+
+               RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
+
+               MGMTD_BE_ADAPTER_DBG("Added new MGMTD Backend adapter '%s'",
+                                     adapter->name);
+       }
+
+       /* Make client socket non-blocking.  */
+       set_nonblocking(adapter->conn_fd);
+       setsockopt_so_sendbuf(adapter->conn_fd, MGMTD_SOCKET_BE_SEND_BUF_SIZE);
+       setsockopt_so_recvbuf(adapter->conn_fd, MGMTD_SOCKET_BE_RECV_BUF_SIZE);
+
+       /* Trigger resync of config with the new adapter */
+       mgmt_be_adapter_register_event(adapter, MGMTD_BE_CONN_INIT);
+
+       return adapter;
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id)
+{
+       return (id < MGMTD_BE_CLIENT_ID_MAX ? mgmt_be_adapters_by_id[id]
+                                              : NULL);
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name)
+{
+       return mgmt_be_find_adapter_by_name(name);
+}
+
+int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+                                 struct mgmt_ds_ctx *ds_ctx,
+                                 struct nb_config_cbs **cfg_chgs)
+{
+       char base_xpath[] = "/";
+       struct mgmt_be_get_adapter_config_params parms;
+
+       assert(cfg_chgs);
+
+       if (RB_EMPTY(nb_config_cbs, &adapter->cfg_chgs)) {
+               parms.adapter = adapter;
+               parms.cfg_chgs = &adapter->cfg_chgs;
+               parms.seq = 0;
+
+               mgmt_ds_iter_data(ds_ctx, base_xpath,
+                                 mgmt_be_iter_and_get_cfg, (void *)&parms,
+                                 false);
+       }
+
+       *cfg_chgs = &adapter->cfg_chgs;
+       return 0;
+}
+
+int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
+                          uint64_t txn_id)
+{
+       return mgmt_be_send_txn_req(adapter, txn_id, true);
+}
+
+int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
+                           uint64_t txn_id)
+{
+       return mgmt_be_send_txn_req(adapter, txn_id, false);
+}
+
+int mgmt_be_send_cfg_data_create_req(struct mgmt_be_client_adapter *adapter,
+                                       uint64_t txn_id, uint64_t batch_id,
+                                       struct mgmt_be_cfgreq *cfg_req,
+                                       bool end_of_data)
+{
+       return mgmt_be_send_cfgdata_create_req(
+               adapter, txn_id, batch_id, cfg_req->cfgdata_reqs,
+               cfg_req->num_reqs, end_of_data);
+}
+
+extern int
+mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
+                             uint64_t txn_id)
+{
+       return mgmt_be_send_cfgapply_req(adapter, txn_id);
+}
+
+/*
+ * This function maps a YANG data XPath to one or more
+ * Backend Clients that should be contacted for various purposes.
+ */
+int mgmt_be_get_subscr_info_for_xpath(
+       const char *xpath, struct mgmt_be_client_subscr_info *subscr_info)
+{
+       int indx, match, max_match = 0, num_reg;
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_subscr_info
+               *reg_maps[array_size(mgmt_xpath_map)] = {0};
+       bool root_xp = false;
+
+       if (!subscr_info)
+               return -1;
+
+       num_reg = 0;
+       memset(subscr_info, 0, sizeof(*subscr_info));
+
+       if (strlen(xpath) <= 2 && xpath[0] == '/'
+               && (!xpath[1] || xpath[1] == '*')) {
+               root_xp = true;
+       }
+
+       MGMTD_BE_ADAPTER_DBG("XPATH: %s", xpath);
+       for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
+               /*
+                * For the root xpaths '/' and '/ *' every xpath map
+                * should match the given xpath.
+                */
+               if (!root_xp) {
+                       match = mgmt_be_eval_regexp_match(
+                               mgmt_xpath_map[indx].xpath_regexp, xpath);
+
+                       if (!match || match < max_match)
+                               continue;
+
+                       if (match > max_match) {
+                               num_reg = 0;
+                               max_match = match;
+                       }
+               }
+
+               reg_maps[num_reg] = &mgmt_xpath_map[indx].be_subscrs;
+               num_reg++;
+       }
+
+       for (indx = 0; indx < num_reg; indx++) {
+               FOREACH_MGMTD_BE_CLIENT_ID (id) {
+                       if (reg_maps[indx]->xpath_subscr[id].subscribed) {
+                               MGMTD_BE_ADAPTER_DBG(
+                                       "Client: %s",
+                                       mgmt_be_client_id2name(id));
+                               memcpy(&subscr_info->xpath_subscr[id],
+                                      &reg_maps[indx]->xpath_subscr[id],
+                                      sizeof(subscr_info->xpath_subscr[id]));
+                       }
+               }
+       }
+
+       return 0;
+}
+
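For illustration only, a caller that just wants the set of clients to notify for a given xpath could use the routine above as in the sketch below; the helper name is hypothetical and only symbols declared in this patch are used.

    /* Sketch: log the backend clients subscribed for config notification. */
    static void show_notify_subscribers(const char *xpath)
    {
            struct mgmt_be_client_subscr_info subscr;
            enum mgmt_be_client_id id;

            if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr) != 0)
                    return;

            FOREACH_MGMTD_BE_CLIENT_ID (id) {
                    if (subscr.xpath_subscr[id].notify_config)
                            zlog_debug("notify '%s' for '%s'",
                                       mgmt_be_client_id2name(id), xpath);
            }
    }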
+void mgmt_be_adapter_status_write(struct vty *vty)
+{
+       struct mgmt_be_client_adapter *adapter;
+
+       vty_out(vty, "MGMTD Backend Adapters\n");
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               vty_out(vty, "  Client: \t\t\t%s\n", adapter->name);
+               vty_out(vty, "    Conn-FD: \t\t\t%d\n", adapter->conn_fd);
+               vty_out(vty, "    Client-Id: \t\t\t%d\n", adapter->id);
+               vty_out(vty, "    Ref-Count: \t\t\t%u\n", adapter->refcount);
+               vty_out(vty, "    Msg-Recvd: \t\t\t%" PRIu64 "\n",
+                       adapter->mstate.nrxm);
+               vty_out(vty, "    Bytes-Recvd: \t\t%" PRIu64 "\n",
+                       adapter->mstate.nrxb);
+               vty_out(vty, "    Msg-Sent: \t\t\t%" PRIu64 "\n",
+                       adapter->mstate.ntxm);
+               vty_out(vty, "    Bytes-Sent: \t\t%" PRIu64 "\n",
+                       adapter->mstate.ntxb);
+       }
+       vty_out(vty, "  Total: %d\n",
+               (int)mgmt_be_adapters_count(&mgmt_be_adapters));
+}
+
+void mgmt_be_xpath_register_write(struct vty *vty)
+{
+       int indx;
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_adapter *adapter;
+
+       vty_out(vty, "MGMTD Backend XPath Registry\n");
+
+       for (indx = 0; indx < mgmt_num_xpath_maps; indx++) {
+               vty_out(vty, " - XPATH: '%s'\n",
+                       mgmt_xpath_map[indx].xpath_regexp);
+               FOREACH_MGMTD_BE_CLIENT_ID (id) {
+                       if (mgmt_xpath_map[indx]
+                                   .be_subscrs.xpath_subscr[id]
+                                   .subscribed) {
+                               vty_out(vty,
+                                       "   -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
+                                       mgmt_be_client_id2name(id),
+                                       mgmt_xpath_map[indx]
+                                                       .be_subscrs
+                                                       .xpath_subscr[id]
+                                                       .validate_config
+                                               ? "T"
+                                               : "F",
+                                       mgmt_xpath_map[indx]
+                                                       .be_subscrs
+                                                       .xpath_subscr[id]
+                                                       .notify_config
+                                               ? "T"
+                                               : "F",
+                                       mgmt_xpath_map[indx]
+                                                       .be_subscrs
+                                                       .xpath_subscr[id]
+                                                       .own_oper_data
+                                               ? "T"
+                                               : "F");
+                               adapter = mgmt_be_get_adapter_by_id(id);
+                               if (adapter) {
+                                       vty_out(vty, "     -- Adapter: %p\n",
+                                               adapter);
+                               }
+                       }
+               }
+       }
+
+       vty_out(vty, "Total XPath Registries: %u\n", mgmt_num_xpath_maps);
+}
+
+void mgmt_be_xpath_subscr_info_write(struct vty *vty, const char *xpath)
+{
+       struct mgmt_be_client_subscr_info subscr;
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_adapter *adapter;
+
+       if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr) != 0) {
+               vty_out(vty, "ERROR: Failed to get subscriber for '%s'\n",
+                       xpath);
+               return;
+       }
+
+       vty_out(vty, "XPath: '%s'\n", xpath);
+       FOREACH_MGMTD_BE_CLIENT_ID (id) {
+               if (subscr.xpath_subscr[id].subscribed) {
+                       vty_out(vty,
+                               "  -- Client: '%s' \t Validate:%s, Notify:%s, Own:%s\n",
+                               mgmt_be_client_id2name(id),
+                               subscr.xpath_subscr[id].validate_config ? "T"
+                                                                       : "F",
+                               subscr.xpath_subscr[id].notify_config ? "T"
+                                                                     : "F",
+                               subscr.xpath_subscr[id].own_oper_data ? "T"
+                                                                     : "F");
+                       adapter = mgmt_be_get_adapter_by_id(id);
+                       if (adapter)
+                               vty_out(vty, "    -- Adapter: %p\n", adapter);
+               }
+       }
+}
diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h
new file mode 100644 (file)
index 0000000..7f57233
--- /dev/null
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_BE_ADAPTER_H_
+#define _FRR_MGMTD_BE_ADAPTER_H_
+
+#include "mgmt_be_client.h"
+#include "mgmt_msg.h"
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_BE_CONN_INIT_DELAY_MSEC 50
+
+#define MGMTD_FIND_ADAPTER_BY_INDEX(adapter_index)                             \
+       mgmt_adaptr_ref[adapter_index]
+
+enum mgmt_be_req_type {
+       MGMTD_BE_REQ_NONE = 0,
+       MGMTD_BE_REQ_CFG_VALIDATE,
+       MGMTD_BE_REQ_CFG_APPLY,
+       MGMTD_BE_REQ_DATA_GET_ELEM,
+       MGMTD_BE_REQ_DATA_GET_NEXT
+};
+
+struct mgmt_be_cfgreq {
+       Mgmtd__YangCfgDataReq **cfgdata_reqs;
+       size_t num_reqs;
+};
+
+struct mgmt_be_datareq {
+       Mgmtd__YangGetDataReq **getdata_reqs;
+       size_t num_reqs;
+};
+
+PREDECL_LIST(mgmt_be_adapters);
+PREDECL_LIST(mgmt_txn_badapters);
+
+struct mgmt_be_client_adapter {
+       enum mgmt_be_client_id id;
+       int conn_fd;
+       union sockunion conn_su;
+       struct thread *conn_init_ev;
+       struct thread *conn_read_ev;
+       struct thread *conn_write_ev;
+       struct thread *conn_writes_on;
+       struct thread *proc_msg_ev;
+       uint32_t flags;
+       char name[MGMTD_CLIENT_NAME_MAX_LEN];
+       uint8_t num_xpath_reg;
+       char xpath_reg[MGMTD_MAX_NUM_XPATH_REG][MGMTD_MAX_XPATH_LEN];
+
+       /* IO streams for read and write */
+       struct mgmt_msg_state mstate;
+
+       int refcount;
+
+       /*
+        * List of config items that should be sent to the
+        * backend during re/connect. This is temporarily
+        * created and then freed up as soon as the initial
+        * config items have been applied to the backend.
+        */
+       struct nb_config_cbs cfg_chgs;
+
+       struct mgmt_be_adapters_item list_linkage;
+       struct mgmt_txn_badapters_item txn_list_linkage;
+};
+
+#define MGMTD_BE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
+#define MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED (1U << 1)
+
+DECLARE_LIST(mgmt_be_adapters, struct mgmt_be_client_adapter, list_linkage);
+DECLARE_LIST(mgmt_txn_badapters, struct mgmt_be_client_adapter,
+            txn_list_linkage);
+
+union mgmt_be_xpath_subscr_info {
+       uint8_t subscribed;
+       struct {
+               uint8_t validate_config : 1;
+               uint8_t notify_config : 1;
+               uint8_t own_oper_data : 1;
+       };
+};
+
+struct mgmt_be_client_subscr_info {
+       union mgmt_be_xpath_subscr_info xpath_subscr[MGMTD_BE_CLIENT_ID_MAX];
+};
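The anonymous union above lets callers test all three subscription flags at once through the 'subscribed' byte. A minimal sketch of that aliasing, purely illustrative:

    /* Sketch: setting any one flag makes the aggregate check true. */
    static bool subscr_union_demo(void)
    {
            union mgmt_be_xpath_subscr_info info = {0};

            info.validate_config = 1;    /* one bit of the anonymous struct */
            return info.subscribed != 0; /* aggregate byte view reads non-zero */
    }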
+
+/* Initialise backend adapter module. */
+extern int mgmt_be_adapter_init(struct thread_master *tm);
+
+/* Destroy the backend adapter module. */
+extern void mgmt_be_adapter_destroy(void);
+
+/* Acquire lock for backend adapter. */
+extern void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter);
+
+/* Remove lock from backend adapter. */
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter);
+
+/* Create backend adapter. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_create_adapter(int conn_fd, union sockunion *su);
+
+/* Fetch backend adapter given an adapter name. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name);
+
+/* Fetch backend adapter given a client ID. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id);
+
+/* Fetch backend adapter config. */
+extern int
+mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+                             struct mgmt_ds_ctx *ds_ctx,
+                             struct nb_config_cbs **cfg_chgs);
+
+/* Create a transaction. */
+extern int mgmt_be_create_txn(struct mgmt_be_client_adapter *adapter,
+                                 uint64_t txn_id);
+
+/* Destroy a transaction. */
+extern int mgmt_be_destroy_txn(struct mgmt_be_client_adapter *adapter,
+                                  uint64_t txn_id);
+
+/*
+ * Send config data create request to backend client.
+ *
+ * adapter
+ *    Backend adapter information.
+ *
+ * txn_id
+ *    Unique transaction identifier.
+ *
+ * batch_id
+ *    Request batch ID.
+ *
+ * cfg_req
+ *    Config data request.
+ *
+ * end_of_data
+ *    TRUE if this is the last batch of data, FALSE otherwise.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_be_send_cfg_data_create_req(
+       struct mgmt_be_client_adapter *adapter, uint64_t txn_id,
+       uint64_t batch_id, struct mgmt_be_cfgreq *cfg_req, bool end_of_data);
+
+/*
+ * Send config validate request to backend client.
+ *
+ * adapter
+ *    Backend adapter information.
+ *
+ * txn_id
+ *    Unique transaction identifier.
+ *
+ * batch_ids
+ *    List of request batch IDs.
+ *
+ * num_batch_ids
+ *    Number of batch ids.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int
+mgmt_be_send_cfg_validate_req(struct mgmt_be_client_adapter *adapter,
+                                uint64_t txn_id, uint64_t batch_ids[],
+                                size_t num_batch_ids);
+
+/*
+ * Send config apply request to backend client.
+ *
+ * adapter
+ *    Backend adapter information.
+ *
+ * txn_id
+ *    Unique transaction identifier.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int
+mgmt_be_send_cfg_apply_req(struct mgmt_be_client_adapter *adapter,
+                             uint64_t txn_id);
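Taken together, the calls declared above are driven by the transaction module roughly in the order sketched below. The helper is hypothetical and error handling is trimmed; in the real flow the transaction is torn down only after the backend's replies have been processed.

    /* Sketch: push a single batch of config to one backend and apply it. */
    static int push_one_batch(struct mgmt_be_client_adapter *adapter,
                              uint64_t txn_id, uint64_t batch_id,
                              struct mgmt_be_cfgreq *cfg_req)
    {
            if (mgmt_be_create_txn(adapter, txn_id) != 0)
                    return -1;

            /* Only one batch here, so mark it as the end of data. */
            if (mgmt_be_send_cfg_data_create_req(adapter, txn_id, batch_id,
                                                 cfg_req, true) != 0)
                    return -1;

            if (mgmt_be_send_cfg_apply_req(adapter, txn_id) != 0)
                    return -1;

            return mgmt_be_destroy_txn(adapter, txn_id);
    }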
+
+/*
+ * Dump backend adapter status to vty.
+ */
+extern void mgmt_be_adapter_status_write(struct vty *vty);
+
+/*
+ * Dump xpath registry for each backend client to vty.
+ */
+extern void mgmt_be_xpath_register_write(struct vty *vty);
+
+/*
+ * Maps a YANG data XPath to one or more
+ * backend clients that should be contacted for various purposes.
+ */
+extern int mgmt_be_get_subscr_info_for_xpath(
+       const char *xpath, struct mgmt_be_client_subscr_info *subscr_info);
+
+/*
+ * Dump backend client information for a given xpath to vty.
+ */
+extern void mgmt_be_xpath_subscr_info_write(struct vty *vty,
+                                              const char *xpath);
+
+#endif /* _FRR_MGMTD_BE_ADAPTER_H_ */
diff --git a/mgmtd/mgmt_be_server.c b/mgmtd/mgmt_be_server.c
new file mode 100644 (file)
index 0000000..6997fdc
--- /dev/null
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Server
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "network.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_BE_SRVR_DBG(fmt, ...)                                         \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_BE_SRVR_ERR(fmt, ...)                                         \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_BE_SRVR_DBG(fmt, ...)                                            \
+       do {                                                                   \
+               if (mgmt_debug_be)                                             \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);       \
+       } while (0)
+#define MGMTD_BE_SRVR_ERR(fmt, ...)                                         \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+static int mgmt_be_listen_fd = -1;
+static struct thread_master *mgmt_be_listen_tm;
+static struct thread *mgmt_be_listen_ev;
+static void mgmt_be_server_register_event(enum mgmt_be_event event);
+
+static void mgmt_be_conn_accept(struct thread *thread)
+{
+       int client_conn_fd;
+       union sockunion su;
+
+       if (mgmt_be_listen_fd < 0)
+               return;
+
+       /* Keep listening on the server socket for further connections. */
+       mgmt_be_server_register_event(MGMTD_BE_SERVER);
+
+       memset(&su, 0, sizeof(union sockunion));
+
+       /* Accept the incoming connection on the unix-domain socket. */
+       client_conn_fd = sockunion_accept(mgmt_be_listen_fd, &su);
+       if (client_conn_fd < 0) {
+               MGMTD_BE_SRVR_ERR(
+                       "Failed to accept MGMTD Backend client connection: %s",
+                       safe_strerror(errno));
+               return;
+       }
+       set_nonblocking(client_conn_fd);
+       set_cloexec(client_conn_fd);
+
+       MGMTD_BE_SRVR_DBG("Got a new MGMTD Backend connection");
+
+       mgmt_be_create_adapter(client_conn_fd, &su);
+}
+
+static void mgmt_be_server_register_event(enum mgmt_be_event event)
+{
+       if (event == MGMTD_BE_SERVER) {
+               thread_add_read(mgmt_be_listen_tm, mgmt_be_conn_accept,
+                               NULL, mgmt_be_listen_fd,
+                               &mgmt_be_listen_ev);
+               assert(mgmt_be_listen_ev);
+       } else {
+               assert(!"mgmt_be_server_register_event() called incorrectly");
+       }
+}
+
+static void mgmt_be_server_start(const char *hostname)
+{
+       int ret;
+       int sock;
+       struct sockaddr_un addr;
+       mode_t old_mask;
+
+       /* Set umask */
+       old_mask = umask(0077);
+
+       sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+       if (sock < 0) {
+               MGMTD_BE_SRVR_ERR("Failed to create server socket: %s",
+                                    safe_strerror(errno));
+               goto mgmt_be_server_start_failed;
+       }
+
+       addr.sun_family = AF_UNIX;
+       strlcpy(addr.sun_path, MGMTD_BE_SERVER_PATH, sizeof(addr.sun_path));
+       unlink(addr.sun_path);
+       ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+       if (ret < 0) {
+               MGMTD_BE_SRVR_ERR(
+                       "Failed to bind server socket to '%s'. Err: %s",
+                       addr.sun_path, safe_strerror(errno));
+               goto mgmt_be_server_start_failed;
+       }
+
+       ret = listen(sock, MGMTD_BE_MAX_CONN);
+       if (ret < 0) {
+               MGMTD_BE_SRVR_ERR("Failed to listen on server socket: %s",
+                                    safe_strerror(errno));
+               goto mgmt_be_server_start_failed;
+       }
+
+       /* Restore umask */
+       umask(old_mask);
+
+       mgmt_be_listen_fd = sock;
+       mgmt_be_server_register_event(MGMTD_BE_SERVER);
+
+       MGMTD_BE_SRVR_DBG("Started MGMTD Backend Server!");
+       return;
+
+mgmt_be_server_start_failed:
+       if (sock >= 0)
+               close(sock);
+
+       mgmt_be_listen_fd = -1;
+       exit(-1);
+}
+
+int mgmt_be_server_init(struct thread_master *master)
+{
+       if (mgmt_be_listen_tm) {
+               MGMTD_BE_SRVR_DBG("MGMTD Backend Server already running!");
+               return 0;
+       }
+
+       mgmt_be_listen_tm = master;
+
+       mgmt_be_server_start("localhost");
+
+       return 0;
+}
+
+void mgmt_be_server_destroy(void)
+{
+       if (mgmt_be_listen_tm) {
+               MGMTD_BE_SRVR_DBG("Closing MGMTD Backend Server!");
+
+               if (mgmt_be_listen_ev) {
+                       THREAD_OFF(mgmt_be_listen_ev);
+                       mgmt_be_listen_ev = NULL;
+               }
+
+               if (mgmt_be_listen_fd >= 0) {
+                       close(mgmt_be_listen_fd);
+                       mgmt_be_listen_fd = -1;
+               }
+
+               mgmt_be_listen_tm = NULL;
+       }
+}
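For context, backend clients reach this server over the MGMTD_BE_SERVER_PATH unix socket created above. A hedged sketch of the client side of that connection (the real client lives in lib/mgmt_be_client.c; the helper name here is illustrative):

    /* Sketch: connect a backend client to the MGMTD backend server socket. */
    static int be_client_connect(void)
    {
            struct sockaddr_un addr = {.sun_family = AF_UNIX};
            int sock;

            sock = socket(AF_UNIX, SOCK_STREAM, 0);
            if (sock < 0)
                    return -1;

            strlcpy(addr.sun_path, MGMTD_BE_SERVER_PATH, sizeof(addr.sun_path));
            if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(sock);
                    return -1;
            }

            set_nonblocking(sock);
            return sock;
    }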
diff --git a/mgmtd/mgmt_be_server.h b/mgmtd/mgmt_be_server.h
new file mode 100644 (file)
index 0000000..5ee57fd
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Server
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar
+ */
+
+#ifndef _FRR_MGMTD_BE_SERVER_H_
+#define _FRR_MGMTD_BE_SERVER_H_
+
+#define MGMTD_BE_MAX_CONN 32
+
+/* Initialise backend server */
+extern int mgmt_be_server_init(struct thread_master *master);
+
+/* Destroy backend server */
+extern void mgmt_be_server_destroy(void);
+
+#endif /* _FRR_MGMTD_BE_SERVER_H_ */
diff --git a/mgmtd/mgmt_defines.h b/mgmtd/mgmt_defines.h
new file mode 100644 (file)
index 0000000..ee2f376
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD public defines.
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DEFINES_H
+#define _FRR_MGMTD_DEFINES_H
+
+#include "yang.h"
+
+#define MGMTD_CLIENT_NAME_MAX_LEN 32
+
+#define MGMTD_MAX_XPATH_LEN XPATH_MAXLEN
+
+#define MGMTD_MAX_YANG_VALUE_LEN YANG_VALUE_MAXLEN
+
+#define MGMTD_MAX_NUM_XPATH_REG 128
+
+#define MGMTD_MAX_NUM_DATA_REQ_IN_BATCH 32
+#define MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH 8
+
+enum mgmt_result {
+       MGMTD_SUCCESS = 0,
+       MGMTD_INVALID_PARAM,
+       MGMTD_INTERNAL_ERROR,
+       MGMTD_NO_CFG_CHANGES,
+       MGMTD_DS_LOCK_FAILED,
+       MGMTD_DS_UNLOCK_FAILED,
+       MGMTD_UNKNOWN_FAILURE
+};
+
+enum mgmt_fe_event {
+       MGMTD_FE_SERVER = 1,
+       MGMTD_FE_CONN_READ,
+       MGMTD_FE_CONN_WRITE,
+       MGMTD_FE_CONN_WRITES_ON,
+       MGMTD_FE_PROC_MSG
+};
+
+enum mgmt_be_event {
+       MGMTD_BE_SERVER = 1,
+       MGMTD_BE_CONN_INIT,
+       MGMTD_BE_CONN_READ,
+       MGMTD_BE_CONN_WRITE,
+       MGMTD_BE_CONN_WRITES_ON,
+       MGMTD_BE_PROC_MSG,
+       MGMTD_BE_SCHED_CFG_PREPARE,
+       MGMTD_BE_RESCHED_CFG_PREPARE,
+       MGMTD_BE_SCHED_CFG_APPLY,
+       MGMTD_BE_RESCHED_CFG_APPLY,
+};
+
+#define MGMTD_TXN_ID_NONE 0
+
+#define MGMTD_TXN_BATCH_ID_NONE 0
+
+#endif /* _FRR_MGMTD_DEFINES_H */
diff --git a/mgmtd/mgmt_ds.c b/mgmtd/mgmt_ds.c
new file mode 100644 (file)
index 0000000..1724afb
--- /dev/null
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "libyang/libyang.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_DS_DBG(fmt, ...)                                                 \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_DS_ERR(fmt, ...)                                                 \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_DS_DBG(fmt, ...)                                                 \
+       do {                                                                   \
+               if (mgmt_debug_ds)                                             \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);       \
+       } while (0)
+#define MGMTD_DS_ERR(fmt, ...)                                                 \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+struct mgmt_ds_ctx {
+       Mgmtd__DatastoreId ds_id;
+       int lock; /* 0 unlocked, >0 read locked, <0 write locked */
+
+       bool config_ds;
+
+       union {
+               struct nb_config *cfg_root;
+               struct lyd_node *dnode_root;
+       } root;
+};
+
+const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1] = {
+       MGMTD_DS_NAME_NONE,     /* MGMTD_DS_NONE */
+       MGMTD_DS_NAME_RUNNING,     /* MGMTD_DS_RUNNING */
+       MGMTD_DS_NAME_CANDIDATE,   /* MGMTD_DS_CANDIDATE */
+       MGMTD_DS_NAME_OPERATIONAL, /* MGMTD_DS_OPERATIONAL */
+       "Unknown/Invalid",       /* MGMTD_DS_ID_MAX */
+};
+
+static struct mgmt_master *mgmt_ds_mm;
+static struct mgmt_ds_ctx running, candidate, oper;
+
+/* Dump the data tree of the specified format in the file pointed by the path */
+static int mgmt_ds_dump_in_memory(struct mgmt_ds_ctx *ds_ctx,
+                                 const char *base_xpath, LYD_FORMAT format,
+                                 struct ly_out *out)
+{
+       struct lyd_node *root;
+       uint32_t options = 0;
+
+       if (base_xpath[0] == '\0')
+               root = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+                                         : ds_ctx->root.dnode_root;
+       else
+               root = yang_dnode_get(ds_ctx->config_ds
+                                             ? ds_ctx->root.cfg_root->dnode
+                                             : ds_ctx->root.dnode_root,
+                                     base_xpath);
+       if (!root)
+               return -1;
+
+       options = ds_ctx->config_ds ? LYD_PRINT_WD_TRIM :
+               LYD_PRINT_WD_EXPLICIT;
+
+       if (base_xpath[0] == '\0')
+               lyd_print_all(out, root, format, options);
+       else
+               lyd_print_tree(out, root, format, options);
+
+       return 0;
+}
+
+static int mgmt_ds_replace_dst_with_src_ds(struct mgmt_ds_ctx *src,
+                                          struct mgmt_ds_ctx *dst)
+{
+       struct lyd_node *dst_dnode, *src_dnode;
+       struct ly_out *out;
+
+       if (!src || !dst)
+               return -1;
+       MGMTD_DS_DBG("Replacing %d with %d", dst->ds_id, src->ds_id);
+
+       src_dnode = src->config_ds ? src->root.cfg_root->dnode
+                                  : src->root.dnode_root;
+       dst_dnode = dst->config_ds ? dst->root.cfg_root->dnode
+                                  : dst->root.dnode_root;
+
+       if (dst_dnode)
+               yang_dnode_free(dst_dnode);
+
+       /* Not using nb_config_replace as the oper ds does not contain nb_config
+        */
+       dst_dnode = yang_dnode_dup(src_dnode);
+       if (dst->config_ds)
+               dst->root.cfg_root->dnode = dst_dnode;
+       else
+               dst->root.dnode_root = dst_dnode;
+
+       if (src->ds_id == MGMTD_DS_CANDIDATE) {
+               /*
+                * Drop the changes in scratch-buffer.
+                */
+               MGMTD_DS_DBG("Emptying Candidate Scratch buffer!");
+               nb_config_diff_del_changes(&src->root.cfg_root->cfg_chgs);
+       }
+
+       if (dst->ds_id == MGMTD_DS_RUNNING) {
+               if (ly_out_new_filepath(MGMTD_STARTUP_DS_FILE_PATH, &out)
+                   == LY_SUCCESS) {
+                       mgmt_ds_dump_in_memory(dst, "", LYD_JSON, out);
+                       ly_out_free(out, NULL, 0);
+               }
+       }
+
+       /* TODO: Update the versions if nb_config present */
+
+       return 0;
+}
+
+static int mgmt_ds_merge_src_with_dst_ds(struct mgmt_ds_ctx *src,
+                                        struct mgmt_ds_ctx *dst)
+{
+       int ret;
+       struct lyd_node **dst_dnode, *src_dnode;
+       struct ly_out *out;
+
+       if (!src || !dst)
+               return -1;
+
+       MGMTD_DS_DBG("Merging DS %d with %d", dst->ds_id, src->ds_id);
+
+       src_dnode = src->config_ds ? src->root.cfg_root->dnode
+                                  : src->root.dnode_root;
+       dst_dnode = dst->config_ds ? &dst->root.cfg_root->dnode
+                                  : &dst->root.dnode_root;
+       ret = lyd_merge_siblings(dst_dnode, src_dnode, 0);
+       if (ret != 0) {
+               MGMTD_DS_ERR("lyd_merge() failed with err %d", ret);
+               return ret;
+       }
+
+       if (src->ds_id == MGMTD_DS_CANDIDATE) {
+               /*
+                * Drop the changes in scratch-buffer.
+                */
+               MGMTD_DS_DBG("Emptying Candidate Scratch buffer!");
+               nb_config_diff_del_changes(&src->root.cfg_root->cfg_chgs);
+       }
+
+       if (dst->ds_id == MGMTD_DS_RUNNING) {
+               if (ly_out_new_filepath(MGMTD_STARTUP_DS_FILE_PATH, &out)
+                   == LY_SUCCESS) {
+                       mgmt_ds_dump_in_memory(dst, "", LYD_JSON, out);
+                       ly_out_free(out, NULL, 0);
+               }
+       }
+
+       return 0;
+}
+
+static int mgmt_ds_load_cfg_from_file(const char *filepath,
+                                     struct lyd_node **dnode)
+{
+       LY_ERR ret;
+
+       *dnode = NULL;
+       ret = lyd_parse_data_path(ly_native_ctx, filepath, LYD_JSON,
+                                 LYD_PARSE_STRICT, 0, dnode);
+
+       if (ret != LY_SUCCESS) {
+               if (*dnode)
+                       yang_dnode_free(*dnode);
+               return -1;
+       }
+
+       return 0;
+}
+
+void mgmt_ds_reset_candidate(void)
+{
+       struct lyd_node *dnode = mm->candidate_ds->root.cfg_root->dnode;
+       if (dnode)
+               yang_dnode_free(dnode);
+
+       dnode = yang_dnode_new(ly_native_ctx, true);
+       mm->candidate_ds->root.cfg_root->dnode = dnode;
+}
+
+
+int mgmt_ds_init(struct mgmt_master *mm)
+{
+       struct lyd_node *root;
+
+       if (mgmt_ds_mm || mm->running_ds || mm->candidate_ds || mm->oper_ds)
+               assert(!"MGMTD: Call ds_init only once!");
+
+       /* Use Running DS from NB module??? */
+       if (!running_config)
+               assert(!"MGMTD: Call ds_init after frr_init only!");
+
+       if (mgmt_ds_load_cfg_from_file(MGMTD_STARTUP_DS_FILE_PATH, &root)
+           == 0) {
+               nb_config_free(running_config);
+               running_config = nb_config_new(root);
+       }
+
+       running.root.cfg_root = running_config;
+       running.config_ds = true;
+       running.ds_id = MGMTD_DS_RUNNING;
+
+       candidate.root.cfg_root = nb_config_dup(running.root.cfg_root);
+       candidate.config_ds = true;
+       candidate.ds_id = MGMTD_DS_CANDIDATE;
+
+       /*
+        * Redirect lib/vty candidate-config datastore to the global candidate
+        * config Ds on the MGMTD process.
+        */
+       vty_mgmt_candidate_config = candidate.root.cfg_root;
+
+       oper.root.dnode_root = yang_dnode_new(ly_native_ctx, true);
+       oper.config_ds = false;
+       oper.ds_id = MGMTD_DS_OPERATIONAL;
+
+       mm->running_ds = &running;
+       mm->candidate_ds = &candidate;
+       mm->oper_ds = &oper;
+       mgmt_ds_mm = mm;
+
+       return 0;
+}
+
+void mgmt_ds_destroy(void)
+{
+       /*
+        * TODO: Free the datastores.
+        */
+}
+
+struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+                                         Mgmtd__DatastoreId ds_id)
+{
+       switch (ds_id) {
+       case MGMTD_DS_CANDIDATE:
+               return (mm->candidate_ds);
+       case MGMTD_DS_RUNNING:
+               return (mm->running_ds);
+       case MGMTD_DS_OPERATIONAL:
+               return (mm->oper_ds);
+       case MGMTD_DS_NONE:
+       case MGMTD__DATASTORE_ID__STARTUP_DS:
+       case _MGMTD__DATASTORE_ID_IS_INT_SIZE:
+               return 0;
+       }
+
+       return 0;
+}
+
+bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx)
+{
+       if (!ds_ctx)
+               return false;
+
+       return ds_ctx->config_ds;
+}
+
+int mgmt_ds_read_lock(struct mgmt_ds_ctx *ds_ctx)
+{
+       if (!ds_ctx)
+               return EINVAL;
+       if (ds_ctx->lock < 0)
+               return EBUSY;
+       ++ds_ctx->lock;
+       return 0;
+}
+
+int mgmt_ds_write_lock(struct mgmt_ds_ctx *ds_ctx)
+{
+       if (!ds_ctx)
+               return EINVAL;
+       if (ds_ctx->lock != 0)
+               return EBUSY;
+       ds_ctx->lock = -1;
+       return 0;
+}
+
+int mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx)
+{
+       if (!ds_ctx)
+               return EINVAL;
+       if (ds_ctx->lock > 0)
+               --ds_ctx->lock;
+       else if (ds_ctx->lock < 0) {
+               assert(ds_ctx->lock == -1);
+               ds_ctx->lock = 0;
+       } else {
+               assert(ds_ctx->lock != 0);
+               return EINVAL;
+       }
+       return 0;
+}
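The signed lock counter supports one writer or multiple readers. A small usage sketch of the intended discipline (hypothetical helper, illustrative only):

    /* Sketch: take the write lock around an edit, then release it. */
    static int edit_locked(struct mgmt_ds_ctx *ds_ctx)
    {
            if (mgmt_ds_write_lock(ds_ctx) != 0)
                    return -1; /* EBUSY while readers or a writer hold the DS */

            /* ... mutate the datastore tree here ... */

            return mgmt_ds_unlock(ds_ctx);
    }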
+
+int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+                    struct mgmt_ds_ctx *dst_ds_ctx, bool updt_cmt_rec)
+{
+       if (mgmt_ds_replace_dst_with_src_ds(src_ds_ctx, dst_ds_ctx) != 0)
+               return -1;
+
+       if (updt_cmt_rec && dst_ds_ctx->ds_id == MGMTD_DS_RUNNING)
+               mgmt_history_new_record(dst_ds_ctx);
+
+       return 0;
+}
+
+int mgmt_ds_dump_ds_to_file(char *file_name, struct mgmt_ds_ctx *ds_ctx)
+{
+       struct ly_out *out;
+       int ret = 0;
+
+       if (ly_out_new_filepath(file_name, &out) == LY_SUCCESS) {
+               ret = mgmt_ds_dump_in_memory(ds_ctx, "", LYD_JSON, out);
+               ly_out_free(out, NULL, 0);
+       }
+
+       return ret;
+}
+
+struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx)
+{
+       if (!ds_ctx)
+               return NULL;
+
+       return ds_ctx->config_ds ? ds_ctx->root.cfg_root : NULL;
+}
+
+static int mgmt_walk_ds_nodes(
+       struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+       struct lyd_node *base_dnode,
+       void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx, char *xpath,
+                                    struct lyd_node *node,
+                                    struct nb_node *nb_node, void *ctx),
+       void *ctx, char *xpaths[], int *num_nodes, bool childs_as_well,
+       bool alloc_xp_copy)
+{
+       uint32_t indx;
+       char *xpath, *xpath_buf, *iter_xp;
+       int ret, num_left = 0, num_found = 0;
+       struct lyd_node *dnode;
+       struct nb_node *nbnode;
+       bool alloc_xp = false;
+
+       if (xpaths)
+               assert(num_nodes);
+
+       if (num_nodes && !*num_nodes)
+               return 0;
+
+       if (num_nodes) {
+               num_left = *num_nodes;
+               MGMTD_DS_DBG(" -- START: num_left:%d", num_left);
+               *num_nodes = 0;
+       }
+
+       MGMTD_DS_DBG(" -- START: Base: %s", base_xpath);
+
+       if (!base_dnode)
+               base_dnode = yang_dnode_get(
+                       ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+                                          : ds_ctx->root.dnode_root,
+                       base_xpath);
+       if (!base_dnode)
+               return -1;
+
+       if (mgmt_ds_node_iter_fn) {
+               /*
+                * In case the caller is interested in getting a copy
+                * of the xpath for themselves (by setting
+                * 'alloc_xp_copy' to 'true') we make a copy for the
+                * caller and pass it. Else we pass the original xpath
+                * buffer.
+                *
+                * NOTE: In that case the caller is responsible for
+                * freeing the copy later.
+                */
+               iter_xp = alloc_xp_copy ? strdup(base_xpath) : base_xpath;
+
+               nbnode = (struct nb_node *)base_dnode->schema->priv;
+               (*mgmt_ds_node_iter_fn)(ds_ctx, iter_xp, base_dnode, nbnode,
+                                       ctx);
+       }
+
+       if (num_nodes) {
+               (*num_nodes)++;
+               num_left--;
+       }
+
+       /*
+        * If the base_xpath points to a leaf node, or we don't need to
+        * visit any children we can skip the tree walk.
+        */
+       if (!childs_as_well || base_dnode->schema->nodetype & LYD_NODE_TERM)
+               return 0;
+
+       indx = 0;
+       LY_LIST_FOR (lyd_child(base_dnode), dnode) {
+               assert(dnode->schema && dnode->schema->priv);
+
+               xpath = NULL;
+               if (xpaths) {
+                       if (!xpaths[*num_nodes]) {
+                               alloc_xp = true;
+                               xpaths[*num_nodes] =
+                                       (char *)calloc(1, MGMTD_MAX_XPATH_LEN);
+                       }
+                       xpath = lyd_path(dnode, LYD_PATH_STD,
+                                        xpaths[*num_nodes],
+                                        MGMTD_MAX_XPATH_LEN);
+               } else {
+                       alloc_xp = true;
+                       xpath_buf = (char *)calloc(1, MGMTD_MAX_XPATH_LEN);
+                       (void) lyd_path(dnode, LYD_PATH_STD, xpath_buf,
+                                        MGMTD_MAX_XPATH_LEN);
+                       xpath = xpath_buf;
+               }
+
+               assert(xpath);
+               MGMTD_DS_DBG(" -- XPATH: %s", xpath);
+
+               if (num_nodes)
+                       num_found = num_left;
+
+               ret = mgmt_walk_ds_nodes(ds_ctx, xpath, dnode,
+                                        mgmt_ds_node_iter_fn, ctx,
+                                        xpaths ? &xpaths[*num_nodes] : NULL,
+                                        num_nodes ? &num_found : NULL,
+                                        childs_as_well, alloc_xp_copy);
+
+               if (num_nodes) {
+                       num_left -= num_found;
+                       (*num_nodes) += num_found;
+               }
+
+               if (alloc_xp)
+                       free(xpath);
+
+               if (ret != 0)
+                       break;
+
+               indx++;
+       }
+
+
+       if (num_nodes) {
+               MGMTD_DS_DBG(" -- END: *num_nodes:%d, num_left:%d", *num_nodes,
+                            num_left);
+       }
+
+       return 0;
+}
+
+int mgmt_ds_lookup_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath,
+                             char *dxpaths[], int *num_nodes,
+                             bool get_childs_as_well, bool alloc_xp_copy)
+{
+       char base_xpath[MGMTD_MAX_XPATH_LEN];
+
+       if (!ds_ctx || !num_nodes)
+               return -1;
+
+       if (xpath[0] == '.' && xpath[1] == '/')
+               xpath += 2;
+
+       strlcpy(base_xpath, xpath, sizeof(base_xpath));
+       mgmt_remove_trailing_separator(base_xpath, '/');
+
+       return (mgmt_walk_ds_nodes(ds_ctx, base_xpath, NULL, NULL, NULL,
+                                  dxpaths, num_nodes, get_childs_as_well,
+                                  alloc_xp_copy));
+}
+
+struct lyd_node *mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+                                                const char *xpath)
+{
+       if (!ds_ctx)
+               return NULL;
+
+       return yang_dnode_get(ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+                                                : ds_ctx->root.dnode_root,
+                             xpath);
+}
+
+int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath)
+{
+       struct nb_node *nb_node;
+       struct lyd_node *dnode, *dep_dnode;
+       char dep_xpath[XPATH_MAXLEN];
+
+       if (!ds_ctx)
+               return -1;
+
+       nb_node = nb_node_find(xpath);
+
+       dnode = yang_dnode_get(ds_ctx->config_ds
+                                      ? ds_ctx->root.cfg_root->dnode
+                                      : ds_ctx->root.dnode_root,
+                              xpath);
+
+       if (!dnode)
+               /*
+                * Return a special error code so the caller can choose
+                * whether to ignore it or not.
+                */
+               return NB_ERR_NOT_FOUND;
+       /* destroy dependant */
+       if (nb_node->dep_cbs.get_dependant_xpath) {
+               nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);
+
+               dep_dnode = yang_dnode_get(
+                       ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+                                          : ds_ctx->root.dnode_root,
+                       dep_xpath);
+               if (dep_dnode)
+                       lyd_free_tree(dep_dnode);
+       }
+       lyd_free_tree(dnode);
+
+       return 0;
+}
+
+int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *dst,
+                                 const char *file_path, bool merge)
+{
+       struct lyd_node *iter;
+       struct mgmt_ds_ctx parsed;
+
+       if (!dst)
+               return -1;
+
+       if (mgmt_ds_load_cfg_from_file(file_path, &iter) != 0) {
+               MGMTD_DS_ERR("Failed to load config from the file %s",
+                            file_path);
+               return -1;
+       }
+
+       parsed.root.cfg_root = nb_config_new(iter);
+       parsed.config_ds = true;
+       parsed.ds_id = dst->ds_id;
+
+       if (merge)
+               mgmt_ds_merge_src_with_dst_ds(&parsed, dst);
+       else
+               mgmt_ds_replace_dst_with_src_ds(&parsed, dst);
+
+       nb_config_free(parsed.root.cfg_root);
+
+       return 0;
+}
+
+int mgmt_ds_iter_data(struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+                     void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx,
+                                                  char *xpath,
+                                                  struct lyd_node *node,
+                                                  struct nb_node *nb_node,
+                                                  void *ctx),
+                     void *ctx, bool alloc_xp_copy)
+{
+       int ret = 0;
+       char xpath[MGMTD_MAX_XPATH_LEN];
+       struct lyd_node *base_dnode = NULL;
+       struct lyd_node *node;
+
+       if (!ds_ctx)
+               return -1;
+
+       mgmt_remove_trailing_separator(base_xpath, '/');
+
+       strlcpy(xpath, base_xpath, sizeof(xpath));
+
+       MGMTD_DS_DBG(" -- START DS walk for DSid: %d", ds_ctx->ds_id);
+
+       /* If the base_xpath is empty then crawl the top-level siblings */
+       if (xpath[0] == '\0') {
+               base_dnode = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+                                               : ds_ctx->root.dnode_root;
+
+               /* get first top-level sibling */
+               while (base_dnode->parent)
+                       base_dnode = lyd_parent(base_dnode);
+
+               while (base_dnode->prev->next)
+                       base_dnode = base_dnode->prev;
+
+               LY_LIST_FOR (base_dnode, node) {
+                       ret = mgmt_walk_ds_nodes(
+                               ds_ctx, xpath, node, mgmt_ds_node_iter_fn,
+                               ctx, NULL, NULL, true, alloc_xp_copy);
+               }
+       } else
+               ret = mgmt_walk_ds_nodes(ds_ctx, xpath, base_dnode,
+                                        mgmt_ds_node_iter_fn, ctx, NULL, NULL,
+                                        true, alloc_xp_copy);
+
+       return ret;
+}
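A caller-side sketch of the iterator above: the callback is invoked once per visited node together with its xpath (a private copy if alloc_xp_copy is set). The helper and callback names are hypothetical.

    /* Sketch: count the nodes under a base xpath with mgmt_ds_iter_data(). */
    static void count_cb(struct mgmt_ds_ctx *ds_ctx, char *xpath,
                         struct lyd_node *node, struct nb_node *nb_node,
                         void *ctx)
    {
            (*(int *)ctx)++;
    }

    static int count_nodes(struct mgmt_ds_ctx *ds_ctx, char *base_xpath)
    {
            int count = 0;

            mgmt_ds_iter_data(ds_ctx, base_xpath, count_cb, &count,
                              false /* no private xpath copies needed */);
            return count;
    }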
+
+void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+                      const char *xpath, FILE *f, LYD_FORMAT format)
+{
+       struct ly_out *out;
+       char *str;
+       char base_xpath[MGMTD_MAX_XPATH_LEN] = {0};
+
+       if (!ds_ctx) {
+               vty_out(vty, "    >>>>> Datastore Not Initialized!\n");
+               return;
+       }
+
+       if (xpath) {
+               strlcpy(base_xpath, xpath, MGMTD_MAX_XPATH_LEN);
+               mgmt_remove_trailing_separator(base_xpath, '/');
+       }
+
+       if (f)
+               ly_out_new_file(f, &out);
+       else
+               ly_out_new_memory(&str, 0, &out);
+
+       mgmt_ds_dump_in_memory(ds_ctx, base_xpath, format, out);
+
+       if (!f)
+               vty_out(vty, "%s\n", str);
+
+       ly_out_free(out, NULL, 0);
+}
+
+void mgmt_ds_status_write_one(struct vty *vty, struct mgmt_ds_ctx *ds_ctx)
+{
+       if (!ds_ctx) {
+               vty_out(vty, "    >>>>> Datastore Not Initialized!\n");
+               return;
+       }
+
+       vty_out(vty, "  DS: %s\n", mgmt_ds_id2name(ds_ctx->ds_id));
+       vty_out(vty, "    DS-Hndl: \t\t\t%p\n", ds_ctx);
+       vty_out(vty, "    Config: \t\t\t%s\n",
+               ds_ctx->config_ds ? "True" : "False");
+}
+
+void mgmt_ds_status_write(struct vty *vty)
+{
+       vty_out(vty, "MGMTD Datastores\n");
+
+       mgmt_ds_status_write_one(vty, mgmt_ds_mm->running_ds);
+
+       mgmt_ds_status_write_one(vty, mgmt_ds_mm->candidate_ds);
+
+       mgmt_ds_status_write_one(vty, mgmt_ds_mm->oper_ds);
+}
diff --git a/mgmtd/mgmt_ds.h b/mgmtd/mgmt_ds.h
new file mode 100644 (file)
index 0000000..89a2ea9
--- /dev/null
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DS_H_
+#define _FRR_MGMTD_DS_H_
+
+#include "mgmt_fe_client.h"
+#include "northbound.h"
+
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#define MGMTD_MAX_NUM_DSNODES_PER_BATCH 128
+
+#define MGMTD_DS_NAME_MAX_LEN 32
+#define MGMTD_DS_NAME_NONE "none"
+#define MGMTD_DS_NAME_RUNNING "running"
+#define MGMTD_DS_NAME_CANDIDATE "candidate"
+#define MGMTD_DS_NAME_OPERATIONAL "operational"
+
+#define MGMTD_STARTUP_DS_FILE_PATH DAEMON_DB_DIR "/frr_startup.json"
+
+#define FOREACH_MGMTD_DS_ID(id)                                                \
+       for ((id) = MGMTD_DS_NONE; (id) < MGMTD_DS_MAX_ID; (id)++)
+
+#define MGMTD_MAX_COMMIT_LIST 10
+#define MGMTD_MD5_HASH_LEN 16
+#define MGMTD_MD5_HASH_STR_HEX_LEN 33
+
+#define MGMTD_COMMIT_FILE_PATH DAEMON_DB_DIR "/commit-%s.json"
+#define MGMTD_COMMIT_INDEX_FILE_NAME DAEMON_DB_DIR "/commit-index.dat"
+#define MGMTD_COMMIT_TIME_STR_LEN 100
+
+extern struct nb_config *running_config;
+
+struct mgmt_ds_ctx;
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1];
+
+/*
+ * Convert datastore ID to datastore name.
+ *
+ * id
+ *    Datastore ID.
+ *
+ * Returns:
+ *    Datastore name.
+ */
+static inline const char *mgmt_ds_id2name(Mgmtd__DatastoreId id)
+{
+       if (id > MGMTD_DS_MAX_ID)
+               id = MGMTD_DS_MAX_ID;
+       return mgmt_ds_names[id];
+}
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * id
+ *    Datastore name.
+ *
+ * Returns:
+ *    Datastore ID.
+ */
+static inline Mgmtd__DatastoreId mgmt_ds_name2id(const char *name)
+{
+       Mgmtd__DatastoreId id;
+
+       FOREACH_MGMTD_DS_ID (id) {
+               if (!strncmp(mgmt_ds_names[id], name, MGMTD_DS_NAME_MAX_LEN))
+                       return id;
+       }
+
+       return MGMTD_DS_NONE;
+}
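A tiny, illustrative round-trip through the two helpers above (the wrapper is hypothetical):

    /* Sketch: map a datastore name to its ID and back. */
    static bool ds_name_maps_cleanly(const char *name)
    {
            Mgmtd__DatastoreId id = mgmt_ds_name2id(name);

            return id != MGMTD_DS_NONE &&
                   strcmp(mgmt_ds_id2name(id), name) == 0;
    }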
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * Similar to mgmt_ds_name2id() above.
+ */
+static inline Mgmtd__DatastoreId mgmt_get_ds_id_by_name(const char *ds_name)
+{
+       if (!strncmp(ds_name, "candidate", sizeof("candidate")))
+               return MGMTD_DS_CANDIDATE;
+       else if (!strncmp(ds_name, "running", sizeof("running")))
+               return MGMTD_DS_RUNNING;
+       else if (!strncmp(ds_name, "operational", sizeof("operational")))
+               return MGMTD_DS_OPERATIONAL;
+       return MGMTD_DS_NONE;
+}
+
+/*
+ * Appends a trailing wildcard ('/' followed by '*') to a given xpath.
+ *
+ * xpath
+ *     YANG xpath.
+ *
+ * xpath_len
+ *     xpath length.
+ */
+static inline void mgmt_xpath_append_trail_wildcard(char *xpath,
+                                                   size_t *xpath_len)
+{
+       if (!xpath || !xpath_len)
+               return;
+
+       if (!*xpath_len)
+               *xpath_len = strlen(xpath);
+
+       if (*xpath_len > 2 && *xpath_len < MGMTD_MAX_XPATH_LEN - 2) {
+               if (xpath[*xpath_len - 1] == '/') {
+                       xpath[*xpath_len] = '*';
+                       xpath[*xpath_len + 1] = 0;
+                       (*xpath_len)++;
+               } else if (xpath[*xpath_len - 1] != '*') {
+                       xpath[*xpath_len] = '/';
+                       xpath[*xpath_len + 1] = '*';
+                       xpath[*xpath_len + 2] = 0;
+                       (*xpath_len) += 2;
+               }
+       }
+}
+
+/*
+ * Removes a trailing wildcard '/' '*' from the given xpath.
+ *
+ * xpath
+ *     YANG xpath.
+ *
+ * xpath_len
+ *     xpath length; if 0, it is computed with strlen().
+ */
+static inline void mgmt_xpath_remove_trail_wildcard(char *xpath,
+                                                   size_t *xpath_len)
+{
+       if (!xpath || !xpath_len)
+               return;
+
+       if (!*xpath_len)
+               *xpath_len = strlen(xpath);
+
+       if (*xpath_len > 2 && xpath[*xpath_len - 2] == '/'
+           && xpath[*xpath_len - 1] == '*') {
+               xpath[*xpath_len - 2] = 0;
+               (*xpath_len) -= 2;
+       }
+}
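+
+/*
+ * Illustrative effect of the two helpers above on a hypothetical buffer
+ * (passing xpath_len as 0 lets the helpers compute it via strlen()):
+ *
+ *     char xp[MGMTD_MAX_XPATH_LEN] = "/frr-interface:lib/interface";
+ *     size_t len = 0;
+ *
+ *     mgmt_xpath_append_trail_wildcard(xp, &len);
+ *     // xp now ends in '/' '*' and len has grown by 2
+ *     mgmt_xpath_remove_trail_wildcard(xp, &len);
+ *     // xp is back to its original form and len has shrunk by 2
+ */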
+
+/* Initialise datastore */
+extern int mgmt_ds_init(struct mgmt_master *cm);
+
+/* Destroy datastore */
+extern void mgmt_ds_destroy(void);
+
+/*
+ * Get datastore handler by ID
+ *
+ * mm
+ *    Management master structure.
+ *
+ * ds_id
+ *    Datastore ID.
+ *
+ * Returns:
+ *    Datastore context (holds info about ID, lock, root node, etc.).
+ */
+extern struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+                                                  Mgmtd__DatastoreId ds_id);
+
+/*
+ * Check if a given datastore is config ds
+ */
+extern bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Acquire a read lock on a DS given its context handle.
+ */
+extern int mgmt_ds_read_lock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Acquire a write lock on a DS given its context handle.
+ */
+extern int mgmt_ds_write_lock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Release a lock previously taken on a DS given its context handle.
+ */
+extern int mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Copy from source to destination datastore.
+ *
+ * src_ds
+ *    Source datastore handle (ds to be copied from).
+ *
+ * dst_ds
+ *    Destination datastore handle (ds to be copied to).
+ *
+ * update_cmt_rec
+ *    TRUE if the commit record needs to be updated, FALSE otherwise.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+                           struct mgmt_ds_ctx *dst_ds_ctx,
+                           bool update_cmt_rec);
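+
+/*
+ * Typical use (see the config-txn cleanup path in mgmt_fe_adapter.c):
+ * discard uncommitted candidate changes by overwriting the Candidate DS
+ * with the Running DS, without touching the commit record:
+ *
+ *     mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
+ */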
+
+/*
+ * Fetch northbound configuration for a given datastore context.
+ */
+extern struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Lookup YANG data nodes.
+ *
+ * ds_ctx
+ *    Datastore context.
+ *
+ * xpath
+ *    YANG base xpath.
+ *
+ * dxpaths
+ *    Out param - array of YANG data xpaths.
+ *
+ * num_nodes
+ *    In-out param - number of YANG data xpaths.
+ *    Note - Caller should init this to the size of the array
+ *    provided in dxpaths.
+ *    On return this will have the actual number of xpaths
+ *    being returned.
+ *
+ * get_childs_as_well
+ *    TRUE if child nodes need to be fetched as well, FALSE otherwise.
+ *
+ * alloc_xp_copy
+ *    TRUE if the caller is interested in getting a copy of the xpath.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_ds_lookup_data_nodes(struct mgmt_ds_ctx *ds_ctx,
+                                    const char *xpath, char *dxpaths[],
+                                    int *num_nodes, bool get_childs_as_well,
+                                    bool alloc_xp_copy);
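+
+/*
+ * Hypothetical usage sketch: the caller provides the dxpaths[] array and
+ * passes its capacity in num_nodes, which is overwritten with the number
+ * of xpaths actually returned:
+ *
+ *     char *dxpaths[MGMTD_MAX_NUM_DSNODES_PER_BATCH];
+ *     int num = MGMTD_MAX_NUM_DSNODES_PER_BATCH;
+ *
+ *     if (mgmt_ds_lookup_data_nodes(ds_ctx, "/frr-vrf:lib", dxpaths, &num,
+ *                                   true, true) == 0) {
+ *             for (int i = 0; i < num; i++)
+ *                     zlog_debug("matched: %s", dxpaths[i]);
+ *     }
+ */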
+
+/*
+ * Find a YANG data node given a datastore handle and a YANG xpath.
+ */
+extern struct lyd_node *
+mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+                               const char *xpath);
+
+/*
+ * Delete YANG data nodes given a datastore handle and a YANG xpath.
+ */
+extern int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx,
+                                    const char *xpath);
+
+/*
+ * Iterate over datastore data.
+ *
+ * ds_ctx
+ *    Datastore context.
+ *
+ * base_xpath
+ *    Base YANG xpath from which iteration starts.
+ *
+ * iter_fn
+ *    Function that will be called for each node visited.
+ *
+ * ctx
+ *    User-defined opaque value, typically a pointer to some caller
+ *    private context, passed unchanged to 'iter_fn' on every call.
+ *
+ * alloc_xp_copy
+ *    TRUE if the caller is interested in getting a copy of the xpath.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_ds_iter_data(
+       struct mgmt_ds_ctx *ds_ctx, char *base_xpath,
+       void (*mgmt_ds_node_iter_fn)(struct mgmt_ds_ctx *ds_ctx, char *xpath,
+                                    struct lyd_node *node,
+                                    struct nb_node *nb_node, void *ctx),
+       void *ctx, bool alloc_xp_copy);
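+
+/*
+ * Hypothetical iterator sketch: 'ctx' is handed back unchanged to the
+ * callback for every node visited:
+ *
+ *     static void count_node(struct mgmt_ds_ctx *ds_ctx, char *xpath,
+ *                            struct lyd_node *node,
+ *                            struct nb_node *nb_node, void *ctx)
+ *     {
+ *             (*(int *)ctx)++;
+ *     }
+ *
+ *     char base[] = "/frr-interface:lib";
+ *     int count = 0;
+ *
+ *     mgmt_ds_iter_data(ds_ctx, base, count_node, &count, false);
+ */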
+
+/*
+ * Load config to datastore from a file.
+ *
+ * ds_ctx
+ *    Datastore context.
+ *
+ * file_path
+ *    File path of the configuration file.
+ *
+ * merge
+ *    TRUE if you want to merge with the existing config,
+ *    FALSE if you want to replace the existing config.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *ds_ctx,
+                                        const char *file_path, bool merge);
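+
+/*
+ * Illustrative call (candidate_ds_ctx is a hypothetical handle obtained
+ * via mgmt_ds_get_ctx_by_id()): replace the datastore contents with the
+ * startup file rather than merging on top of them:
+ *
+ *     mgmt_ds_load_config_from_file(candidate_ds_ctx,
+ *                                   MGMTD_STARTUP_DS_FILE_PATH, false);
+ */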
+
+/*
+ * Dump the data tree to a file with JSON/XML format.
+ *
+ * vty
+ *    VTY context.
+ *
+ * ds_ctx
+ *    Datastore context.
+ *
+ * xpath
+ *    Base YANG xpath from which data needs to be dumped.
+ *
+ * f
+ *    File pointer to which the data is to be dumped.
+ *
+ * format
+ *    JSON/XML
+ */
+extern void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+                             const char *xpath, FILE *f, LYD_FORMAT format);
+
+/*
+ * Dump the complete data tree to a file with JSON format.
+ *
+ * file_name
+ *    File path to which the data is to be dumped.
+ *
+ * ds
+ *    Datastore context.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_ds_dump_ds_to_file(char *file_name,
+                                  struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about a specific datastore.
+ */
+extern void mgmt_ds_status_write_one(struct vty *vty,
+                                    struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about all the datastores.
+ */
+extern void mgmt_ds_status_write(struct vty *vty);
+
+/*
+ * Reset the candidate DS to empty state
+ */
+void mgmt_ds_reset_candidate(void);
+
+#endif /* _FRR_MGMTD_DS_H_ */
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
new file mode 100644 (file)
index 0000000..6b4f09d
--- /dev/null
@@ -0,0 +1,1908 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "hash.h"
+#include "jhash.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...)                                       \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...)                                       \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...)                                       \
+       do {                                                                 \
+               if (mgmt_debug_fe)                                           \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);     \
+       } while (0)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...)                                       \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+#define FOREACH_ADAPTER_IN_LIST(adapter)                                       \
+       frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter))
+
+enum mgmt_session_event {
+       MGMTD_FE_SESSION_CFG_TXN_CLNUP = 1,
+       MGMTD_FE_SESSION_SHOW_TXN_CLNUP,
+};
+
+struct mgmt_fe_session_ctx {
+       struct mgmt_fe_client_adapter *adapter;
+       uint64_t session_id;
+       uint64_t client_id;
+       uint64_t txn_id;
+       uint64_t cfg_txn_id;
+       uint8_t ds_write_locked[MGMTD_DS_MAX_ID];
+       uint8_t ds_read_locked[MGMTD_DS_MAX_ID];
+       uint8_t ds_locked_implict[MGMTD_DS_MAX_ID];
+       struct thread *proc_cfg_txn_clnp;
+       struct thread *proc_show_txn_clnp;
+
+       struct mgmt_fe_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage);
+
+#define FOREACH_SESSION_IN_LIST(adapter, session)                              \
+       frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session))
+
+static struct thread_master *mgmt_fe_adapter_tm;
+static struct mgmt_master *mgmt_fe_adapter_mm;
+
+static struct mgmt_fe_adapters_head mgmt_fe_adapters;
+
+static struct hash *mgmt_fe_sessions;
+static uint64_t mgmt_fe_next_session_id;
+
+/* Forward declarations */
+static void
+mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
+                                enum mgmt_fe_event event);
+static void
+mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter);
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+                                  enum mgmt_session_event event);
+
+static int
+mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
+                                 struct mgmt_ds_ctx *ds_ctx,
+                                 struct mgmt_fe_session_ctx *session)
+{
+       if (!session->ds_write_locked[ds_id]) {
+               if (mgmt_ds_write_lock(ds_ctx) != 0) {
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Failed to lock the DS %u for Sessn: %p from %s!",
+                               ds_id, session, session->adapter->name);
+                       return -1;
+               }
+
+               session->ds_write_locked[ds_id] = true;
+               MGMTD_FE_ADAPTER_DBG(
+                       "Write-Locked the DS %u for Sessn: %p from %s!", ds_id,
+                       session, session->adapter->name);
+       }
+
+       return 0;
+}
+
+static int
+mgmt_fe_session_read_lock_ds(Mgmtd__DatastoreId ds_id,
+                                struct mgmt_ds_ctx *ds_ctx,
+                                struct mgmt_fe_session_ctx *session)
+{
+       if (!session->ds_read_locked[ds_id]) {
+               if (mgmt_ds_read_lock(ds_ctx) != 0) {
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Failed to lock the DS %u for Sessn: %p from %s!",
+                               ds_id, session, session->adapter->name);
+                       return -1;
+               }
+
+               session->ds_read_locked[ds_id] = true;
+               MGMTD_FE_ADAPTER_DBG(
+                       "Read-Locked the DS %u for Sessn: %p from %s!", ds_id,
+                       session, session->adapter->name);
+       }
+
+       return 0;
+}
+
+static int mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
+                                        struct mgmt_ds_ctx *ds_ctx,
+                                        struct mgmt_fe_session_ctx *session,
+                                        bool unlock_write, bool unlock_read)
+{
+       if (unlock_write && session->ds_write_locked[ds_id]) {
+               session->ds_write_locked[ds_id] = false;
+               session->ds_locked_implict[ds_id] = false;
+               if (mgmt_ds_unlock(ds_ctx) != 0) {
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
+                               ds_id, session, session->adapter->name);
+                       return -1;
+               }
+
+               MGMTD_FE_ADAPTER_DBG(
+                       "Unlocked DS %u write-locked earlier by Sessn: %p from %s",
+                       ds_id, session, session->adapter->name);
+       } else if (unlock_read && session->ds_read_locked[ds_id]) {
+               session->ds_read_locked[ds_id] = false;
+               session->ds_locked_implict[ds_id] = false;
+               if (mgmt_ds_unlock(ds_ctx) != 0) {
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Failed to unlock the DS %u taken earlier by Sessn: %p from %s!",
+                               ds_id, session, session->adapter->name);
+                       return -1;
+               }
+
+               MGMTD_FE_ADAPTER_DBG(
+                       "Unlocked DS %u read-locked earlier by Sessn: %p from %s",
+                       ds_id, session, session->adapter->name);
+       }
+
+       return 0;
+}
+
+static void
+mgmt_fe_session_cfg_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+       Mgmtd__DatastoreId ds_id;
+       struct mgmt_ds_ctx *ds_ctx;
+
+       /*
+        * Ensure any uncommitted changes in the Candidate DS
+        * are discarded.
+        */
+       mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
+
+       for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+               ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+               if (ds_ctx) {
+                       if (session->ds_locked_implict[ds_id])
+                               mgmt_fe_session_unlock_ds(
+                                       ds_id, ds_ctx, session, true, false);
+               }
+       }
+
+       /*
+        * Destroy the actual transaction created earlier.
+        */
+       if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+               mgmt_destroy_txn(&session->cfg_txn_id);
+}
+
+static void
+mgmt_fe_session_show_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+       Mgmtd__DatastoreId ds_id;
+       struct mgmt_ds_ctx *ds_ctx;
+
+       for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+               ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, ds_id);
+               if (ds_ctx) {
+                       mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session,
+                                                     false, true);
+               }
+       }
+
+       /*
+        * Destroy the transaction created recently.
+        */
+       if (session->txn_id != MGMTD_TXN_ID_NONE)
+               mgmt_destroy_txn(&session->txn_id);
+}
+
+static void
+mgmt_fe_adapter_compute_set_cfg_timers(struct mgmt_setcfg_stats *setcfg_stats)
+{
+       setcfg_stats->last_exec_tm = timeval_elapsed(setcfg_stats->last_end,
+                                                    setcfg_stats->last_start);
+       if (setcfg_stats->last_exec_tm > setcfg_stats->max_tm)
+               setcfg_stats->max_tm = setcfg_stats->last_exec_tm;
+
+       if (setcfg_stats->last_exec_tm < setcfg_stats->min_tm)
+               setcfg_stats->min_tm = setcfg_stats->last_exec_tm;
+
+       setcfg_stats->avg_tm =
+               (((setcfg_stats->avg_tm * (setcfg_stats->set_cfg_count - 1))
+                 + setcfg_stats->last_exec_tm)
+                / setcfg_stats->set_cfg_count);
+}
+
+static void
+mgmt_fe_session_compute_commit_timers(struct mgmt_commit_stats *cmt_stats)
+{
+       cmt_stats->last_exec_tm =
+               timeval_elapsed(cmt_stats->last_end, cmt_stats->last_start);
+       if (cmt_stats->last_exec_tm > cmt_stats->max_tm) {
+               cmt_stats->max_tm = cmt_stats->last_exec_tm;
+               cmt_stats->max_batch_cnt = cmt_stats->last_batch_cnt;
+       }
+
+       if (cmt_stats->last_exec_tm < cmt_stats->min_tm) {
+               cmt_stats->min_tm = cmt_stats->last_exec_tm;
+               cmt_stats->min_batch_cnt = cmt_stats->last_batch_cnt;
+       }
+}
+
+static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **session)
+{
+       if ((*session)->adapter) {
+               mgmt_fe_session_cfg_txn_cleanup((*session));
+               mgmt_fe_session_show_txn_cleanup((*session));
+               mgmt_fe_session_unlock_ds(MGMTD_DS_CANDIDATE,
+                                         mgmt_fe_adapter_mm->candidate_ds,
+                                         *session, true, true);
+               mgmt_fe_session_unlock_ds(MGMTD_DS_RUNNING,
+                                         mgmt_fe_adapter_mm->running_ds,
+                                         *session, true, true);
+
+               mgmt_fe_sessions_del(&(*session)->adapter->fe_sessions,
+                                    *session);
+               mgmt_fe_adapter_unlock(&(*session)->adapter);
+       }
+
+       hash_release(mgmt_fe_sessions, *session);
+       XFREE(MTYPE_MGMTD_FE_SESSION, *session);
+       *session = NULL;
+}
+
+static struct mgmt_fe_session_ctx *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_adapter *adapter,
+                                     uint64_t client_id)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       FOREACH_SESSION_IN_LIST (adapter, session) {
+               if (session->client_id == client_id)
+                       return session;
+       }
+
+       return NULL;
+}
+
+static unsigned int mgmt_fe_session_hash_key(const void *data)
+{
+       const struct mgmt_fe_session_ctx *session = data;
+
+       return jhash2((uint32_t *) &session->session_id,
+                     sizeof(session->session_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_fe_session_hash_cmp(const void *d1, const void *d2)
+{
+       const struct mgmt_fe_session_ctx *session1 = d1;
+       const struct mgmt_fe_session_ctx *session2 = d2;
+
+       return (session1->session_id == session2->session_id);
+}
+
+static void mgmt_fe_session_hash_free(void *data)
+{
+       struct mgmt_fe_session_ctx *session = data;
+
+       mgmt_fe_cleanup_session(&session);
+}
+
+static void mgmt_fe_session_hash_destroy(void)
+{
+       if (mgmt_fe_sessions == NULL)
+               return;
+
+       hash_clean(mgmt_fe_sessions,
+                  mgmt_fe_session_hash_free);
+       hash_free(mgmt_fe_sessions);
+       mgmt_fe_sessions = NULL;
+}
+
+static inline struct mgmt_fe_session_ctx *
+mgmt_session_id2ctx(uint64_t session_id)
+{
+       struct mgmt_fe_session_ctx key = {0};
+       struct mgmt_fe_session_ctx *session;
+
+       if (!mgmt_fe_sessions)
+               return NULL;
+
+       key.session_id = session_id;
+       session = hash_lookup(mgmt_fe_sessions, &key);
+
+       return session;
+}
+
+static struct mgmt_fe_session_ctx *
+mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
+                          uint64_t client_id)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_fe_find_session_by_client_id(adapter, client_id);
+       if (session)
+               mgmt_fe_cleanup_session(&session);
+
+       session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+                       sizeof(struct mgmt_fe_session_ctx));
+       assert(session);
+       session->client_id = client_id;
+       session->adapter = adapter;
+       session->txn_id = MGMTD_TXN_ID_NONE;
+       session->cfg_txn_id = MGMTD_TXN_ID_NONE;
+       mgmt_fe_adapter_lock(adapter);
+       mgmt_fe_sessions_add_tail(&adapter->fe_sessions, session);
+       if (!mgmt_fe_next_session_id)
+               mgmt_fe_next_session_id++;
+       session->session_id = mgmt_fe_next_session_id++;
+       hash_get(mgmt_fe_sessions, session, hash_alloc_intern);
+
+       return session;
+}
+
+static void
+mgmt_fe_cleanup_sessions(struct mgmt_fe_client_adapter *adapter)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       FOREACH_SESSION_IN_LIST (adapter, session)
+               mgmt_fe_cleanup_session(&session);
+}
+
+static inline void
+mgmt_fe_adapter_sched_msg_write(struct mgmt_fe_client_adapter *adapter)
+{
+       if (!CHECK_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF))
+               mgmt_fe_adapter_register_event(adapter,
+                                                MGMTD_FE_CONN_WRITE);
+}
+
+static inline void
+mgmt_fe_adapter_writes_on(struct mgmt_fe_client_adapter *adapter)
+{
+       MGMTD_FE_ADAPTER_DBG("Resume writing msgs for '%s'", adapter->name);
+       UNSET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
+       mgmt_fe_adapter_sched_msg_write(adapter);
+}
+
+static inline void
+mgmt_fe_adapter_writes_off(struct mgmt_fe_client_adapter *adapter)
+{
+       SET_FLAG(adapter->flags, MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF);
+       MGMTD_FE_ADAPTER_DBG("Paused writing msgs for '%s'", adapter->name);
+}
+
+static int
+mgmt_fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+                            Mgmtd__FeMessage *fe_msg)
+{
+       if (adapter->conn_fd == -1) {
+               MGMTD_FE_ADAPTER_DBG("can't send message on closed connection");
+               return -1;
+       }
+
+       int rv = mgmt_msg_send_msg(
+               &adapter->mstate, fe_msg,
+               mgmtd__fe_message__get_packed_size(fe_msg),
+               (size_t(*)(void *, void *))mgmtd__fe_message__pack,
+               mgmt_debug_fe);
+       mgmt_fe_adapter_sched_msg_write(adapter);
+       return rv;
+}
+
+static int
+mgmt_fe_send_session_reply(struct mgmt_fe_client_adapter *adapter,
+                              struct mgmt_fe_session_ctx *session,
+                              bool create, bool success)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeSessionReply session_reply;
+
+       mgmtd__fe_session_reply__init(&session_reply);
+       session_reply.create = create;
+       if (create) {
+               session_reply.has_client_conn_id = 1;
+               session_reply.client_conn_id = session->client_id;
+       }
+       session_reply.session_id = session->session_id;
+       session_reply.success = success;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY;
+       fe_msg.session_reply = &session_reply;
+
+       MGMTD_FE_ADAPTER_DBG(
+               "Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
+               adapter->name);
+
+       return mgmt_fe_adapter_send_msg(adapter, &fe_msg);
+}
+
+static int mgmt_fe_send_lockds_reply(struct mgmt_fe_session_ctx *session,
+                                        Mgmtd__DatastoreId ds_id,
+                                        uint64_t req_id, bool lock_ds,
+                                        bool success, const char *error_if_any)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeLockDsReply lockds_reply;
+
+       assert(session->adapter);
+
+       mgmtd__fe_lock_ds_reply__init(&lockds_reply);
+       lockds_reply.session_id = session->session_id;
+       lockds_reply.ds_id = ds_id;
+       lockds_reply.req_id = req_id;
+       lockds_reply.lock = lock_ds;
+       lockds_reply.success = success;
+       if (error_if_any)
+               lockds_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY;
+       fe_msg.lockds_reply = &lockds_reply;
+
+       MGMTD_FE_ADAPTER_DBG(
+               "Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s'",
+               session->adapter->name);
+
+       return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+static int mgmt_fe_send_setcfg_reply(struct mgmt_fe_session_ctx *session,
+                                        Mgmtd__DatastoreId ds_id,
+                                        uint64_t req_id, bool success,
+                                        const char *error_if_any,
+                                        bool implicit_commit)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeSetConfigReply setcfg_reply;
+
+       assert(session->adapter);
+
+       if (implicit_commit && session->cfg_txn_id)
+               mgmt_fe_session_register_event(
+                       session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+       mgmtd__fe_set_config_reply__init(&setcfg_reply);
+       setcfg_reply.session_id = session->session_id;
+       setcfg_reply.ds_id = ds_id;
+       setcfg_reply.req_id = req_id;
+       setcfg_reply.success = success;
+       if (error_if_any)
+               setcfg_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY;
+       fe_msg.setcfg_reply = &setcfg_reply;
+
+       MGMTD_FE_ADAPTER_DBG(
+               "Sending SET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+               session->adapter->name);
+
+       if (implicit_commit) {
+               if (mm->perf_stats_en)
+                       gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+               mgmt_fe_session_compute_commit_timers(
+                       &session->adapter->cmt_stats);
+       }
+
+       if (mm->perf_stats_en)
+               gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
+       mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
+
+       return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+static int mgmt_fe_send_commitcfg_reply(
+       struct mgmt_fe_session_ctx *session, Mgmtd__DatastoreId src_ds_id,
+       Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, enum mgmt_result result,
+       bool validate_only, const char *error_if_any)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeCommitConfigReply commcfg_reply;
+
+       assert(session->adapter);
+
+       mgmtd__fe_commit_config_reply__init(&commcfg_reply);
+       commcfg_reply.session_id = session->session_id;
+       commcfg_reply.src_ds_id = src_ds_id;
+       commcfg_reply.dst_ds_id = dst_ds_id;
+       commcfg_reply.req_id = req_id;
+       commcfg_reply.success =
+               (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES)
+                       ? true
+                       : false;
+       commcfg_reply.validate_only = validate_only;
+       if (error_if_any)
+               commcfg_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY;
+       fe_msg.commcfg_reply = &commcfg_reply;
+
+       MGMTD_FE_ADAPTER_DBG(
+               "Sending COMMIT_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+               session->adapter->name);
+
+       /*
+        * Cleanup the CONFIG transaction associated with this session.
+        */
+       if (session->cfg_txn_id
+           && ((result == MGMTD_SUCCESS && !validate_only)
+               || (result == MGMTD_NO_CFG_CHANGES)))
+               mgmt_fe_session_register_event(
+                       session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+       if (mm->perf_stats_en)
+               gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+       mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
+       return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+static int mgmt_fe_send_getcfg_reply(struct mgmt_fe_session_ctx *session,
+                                        Mgmtd__DatastoreId ds_id,
+                                        uint64_t req_id, bool success,
+                                        Mgmtd__YangDataReply *data,
+                                        const char *error_if_any)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeGetConfigReply getcfg_reply;
+
+       assert(session->adapter);
+
+       mgmtd__fe_get_config_reply__init(&getcfg_reply);
+       getcfg_reply.session_id = session->session_id;
+       getcfg_reply.ds_id = ds_id;
+       getcfg_reply.req_id = req_id;
+       getcfg_reply.success = success;
+       getcfg_reply.data = data;
+       if (error_if_any)
+               getcfg_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY;
+       fe_msg.getcfg_reply = &getcfg_reply;
+
+       MGMTD_FE_ADAPTER_DBG(
+               "Sending GET_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+               session->adapter->name);
+
+       /*
+        * Cleanup the SHOW transaction associated with this session.
+        */
+       if (session->txn_id && (!success || (data && data->next_indx < 0)))
+               mgmt_fe_session_register_event(
+                       session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+       return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+static int mgmt_fe_send_getdata_reply(struct mgmt_fe_session_ctx *session,
+                                         Mgmtd__DatastoreId ds_id,
+                                         uint64_t req_id, bool success,
+                                         Mgmtd__YangDataReply *data,
+                                         const char *error_if_any)
+{
+       Mgmtd__FeMessage fe_msg;
+       Mgmtd__FeGetDataReply getdata_reply;
+
+       assert(session->adapter);
+
+       mgmtd__fe_get_data_reply__init(&getdata_reply);
+       getdata_reply.session_id = session->session_id;
+       getdata_reply.ds_id = ds_id;
+       getdata_reply.req_id = req_id;
+       getdata_reply.success = success;
+       getdata_reply.data = data;
+       if (error_if_any)
+               getdata_reply.error_if_any = (char *)error_if_any;
+
+       mgmtd__fe_message__init(&fe_msg);
+       fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY;
+       fe_msg.getdata_reply = &getdata_reply;
+
+       MGMTD_FE_ADAPTER_DBG(
+               "Sending GET_DATA_REPLY message to MGMTD Frontend client '%s'",
+               session->adapter->name);
+
+       /*
+        * Cleanup the SHOW transaction associated with this session.
+        */
+       if (session->txn_id && (!success || (data && data->next_indx < 0)))
+               mgmt_fe_session_register_event(
+                       session, MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+       return mgmt_fe_adapter_send_msg(session->adapter, &fe_msg);
+}
+
+static void mgmt_fe_session_cfg_txn_clnup(struct thread *thread)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = (struct mgmt_fe_session_ctx *)THREAD_ARG(thread);
+
+       mgmt_fe_session_cfg_txn_cleanup(session);
+}
+
+static void mgmt_fe_session_show_txn_clnup(struct thread *thread)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = (struct mgmt_fe_session_ctx *)THREAD_ARG(thread);
+
+       mgmt_fe_session_show_txn_cleanup(session);
+}
+
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+                                  enum mgmt_session_event event)
+{
+       struct timeval tv = {.tv_sec = 0,
+                            .tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC};
+
+       switch (event) {
+       case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
+               thread_add_timer_tv(mgmt_fe_adapter_tm,
+                                   mgmt_fe_session_cfg_txn_clnup, session,
+                                   &tv, &session->proc_cfg_txn_clnp);
+               assert(session->proc_cfg_txn_clnp);
+               break;
+       case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
+               thread_add_timer_tv(mgmt_fe_adapter_tm,
+                                   mgmt_fe_session_show_txn_clnup, session,
+                                   &tv, &session->proc_show_txn_clnp);
+               assert(session->proc_show_txn_clnp);
+               break;
+       }
+}
+
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_fd(int conn_fd)
+{
+       struct mgmt_fe_client_adapter *adapter;
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               if (adapter->conn_fd == conn_fd)
+                       return adapter;
+       }
+
+       return NULL;
+}
+
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_name(const char *name)
+{
+       struct mgmt_fe_client_adapter *adapter;
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               if (!strncmp(adapter->name, name, sizeof(adapter->name)))
+                       return adapter;
+       }
+
+       return NULL;
+}
+
+static void mgmt_fe_adapter_disconnect(struct mgmt_fe_client_adapter *adapter)
+{
+       if (adapter->conn_fd >= 0) {
+               close(adapter->conn_fd);
+               adapter->conn_fd = -1;
+       }
+
+       /* TODO: notify about client disconnect for appropriate cleanup */
+       mgmt_fe_cleanup_sessions(adapter);
+       mgmt_fe_sessions_fini(&adapter->fe_sessions);
+       mgmt_fe_adapters_del(&mgmt_fe_adapters, adapter);
+
+       mgmt_fe_adapter_unlock(&adapter);
+}
+
+static void
+mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
+{
+       struct mgmt_fe_client_adapter *old;
+
+       FOREACH_ADAPTER_IN_LIST (old) {
+               if (old != adapter
+                   && !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+                       /*
+                        * We have a Zombie lingering around
+                        */
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+                               adapter->name, adapter->conn_fd, old->conn_fd);
+                       mgmt_fe_adapter_disconnect(old);
+               }
+       }
+}
+
+static void
+mgmt_fe_cleanup_adapters(void)
+{
+       struct mgmt_fe_client_adapter *adapter;
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               mgmt_fe_cleanup_sessions(adapter);
+               mgmt_fe_adapter_unlock(&adapter);
+       }
+}
+
+static int
+mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
+                                         Mgmtd__FeLockDsReq *lockds_req)
+{
+       struct mgmt_ds_ctx *ds_ctx;
+
+       /*
+        * Check first whether the LOCK_DS_REQ is for the Candidate DS.
+        * Report failure if it is not, since MGMTD currently only
+        * supports locking/unlocking the Candidate DS.
+        */
+       if (lockds_req->ds_id != MGMTD_DS_CANDIDATE) {
+               mgmt_fe_send_lockds_reply(
+                       session, lockds_req->ds_id, lockds_req->req_id,
+                       lockds_req->lock, false,
+                       "Lock/Unlock on datastores other than Candidate DS not permitted!");
+               return -1;
+       }
+
+       ds_ctx =
+               mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, lockds_req->ds_id);
+       if (!ds_ctx) {
+               mgmt_fe_send_lockds_reply(
+                       session, lockds_req->ds_id, lockds_req->req_id,
+                       lockds_req->lock, false,
+                       "Failed to retrieve handle for DS!");
+               return -1;
+       }
+
+       if (lockds_req->lock) {
+               if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id,
+                                                     ds_ctx, session)
+                   != 0) {
+                       mgmt_fe_send_lockds_reply(
+                               session, lockds_req->ds_id, lockds_req->req_id,
+                               lockds_req->lock, false,
+                               "Lock already taken on DS by another session!");
+                       return -1;
+               }
+
+               session->ds_locked_implict[lockds_req->ds_id] = false;
+       } else {
+               if (!session->ds_write_locked[lockds_req->ds_id]) {
+                       mgmt_fe_send_lockds_reply(
+                               session, lockds_req->ds_id, lockds_req->req_id,
+                               lockds_req->lock, false,
+                               "Lock on DS was not taken by this session!");
+                       return 0;
+               }
+
+               (void)mgmt_fe_session_unlock_ds(lockds_req->ds_id, ds_ctx,
+                                                   session, true, false);
+       }
+
+       if (mgmt_fe_send_lockds_reply(session, lockds_req->ds_id,
+                                          lockds_req->req_id, lockds_req->lock,
+                                          true, NULL)
+           != 0) {
+               MGMTD_FE_ADAPTER_DBG(
+                       "Failed to send LOCK_DS_REPLY for DS %u Sessn: %p from %s",
+                       lockds_req->ds_id, session, session->adapter->name);
+       }
+
+       return 0;
+}
+
+static int
+mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
+                                         Mgmtd__FeSetConfigReq *setcfg_req)
+{
+       uint64_t cfg_session_id;
+       struct mgmt_ds_ctx *ds_ctx, *dst_ds_ctx;
+
+       if (mm->perf_stats_en)
+               gettimeofday(&session->adapter->setcfg_stats.last_start, NULL);
+
+       /*
+        * Check first whether the SET_CONFIG_REQ is for the Candidate DS.
+        * Report failure if it is not, since MGMTD currently only
+        * supports editing the Candidate DS.
+        */
+       if (setcfg_req->ds_id != MGMTD_DS_CANDIDATE) {
+               mgmt_fe_send_setcfg_reply(
+                       session, setcfg_req->ds_id, setcfg_req->req_id, false,
+                       "Set-Config on datastores other than Candidate DS not permitted!",
+                       setcfg_req->implicit_commit);
+               return 0;
+       }
+
+       /*
+        * Get the DS handle.
+        */
+       ds_ctx =
+               mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, setcfg_req->ds_id);
+       if (!ds_ctx) {
+               mgmt_fe_send_setcfg_reply(
+                       session, setcfg_req->ds_id, setcfg_req->req_id, false,
+                       "No such DS exists!", setcfg_req->implicit_commit);
+               return 0;
+       }
+
+       if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+               /*
+                * Check first if the current session can run a CONFIG
+                * transaction or not. Report failure if a CONFIG transaction
+                * from another session is already in progress.
+                */
+               cfg_session_id = mgmt_config_txn_in_progress();
+               if (cfg_session_id != MGMTD_SESSION_ID_NONE
+                  && cfg_session_id != session->session_id) {
+                       mgmt_fe_send_setcfg_reply(
+                               session, setcfg_req->ds_id, setcfg_req->req_id,
+                               false,
+                               "Configuration already in-progress through a different user session!",
+                               setcfg_req->implicit_commit);
+                       goto mgmt_fe_sess_handle_setcfg_req_failed;
+               }
+
+
+               /*
+                * Try taking write-lock on the requested DS (if not already).
+                */
+               if (!session->ds_write_locked[setcfg_req->ds_id]) {
+                       if (mgmt_fe_session_write_lock_ds(setcfg_req->ds_id,
+                                                             ds_ctx, session)
+                           != 0) {
+                               mgmt_fe_send_setcfg_reply(
+                                       session, setcfg_req->ds_id,
+                                       setcfg_req->req_id, false,
+                                       "Failed to lock the DS!",
+                                       setcfg_req->implicit_commit);
+                               goto mgmt_fe_sess_handle_setcfg_req_failed;
+                       }
+
+                       session->ds_locked_implict[setcfg_req->ds_id] = true;
+               }
+
+               /*
+                * Start a CONFIG Transaction (if not started already)
+                */
+               session->cfg_txn_id = mgmt_create_txn(session->session_id,
+                                                     MGMTD_TXN_TYPE_CONFIG);
+               if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+                       mgmt_fe_send_setcfg_reply(
+                               session, setcfg_req->ds_id, setcfg_req->req_id,
+                               false,
+                               "Failed to create a Configuration session!",
+                               setcfg_req->implicit_commit);
+                       goto mgmt_fe_sess_handle_setcfg_req_failed;
+               }
+
+               MGMTD_FE_ADAPTER_DBG(
+                       "Created new Config Txn 0x%llx for session %p",
+                       (unsigned long long)session->cfg_txn_id, session);
+       } else {
+               MGMTD_FE_ADAPTER_DBG(
+                       "Config Txn 0x%llx for session %p already created",
+                       (unsigned long long)session->cfg_txn_id, session);
+
+               if (setcfg_req->implicit_commit) {
+                       /*
+                        * In this scenario we need to skip cleanup of the
+                        * txn, so pass implicit commit as false.
+                        */
+                       mgmt_fe_send_setcfg_reply(
+                               session, setcfg_req->ds_id, setcfg_req->req_id,
+                               false,
+                               "A Configuration transaction is already in progress!",
+                               false);
+                       return 0;
+               }
+       }
+
+       dst_ds_ctx = 0;
+       if (setcfg_req->implicit_commit) {
+               dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+                                                    setcfg_req->commit_ds_id);
+               if (!dst_ds_ctx) {
+                       mgmt_fe_send_setcfg_reply(
+                               session, setcfg_req->ds_id, setcfg_req->req_id,
+                               false, "No such commit DS exists!",
+                               setcfg_req->implicit_commit);
+                       return 0;
+               }
+       }
+
+       /*
+        * Create the SETConfig request under the transaction.
+        */
+       if (mgmt_txn_send_set_config_req(
+                   session->cfg_txn_id, setcfg_req->req_id, setcfg_req->ds_id,
+                   ds_ctx, setcfg_req->data, setcfg_req->n_data,
+                   setcfg_req->implicit_commit, setcfg_req->commit_ds_id,
+                   dst_ds_ctx)
+           != 0) {
+               mgmt_fe_send_setcfg_reply(
+                       session, setcfg_req->ds_id, setcfg_req->req_id, false,
+                       "Request processing for SET-CONFIG failed!",
+                       setcfg_req->implicit_commit);
+               goto mgmt_fe_sess_handle_setcfg_req_failed;
+       }
+
+       return 0;
+
+mgmt_fe_sess_handle_setcfg_req_failed:
+
+       /*
+        * Delete transaction created recently.
+        */
+       if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+               mgmt_destroy_txn(&session->cfg_txn_id);
+       if (ds_ctx && session->ds_write_locked[setcfg_req->ds_id])
+               mgmt_fe_session_unlock_ds(setcfg_req->ds_id, ds_ctx, session,
+                                             true, false);
+
+       return 0;
+}
+
+static int
+mgmt_fe_session_handle_getcfg_req_msg(struct mgmt_fe_session_ctx *session,
+                                         Mgmtd__FeGetConfigReq *getcfg_req)
+{
+       struct mgmt_ds_ctx *ds_ctx;
+
+       /*
+        * Get the DS handle.
+        */
+       ds_ctx =
+               mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm, getcfg_req->ds_id);
+       if (!ds_ctx) {
+               mgmt_fe_send_getcfg_reply(session, getcfg_req->ds_id,
+                                             getcfg_req->req_id, false, NULL,
+                                             "No such DS exists!");
+               return 0;
+       }
+
+       /*
+        * Check first whether the GET_CONFIG_REQ is for the Candidate or
+        * Running DS. Report failure if it is not, since MGMTD currently
+        * only supports reading config from these two datastores.
+        */
+       if (getcfg_req->ds_id != MGMTD_DS_CANDIDATE
+           && getcfg_req->ds_id != MGMTD_DS_RUNNING) {
+               mgmt_fe_send_getcfg_reply(
+                       session, getcfg_req->ds_id, getcfg_req->req_id, false,
+                       NULL,
+                       "Get-Config on datastores other than Candidate or Running DS not permitted!");
+               return 0;
+       }
+
+       if (session->txn_id == MGMTD_TXN_ID_NONE) {
+               /*
+                * Try taking read-lock on the requested DS (if not already
+                * locked). If the DS has already been write-locked by an ongoing
+                * CONFIG transaction we may allow reading the contents of the
+                * same DS.
+                */
+               if (!session->ds_read_locked[getcfg_req->ds_id]
+                   && !session->ds_write_locked[getcfg_req->ds_id]) {
+                       if (mgmt_fe_session_read_lock_ds(getcfg_req->ds_id,
+                                                            ds_ctx, session)
+                           != 0) {
+                               mgmt_fe_send_getcfg_reply(
+                                       session, getcfg_req->ds_id,
+                                       getcfg_req->req_id, false, NULL,
+                                       "Failed to lock the DS! Another session might have locked it!");
+                               goto mgmt_fe_sess_handle_getcfg_req_failed;
+                       }
+
+                       session->ds_locked_implict[getcfg_req->ds_id] = true;
+               }
+
+               /*
+                * Start a SHOW Transaction (if not started already)
+                */
+               session->txn_id = mgmt_create_txn(session->session_id,
+                                                 MGMTD_TXN_TYPE_SHOW);
+               if (session->txn_id == MGMTD_TXN_ID_NONE) {
+                       mgmt_fe_send_getcfg_reply(
+                               session, getcfg_req->ds_id, getcfg_req->req_id,
+                               false, NULL,
+                               "Failed to create a Show transaction!");
+                       goto mgmt_fe_sess_handle_getcfg_req_failed;
+               }
+
+               MGMTD_FE_ADAPTER_DBG(
+                       "Created new Show Txn 0x%llx for session %p",
+                       (unsigned long long)session->txn_id, session);
+       } else {
+               MGMTD_FE_ADAPTER_DBG(
+                       "Show Txn 0x%llx for session %p already created",
+                       (unsigned long long)session->txn_id, session);
+       }
+
+       /*
+        * Create a GETConfig request under the transaction.
+        */
+       if (mgmt_txn_send_get_config_req(session->txn_id, getcfg_req->req_id,
+                                         getcfg_req->ds_id, ds_ctx,
+                                         getcfg_req->data, getcfg_req->n_data)
+           != 0) {
+               mgmt_fe_send_getcfg_reply(
+                       session, getcfg_req->ds_id, getcfg_req->req_id, false,
+                       NULL, "Request processing for GET-CONFIG failed!");
+               goto mgmt_fe_sess_handle_getcfg_req_failed;
+       }
+
+       return 0;
+
+mgmt_fe_sess_handle_getcfg_req_failed:
+
+       /*
+        * Destroy the transaction created recently.
+        */
+       if (session->txn_id != MGMTD_TXN_ID_NONE)
+               mgmt_destroy_txn(&session->txn_id);
+       if (ds_ctx && session->ds_read_locked[getcfg_req->ds_id])
+               mgmt_fe_session_unlock_ds(getcfg_req->ds_id, ds_ctx, session,
+                                             false, true);
+
+       return -1;
+}
+
+static int
+mgmt_fe_session_handle_getdata_req_msg(struct mgmt_fe_session_ctx *session,
+                                          Mgmtd__FeGetDataReq *getdata_req)
+{
+       struct mgmt_ds_ctx *ds_ctx;
+
+       /*
+        * Get the DS handle.
+        */
+       ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+                                        getdata_req->ds_id);
+       if (!ds_ctx) {
+               mgmt_fe_send_getdata_reply(session, getdata_req->ds_id,
+                                              getdata_req->req_id, false, NULL,
+                                              "No such DS exists!");
+               return 0;
+       }
+
+       if (session->txn_id == MGMTD_TXN_ID_NONE) {
+               /*
+                * Try taking read-lock on the requested DS (if not already
+                * locked). If the DS has already been write-locked by an ongoing
+                * CONFIG transaction we may allow reading the contents of the
+                * same DS.
+                */
+               if (!session->ds_read_locked[getdata_req->ds_id]
+                   && !session->ds_write_locked[getdata_req->ds_id]) {
+                       if (mgmt_fe_session_read_lock_ds(getdata_req->ds_id,
+                                                            ds_ctx, session)
+                           != 0) {
+                               mgmt_fe_send_getdata_reply(
+                                       session, getdata_req->ds_id,
+                                       getdata_req->req_id, false, NULL,
+                                       "Failed to lock the DS! Another session might have locked it!");
+                               goto mgmt_fe_sess_handle_getdata_req_failed;
+                       }
+
+                       session->ds_locked_implict[getdata_req->ds_id] = true;
+               }
+
+               /*
+                * Start a SHOW Transaction (if not started already)
+                */
+               session->txn_id = mgmt_create_txn(session->session_id,
+                                                 MGMTD_TXN_TYPE_SHOW);
+               if (session->txn_id == MGMTD_TXN_ID_NONE) {
+                       mgmt_fe_send_getdata_reply(
+                               session, getdata_req->ds_id, getdata_req->req_id,
+                               false, NULL,
+                               "Failed to create a Show transaction!");
+                       goto mgmt_fe_sess_handle_getdata_req_failed;
+               }
+
+               MGMTD_FE_ADAPTER_DBG(
+                       "Created new Show Txn 0x%llx for session %p",
+                       (unsigned long long)session->txn_id, session);
+       } else {
+               MGMTD_FE_ADAPTER_DBG(
+                       "Show Txn 0x%llx for session %p already created",
+                       (unsigned long long)session->txn_id, session);
+       }
+
+       /*
+        * Create a GETData request under the transaction.
+        */
+       if (mgmt_txn_send_get_data_req(session->txn_id, getdata_req->req_id,
+                                       getdata_req->ds_id, ds_ctx,
+                                       getdata_req->data, getdata_req->n_data)
+           != 0) {
+               mgmt_fe_send_getdata_reply(
+                       session, getdata_req->ds_id, getdata_req->req_id, false,
+                       NULL, "Request processing for GET-DATA failed!");
+               goto mgmt_fe_sess_handle_getdata_req_failed;
+       }
+
+       return 0;
+
+mgmt_fe_sess_handle_getdata_req_failed:
+
+       /*
+        * Destroy the transaction created recently.
+        */
+       if (session->txn_id != MGMTD_TXN_ID_NONE)
+               mgmt_destroy_txn(&session->txn_id);
+
+       if (ds_ctx && session->ds_read_locked[getdata_req->ds_id])
+               mgmt_fe_session_unlock_ds(getdata_req->ds_id, ds_ctx,
+                                             session, false, true);
+
+       return -1;
+}
+
+static int mgmt_fe_session_handle_commit_config_req_msg(
+       struct mgmt_fe_session_ctx *session,
+       Mgmtd__FeCommitConfigReq *commcfg_req)
+{
+       struct mgmt_ds_ctx *src_ds_ctx, *dst_ds_ctx;
+
+       if (mm->perf_stats_en)
+               gettimeofday(&session->adapter->cmt_stats.last_start, NULL);
+       session->adapter->cmt_stats.commit_cnt++;
+       /*
+        * Get the source DS handle.
+        */
+       src_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+                                            commcfg_req->src_ds_id);
+       if (!src_ds_ctx) {
+               mgmt_fe_send_commitcfg_reply(
+                       session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+                       commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+                       commcfg_req->validate_only,
+                       "No such source DS exists!");
+               return 0;
+       }
+
+       /*
+        * Get the destination DS handle.
+        */
+       dst_ds_ctx = mgmt_ds_get_ctx_by_id(mgmt_fe_adapter_mm,
+                                            commcfg_req->dst_ds_id);
+       if (!dst_ds_ctx) {
+               mgmt_fe_send_commitcfg_reply(
+                       session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+                       commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+                       commcfg_req->validate_only,
+                       "No such destination DS exists!");
+               return 0;
+       }
+
+       /*
+        * Check first whether the COMMIT_CONFIG_REQ targets the Running DS
+        * as destination. Report failure if it does not, since MGMTD
+        * currently only supports committing into the Running DS.
+        */
+       if (commcfg_req->dst_ds_id != MGMTD_DS_RUNNING) {
+               mgmt_fe_send_commitcfg_reply(
+                       session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+                       commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+                       commcfg_req->validate_only,
+                       "Commit-Config on datastores other than Running DS not permitted!");
+               return 0;
+       }
+
+       if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+               /*
+                * Start a CONFIG Transaction (if not started already)
+                */
+               session->cfg_txn_id = mgmt_create_txn(session->session_id,
+                                               MGMTD_TXN_TYPE_CONFIG);
+               if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+                       mgmt_fe_send_commitcfg_reply(
+                               session, commcfg_req->src_ds_id,
+                               commcfg_req->dst_ds_id, commcfg_req->req_id,
+                               MGMTD_INTERNAL_ERROR,
+                               commcfg_req->validate_only,
+                               "Failed to create a Configuration session!");
+                       return 0;
+               }
+               MGMTD_FE_ADAPTER_DBG("Created txn %" PRIu64
+                                    " for session %" PRIu64
+                                    " for COMMIT-CFG-REQ",
+                                    session->cfg_txn_id, session->session_id);
+       }
+
+
+       /*
+        * Try taking write-lock on the destination DS (if not already).
+        */
+       if (!session->ds_write_locked[commcfg_req->dst_ds_id]) {
+               if (mgmt_fe_session_write_lock_ds(commcfg_req->dst_ds_id,
+                                                     dst_ds_ctx, session)
+                   != 0) {
+                       mgmt_fe_send_commitcfg_reply(
+                               session, commcfg_req->src_ds_id,
+                               commcfg_req->dst_ds_id, commcfg_req->req_id,
+                               MGMTD_DS_LOCK_FAILED,
+                               commcfg_req->validate_only,
+                               "Failed to lock the destination DS!");
+                       return 0;
+               }
+
+               session->ds_locked_implict[commcfg_req->dst_ds_id] = true;
+       }
+
+       /*
+        * Create COMMITConfig request under the transaction
+        */
+       if (mgmt_txn_send_commit_config_req(
+                   session->cfg_txn_id, commcfg_req->req_id,
+                   commcfg_req->src_ds_id, src_ds_ctx, commcfg_req->dst_ds_id,
+                   dst_ds_ctx, commcfg_req->validate_only, commcfg_req->abort,
+                   false)
+           != 0) {
+               mgmt_fe_send_commitcfg_reply(
+                       session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+                       commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+                       commcfg_req->validate_only,
+                       "Request processing for COMMIT-CONFIG failed!");
+               return 0;
+       }
+
+       return 0;
+}
+
+static int
+mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
+                              Mgmtd__FeMessage *fe_msg)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       /*
+        * protobuf-c adds a max-size sentinel to the enum whose internal
+        * name changes between versions; cast to an int to avoid
+        * unhandled-enum warnings.
+        */
+       switch ((int)fe_msg->message_case) {
+       case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+               MGMTD_FE_ADAPTER_DBG("Got Register Req Msg from '%s'",
+                                      fe_msg->register_req->client_name);
+
+               if (strlen(fe_msg->register_req->client_name)) {
+                       strlcpy(adapter->name,
+                               fe_msg->register_req->client_name,
+                               sizeof(adapter->name));
+                       mgmt_fe_adapter_cleanup_old_conn(adapter);
+               }
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+               if (fe_msg->session_req->create
+                   && fe_msg->session_req->id_case
+                       == MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID) {
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Got Session Create Req Msg for client-id %llu from '%s'",
+                               (unsigned long long)
+                                       fe_msg->session_req->client_conn_id,
+                               adapter->name);
+
+                       session = mgmt_fe_create_session(
+                               adapter, fe_msg->session_req->client_conn_id);
+                       mgmt_fe_send_session_reply(adapter, session, true,
+                                                      session ? true : false);
+               } else if (
+                       !fe_msg->session_req->create
+                       && fe_msg->session_req->id_case
+                               == MGMTD__FE_SESSION_REQ__ID_SESSION_ID) {
+                       MGMTD_FE_ADAPTER_DBG(
+                               "Got Session Destroy Req Msg for session-id %llu from '%s'",
+                               (unsigned long long)
+                                       fe_msg->session_req->session_id,
+                               adapter->name);
+
+                       session = mgmt_session_id2ctx(
+                               fe_msg->session_req->session_id);
+                       mgmt_fe_send_session_reply(adapter, session, false,
+                                                      true);
+                       mgmt_fe_cleanup_session(&session);
+               }
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+               session = mgmt_session_id2ctx(
+                               fe_msg->lockds_req->session_id);
+               MGMTD_FE_ADAPTER_DBG(
+                       "Got %sockDS Req Msg for DS:%d for session-id %llx from '%s'",
+                       fe_msg->lockds_req->lock ? "L" : "Unl",
+                       fe_msg->lockds_req->ds_id,
+                       (unsigned long long)fe_msg->lockds_req->session_id,
+                       adapter->name);
+               mgmt_fe_session_handle_lockds_req_msg(
+                       session, fe_msg->lockds_req);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+               session = mgmt_session_id2ctx(
+                               fe_msg->setcfg_req->session_id);
+               session->adapter->setcfg_stats.set_cfg_count++;
+               MGMTD_FE_ADAPTER_DBG(
+                       "Got Set Config Req Msg (%d Xpaths, Implicit:%c) on DS:%d for session-id %llu from '%s'",
+                       (int)fe_msg->setcfg_req->n_data,
+                       fe_msg->setcfg_req->implicit_commit ? 'T':'F',
+                       fe_msg->setcfg_req->ds_id,
+                       (unsigned long long)fe_msg->setcfg_req->session_id,
+                       adapter->name);
+
+               mgmt_fe_session_handle_setcfg_req_msg(
+                       session, fe_msg->setcfg_req);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+               session = mgmt_session_id2ctx(
+                               fe_msg->commcfg_req->session_id);
+               MGMTD_FE_ADAPTER_DBG(
+                       "Got Commit Config Req Msg for src-DS:%d dst-DS:%d (Abort:%c) on session-id %llu from '%s'",
+                       fe_msg->commcfg_req->src_ds_id,
+                       fe_msg->commcfg_req->dst_ds_id,
+                       fe_msg->commcfg_req->abort ? 'T':'F',
+                       (unsigned long long)fe_msg->commcfg_req->session_id,
+                       adapter->name);
+               mgmt_fe_session_handle_commit_config_req_msg(
+                       session, fe_msg->commcfg_req);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REQ:
+               session = mgmt_session_id2ctx(
+                               fe_msg->getcfg_req->session_id);
+               MGMTD_FE_ADAPTER_DBG(
+                       "Got Get-Config Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+                       fe_msg->getcfg_req->ds_id,
+                       (int)fe_msg->getcfg_req->n_data,
+                       (unsigned long long)fe_msg->getcfg_req->session_id,
+                       adapter->name);
+               mgmt_fe_session_handle_getcfg_req_msg(
+                       session, fe_msg->getcfg_req);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REQ:
+               session = mgmt_session_id2ctx(
+                               fe_msg->getdata_req->session_id);
+               MGMTD_FE_ADAPTER_DBG(
+                       "Got Get-Data Req Msg for DS:%d (xpaths: %d) on session-id %llu from '%s'",
+                       fe_msg->getdata_req->ds_id,
+                       (int)fe_msg->getdata_req->n_data,
+                       (unsigned long long)fe_msg->getdata_req->session_id,
+                       adapter->name);
+               mgmt_fe_session_handle_getdata_req_msg(
+                       session, fe_msg->getdata_req);
+               break;
+       case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+       case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+               /*
+                * TODO: Add handling code in future.
+                */
+               break;
+       /*
+        * NOTE: The following messages are only ever sent from MGMTD to
+        * frontend clients and hence need not be handled here.
+        */
+       case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+       case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+       case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+       case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+       case MGMTD__FE_MESSAGE__MESSAGE_GETCFG_REPLY:
+       case MGMTD__FE_MESSAGE__MESSAGE_GETDATA_REPLY:
+       case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+       default:
+               /*
+                * A 'default' case is added, contrary to the FRR
+                * coding guidelines, to avoid build failures on
+                * certain build systems (caused by the protobuf-c
+                * package).
+                */
+               break;
+       }
+
+       return 0;
+}
+
+static void mgmt_fe_adapter_process_msg(void *user_ctx, uint8_t *data,
+                                       size_t len)
+{
+       struct mgmt_fe_client_adapter *adapter = user_ctx;
+       Mgmtd__FeMessage *fe_msg;
+
+       fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+       if (!fe_msg) {
+               MGMTD_FE_ADAPTER_DBG(
+                       "Failed to decode %zu bytes for adapter: %s", len,
+                       adapter->name);
+               return;
+       }
+       MGMTD_FE_ADAPTER_DBG(
+               "Decoded %zu bytes of message: %u from adapter: %s", len,
+               fe_msg->message_case, adapter->name);
+       (void)mgmt_fe_adapter_handle_msg(adapter, fe_msg);
+       mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+}
+
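+/*
+ * Process buffered messages on this adapter's connection. Re-schedules
+ * itself if mgmt_msg_procbufs() reports that more buffers are pending.
+ */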
+static void mgmt_fe_adapter_proc_msgbufs(struct thread *thread)
+{
+       struct mgmt_fe_client_adapter *adapter = THREAD_ARG(thread);
+
+       if (mgmt_msg_procbufs(&adapter->mstate, mgmt_fe_adapter_process_msg,
+                             adapter, mgmt_debug_fe))
+               mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+}
+
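+/*
+ * Read incoming data on the adapter connection. Depending on the result
+ * this disconnects the adapter, schedules message processing, and/or
+ * re-arms the read event.
+ */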
+static void mgmt_fe_adapter_read(struct thread *thread)
+{
+       struct mgmt_fe_client_adapter *adapter = THREAD_ARG(thread);
+       enum mgmt_msg_rsched rv;
+
+       rv = mgmt_msg_read(&adapter->mstate, adapter->conn_fd, mgmt_debug_fe);
+       if (rv == MSR_DISCONNECT) {
+               mgmt_fe_adapter_disconnect(adapter);
+               return;
+       }
+       if (rv == MSR_SCHED_BOTH)
+               mgmt_fe_adapter_register_event(adapter, MGMTD_FE_PROC_MSG);
+       mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+}
+
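+/*
+ * Flush queued outgoing messages. Depending on the result this re-arms
+ * the write event, turns writes off temporarily, or disconnects the
+ * adapter.
+ */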
+static void mgmt_fe_adapter_write(struct thread *thread)
+{
+       struct mgmt_fe_client_adapter *adapter = THREAD_ARG(thread);
+       enum mgmt_msg_wsched rv;
+
+       rv = mgmt_msg_write(&adapter->mstate, adapter->conn_fd, mgmt_debug_fe);
+       if (rv == MSW_SCHED_STREAM)
+               mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_WRITE);
+       else if (rv == MSW_DISCONNECT)
+               mgmt_fe_adapter_disconnect(adapter);
+       else if (rv == MSW_SCHED_WRITES_OFF) {
+               mgmt_fe_adapter_writes_off(adapter);
+               mgmt_fe_adapter_register_event(adapter,
+                                              MGMTD_FE_CONN_WRITES_ON);
+       } else
+               assert(rv == MSW_SCHED_NONE);
+}
+
+static void mgmt_fe_adapter_resume_writes(struct thread *thread)
+{
+       struct mgmt_fe_client_adapter *adapter;
+
+       adapter = (struct mgmt_fe_client_adapter *)THREAD_ARG(thread);
+       assert(adapter && adapter->conn_fd != -1);
+
+       mgmt_fe_adapter_writes_on(adapter);
+}
+
+static void
+mgmt_fe_adapter_register_event(struct mgmt_fe_client_adapter *adapter,
+                                enum mgmt_fe_event event)
+{
+       struct timeval tv = {0};
+
+       switch (event) {
+       case MGMTD_FE_CONN_READ:
+               thread_add_read(mgmt_fe_adapter_tm, mgmt_fe_adapter_read,
+                               adapter, adapter->conn_fd, &adapter->conn_read_ev);
+               assert(adapter->conn_read_ev);
+               break;
+       case MGMTD_FE_CONN_WRITE:
+               thread_add_write(mgmt_fe_adapter_tm,
+                                mgmt_fe_adapter_write, adapter,
+                                adapter->conn_fd, &adapter->conn_write_ev);
+               assert(adapter->conn_write_ev);
+               break;
+       case MGMTD_FE_PROC_MSG:
+               tv.tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC;
+               thread_add_timer_tv(mgmt_fe_adapter_tm,
+                                   mgmt_fe_adapter_proc_msgbufs, adapter,
+                                   &tv, &adapter->proc_msg_ev);
+               assert(adapter->proc_msg_ev);
+               break;
+       case MGMTD_FE_CONN_WRITES_ON:
+               thread_add_timer_msec(mgmt_fe_adapter_tm,
+                                     mgmt_fe_adapter_resume_writes, adapter,
+                                     MGMTD_FE_MSG_WRITE_DELAY_MSEC,
+                                     &adapter->conn_writes_on);
+               assert(adapter->conn_writes_on);
+               break;
+       case MGMTD_FE_SERVER:
+               assert(!"mgmt_fe_adapter_register_event() called incorrectly");
+               break;
+       }
+}
+
+void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
+{
+       adapter->refcount++;
+}
+
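+/*
+ * Drop a reference on the adapter. When the last reference goes away the
+ * adapter is removed from the adapter list, its pending events are
+ * cancelled and its memory is freed.
+ */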
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
+{
+       assert(*adapter && (*adapter)->refcount);
+
+       (*adapter)->refcount--;
+       if (!(*adapter)->refcount) {
+               mgmt_fe_adapters_del(&mgmt_fe_adapters, *adapter);
+               THREAD_OFF((*adapter)->conn_read_ev);
+               THREAD_OFF((*adapter)->conn_write_ev);
+               THREAD_OFF((*adapter)->proc_msg_ev);
+               THREAD_OFF((*adapter)->conn_writes_on);
+               mgmt_msg_destroy(&(*adapter)->mstate);
+               XFREE(MTYPE_MGMTD_FE_ADPATER, *adapter);
+       }
+
+       *adapter = NULL;
+}
+
+int mgmt_fe_adapter_init(struct thread_master *tm, struct mgmt_master *mm)
+{
+       if (!mgmt_fe_adapter_tm) {
+               mgmt_fe_adapter_tm = tm;
+               mgmt_fe_adapter_mm = mm;
+               mgmt_fe_adapters_init(&mgmt_fe_adapters);
+
+               assert(!mgmt_fe_sessions);
+               mgmt_fe_sessions = hash_create(mgmt_fe_session_hash_key,
+                                              mgmt_fe_session_hash_cmp,
+                                              "MGMT Frontend Sessions");
+       }
+
+       return 0;
+}
+
+void mgmt_fe_adapter_destroy(void)
+{
+       mgmt_fe_cleanup_adapters();
+       mgmt_fe_session_hash_destroy();
+}
+
+struct mgmt_fe_client_adapter *
+mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
+{
+       struct mgmt_fe_client_adapter *adapter = NULL;
+
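+       /* Reuse an existing adapter for this fd if present, else create one. */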
+       adapter = mgmt_fe_find_adapter_by_fd(conn_fd);
+       if (!adapter) {
+               adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER,
+                               sizeof(struct mgmt_fe_client_adapter));
+               assert(adapter);
+
+               adapter->conn_fd = conn_fd;
+               memcpy(&adapter->conn_su, from, sizeof(adapter->conn_su));
+               snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+                        adapter->conn_fd);
+               mgmt_fe_sessions_init(&adapter->fe_sessions);
+
+               mgmt_msg_init(&adapter->mstate, MGMTD_FE_MAX_NUM_MSG_PROC,
+                             MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
+                             "FE-adapter");
+               mgmt_fe_adapter_lock(adapter);
+
+               mgmt_fe_adapter_register_event(adapter, MGMTD_FE_CONN_READ);
+               mgmt_fe_adapters_add_tail(&mgmt_fe_adapters, adapter);
+
+               adapter->setcfg_stats.min_tm = ULONG_MAX;
+               adapter->cmt_stats.min_tm = ULONG_MAX;
+               MGMTD_FE_ADAPTER_DBG("Added new MGMTD Frontend adapter '%s'",
+                                      adapter->name);
+       }
+
+       /* Make client socket non-blocking.  */
+       set_nonblocking(adapter->conn_fd);
+       setsockopt_so_sendbuf(adapter->conn_fd,
+                             MGMTD_SOCKET_FE_SEND_BUF_SIZE);
+       setsockopt_so_recvbuf(adapter->conn_fd,
+                             MGMTD_SOCKET_FE_RECV_BUF_SIZE);
+       return adapter;
+}
+
+struct mgmt_fe_client_adapter *mgmt_fe_get_adapter(const char *name)
+{
+       return mgmt_fe_find_adapter_by_name(name);
+}
+
+int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+                                  Mgmtd__DatastoreId ds_id, uint64_t req_id,
+                                  enum mgmt_result result,
+                                  const char *error_if_any,
+                                  bool implicit_commit)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_session_id2ctx(session_id);
+       if (!session || session->cfg_txn_id != txn_id) {
+               if (session)
+                       MGMTD_FE_ADAPTER_ERR(
+                               "Txn-id does not match: session txn-id is 0x%llx, current txn-id 0x%llx",
+                               (unsigned long long)session->cfg_txn_id,
+                               (unsigned long long)txn_id);
+               return -1;
+       }
+
+       return mgmt_fe_send_setcfg_reply(
+               session, ds_id, req_id, result == MGMTD_SUCCESS ? true : false,
+               error_if_any, implicit_commit);
+}
+
+int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
+                                     Mgmtd__DatastoreId src_ds_id,
+                                     Mgmtd__DatastoreId dst_ds_id,
+                                     uint64_t req_id, bool validate_only,
+                                     enum mgmt_result result,
+                                     const char *error_if_any)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_session_id2ctx(session_id);
+       if (!session || session->cfg_txn_id != txn_id)
+               return -1;
+
+       return mgmt_fe_send_commitcfg_reply(session, src_ds_id, dst_ds_id,
+                                               req_id, result, validate_only,
+                                               error_if_any);
+}
+
+int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
+                                  Mgmtd__DatastoreId ds_id, uint64_t req_id,
+                                  enum mgmt_result result,
+                                  Mgmtd__YangDataReply *data_resp,
+                                  const char *error_if_any)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_session_id2ctx(session_id);
+       if (!session || session->txn_id != txn_id)
+               return -1;
+
+       return mgmt_fe_send_getcfg_reply(session, ds_id, req_id,
+                                            result == MGMTD_SUCCESS, data_resp,
+                                            error_if_any);
+}
+
+int mgmt_fe_send_get_data_reply(uint64_t session_id, uint64_t txn_id,
+                                   Mgmtd__DatastoreId ds_id, uint64_t req_id,
+                                   enum mgmt_result result,
+                                   Mgmtd__YangDataReply *data_resp,
+                                   const char *error_if_any)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_session_id2ctx(session_id);
+       if (!session || session->txn_id != txn_id)
+               return -1;
+
+       return mgmt_fe_send_getdata_reply(session, ds_id, req_id,
+                                             result == MGMTD_SUCCESS,
+                                             data_resp, error_if_any);
+}
+
+int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
+                                Mgmtd__YangData * data_resp[], int num_data)
+{
+       /* struct mgmt_fe_session_ctx *session; */
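+       /* TODO: Delivery of data notifications to frontend clients is not
+        * implemented yet; this is currently a no-op. */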
+
+       return 0;
+}
+
+struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_session_id2ctx(session_id);
+       if (!session || !session->adapter)
+               return NULL;
+
+       return &session->adapter->setcfg_stats;
+}
+
+struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id)
+{
+       struct mgmt_fe_session_ctx *session;
+
+       session = mgmt_session_id2ctx(session_id);
+       if (!session || !session->adapter)
+               return NULL;
+
+       return &session->adapter->cmt_stats;
+}
+
+static void
+mgmt_fe_adapter_cmt_stats_write(struct vty *vty,
+                                   struct mgmt_fe_client_adapter *adapter)
+{
+       char buf[100] = {0};
+
+       if (!mm->perf_stats_en)
+               return;
+
+       vty_out(vty, "    Num-Commits: \t\t\t%lu\n",
+               adapter->cmt_stats.commit_cnt);
+       if (adapter->cmt_stats.commit_cnt > 0) {
+               if (mm->perf_stats_en)
+                       vty_out(vty, "    Max-Commit-Duration: \t\t%lu uSecs\n",
+                               adapter->cmt_stats.max_tm);
+               vty_out(vty, "    Max-Commit-Batch-Size: \t\t%lu\n",
+                       adapter->cmt_stats.max_batch_cnt);
+               if (mm->perf_stats_en)
+                       vty_out(vty, "    Min-Commit-Duration: \t\t%lu uSecs\n",
+                               adapter->cmt_stats.min_tm);
+               vty_out(vty, "    Min-Commit-Batch-Size: \t\t%lu\n",
+                       adapter->cmt_stats.min_batch_cnt);
+               if (mm->perf_stats_en)
+                       vty_out(vty,
+                               "    Last-Commit-Duration: \t\t%lu uSecs\n",
+                               adapter->cmt_stats.last_exec_tm);
+               vty_out(vty, "    Last-Commit-Batch-Size: \t\t%lu\n",
+                       adapter->cmt_stats.last_batch_cnt);
+               vty_out(vty, "    Last-Commit-CfgData-Reqs: \t\t%lu\n",
+                       adapter->cmt_stats.last_num_cfgdata_reqs);
+               vty_out(vty, "    Last-Commit-CfgApply-Reqs: \t\t%lu\n",
+                       adapter->cmt_stats.last_num_apply_reqs);
+               if (mm->perf_stats_en) {
+                       vty_out(vty, "    Last-Commit-Details:\n");
+                       vty_out(vty, "      Commit Start: \t\t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.last_start, buf,
+                                       sizeof(buf)));
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+                       vty_out(vty, "        Config-Validate Start: \t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.validate_start, buf,
+                                       sizeof(buf)));
+#endif
+                       vty_out(vty, "        Prep-Config Start: \t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.prep_cfg_start, buf,
+                                       sizeof(buf)));
+                       vty_out(vty, "        Txn-Create Start: \t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.txn_create_start,
+                                       buf, sizeof(buf)));
+                       vty_out(vty,
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+                               "        Send-Config Start: \t\t%s\n",
+#else
+                               "        Send-Config-Validate Start: \t%s\n",
+#endif
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.send_cfg_start, buf,
+                                       sizeof(buf)));
+                       vty_out(vty, "        Apply-Config Start: \t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.apply_cfg_start, buf,
+                                       sizeof(buf)));
+                       vty_out(vty, "        Apply-Config End: \t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.apply_cfg_end, buf,
+                                       sizeof(buf)));
+                       vty_out(vty, "        Txn-Delete Start: \t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.txn_del_start, buf,
+                                       sizeof(buf)));
+                       vty_out(vty, "      Commit End: \t\t\t%s\n",
+                               mgmt_realtime_to_string(
+                                       &adapter->cmt_stats.last_end, buf,
+                                       sizeof(buf)));
+               }
+       }
+}
+
+static void
+mgmt_fe_adapter_setcfg_stats_write(struct vty *vty,
+                                      struct mgmt_fe_client_adapter *adapter)
+{
+       char buf[100] = {0};
+
+       if (!mm->perf_stats_en)
+               return;
+
+       vty_out(vty, "    Num-Set-Cfg: \t\t\t%lu\n",
+               adapter->setcfg_stats.set_cfg_count);
+       if (mm->perf_stats_en && adapter->setcfg_stats.set_cfg_count > 0) {
+               vty_out(vty, "    Max-Set-Cfg-Duration: \t\t%lu uSec\n",
+                       adapter->setcfg_stats.max_tm);
+               vty_out(vty, "    Min-Set-Cfg-Duration: \t\t%lu uSec\n",
+                       adapter->setcfg_stats.min_tm);
+               vty_out(vty, "    Avg-Set-Cfg-Duration: \t\t%lu uSec\n",
+                       adapter->setcfg_stats.avg_tm);
+               vty_out(vty, "    Last-Set-Cfg-Details:\n");
+               vty_out(vty, "      Set-Cfg Start: \t\t\t%s\n",
+                       mgmt_realtime_to_string(&adapter->setcfg_stats.last_start,
+                                               buf, sizeof(buf)));
+               vty_out(vty, "      Set-Cfg End: \t\t\t%s\n",
+                       mgmt_realtime_to_string(&adapter->setcfg_stats.last_end,
+                                               buf, sizeof(buf)));
+       }
+}
+
+void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
+{
+       struct mgmt_fe_client_adapter *adapter;
+       struct mgmt_fe_session_ctx *session;
+       Mgmtd__DatastoreId ds_id;
+       bool locked = false;
+
+       vty_out(vty, "MGMTD Frontend Adapters\n");
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               vty_out(vty, "  Client: \t\t\t\t%s\n", adapter->name);
+               vty_out(vty, "    Conn-FD: \t\t\t\t%d\n", adapter->conn_fd);
+               if (detail) {
+                       mgmt_fe_adapter_setcfg_stats_write(vty, adapter);
+                       mgmt_fe_adapter_cmt_stats_write(vty, adapter);
+               }
+               vty_out(vty, "    Sessions\n");
+               FOREACH_SESSION_IN_LIST (adapter, session) {
+                       vty_out(vty, "      Session: \t\t\t\t%p\n", session);
+                       vty_out(vty, "        Client-Id: \t\t\t%llu\n",
+                               (unsigned long long)session->client_id);
+                       vty_out(vty, "        Session-Id: \t\t\t%llx\n",
+                               (unsigned long long)session->session_id);
+                       vty_out(vty, "        DS-Locks:\n");
+                       FOREACH_MGMTD_DS_ID (ds_id) {
+                               if (session->ds_write_locked[ds_id]
+                                   || session->ds_read_locked[ds_id]) {
+                                       locked = true;
+                                       vty_out(vty,
+                                               "          %s\t\t\t%s, %s\n",
+                                               mgmt_ds_id2name(ds_id),
+                                               session->ds_write_locked[ds_id]
+                                                       ? "Write"
+                                                       : "Read",
+                                               session->ds_locked_implict[ds_id]
+                                                       ? "Implicit"
+                                                       : "Explicit");
+                               }
+                       }
+                       if (!locked)
+                               vty_out(vty, "          None\n");
+               }
+               vty_out(vty, "    Total-Sessions: \t\t\t%d\n",
+                       (int)mgmt_fe_sessions_count(&adapter->fe_sessions));
+               vty_out(vty, "    Msg-Recvd: \t\t\t\t%" PRIu64 "\n",
+                       adapter->mstate.nrxm);
+               vty_out(vty, "    Bytes-Recvd: \t\t\t%" PRIu64 "\n",
+                       adapter->mstate.nrxb);
+               vty_out(vty, "    Msg-Sent: \t\t\t\t%" PRIu64 "\n",
+                       adapter->mstate.ntxm);
+               vty_out(vty, "    Bytes-Sent: \t\t\t%" PRIu64 "\n",
+                       adapter->mstate.ntxb);
+       }
+       vty_out(vty, "  Total: %d\n",
+               (int)mgmt_fe_adapters_count(&mgmt_fe_adapters));
+}
+
+void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config)
+{
+       mm->perf_stats_en = config;
+}
+
+void mgmt_fe_adapter_reset_perf_stats(struct vty *vty)
+{
+       struct mgmt_fe_client_adapter *adapter;
+       struct mgmt_fe_session_ctx *session;
+
+       FOREACH_ADAPTER_IN_LIST (adapter) {
+               memset(&adapter->setcfg_stats, 0, sizeof(adapter->setcfg_stats));
+               FOREACH_SESSION_IN_LIST (adapter, session) {
+                       memset(&adapter->cmt_stats, 0, sizeof(adapter->cmt_stats));
+               }
+       }
+}
diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h
new file mode 100644 (file)
index 0000000..3389234
--- /dev/null
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_ADAPTER_H_
+#define _FRR_MGMTD_FE_ADAPTER_H_
+
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmtd/mgmt_defines.h"
+
+struct mgmt_fe_client_adapter;
+struct mgmt_master;
+
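+/* Per-adapter timing and counter statistics for commit operations. */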
+struct mgmt_commit_stats {
+       struct timeval last_start;
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+       struct timeval validate_start;
+#endif
+       struct timeval prep_cfg_start;
+       struct timeval txn_create_start;
+       struct timeval send_cfg_start;
+       struct timeval apply_cfg_start;
+       struct timeval apply_cfg_end;
+       struct timeval txn_del_start;
+       struct timeval last_end;
+       unsigned long last_exec_tm;
+       unsigned long max_tm;
+       unsigned long min_tm;
+       unsigned long last_batch_cnt;
+       unsigned long last_num_cfgdata_reqs;
+       unsigned long last_num_apply_reqs;
+       unsigned long max_batch_cnt;
+       unsigned long min_batch_cnt;
+       unsigned long commit_cnt;
+};
+
+struct mgmt_setcfg_stats {
+       struct timeval last_start;
+       struct timeval last_end;
+       unsigned long last_exec_tm;
+       unsigned long max_tm;
+       unsigned long min_tm;
+       unsigned long avg_tm;
+       unsigned long set_cfg_count;
+};
+
+PREDECL_LIST(mgmt_fe_sessions);
+
+PREDECL_LIST(mgmt_fe_adapters);
+
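+/* Context maintained for each frontend client connection. */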
+struct mgmt_fe_client_adapter {
+       int conn_fd;
+       union sockunion conn_su;
+       struct thread *conn_read_ev;
+       struct thread *conn_write_ev;
+       struct thread *conn_writes_on;
+       struct thread *proc_msg_ev;
+       uint32_t flags;
+
+       char name[MGMTD_CLIENT_NAME_MAX_LEN];
+
+       /* List of sessions created and being maintained for this client. */
+       struct mgmt_fe_sessions_head fe_sessions;
+
+       /* IO streams for read and write */
+       struct mgmt_msg_state mstate;
+
+       int refcount;
+       struct mgmt_commit_stats cmt_stats;
+       struct mgmt_setcfg_stats setcfg_stats;
+
+       struct mgmt_fe_adapters_item list_linkage;
+};
+
+#define MGMTD_FE_ADAPTER_FLAGS_WRITES_OFF (1U << 0)
+
+DECLARE_LIST(mgmt_fe_adapters, struct mgmt_fe_client_adapter, list_linkage);
+
+/* Initialise frontend adapter module */
+extern int mgmt_fe_adapter_init(struct thread_master *tm,
+                               struct mgmt_master *cm);
+
+/* Destroy frontend adapter module */
+extern void mgmt_fe_adapter_destroy(void);
+
+/* Acquire lock for frontend adapter */
+extern void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter);
+
+/* Remove lock from frontend adapter */
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
+
+/* Create frontend adapter */
+extern struct mgmt_fe_client_adapter *
+mgmt_fe_create_adapter(int conn_fd, union sockunion *su);
+
+/* Fetch frontend adapter given a name */
+extern struct mgmt_fe_client_adapter *
+mgmt_fe_get_adapter(const char *name);
+
+/*
+ * Send set-config reply to the frontend client.
+ *
+ * session_id
+ *    Unique session identifier.
+ *
+ * txn_id
+ *    Unique transaction identifier.
+ *
+ * ds_id
+ *    Datastore ID.
+ *
+ * req_id
+ *    Config request ID.
+ *
+ * result
+ *    Config request result (MGMT_*).
+ *
+ * error_if_any
+ *    Human-readable error message to be sent to the client in case of an error.
+ *
+ * implicit_commit
+ *    TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ *    0 on success, -1 on failures.
+ */
+extern int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+                                         Mgmtd__DatastoreId ds_id,
+                                         uint64_t req_id,
+                                         enum mgmt_result result,
+                                         const char *error_if_any,
+                                         bool implicit_commit);
+
+/*
+ * Send commit-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_commit_cfg_reply(
+       uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId src_ds_id,
+       Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, bool validate_only,
+       enum mgmt_result result, const char *error_if_any);
+
+/*
+ * Send get-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_cfg_reply(uint64_t session_id, uint64_t txn_id,
+                                         Mgmtd__DatastoreId ds_id,
+                                         uint64_t req_id,
+                                         enum mgmt_result result,
+                                         Mgmtd__YangDataReply *data_resp,
+                                         const char *error_if_any);
+
+/*
+ * Send get-data reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_data_reply(
+       uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId ds_id,
+       uint64_t req_id, enum mgmt_result result,
+       Mgmtd__YangDataReply *data_resp, const char *error_if_any);
+
+/*
+ * Send data notify to the frontend client.
+ */
+extern int mgmt_fe_send_data_notify(Mgmtd__DatastoreId ds_id,
+                                       Mgmtd__YangData * data_resp[],
+                                       int num_data);
+
+/* Fetch frontend client session set-config stats */
+extern struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id);
+
+/* Fetch frontend client session commit stats */
+extern struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id);
+
+extern void mgmt_fe_adapter_status_write(struct vty *vty, bool detail);
+extern void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config);
+extern void mgmt_fe_adapter_reset_perf_stats(struct vty *vty);
+#endif /* _FRR_MGMTD_FE_ADAPTER_H_ */
diff --git a/mgmtd/mgmt_fe_server.c b/mgmtd/mgmt_fe_server.c
new file mode 100644 (file)
index 0000000..0b0a56e
--- /dev/null
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Server
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "network.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_FE_SRVR_DBG(fmt, ...)                                        \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_FE_SRVR_ERR(fmt, ...)                                        \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_FE_SRVR_DBG(fmt, ...)                                            \
+       do {                                                                   \
+               if (mgmt_debug_fe)                                             \
+                       zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__);       \
+       } while (0)
+#define MGMTD_FE_SRVR_ERR(fmt, ...)                                        \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
+
+static int mgmt_fe_listen_fd = -1;
+static struct thread_master *mgmt_fe_listen_tm;
+static struct thread *mgmt_fe_listen_ev;
+static void mgmt_fe_server_register_event(enum mgmt_fe_event event);
+
+static void mgmt_fe_conn_accept(struct thread *thread)
+{
+       int client_conn_fd;
+       union sockunion su;
+
+       if (mgmt_fe_listen_fd < 0)
+               return;
+
+       /* We continue hearing server listen socket. */
+       mgmt_fe_server_register_event(MGMTD_FE_SERVER);
+
+       memset(&su, 0, sizeof(union sockunion));
+
+       /* We can handle IPv4 or IPv6 socket. */
+       client_conn_fd = sockunion_accept(mgmt_fe_listen_fd, &su);
+       if (client_conn_fd < 0) {
+               MGMTD_FE_SRVR_ERR(
+                       "Failed to accept MGMTD Frontend client connection: %s",
+                       safe_strerror(errno));
+               return;
+       }
+       set_nonblocking(client_conn_fd);
+       set_cloexec(client_conn_fd);
+
+       MGMTD_FE_SRVR_DBG("Got a new MGMTD Frontend connection");
+
+       mgmt_fe_create_adapter(client_conn_fd, &su);
+}
+
+static void mgmt_fe_server_register_event(enum mgmt_fe_event event)
+{
+       if (event == MGMTD_FE_SERVER) {
+               thread_add_read(mgmt_fe_listen_tm, mgmt_fe_conn_accept,
+                               NULL, mgmt_fe_listen_fd,
+                               &mgmt_fe_listen_ev);
+               assert(mgmt_fe_listen_ev);
+       } else {
+               assert(!"mgmt_fe_server_register_event() called incorrectly");
+       }
+}
+
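+/*
+ * Create the frontend server UNIX socket, bind it to MGMTD_FE_SERVER_PATH,
+ * start listening and register the accept event. Exits the process on
+ * failure.
+ */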
+static void mgmt_fe_server_start(const char *hostname)
+{
+       int ret;
+       int sock;
+       struct sockaddr_un addr;
+       mode_t old_mask;
+
+       /* Set umask */
+       old_mask = umask(0077);
+
+       sock = socket(AF_UNIX, SOCK_STREAM, PF_UNSPEC);
+       if (sock < 0) {
+               MGMTD_FE_SRVR_ERR("Failed to create server socket: %s",
+                                     safe_strerror(errno));
+               goto mgmt_fe_server_start_failed;
+       }
+
+       addr.sun_family = AF_UNIX;
+       strlcpy(addr.sun_path, MGMTD_FE_SERVER_PATH, sizeof(addr.sun_path));
+       unlink(addr.sun_path);
+       ret = bind(sock, (struct sockaddr *)&addr, sizeof(addr));
+       if (ret < 0) {
+               MGMTD_FE_SRVR_ERR(
+                       "Failed to bind server socket to '%s'. Err: %s",
+                       addr.sun_path, safe_strerror(errno));
+               goto mgmt_fe_server_start_failed;
+       }
+
+       ret = listen(sock, MGMTD_FE_MAX_CONN);
+       if (ret < 0) {
+               MGMTD_FE_SRVR_ERR("Failed to listen on server socket: %s",
+                                     safe_strerror(errno));
+               goto mgmt_fe_server_start_failed;
+       }
+
+       /* Restore umask */
+       umask(old_mask);
+
+       mgmt_fe_listen_fd = sock;
+       mgmt_fe_server_register_event(MGMTD_FE_SERVER);
+
+       MGMTD_FE_SRVR_DBG("Started MGMTD Frontend Server!");
+       return;
+
+mgmt_fe_server_start_failed:
+       if (sock >= 0)
+               close(sock);
+
+       mgmt_fe_listen_fd = -1;
+       exit(-1);
+}
+
+int mgmt_fe_server_init(struct thread_master *master)
+{
+       if (mgmt_fe_listen_tm) {
+               MGMTD_FE_SRVR_DBG("MGMTD Frontend Server already running!");
+               return 0;
+       }
+
+       mgmt_fe_listen_tm = master;
+
+       mgmt_fe_server_start("localhost");
+
+       return 0;
+}
+
+void mgmt_fe_server_destroy(void)
+{
+       if (mgmt_fe_listen_tm) {
+               MGMTD_FE_SRVR_DBG("Closing MGMTD Frontend Server!");
+
+               if (mgmt_fe_listen_ev) {
+                       THREAD_OFF(mgmt_fe_listen_ev);
+                       mgmt_fe_listen_ev = NULL;
+               }
+
+               if (mgmt_fe_listen_fd >= 0) {
+                       close(mgmt_fe_listen_fd);
+                       mgmt_fe_listen_fd = -1;
+               }
+
+               mgmt_fe_listen_tm = NULL;
+       }
+}
diff --git a/mgmtd/mgmt_fe_server.h b/mgmtd/mgmt_fe_server.h
new file mode 100644 (file)
index 0000000..672ab05
--- /dev/null
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Server
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_FE_SERVER_H_
+#define _FRR_MGMTD_FE_SERVER_H_
+
+#define MGMTD_FE_MAX_CONN 32
+
+/* Initialise frontend server */
+extern int mgmt_fe_server_init(struct thread_master *master);
+
+/* Destroy frontend server */
+extern void mgmt_fe_server_destroy(void);
+
+#endif /* _FRR_MGMTD_FE_SERVER_H_ */
diff --git a/mgmtd/mgmt_history.c b/mgmtd/mgmt_history.c
new file mode 100644 (file)
index 0000000..75def3a
--- /dev/null
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "thread.h"
+#include "xref.h"
+
+#include "mgmt_fe_client.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+struct mgmt_cmt_info_t {
+       struct mgmt_cmt_infos_item cmts;
+
+       char cmtid_str[MGMTD_MD5_HASH_STR_HEX_LEN];
+       char time_str[MGMTD_COMMIT_TIME_STR_LEN];
+       char cmt_json_file[PATH_MAX];
+};
+
+DECLARE_DLIST(mgmt_cmt_infos, struct mgmt_cmt_info_t, cmts);
+
+#define FOREACH_CMT_REC(mm, cmt_info)                                          \
+       frr_each_safe (mgmt_cmt_infos, &mm->cmts, cmt_info)
+
+/*
+ * The VTY session that triggered the currently ongoing config rollback
+ * operation. Only one rollback can be in progress at a time.
+ */
+static struct vty *rollback_vty = NULL;
+
+static bool mgmt_history_record_exists(char *file_path)
+{
+       return access(file_path, F_OK) == 0;
+}
+
+static void mgmt_history_remove_file(char *name)
+{
+       if (remove(name) == 0)
+               zlog_debug("Deleted old commit info file %s", name);
+       else
+               zlog_err("Failed to delete old commit info file %s", name);
+}
+
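+/*
+ * Compute the MD5 digest of 'input_str' and render it into 'hash' as a
+ * hex string (the caller provides a MGMTD_MD5_HASH_STR_HEX_LEN buffer).
+ */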
+static void mgmt_history_hash(const char *input_str, char *hash)
+{
+       int i;
+       unsigned char digest[MGMTD_MD5_HASH_LEN];
+       MD5_CTX ctx;
+
+       memset(&ctx, 0, sizeof(ctx));
+       MD5Init(&ctx);
+       MD5Update(&ctx, input_str, strlen(input_str));
+       MD5Final(digest, &ctx);
+
+       for (i = 0; i < MGMTD_MD5_HASH_LEN; i++)
+               snprintf(&hash[i * 2], MGMTD_MD5_HASH_STR_HEX_LEN, "%02x",
+                        (unsigned int)digest[i]);
+}
+
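+/*
+ * Allocate a new commit record keyed by the MD5 hash of its timestamp.
+ * If the history already holds MGMTD_MAX_COMMIT_LIST entries, the oldest
+ * record and its on-disk commit file are removed first.
+ */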
+static struct mgmt_cmt_info_t *mgmt_history_create_cmt_rec(void)
+{
+       struct mgmt_cmt_info_t *new;
+       struct mgmt_cmt_info_t *cmt_info;
+       struct mgmt_cmt_info_t *last_cmt_info = NULL;
+       struct timeval cmt_recd_tv;
+
+       new = XCALLOC(MTYPE_MGMTD_CMT_INFO, sizeof(struct mgmt_cmt_info_t));
+       gettimeofday(&cmt_recd_tv, NULL);
+       mgmt_realtime_to_string(&cmt_recd_tv, new->time_str,
+                               sizeof(new->time_str));
+       mgmt_history_hash(new->time_str, new->cmtid_str);
+       snprintf(new->cmt_json_file, sizeof(new->cmt_json_file),
+                MGMTD_COMMIT_FILE_PATH, new->cmtid_str);
+
+       if (mgmt_cmt_infos_count(&mm->cmts) == MGMTD_MAX_COMMIT_LIST) {
+               FOREACH_CMT_REC (mm, cmt_info)
+                       last_cmt_info = cmt_info;
+
+               if (last_cmt_info) {
+                       mgmt_history_remove_file(last_cmt_info->cmt_json_file);
+                       mgmt_cmt_infos_del(&mm->cmts, last_cmt_info);
+                       XFREE(MTYPE_MGMTD_CMT_INFO, last_cmt_info);
+               }
+       }
+
+       mgmt_cmt_infos_add_head(&mm->cmts, new);
+       return new;
+}
+
+static struct mgmt_cmt_info_t *mgmt_history_find_cmt_record(const char *cmtid_str)
+{
+       struct mgmt_cmt_info_t *cmt_info;
+
+       FOREACH_CMT_REC (mm, cmt_info) {
+               if (strncmp(cmt_info->cmtid_str, cmtid_str,
+                           MGMTD_MD5_HASH_STR_HEX_LEN) == 0)
+                       return cmt_info;
+       }
+
+       return NULL;
+}
+
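+/*
+ * Load commit records from the on-disk index file into the in-memory
+ * history list, skipping records whose commit files no longer exist.
+ */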
+static bool mgmt_history_read_cmt_record_index(void)
+{
+       FILE *fp;
+       struct mgmt_cmt_info_t cmt_info;
+       struct mgmt_cmt_info_t *new;
+       int cnt = 0;
+
+       fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "rb");
+       if (!fp) {
+               zlog_err("Failed to open file %s in read mode",
+                        MGMTD_COMMIT_INDEX_FILE_NAME);
+               return false;
+       }
+
+       while ((fread(&cmt_info, sizeof(cmt_info), 1, fp)) > 0) {
+               if (cnt < MGMTD_MAX_COMMIT_LIST) {
+                       if (!mgmt_history_record_exists(cmt_info.cmt_json_file)) {
+                               zlog_err(
+                                       "Commit record present in index_file, but commit file %s missing",
+                                       cmt_info.cmt_json_file);
+                               continue;
+                       }
+
+                       new = XCALLOC(MTYPE_MGMTD_CMT_INFO,
+                                     sizeof(struct mgmt_cmt_info_t));
+                       memcpy(new, &cmt_info, sizeof(struct mgmt_cmt_info_t));
+                       mgmt_cmt_infos_add_tail(&mm->cmts, new);
+               } else {
+                       zlog_err("Too many records found in index file %s",
+                                MGMTD_COMMIT_INDEX_FILE_NAME);
+                       return false;
+               }
+
+               cnt++;
+       }
+
+       fclose(fp);
+       return true;
+}
+
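+/*
+ * Rewrite the on-disk commit index file from the current in-memory
+ * history list.
+ */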
+static bool mgmt_history_dump_cmt_record_index(void)
+{
+       FILE *fp;
+       int ret = 0;
+       struct mgmt_cmt_info_t *cmt_info;
+       struct mgmt_cmt_info_t cmt_info_set[10];
+       int cnt = 0;
+
+       mgmt_history_remove_file((char *)MGMTD_COMMIT_INDEX_FILE_NAME);
+       fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "ab");
+       if (!fp) {
+               zlog_err("Failed to open file %s in append mode",
+                        MGMTD_COMMIT_INDEX_FILE_NAME);
+               return false;
+       }
+
+       FOREACH_CMT_REC (mm, cmt_info) {
+               memcpy(&cmt_info_set[cnt], cmt_info,
+                      sizeof(struct mgmt_cmt_info_t));
+               cnt++;
+       }
+
+       if (!cnt) {
+               fclose(fp);
+               return false;
+       }
+
+       ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp);
+       fclose(fp);
+       if (ret != cnt) {
+               zlog_err("Write record failed");
+               return false;
+       } else {
+               return true;
+       }
+}
+
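+/*
+ * Load the given commit file into the candidate datastore and trigger a
+ * commit-apply onto the running datastore. The calling VTY is blocked
+ * until mgmt_history_rollback_complete() resumes it.
+ */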
+static int mgmt_history_rollback_to_cmt(struct vty *vty,
+                                  struct mgmt_cmt_info_t *cmt_info,
+                                  bool skip_file_load)
+{
+       struct mgmt_ds_ctx *src_ds_ctx;
+       struct mgmt_ds_ctx *dst_ds_ctx;
+       int ret = 0;
+
+       if (rollback_vty) {
+               vty_out(vty, "ERROR: Rollback already in progress!\n");
+               return -1;
+       }
+
+       src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+       if (!src_ds_ctx) {
+               vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+               return -1;
+       }
+
+       /*
+        * Note: A write lock on src_ds is not required here. It is
+        * already taken when entering 'configure terminal' mode.
+        */
+       dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
+       if (!dst_ds_ctx) {
+               vty_out(vty, "ERROR: Could not access Running datastore!\n");
+               return -1;
+       }
+
+       ret = mgmt_ds_write_lock(dst_ds_ctx);
+       if (ret != 0) {
+               vty_out(vty,
+                       "Failed to lock the DS %u for rollback. Reason: %s!\n",
+                       MGMTD_DS_RUNNING, strerror(ret));
+               return -1;
+       }
+
+       if (!skip_file_load) {
+               ret = mgmt_ds_load_config_from_file(
+                       src_ds_ctx, cmt_info->cmt_json_file, false);
+               if (ret != 0) {
+                       mgmt_ds_unlock(dst_ds_ctx);
+                       vty_out(vty,
+                               "Error parsing the commit file, error code %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       /* Internally trigger a commit-request. */
+       ret = mgmt_txn_rollback_trigger_cfg_apply(src_ds_ctx, dst_ds_ctx);
+       if (ret != 0) {
+               mgmt_ds_unlock(dst_ds_ctx);
+               vty_out(vty,
+                       "Error creating the commit-apply transaction, error code %d\n",
+                       ret);
+               return ret;
+       }
+
+       mgmt_history_dump_cmt_record_index();
+
+       /*
+        * Block the rollback command from returning till the rollback
+        * is completed. On rollback completion mgmt_history_rollback_complete()
+        * shall be called to resume the rollback command return to VTYSH.
+        */
+       vty->mgmt_req_pending = true;
+       rollback_vty = vty;
+       return 0;
+}
+
+void mgmt_history_rollback_complete(bool success)
+{
+       vty_mgmt_resume_response(rollback_vty, success);
+       rollback_vty = NULL;
+}
+
+int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str)
+{
+       int ret = 0;
+       struct mgmt_cmt_info_t *cmt_info;
+
+       if (!mgmt_cmt_infos_count(&mm->cmts) ||
+           !mgmt_history_find_cmt_record(cmtid_str)) {
+               vty_out(vty, "Invalid commit Id\n");
+               return -1;
+       }
+
+       FOREACH_CMT_REC (mm, cmt_info) {
+               if (strncmp(cmt_info->cmtid_str, cmtid_str,
+                           MGMTD_MD5_HASH_STR_HEX_LEN) == 0) {
+                       ret = mgmt_history_rollback_to_cmt(vty, cmt_info, false);
+                       return ret;
+               }
+
+               mgmt_history_remove_file(cmt_info->cmt_json_file);
+               mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+               XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+       }
+
+       return 0;
+}
+
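+/*
+ * Roll back the last 'num_cmts' commits by restoring the commit that
+ * precedes them. The rolled-back records and their commit files are
+ * purged from the history.
+ */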
+int mgmt_history_rollback_n(struct vty *vty, int num_cmts)
+{
+       int ret = 0;
+       int cnt = 0;
+       struct mgmt_cmt_info_t *cmt_info;
+       size_t cmts;
+
+       if (!num_cmts)
+               num_cmts = 1;
+
+       cmts = mgmt_cmt_infos_count(&mm->cmts);
+       if ((int)cmts < num_cmts) {
+               vty_out(vty,
+                       "Number of commits found (%d) is less than required for the rollback\n",
+                       (int)cmts);
+               return -1;
+       }
+
+       if ((int)cmts == 1 || (int)cmts == num_cmts) {
+               vty_out(vty,
+                       "Number of commits found (%d); rollback of the last commit is not supported\n",
+                       (int)cmts);
+               return -1;
+       }
+
+       FOREACH_CMT_REC (mm, cmt_info) {
+               if (cnt == num_cmts) {
+                       ret = mgmt_history_rollback_to_cmt(vty, cmt_info, false);
+                       return ret;
+               }
+
+               cnt++;
+               mgmt_history_remove_file(cmt_info->cmt_json_file);
+               mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+               XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+       }
+
+       if (!mgmt_cmt_infos_count(&mm->cmts)) {
+               mgmt_ds_reset_candidate();
+               ret = mgmt_history_rollback_to_cmt(vty, cmt_info, true);
+       }
+
+       return ret;
+}
+
+void show_mgmt_cmt_history(struct vty *vty)
+{
+       struct mgmt_cmt_info_t *cmt_info;
+       int slno = 0;
+
+       vty_out(vty, "Last 10 commit history:\n");
+       vty_out(vty, "  Sl.No\tCommit-ID(HEX)\t\t\t  Commit-Record-Time\n");
+       FOREACH_CMT_REC (mm, cmt_info) {
+               vty_out(vty, "  %d\t%s  %s\n", slno, cmt_info->cmtid_str,
+                       cmt_info->time_str);
+               slno++;
+       }
+}
+
+void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx)
+{
+       struct mgmt_cmt_info_t *cmt_info = mgmt_history_create_cmt_rec();
+       mgmt_ds_dump_ds_to_file(cmt_info->cmt_json_file, ds_ctx);
+       mgmt_history_dump_cmt_record_index();
+}
+
+void mgmt_history_init(void)
+{
+       /* Create commit record for previously stored commit-apply */
+       mgmt_cmt_infos_init(&mm->cmts);
+       mgmt_history_read_cmt_record_index();
+}
+
+void mgmt_history_destroy(void)
+{
+       struct mgmt_cmt_info_t *cmt_info;
+
+       FOREACH_CMT_REC(mm, cmt_info) {
+               mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+               XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+       }
+
+       mgmt_cmt_infos_fini(&mm->cmts);
+}
diff --git a/mgmtd/mgmt_history.h b/mgmtd/mgmt_history.h
new file mode 100644 (file)
index 0000000..29a1d77
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021  Vmware, Inc.
+ *                     Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+#ifndef _FRR_MGMTD_HISTORY_H_
+#define _FRR_MGMTD_HISTORY_H_
+
+#include "vrf.h"
+
+PREDECL_DLIST(mgmt_cmt_infos);
+
+struct mgmt_ds_ctx;
+
+/*
+ * Rollback specific commit from commit history.
+ *
+ * vty
+ *    VTY context.
+ *
+ * cmtid_str
+ *    Specific commit id from commit history.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str);
+
+/*
+ * Rollback n commits from commit history.
+ *
+ * vty
+ *    VTY context.
+ *
+ * num_cmts
+ *    Number of commits to be rolled back.
+ *
+ * Returns:
+ *    0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_n(struct vty *vty, int num_cmts);
+
+extern void mgmt_history_rollback_complete(bool success);
+
+/*
+ * Show mgmt commit history.
+ */
+extern void show_mgmt_cmt_history(struct vty *vty);
+
+extern void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx);
+
+extern void mgmt_history_destroy(void);
+extern void mgmt_history_init(void);
+
+#endif /* _FRR_MGMTD_HISTORY_H_ */
diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c
new file mode 100644 (file)
index 0000000..7d17605
--- /dev/null
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Main routine of mgmt.
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "lib/version.h"
+#include "routemap.h"
+#include "filter.h"
+#include "libfrr.h"
+#include "frr_pthread.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "routing_nb.h"
+
+/* mgmt options, we use GNU getopt library. */
+static const struct option longopts[] = {
+       {"skip_runas", no_argument, NULL, 'S'},
+       {"no_zebra", no_argument, NULL, 'Z'},
+       {"socket_size", required_argument, NULL, 's'},
+       {0}
+};
+
+static void mgmt_exit(int);
+static void mgmt_vrf_terminate(void);
+
+/* privileges */
+static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_NET_RAW,
+                                        ZCAP_NET_ADMIN, ZCAP_SYS_ADMIN};
+
+struct zebra_privs_t mgmt_privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+       .user = FRR_USER,
+       .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+       .vty_group = VTY_GROUP,
+#endif
+       .caps_p = _caps_p,
+       .cap_num_p = array_size(_caps_p),
+       .cap_num_i = 0,
+};
+
+static struct frr_daemon_info mgmtd_di;
+char backup_config_file[256];
+
+/* SIGHUP handler. */
+static void sighup(void)
+{
+       zlog_info("SIGHUP received, ignoring");
+
+       return;
+
+       /*
+        * This is turned off for the moment.  There is all
+        * sorts of config turned off by mgmt_terminate
+        * that is not setup properly again in mgmt_reset.
+        * I see no easy way to do this nor do I see that
+        * this is a desirable way to reload config
+        * given the yang work.
+        */
+       /* Terminate all thread. */
+       mgmt_terminate();
+
+       /*
+        * mgmt_reset();
+        */
+       zlog_info("MGMTD restarting!");
+
+       /*
+        * Reload config file.
+        * vty_read_config(NULL, mgmtd_di.config_file, config_default);
+        */
+       /* Try to return to normal operation. */
+}
+
+/* SIGINT handler. */
+static __attribute__((__noreturn__)) void sigint(void)
+{
+       zlog_notice("Terminating on signal");
+       assert(mm->terminating == false);
+       mm->terminating = true; /* global flag that shutting down */
+
+       mgmt_terminate();
+
+       mgmt_exit(0);
+
+       exit(0);
+}
+
+/* SIGUSR1 handler. */
+static void sigusr1(void)
+{
+       zlog_rotate();
+}
+
+/*
+ * Try to free up allocations we know about so that diagnostic tools such as
+ * valgrind are able to better illuminate leaks.
+ *
+ * Zebra route removal and protocol teardown are not meant to be done here.
+ * For example, "retain_mode" may be set.
+ */
+static __attribute__((__noreturn__)) void mgmt_exit(int status)
+{
+       /* it only makes sense for this to be called on a clean exit */
+       assert(status == 0);
+
+       frr_early_fini();
+
+       /* stop pthreads (if any) */
+       frr_pthread_stop_all();
+
+       mgmt_vrf_terminate();
+
+       frr_fini();
+       exit(status);
+}
+
+static struct frr_signal_t mgmt_signals[] = {
+       {
+               .signal = SIGHUP,
+               .handler = &sighup,
+       },
+       {
+               .signal = SIGUSR1,
+               .handler = &sigusr1,
+       },
+       {
+               .signal = SIGINT,
+               .handler = &sigint,
+       },
+       {
+               .signal = SIGTERM,
+               .handler = &sigint,
+       },
+};
+
+static int mgmt_vrf_new(struct vrf *vrf)
+{
+       zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
+
+       return 0;
+}
+
+static int mgmt_vrf_delete(struct vrf *vrf)
+{
+       zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);
+
+       return 0;
+}
+
+static int mgmt_vrf_enable(struct vrf *vrf)
+{
+       zlog_debug("VRF Enable: %s(%u)", vrf->name, vrf->vrf_id);
+
+       return 0;
+}
+
+static int mgmt_vrf_disable(struct vrf *vrf)
+{
+       zlog_debug("VRF Disable: %s(%u)", vrf->name, vrf->vrf_id);
+
+       /* Note: This is a callback, the VRF will be deleted by the caller. */
+       return 0;
+}
+
+static int mgmt_vrf_config_write(struct vty *vty)
+{
+       return 0;
+}
+
+static void mgmt_vrf_init(void)
+{
+       vrf_init(mgmt_vrf_new, mgmt_vrf_enable, mgmt_vrf_disable,
+                mgmt_vrf_delete);
+       vrf_cmd_init(mgmt_vrf_config_write);
+}
+
+static void mgmt_vrf_terminate(void)
+{
+       vrf_terminate();
+}
+
+/*
+ * List of YANG modules to be loaded in the process context of
+ * MGMTD.
+ *
+ * NOTE: In the future this will also include the YANG modules of
+ * all individual Backend clients.
+ */
+static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
+       &frr_filter_info,  &frr_interface_info, &frr_route_map_info,
+       &frr_routing_info, &frr_vrf_info,
+/*
+ * YANG module info supported by backend clients gets added here.
+ * NOTE: Always set .ignore_cbs to true to avoid validating
+ * backend northbound callbacks during loading.
+ */
+#ifdef HAVE_STATICD
+       &(struct frr_yang_module_info){.name = "frr-staticd",
+                                      .ignore_cbs = true},
+#endif
+};
+
+FRR_DAEMON_INFO(mgmtd, MGMTD, .vty_port = MGMTD_VTY_PORT,
+
+               .proghelp = "FRR Management Daemon.",
+
+               .signals = mgmt_signals, .n_signals = array_size(mgmt_signals),
+
+               .privs = &mgmt_privs, .yang_modules = mgmt_yang_modules,
+               .n_yang_modules = array_size(mgmt_yang_modules),
+);
+
+#define DEPRECATED_OPTIONS ""
+
+/* Main routine of mgmt. Argument handling and starting the mgmt finite
+ * state machine are handled here.
+ */
+int main(int argc, char **argv)
+{
+       int opt;
+       int buffer_size = MGMTD_SOCKET_BUF_SIZE;
+
+       frr_preinit(&mgmtd_di, argc, argv);
+       frr_opt_add(
+               "s:" DEPRECATED_OPTIONS, longopts,
+               "  -s, --socket_size  Set MGMTD peer socket send buffer size\n");
+
+       /* Command line argument treatment. */
+       while (1) {
+               opt = frr_getopt(argc, argv, 0);
+
+               if (opt && opt < 128 && strchr(DEPRECATED_OPTIONS, opt)) {
+                       fprintf(stderr,
+                               "The -%c option no longer exists.\nPlease refer to the manual.\n",
+                               opt);
+                       continue;
+               }
+
+               if (opt == EOF)
+                       break;
+
+               switch (opt) {
+               case 0:
+                       break;
+               case 's':
+                       buffer_size = atoi(optarg);
+                       break;
+               default:
+                       frr_help_exit(1);
+                       break;
+               }
+       }
+
+       /* MGMTD master init. */
+       mgmt_master_init(frr_init(), buffer_size);
+
+       /* VRF Initializations. */
+       mgmt_vrf_init();
+
+       /* MGMTD related initialization.  */
+       mgmt_init();
+
+       snprintf(backup_config_file, sizeof(backup_config_file),
+                "%s/zebra.conf", frr_sysconfdir);
+       mgmtd_di.backup_config_file = backup_config_file;
+
+       frr_config_fork();
+
+       frr_run(mm->master);
+
+       /* Not reached. */
+       return 0;
+}
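+
+/*
+ * Illustrative invocation (values are hypothetical; -d comes from the
+ * common libfrr options): start detached with a larger peer socket send
+ * buffer:
+ *
+ *   mgmtd -d --socket_size 1048576
+ *
+ * Without -s/--socket_size the buffer defaults to MGMTD_SOCKET_BUF_SIZE.
+ */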
diff --git a/mgmtd/mgmt_memory.c b/mgmtd/mgmt_memory.c
new file mode 100644 (file)
index 0000000..2858bc7
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type definitions
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "mgmt_memory.h"
+
+/* this file is temporary in nature; definitions should be moved to the
+ * files they're used in.
+ */
+
+DEFINE_MGROUP(MGMTD, "mgmt");
+DEFINE_MTYPE(MGMTD, MGMTD, "MGMTD instance");
+DEFINE_MTYPE(MGMTD, MGMTD_BE_ADPATER, "MGMTD backend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "MGMTD Frontend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "MGMTD Frontend Client Session");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN, "MGMTD Transaction");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_REQ, "MGMTD Transaction Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ,
+            "MGMTD Transaction Set-Config Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ,
+            "MGMTD Transaction Commit-Config Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ,
+            "MGMTD Transaction Get-Data Requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY,
+            "MGMTD Transaction Get-Data Replies");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "MGMTD Transaction Config Batches");
+DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "MGMTD commit info for tracking commits");
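+
+/*
+ * Minimal usage sketch (mirroring how mgmt_txn.c uses these types):
+ *
+ *   struct mgmt_txn_req *req;
+ *
+ *   req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(*req));
+ *   ...
+ *   XFREE(MTYPE_MGMTD_TXN_REQ, req);
+ *
+ * so that the allocation is accounted against the "MGMTD Transaction
+ * Requests" counter reported by 'show memory'.
+ */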
diff --git a/mgmtd/mgmt_memory.h b/mgmtd/mgmt_memory.h
new file mode 100644 (file)
index 0000000..5cfcafc
--- /dev/null
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type declarations
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_MEMORY_H
+#define _FRR_MGMTD_MEMORY_H
+
+#include "memory.h"
+
+DECLARE_MGROUP(MGMTD);
+DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_BE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_SESSION);
+DECLARE_MTYPE(MGMTD_TXN);
+DECLARE_MTYPE(MGMTD_TXN_REQ);
+DECLARE_MTYPE(MGMTD_TXN_SETCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_COMMCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REPLY);
+DECLARE_MTYPE(MGMTD_TXN_CFG_BATCH);
+DECLARE_MTYPE(MGMTD_BE_ADAPTER_MSG_BUF);
+DECLARE_MTYPE(MGMTD_CMT_INFO);
+#endif /* _FRR_MGMTD_MEMORY_H */
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
new file mode 100644 (file)
index 0000000..7ae720b
--- /dev/null
@@ -0,0 +1,2841 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "hash.h"
+#include "jhash.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_txn.h"
+
+#ifdef REDIRECT_DEBUG_TO_STDERR
+#define MGMTD_TXN_DBG(fmt, ...)                                               \
+       fprintf(stderr, "%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define MGMTD_TXN_ERR(fmt, ...)                                               \
+       fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
+#else /* REDIRECT_DEBUG_TO_STDERR */
+#define MGMTD_TXN_DBG(fmt, ...)                                               \
+       do {                                                                   \
+               if (mgmt_debug_txn)                                           \
+                       zlog_err("%s: " fmt, __func__, ##__VA_ARGS__);         \
+       } while (0)
+#define MGMTD_TXN_ERR(fmt, ...)                                               \
+       zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+#endif /* REDIRECT_DEBUG_TO_STDERR */
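+
+/*
+ * Both variants prefix the message with the calling function's name; the
+ * non-stderr variant is additionally gated on the mgmt_debug_txn flag,
+ * e.g.:
+ *
+ *   MGMTD_TXN_DBG("Batch: %p, Txn: %p", batch, txn);
+ */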
+
+#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
+#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
+
+enum mgmt_txn_event {
+       MGMTD_TXN_PROC_SETCFG = 1,
+       MGMTD_TXN_PROC_COMMITCFG,
+       MGMTD_TXN_PROC_GETCFG,
+       MGMTD_TXN_PROC_GETDATA,
+       MGMTD_TXN_COMMITCFG_TIMEOUT,
+       MGMTD_TXN_CLEANUP
+};
+
+PREDECL_LIST(mgmt_txn_reqs);
+
+struct mgmt_set_cfg_req {
+       Mgmtd__DatastoreId ds_id;
+       struct mgmt_ds_ctx *ds_ctx;
+       struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       uint16_t num_cfg_changes;
+       bool implicit_commit;
+       Mgmtd__DatastoreId dst_ds_id;
+       struct mgmt_ds_ctx *dst_ds_ctx;
+       struct mgmt_setcfg_stats *setcfg_stats;
+};
+
+enum mgmt_commit_phase {
+       MGMTD_COMMIT_PHASE_PREPARE_CFG = 0,
+       MGMTD_COMMIT_PHASE_TXN_CREATE,
+       MGMTD_COMMIT_PHASE_SEND_CFG,
+       MGMTD_COMMIT_PHASE_APPLY_CFG,
+       MGMTD_COMMIT_PHASE_TXN_DELETE,
+       MGMTD_COMMIT_PHASE_MAX
+};
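+
+/*
+ * Commit phases are traversed strictly in the order of the enum above;
+ * mgmt_try_move_commit_to_next_phase() advances by setting curr_phase to
+ * next_phase and then incrementing next_phase.
+ */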
+
+static inline const char *
+mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
+{
+       switch (cmt_phase) {
+       case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+               return "PREP-CFG";
+       case MGMTD_COMMIT_PHASE_TXN_CREATE:
+               return "CREATE-TXN";
+       case MGMTD_COMMIT_PHASE_SEND_CFG:
+               return "SEND-CFG";
+       case MGMTD_COMMIT_PHASE_APPLY_CFG:
+               return "APPLY-CFG";
+       case MGMTD_COMMIT_PHASE_TXN_DELETE:
+               return "DELETE-TXN";
+       case MGMTD_COMMIT_PHASE_MAX:
+               return "Invalid/Unknown";
+       }
+
+       return "Invalid/Unknown";
+}
+
+PREDECL_LIST(mgmt_txn_batches);
+
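+/*
+ * A single batch of config data destined for one backend client. A batch
+ * carries at most MGMTD_MAX_CFG_CHANGES_IN_BATCH (xpath, value) pairs and
+ * is additionally bounded by buf_space_left, which starts at
+ * MGMTD_BE_CFGDATA_MAX_MSG_LEN when the batch is allocated.
+ */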
+struct mgmt_txn_be_cfg_batch {
+       struct mgmt_txn_ctx *txn;
+       uint64_t batch_id;
+       enum mgmt_be_client_id be_id;
+       struct mgmt_be_client_adapter *be_adapter;
+       union mgmt_be_xpath_subscr_info
+               xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       Mgmtd__YangCfgDataReq * cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+       size_t num_cfg_data;
+       int buf_space_left;
+       enum mgmt_commit_phase comm_phase;
+       struct mgmt_txn_batches_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
+
+#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch)                             \
+       frr_each_safe (mgmt_txn_batches, list, batch)
+
+struct mgmt_commit_cfg_req {
+       Mgmtd__DatastoreId src_ds_id;
+       struct mgmt_ds_ctx *src_ds_ctx;
+       Mgmtd__DatastoreId dst_ds_id;
+       struct mgmt_ds_ctx *dst_ds_ctx;
+       uint32_t nb_txn_id;
+       uint8_t validate_only : 1;
+       uint8_t abort : 1;
+       uint8_t implicit : 1;
+       uint8_t rollback : 1;
+
+       /* Track commit phases */
+       enum mgmt_commit_phase curr_phase;
+       enum mgmt_commit_phase next_phase;
+
+       /*
+        * Set of config changes to commit. This is used only
+        * when changes are NOT to be determined by comparing
+        * candidate and running DSs. This is typically used
+        * for downloading all relevant configs for a new backend
+        * client that has recently come up and connected with
+        * MGMTD.
+        */
+       struct nb_config_cbs *cfg_chgs;
+
+       /*
+        * Details on all the Backend Clients associated with
+        * this commit.
+        */
+       struct mgmt_be_client_subscr_info subscr_info;
+
+       /*
+        * List of backend batches for this commit to be validated
+        * and applied at the backend.
+        *
+        * FIXME: Need to re-think this design for the case where the
+        * set of validators for a given YANG data item is different
+        * from the set of notifiers for the same. We may need to have
+        * separate lists of batches for VALIDATE and APPLY.
+        */
+       struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
+       struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
+       /*
+        * The last batch added for any backend client. This is always on
+        * 'curr_batches'
+        */
+       struct mgmt_txn_be_cfg_batch
+               *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
+       struct hash *batches;
+       uint64_t next_batch_id;
+
+       struct mgmt_commit_stats *cmt_stats;
+};
+
+struct mgmt_get_data_reply {
+       /* Buffer space for preparing data reply */
+       int num_reply;
+       int last_batch;
+       Mgmtd__YangDataReply data_reply;
+       Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+       Mgmtd__YangData * reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+       Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+       char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+};
+
+struct mgmt_get_data_req {
+       Mgmtd__DatastoreId ds_id;
+       struct mgmt_ds_ctx *ds_ctx;
+       char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+       int num_xpaths;
+
+       /*
+        * Buffer space for preparing reply.
+        * NOTE: Should only be malloc-ed on demand to reduce
+        * memory footprint. Freed up via mgmt_txn_req_free().
+        */
+       struct mgmt_get_data_reply *reply;
+
+       int total_reply;
+};
+
+struct mgmt_txn_req {
+       struct mgmt_txn_ctx *txn;
+       enum mgmt_txn_event req_event;
+       uint64_t req_id;
+       union {
+               struct mgmt_set_cfg_req *set_cfg;
+               struct mgmt_get_data_req *get_data;
+               struct mgmt_commit_cfg_req commit_cfg;
+       } req;
+
+       bool pending_be_proc;
+       struct mgmt_txn_reqs_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
+
+#define FOREACH_TXN_REQ_IN_LIST(list, req)                                     \
+       frr_each_safe (mgmt_txn_reqs, list, req)
+
+struct mgmt_txn_ctx {
+       uint64_t session_id; /* One transaction per client session */
+       uint64_t txn_id;
+       enum mgmt_txn_type type;
+
+       /* struct mgmt_master *mm; */
+
+       struct thread *proc_set_cfg;
+       struct thread *proc_comm_cfg;
+       struct thread *proc_get_cfg;
+       struct thread *proc_get_data;
+       struct thread *comm_cfg_timeout;
+       struct thread *clnup;
+
+       /* List of backend adapters involved in this transaction */
+       struct mgmt_txn_badapters_head be_adapters;
+
+       int refcount;
+
+       struct mgmt_txns_item list_linkage;
+
+       /*
+        * List of pending set-config requests for a given
+        * transaction/session. Just one list for requests
+        * not processed at all. There's no backend interaction
+        * involved.
+        */
+       struct mgmt_txn_reqs_head set_cfg_reqs;
+       /*
+        * List of pending get-config requests for a given
+        * transaction/session. Just one list for requests
+        * not processed at all. There's no backend interaction
+        * involved.
+        */
+       struct mgmt_txn_reqs_head get_cfg_reqs;
+       /*
+        * List of pending get-data requests for a given
+        * transaction/session. Two lists, one for requests
+        * not processed at all, and one for requests that
+        * have been sent to the backend for processing.
+        */
+       struct mgmt_txn_reqs_head get_data_reqs;
+       struct mgmt_txn_reqs_head pending_get_datas;
+       /*
+        * There will always be one commit-config allowed for a given
+        * transaction/session. No need to maintain lists for it.
+        */
+       struct mgmt_txn_req *commit_cfg_req;
+};
+
+DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
+
+#define FOREACH_TXN_IN_LIST(mm, txn)                                           \
+       frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
+
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+                                         enum mgmt_result result,
+                                         const char *error_if_any);
+
+static inline const char *
+mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn, bool curr)
+{
+       if (!txn->commit_cfg_req)
+               return "None";
+
+       return (mgmt_commit_phase2str(
+               curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
+                    : txn->commit_cfg_req->req.commit_cfg.next_phase));
+}
+
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
+                          int line);
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+                            int line);
+static int
+mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+                                struct mgmt_be_client_adapter *adapter);
+
+static struct thread_master *mgmt_txn_tm;
+static struct mgmt_master *mgmt_txn_mm;
+
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+                                    enum mgmt_txn_event event);
+
+static int
+mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+                                    struct mgmt_be_client_adapter *adapter);
+
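+/*
+ * Allocate a new config batch for the given backend client, append it to
+ * the commit's curr_batches list for that client, record it as the
+ * client's last_be_cfg_batch, assign it a non-zero batch-id and add it to
+ * the commit's batches hash. Takes references on the transaction and, if
+ * present, on the backend adapter.
+ */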
+static struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn,
+                         enum mgmt_be_client_id id,
+                         struct mgmt_be_client_adapter *be_adapter)
+{
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+       cfg_btch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
+                          sizeof(struct mgmt_txn_be_cfg_batch));
+       assert(cfg_btch);
+       cfg_btch->be_id = id;
+
+       cfg_btch->txn = txn;
+       MGMTD_TXN_LOCK(txn);
+       assert(txn->commit_cfg_req);
+       mgmt_txn_batches_add_tail(
+               &txn->commit_cfg_req->req.commit_cfg.curr_batches[id],
+               cfg_btch);
+       cfg_btch->be_adapter = be_adapter;
+       cfg_btch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
+       if (be_adapter)
+               mgmt_be_adapter_lock(be_adapter);
+
+       txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] =
+               cfg_btch;
+       if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
+               txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+       cfg_btch->batch_id =
+               txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+       hash_get(txn->commit_cfg_req->req.commit_cfg.batches, cfg_btch,
+                hash_alloc_intern);
+
+       return cfg_btch;
+}
+
+static void
+mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **cfg_btch)
+{
+       size_t indx;
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+
+       MGMTD_TXN_DBG(" Batch: %p, Txn: %p", *cfg_btch, (*cfg_btch)->txn);
+
+       assert((*cfg_btch)->txn
+              && (*cfg_btch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+       cmtcfg_req = &(*cfg_btch)->txn->commit_cfg_req->req.commit_cfg;
+       hash_release(cmtcfg_req->batches, *cfg_btch);
+       mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*cfg_btch)->be_id],
+                            *cfg_btch);
+       mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*cfg_btch)->be_id],
+                            *cfg_btch);
+
+       if ((*cfg_btch)->be_adapter)
+               mgmt_be_adapter_unlock(&(*cfg_btch)->be_adapter);
+
+       for (indx = 0; indx < (*cfg_btch)->num_cfg_data; indx++) {
+               if ((*cfg_btch)->data[indx].xpath) {
+                       free((*cfg_btch)->data[indx].xpath);
+                       (*cfg_btch)->data[indx].xpath = NULL;
+               }
+       }
+
+       MGMTD_TXN_UNLOCK(&(*cfg_btch)->txn);
+
+       XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *cfg_btch);
+       *cfg_btch = NULL;
+}
+
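+/*
+ * The commit's 'batches' hash is keyed purely on batch_id; the free
+ * callback below is what releases each batch when the hash is cleaned up
+ * from mgmt_txn_req_free().
+ */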
+static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
+{
+       const struct mgmt_txn_be_cfg_batch *batch = data;
+
+       return jhash2((uint32_t *) &batch->batch_id,
+                     sizeof(batch->batch_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
+{
+       const struct mgmt_txn_be_cfg_batch *batch1 = d1;
+       const struct mgmt_txn_be_cfg_batch *batch2 = d2;
+
+       return (batch1->batch_id == batch2->batch_id);
+}
+
+static void mgmt_txn_cfgbatch_hash_free(void *data)
+{
+       struct mgmt_txn_be_cfg_batch *batch = data;
+
+       mgmt_txn_cfg_batch_free(&batch);
+}
+
+static inline struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
+{
+       struct mgmt_txn_be_cfg_batch key = {0};
+       struct mgmt_txn_be_cfg_batch *batch;
+
+       if (!txn->commit_cfg_req)
+               return NULL;
+
+       key.batch_id = batch_id;
+       batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches,
+                           &key);
+
+       return batch;
+}
+
+static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
+                                           enum mgmt_be_client_id id)
+{
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+       struct mgmt_txn_batches_head *list;
+
+       list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
+       FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
+               mgmt_txn_cfg_batch_free(&cfg_btch);
+
+       mgmt_txn_batches_fini(list);
+
+       list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
+       FOREACH_TXN_CFG_BATCH_IN_LIST (list, cfg_btch)
+               mgmt_txn_cfg_batch_free(&cfg_btch);
+
+       mgmt_txn_batches_fini(list);
+
+       txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
+}
+
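+/*
+ * Allocate a transaction request of the given type and enqueue it: SETCFG,
+ * GETCFG and GETDATA requests go on their respective per-transaction
+ * lists, while a COMMITCFG request becomes the transaction's single
+ * commit_cfg_req (with its per-client batch lists and batches hash
+ * initialized). Takes a reference on the transaction.
+ */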
+static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
+                                                uint64_t req_id,
+                                                enum mgmt_txn_event req_event)
+{
+       struct mgmt_txn_req *txn_req;
+       enum mgmt_be_client_id id;
+
+       txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
+       assert(txn_req);
+       txn_req->txn = txn;
+       txn_req->req_id = req_id;
+       txn_req->req_event = req_event;
+       txn_req->pending_be_proc = false;
+
+       switch (txn_req->req_event) {
+       case MGMTD_TXN_PROC_SETCFG:
+               txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
+                                              sizeof(struct mgmt_set_cfg_req));
+               assert(txn_req->req.set_cfg);
+               mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
+               MGMTD_TXN_DBG(
+                       "Added a new SETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
+                       txn_req, txn, (unsigned long long)txn->session_id);
+               break;
+       case MGMTD_TXN_PROC_COMMITCFG:
+               txn->commit_cfg_req = txn_req;
+               MGMTD_TXN_DBG(
+                       "Added a new COMMITCFG Req: %p for Txn: %p, Sessn: 0x%llx",
+                       txn_req, txn, (unsigned long long)txn->session_id);
+
+               FOREACH_MGMTD_BE_CLIENT_ID (id) {
+                       mgmt_txn_batches_init(
+                               &txn_req->req.commit_cfg.curr_batches[id]);
+                       mgmt_txn_batches_init(
+                               &txn_req->req.commit_cfg.next_batches[id]);
+               }
+
+               txn_req->req.commit_cfg.batches =
+                       hash_create(mgmt_txn_cfgbatch_hash_key,
+                                   mgmt_txn_cfgbatch_hash_cmp,
+                                   "MGMT Config Batches");
+               break;
+       case MGMTD_TXN_PROC_GETCFG:
+               txn_req->req.get_data =
+                       XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+                               sizeof(struct mgmt_get_data_req));
+               assert(txn_req->req.get_data);
+               mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
+               MGMTD_TXN_DBG(
+                       "Added a new GETCFG Req: %p for Txn: %p, Sessn: 0x%llx",
+                       txn_req, txn, (unsigned long long)txn->session_id);
+               break;
+       case MGMTD_TXN_PROC_GETDATA:
+               txn_req->req.get_data =
+                       XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+                               sizeof(struct mgmt_get_data_req));
+               assert(txn_req->req.get_data);
+               mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
+               MGMTD_TXN_DBG(
+                       "Added a new GETDATA Req: %p for Txn: %p, Sessn: 0x%llx",
+                       txn_req, txn, (unsigned long long)txn->session_id);
+               break;
+       case MGMTD_TXN_COMMITCFG_TIMEOUT:
+       case MGMTD_TXN_CLEANUP:
+               break;
+       }
+
+       MGMTD_TXN_LOCK(txn);
+
+       return txn_req;
+}
+
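+/*
+ * Free a transaction request: release its per-type payload (for COMMITCFG
+ * this also sends TXN_DELETE to backends still between the TXN_CREATE and
+ * TXN_DELETE phases and tears down the batch lists and hash), remove the
+ * request from its pending/request list and drop the transaction
+ * reference.
+ */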
+static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
+{
+       int indx;
+       struct mgmt_txn_reqs_head *req_list = NULL;
+       struct mgmt_txn_reqs_head *pending_list = NULL;
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_adapter *adapter;
+
+       switch ((*txn_req)->req_event) {
+       case MGMTD_TXN_PROC_SETCFG:
+               for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
+                    indx++) {
+                       if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
+                               MGMTD_TXN_DBG(
+                                       "Freeing value for %s at %p ==> '%s'",
+                                       (*txn_req)
+                                               ->req.set_cfg->cfg_changes[indx]
+                                               .xpath,
+                                       (*txn_req)
+                                               ->req.set_cfg->cfg_changes[indx]
+                                               .value,
+                                       (*txn_req)
+                                               ->req.set_cfg->cfg_changes[indx]
+                                               .value);
+                               free((void *)(*txn_req)
+                                            ->req.set_cfg->cfg_changes[indx]
+                                            .value);
+                       }
+               }
+               req_list = &(*txn_req)->txn->set_cfg_reqs;
+               MGMTD_TXN_DBG("Deleting SETCFG Req: %p for Txn: %p",
+                              *txn_req, (*txn_req)->txn);
+               XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
+               break;
+       case MGMTD_TXN_PROC_COMMITCFG:
+               MGMTD_TXN_DBG("Deleting COMMITCFG Req: %p for Txn: %p",
+                              *txn_req, (*txn_req)->txn);
+               FOREACH_MGMTD_BE_CLIENT_ID (id) {
+                       /*
+                        * Send TXN_DELETE to cleanup state for this
+                        * transaction on backend
+                        */
+                       if ((*txn_req)->req.commit_cfg.curr_phase
+                                   >= MGMTD_COMMIT_PHASE_TXN_CREATE
+                           && (*txn_req)->req.commit_cfg.curr_phase
+                                      < MGMTD_COMMIT_PHASE_TXN_DELETE
+                           && (*txn_req)
+                                      ->req.commit_cfg.subscr_info
+                                      .xpath_subscr[id]
+                                      .subscribed) {
+                               adapter = mgmt_be_get_adapter_by_id(id);
+                               if (adapter)
+                                       mgmt_txn_send_be_txn_delete(
+                                               (*txn_req)->txn, adapter);
+                       }
+
+                       mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn,
+                                                           id);
+                       if ((*txn_req)->req.commit_cfg.batches) {
+                               hash_clean((*txn_req)->req.commit_cfg.batches,
+                                          mgmt_txn_cfgbatch_hash_free);
+                               hash_free((*txn_req)->req.commit_cfg.batches);
+                               (*txn_req)->req.commit_cfg.batches = NULL;
+                       }
+               }
+               break;
+       case MGMTD_TXN_PROC_GETCFG:
+               for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+                    indx++) {
+                       if ((*txn_req)->req.get_data->xpaths[indx])
+                               free((void *)(*txn_req)
+                                            ->req.get_data->xpaths[indx]);
+               }
+               req_list = &(*txn_req)->txn->get_cfg_reqs;
+               MGMTD_TXN_DBG("Deleting GETCFG Req: %p for Txn: %p",
+                              *txn_req, (*txn_req)->txn);
+               if ((*txn_req)->req.get_data->reply)
+                       XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+                             (*txn_req)->req.get_data->reply);
+               XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+               break;
+       case MGMTD_TXN_PROC_GETDATA:
+               for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+                    indx++) {
+                       if ((*txn_req)->req.get_data->xpaths[indx])
+                               free((void *)(*txn_req)
+                                            ->req.get_data->xpaths[indx]);
+               }
+               pending_list = &(*txn_req)->txn->pending_get_datas;
+               req_list = &(*txn_req)->txn->get_data_reqs;
+               MGMTD_TXN_DBG("Deleting GETDATA Req: %p for Txn: %p",
+                              *txn_req, (*txn_req)->txn);
+               if ((*txn_req)->req.get_data->reply)
+                       XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+                             (*txn_req)->req.get_data->reply);
+               XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+               break;
+       case MGMTD_TXN_COMMITCFG_TIMEOUT:
+       case MGMTD_TXN_CLEANUP:
+               break;
+       }
+
+       if ((*txn_req)->pending_be_proc && pending_list) {
+               mgmt_txn_reqs_del(pending_list, *txn_req);
+               MGMTD_TXN_DBG("Removed Req: %p from pending-list (left:%d)",
+                             *txn_req, (int)mgmt_txn_reqs_count(pending_list));
+       } else if (req_list) {
+               mgmt_txn_reqs_del(req_list, *txn_req);
+               MGMTD_TXN_DBG("Removed Req: %p from request-list (left:%d)",
+                             *txn_req, (int)mgmt_txn_reqs_count(req_list));
+       }
+
+       (*txn_req)->pending_be_proc = false;
+       MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
+       XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
+       *txn_req = NULL;
+}
+
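+/*
+ * Thread callback that drains up to MGMTD_TXN_MAX_NUM_SETCFG_PROC pending
+ * SET_CONFIG requests: each request edits the candidate config tree via
+ * nb_candidate_edit_config_changes() and then either triggers an implicit
+ * commit or sends a SET_CONFIG reply to the frontend. Reschedules itself
+ * if requests are still left on the list.
+ */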
+static void mgmt_txn_process_set_cfg(struct thread *thread)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       struct mgmt_ds_ctx *ds_ctx;
+       struct nb_config *nb_config;
+       char err_buf[1024];
+       bool error;
+       int num_processed = 0;
+       size_t left;
+       struct mgmt_commit_stats *cmt_stats;
+       int ret = 0;
+
+       txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
+       assert(txn);
+       cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);
+
+       MGMTD_TXN_DBG(
+               "Processing %d SET_CONFIG requests for Txn:%p Session:0x%llx",
+               (int)mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn,
+               (unsigned long long)txn->session_id);
+
+       FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
+               error = false;
+               assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
+               ds_ctx = txn_req->req.set_cfg->ds_ctx;
+               if (!ds_ctx) {
+                       mgmt_fe_send_set_cfg_reply(
+                               txn->session_id, txn->txn_id,
+                               txn_req->req.set_cfg->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, "No such datastore!",
+                               txn_req->req.set_cfg->implicit_commit);
+                       error = true;
+                       goto mgmt_txn_process_set_cfg_done;
+               }
+
+               nb_config = mgmt_ds_get_nb_config(ds_ctx);
+               if (!nb_config) {
+                       mgmt_fe_send_set_cfg_reply(
+                               txn->session_id, txn->txn_id,
+                               txn_req->req.set_cfg->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR,
+                               "Unable to retrieve DS Config Tree!",
+                               txn_req->req.set_cfg->implicit_commit);
+                       error = true;
+                       goto mgmt_txn_process_set_cfg_done;
+               }
+
+               error = false;
+               nb_candidate_edit_config_changes(
+                       nb_config, txn_req->req.set_cfg->cfg_changes,
+                       (size_t)txn_req->req.set_cfg->num_cfg_changes, NULL,
+                       NULL, 0, err_buf, sizeof(err_buf), &error);
+               if (error) {
+                       mgmt_fe_send_set_cfg_reply(
+                               txn->session_id, txn->txn_id,
+                               txn_req->req.set_cfg->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, err_buf,
+                               txn_req->req.set_cfg->implicit_commit);
+                       goto mgmt_txn_process_set_cfg_done;
+               }
+
+               if (txn_req->req.set_cfg->implicit_commit) {
+                       assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
+                       assert(txn_req->req.set_cfg->dst_ds_ctx);
+
+                       ret = mgmt_ds_write_lock(
+                               txn_req->req.set_cfg->dst_ds_ctx);
+                       if (ret != 0) {
+                               MGMTD_TXN_ERR(
+                                       "Failed to lock the DS %u for txn: %p session 0x%llx, errstr %s!",
+                                       txn_req->req.set_cfg->dst_ds_id, txn,
+                                       (unsigned long long)txn->session_id,
+                                       strerror(ret));
+                               mgmt_txn_send_commit_cfg_reply(
+                                       txn, MGMTD_DS_LOCK_FAILED,
+                                       "Lock running DS before implicit commit failed!");
+                               goto mgmt_txn_process_set_cfg_done;
+                       }
+
+                       mgmt_txn_send_commit_config_req(
+                               txn->txn_id, txn_req->req_id,
+                               txn_req->req.set_cfg->ds_id,
+                               txn_req->req.set_cfg->ds_ctx,
+                               txn_req->req.set_cfg->dst_ds_id,
+                               txn_req->req.set_cfg->dst_ds_ctx, false,
+                               false, true);
+
+                       if (mm->perf_stats_en)
+                               gettimeofday(&cmt_stats->last_start, NULL);
+                       cmt_stats->commit_cnt++;
+               } else if (mgmt_fe_send_set_cfg_reply(
+                                  txn->session_id, txn->txn_id,
+                                  txn_req->req.set_cfg->ds_id,
+                                  txn_req->req_id, MGMTD_SUCCESS, NULL, false)
+                          != 0) {
+                       MGMTD_TXN_ERR(
+                               "Failed to send SET_CONFIG_REPLY for txn %p session 0x%llx",
+                               txn, (unsigned long long)txn->session_id);
+                       error = true;
+               }
+
+       mgmt_txn_process_set_cfg_done:
+
+               /*
+                * Note: The following will remove it from the list as well.
+                */
+               mgmt_txn_req_free(&txn_req);
+
+               num_processed++;
+               if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
+                       break;
+       }
+
+       left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
+       if (left) {
+               MGMTD_TXN_DBG(
+                       "Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
+                       num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
+                       (int)left);
+               mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+       }
+}
+
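+/*
+ * Send the final COMMIT-CONFIG (or, for implicit commits, SET-CONFIG)
+ * reply for this transaction's commit request. On success the source DS
+ * is merged into the destination DS (or restored from it for an abort);
+ * on failure an implicit commit's candidate contents are restored. Also
+ * releases DS locks taken for rollback/implicit commits, frees the commit
+ * request and schedules transaction cleanup when no front-end session
+ * owns the transaction.
+ */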
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+                                          enum mgmt_result result,
+                                          const char *error_if_any)
+{
+       int ret = 0;
+       bool success, create_cmt_info_rec;
+
+       if (!txn->commit_cfg_req)
+               return -1;
+
+       success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
+
+       if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
+           && mgmt_fe_send_commit_cfg_reply(
+                      txn->session_id, txn->txn_id,
+                      txn->commit_cfg_req->req.commit_cfg.src_ds_id,
+                      txn->commit_cfg_req->req.commit_cfg.dst_ds_id,
+                      txn->commit_cfg_req->req_id,
+                      txn->commit_cfg_req->req.commit_cfg.validate_only,
+                      result, error_if_any)
+                      != 0) {
+               MGMTD_TXN_ERR(
+                       "Failed to send COMMIT-CONFIG-REPLY for Txn %p Sessn 0x%llx",
+                       txn, (unsigned long long)txn->session_id);
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id
+           && mgmt_fe_send_set_cfg_reply(
+                      txn->session_id, txn->txn_id,
+                      txn->commit_cfg_req->req.commit_cfg.src_ds_id,
+                      txn->commit_cfg_req->req_id,
+                      success ? MGMTD_SUCCESS : MGMTD_INTERNAL_ERROR,
+                      error_if_any, true)
+                      != 0) {
+               MGMTD_TXN_ERR(
+                       "Failed to send SET-CONFIG-REPLY for Txn %p Sessn 0x%llx",
+                       txn, (unsigned long long)txn->session_id);
+       }
+
+       if (success) {
+               /* Stop the commit-timeout timer */
+               THREAD_OFF(txn->comm_cfg_timeout);
+
+               create_cmt_info_rec =
+                       (result != MGMTD_NO_CFG_CHANGES &&
+                        !txn->commit_cfg_req->req.commit_cfg.rollback);
+
+               /*
+                * Successful commit: Merge Src DS into Dst DS if and only if
+                * this was not a validate-only or abort request.
+                */
+               if ((txn->session_id
+                    && !txn->commit_cfg_req->req.commit_cfg.validate_only
+                    && !txn->commit_cfg_req->req.commit_cfg.abort)
+                   || txn->commit_cfg_req->req.commit_cfg.rollback) {
+                       mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+                                                .src_ds_ctx,
+                                        txn->commit_cfg_req->req.commit_cfg
+                                                .dst_ds_ctx,
+                                        create_cmt_info_rec);
+               }
+
+               /*
+                * Restore Src DS back to Dest DS only through a commit abort
+                * request.
+                */
+               if (txn->session_id
+                   && txn->commit_cfg_req->req.commit_cfg.abort)
+                       mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+                                                .dst_ds_ctx,
+                                        txn->commit_cfg_req->req.commit_cfg
+                                                .src_ds_ctx,
+                                        false);
+       } else {
+               /*
+                * The commit has failed. For implicit commit requests, restore
+                * the contents of the candidate DS.
+                */
+               if (txn->commit_cfg_req->req.commit_cfg.implicit)
+                       mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+                                                .dst_ds_ctx,
+                                        txn->commit_cfg_req->req.commit_cfg
+                                                .src_ds_ctx,
+                                        false);
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.rollback) {
+               ret = mgmt_ds_unlock(
+                       txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
+               if (ret != 0)
+                       MGMTD_TXN_ERR(
+                               "Failed to unlock the dst DS during rollback : %s",
+                               strerror(ret));
+
+               /*
+                * Resume processing the rollback command.
+                */
+               mgmt_history_rollback_complete(success);
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.implicit)
+               if (mgmt_ds_unlock(
+                           txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx)
+                   != 0)
+                       MGMTD_TXN_ERR(
+                               "Failed to unlock the dst DS during implicit : %s",
+                               strerror(ret));
+
+       txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
+       mgmt_txn_req_free(&txn->commit_cfg_req);
+
+       /*
+        * The CONFIG transaction should be destroyed from the frontend adapter.
+        * But in case the transaction is not triggered from a front-end session
+        * we need to clean it up ourselves.
+        */
+       if (!txn->session_id)
+               mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
+
+       return 0;
+}
+
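+/*
+ * Batch/phase bookkeeping: as a backend client acknowledges a phase, its
+ * batches are moved from the commit's curr_batches list to next_batches.
+ * Once every subscribed client's curr_batches list is empty, the whole
+ * commit advances to the next phase and the batches are moved back to the
+ * current list (see mgmt_try_move_commit_to_next_phase()).
+ */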
+static void
+mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
+                               struct mgmt_txn_be_cfg_batch *cfg_btch,
+                               struct mgmt_txn_batches_head *src_list,
+                               struct mgmt_txn_batches_head *dst_list,
+                               bool update_commit_phase,
+                               enum mgmt_commit_phase to_phase)
+{
+       mgmt_txn_batches_del(src_list, cfg_btch);
+
+       if (update_commit_phase) {
+               MGMTD_TXN_DBG("Move Txn-Id %p Batch-Id %p from '%s' --> '%s'",
+                             cfg_btch->txn, cfg_btch,
+                             mgmt_commit_phase2str(cfg_btch->comm_phase),
+                             mgmt_txn_commit_phase_str(cfg_btch->txn, false));
+               cfg_btch->comm_phase = to_phase;
+       }
+
+       mgmt_txn_batches_add_tail(dst_list, cfg_btch);
+}
+
+static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
+                                     struct mgmt_commit_cfg_req *cmtcfg_req,
+                                     struct mgmt_txn_batches_head *src_list,
+                                     struct mgmt_txn_batches_head *dst_list,
+                                     bool update_commit_phase,
+                                     enum mgmt_commit_phase to_phase)
+{
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+       FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, cfg_btch) {
+               mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, cfg_btch, src_list,
+                                                dst_list, update_commit_phase,
+                                                to_phase);
+       }
+}
+
+static int
+mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+                                  struct mgmt_commit_cfg_req *cmtcfg_req)
+{
+       struct mgmt_txn_batches_head *curr_list, *next_list;
+       enum mgmt_be_client_id id;
+
+       MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
+                     mgmt_txn_commit_phase_str(txn, true),
+                     mgmt_txn_commit_phase_str(txn, false));
+
+       /*
+        * Check if all clients have moved to the next phase or not.
+        */
+       FOREACH_MGMTD_BE_CLIENT_ID (id) {
+               if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed &&
+                   mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
+                       /*
+                        * There's at least one client who hasn't moved to
+                        * the next phase.
+                        *
+                        * TODO: Need to re-think this design for the case
+                        * where the set of validators for a given YANG data
+                        * item is different from the set of notifiers for
+                        * the same.
+                        */
+                       return -1;
+               }
+       }
+
+       MGMTD_TXN_DBG("Move entire Txn-Id %p from '%s' to '%s'", txn,
+                      mgmt_txn_commit_phase_str(txn, true),
+                      mgmt_txn_commit_phase_str(txn, false));
+
+       /*
+        * If we are here, it means all the clients have moved to the next phase.
+        * So we can move the whole commit to next phase.
+        */
+       cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
+       cmtcfg_req->next_phase++;
+       MGMTD_TXN_DBG(
+               "Move back all config batches for Txn %p from next to current branch",
+               txn);
+       FOREACH_MGMTD_BE_CLIENT_ID (id) {
+               curr_list = &cmtcfg_req->curr_batches[id];
+               next_list = &cmtcfg_req->next_batches[id];
+               mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list,
+                                          curr_list, false, 0);
+       }
+
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+       return 0;
+}
+
+static int
+mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+                                 struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       struct mgmt_txn_batches_head *curr_list, *next_list;
+
+       if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
+               return -1;
+
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+       MGMTD_TXN_DBG(
+               "Move Txn-Id %p for '%s' Phase(current: '%s' next:'%s')", txn,
+               adapter->name, mgmt_txn_commit_phase_str(txn, true),
+               mgmt_txn_commit_phase_str(txn, false));
+
+       MGMTD_TXN_DBG(
+               "Move all config batches for '%s' from current to next list",
+               adapter->name);
+       curr_list = &cmtcfg_req->curr_batches[adapter->id];
+       next_list = &cmtcfg_req->next_batches[adapter->id];
+       mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
+                                  cmtcfg_req->next_phase);
+
+       MGMTD_TXN_DBG("Txn-Id %p, Phase(current:'%s' next:'%s')", txn,
+                      mgmt_txn_commit_phase_str(txn, true),
+                      mgmt_txn_commit_phase_str(txn, false));
+
+       /*
+        * Check if all clients have moved to the next phase or not.
+        */
+       mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+       return 0;
+}
+
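+/*
+ * Walk the computed set of config changes, resolve which backend clients
+ * are subscribed to each xpath via mgmt_be_get_subscr_info_for_xpath(),
+ * and pack the (xpath, value) pairs into per-client config batches,
+ * opening a new batch whenever the current one is full or out of buffer
+ * space. Returns -1 (after replying to the frontend) when no changes or
+ * no subscribed backend could be found.
+ */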
+static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
+                                          struct nb_config_cbs *changes)
+{
+       struct nb_config_cb *cb, *nxt;
+       struct nb_config_change *chg;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+       struct mgmt_be_client_subscr_info subscr_info;
+       char *xpath = NULL, *value = NULL;
+       char err_buf[1024];
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_adapter *adapter;
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       bool found_validator;
+       int num_chgs = 0;
+       int xpath_len, value_len;
+
+       cmtcfg_req = &txn_req->req.commit_cfg;
+
+       RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
+               chg = (struct nb_config_change *)cb;
+
+               /*
+                * Could have directly pointed to xpath in nb_node.
+                * But don't want to mess with it now.
+                * xpath = chg->cb.nb_node->xpath;
+                */
+               xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
+               if (!xpath) {
+                       (void)mgmt_txn_send_commit_cfg_reply(
+                               txn_req->txn, MGMTD_INTERNAL_ERROR,
+                               "Internal error! Could not get Xpath from Ds node!");
+                       goto mgmt_txn_create_config_batches_failed;
+               }
+
+               value = (char *)lyd_get_value(chg->cb.dnode);
+               if (!value)
+                       value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
+
+               MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
+                              value ? value : "NIL");
+
+               if (mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info)
+                   != 0) {
+                       snprintf(err_buf, sizeof(err_buf),
+                                "No backend module found for XPATH: '%s",
+                                xpath);
+                       (void)mgmt_txn_send_commit_cfg_reply(
+                               txn_req->txn, MGMTD_INTERNAL_ERROR, err_buf);
+                       goto mgmt_txn_create_config_batches_failed;
+               }
+
+               xpath_len = strlen(xpath) + 1;
+               value_len = strlen(value) + 1;
+               found_validator = false;
+               FOREACH_MGMTD_BE_CLIENT_ID (id) {
+                       if (!subscr_info.xpath_subscr[id].validate_config
+                           && !subscr_info.xpath_subscr[id].notify_config)
+                               continue;
+
+                       adapter = mgmt_be_get_adapter_by_id(id);
+                       if (!adapter)
+                               continue;
+
+                       cfg_btch = cmtcfg_req->last_be_cfg_batch[id];
+                       if (!cfg_btch
+                           || (cfg_btch->num_cfg_data
+                               == MGMTD_MAX_CFG_CHANGES_IN_BATCH)
+                           || (cfg_btch->buf_space_left
+                               < (xpath_len + value_len))) {
+                               /* Allocate a new config batch */
+                               cfg_btch = mgmt_txn_cfg_batch_alloc(
+                                       txn_req->txn, id, adapter);
+                       }
+
+                       cfg_btch->buf_space_left -= (xpath_len + value_len);
+                       memcpy(&cfg_btch->xp_subscr[cfg_btch->num_cfg_data],
+                              &subscr_info.xpath_subscr[id],
+                              sizeof(cfg_btch->xp_subscr[0]));
+
+                       mgmt_yang_cfg_data_req_init(
+                               &cfg_btch->cfg_data[cfg_btch->num_cfg_data]);
+                       cfg_btch->cfg_datap[cfg_btch->num_cfg_data] =
+                               &cfg_btch->cfg_data[cfg_btch->num_cfg_data];
+
+                       if (chg->cb.operation == NB_OP_DESTROY)
+                               cfg_btch->cfg_data[cfg_btch->num_cfg_data]
+                                       .req_type =
+                                       MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
+                       else
+                               cfg_btch->cfg_data[cfg_btch->num_cfg_data]
+                                       .req_type =
+                                       MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+
+                       mgmt_yang_data_init(
+                               &cfg_btch->data[cfg_btch->num_cfg_data]);
+                       cfg_btch->cfg_data[cfg_btch->num_cfg_data].data =
+                               &cfg_btch->data[cfg_btch->num_cfg_data];
+                       cfg_btch->data[cfg_btch->num_cfg_data].xpath = xpath;
+                       xpath = NULL;
+
+                       mgmt_yang_data_value_init(
+                               &cfg_btch->value[cfg_btch->num_cfg_data]);
+                       cfg_btch->data[cfg_btch->num_cfg_data].value =
+                               &cfg_btch->value[cfg_btch->num_cfg_data];
+                       cfg_btch->value[cfg_btch->num_cfg_data].value_case =
+                               MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+                       cfg_btch->value[cfg_btch->num_cfg_data]
+                               .encoded_str_val = value;
+                       value = NULL;
+
+                       if (subscr_info.xpath_subscr[id].validate_config)
+                               found_validator = true;
+
+                       cmtcfg_req->subscr_info.xpath_subscr[id].subscribed |=
+                               subscr_info.xpath_subscr[id].subscribed;
+                       MGMTD_TXN_DBG(
+                               " -- %s, {V:%d, N:%d}, Batch: %p, Item:%d",
+                               adapter->name,
+                               subscr_info.xpath_subscr[id].validate_config,
+                               subscr_info.xpath_subscr[id].notify_config,
+                               cfg_btch, (int)cfg_btch->num_cfg_data);
+
+                       cfg_btch->num_cfg_data++;
+                       num_chgs++;
+               }
+
+               if (!found_validator) {
+                       snprintf(err_buf, sizeof(err_buf),
+                                "No validator module found for XPATH: '%s",
+                                xpath);
+                       MGMTD_TXN_ERR("***** %s", err_buf);
+               }
+       }
+
+       cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
+       if (!num_chgs) {
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn_req->txn, MGMTD_NO_CFG_CHANGES,
+                       "No changes found to commit!");
+               goto mgmt_txn_create_config_batches_failed;
+       }
+
+       cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
+       return 0;
+
+mgmt_txn_create_config_batches_failed:
+
+       if (xpath)
+               free(xpath);
+
+       return -1;
+}
+
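+/*
+ * First step of commit processing: validate the commit request (only
+ * CANDIDATE -> RUNNING is allowed), handle abort requests, and determine
+ * the set of config changes, either from an explicitly supplied cfg_chgs
+ * list, from the candidate config's scratch change list, or, when that is
+ * empty, from a full candidate-vs-running diff. With
+ * MGMTD_LOCAL_VALIDATIONS_ENABLED, YANG and code-level validations are
+ * also run locally on MGMTD before proceeding.
+ */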
+static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
+{
+       struct nb_context nb_ctx;
+       struct nb_config *nb_config;
+       struct nb_config_cbs changes;
+       struct nb_config_cbs *cfg_chgs = NULL;
+       int ret;
+       bool del_cfg_chgs = false;
+
+       ret = 0;
+       memset(&nb_ctx, 0, sizeof(nb_ctx));
+       memset(&changes, 0, sizeof(changes));
+       if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
+               cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
+               del_cfg_chgs = true;
+               goto mgmt_txn_prep_config_validation_done;
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.src_ds_id
+           != MGMTD_DS_CANDIDATE) {
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INVALID_PARAM,
+                       "Source DS cannot be any other than CANDIDATE!");
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id
+           != MGMTD_DS_RUNNING) {
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INVALID_PARAM,
+                       "Destination DS cannot be any other than RUNNING!");
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INVALID_PARAM, "No such source datastore!");
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INVALID_PARAM,
+                       "No such destination datastore!");
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.abort) {
+               /*
+                * This is a commit abort request. Return success.
+                * That should trigger a restore of Candidate datastore to
+                * Running.
+                */
+               (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
+                                                     NULL);
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       nb_config = mgmt_ds_get_nb_config(
+               txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
+       if (!nb_config) {
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INTERNAL_ERROR,
+                       "Unable to retrieve Commit DS Config Tree!");
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       /*
+        * Check for diffs from the scratch buffer. If it is empty,
+        * get the diff from the Candidate DS itself.
+        */
+       cfg_chgs = &nb_config->cfg_chgs;
+       if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+               /*
+                * This could be the case when the config is directly
+                * loaded onto the candidate DS from a file. Get the
+                * diff from a full comparison of the candidate and
+                * running DSs.
+                */
+               nb_config_diff(
+                       mgmt_ds_get_nb_config(txn->commit_cfg_req->req
+                                                     .commit_cfg.dst_ds_ctx),
+                       nb_config, &changes);
+               cfg_chgs = &changes;
+               del_cfg_chgs = true;
+       }
+
+       if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+               /*
+                * This means there are no changes to commit whatsoever,
+                * whatever the source of the config changes.
+                */
+               (void)mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_NO_CFG_CHANGES,
+                       "No changes found to be committed!");
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+       if (mm->perf_stats_en)
+               gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+                                     ->validate_start,
+                            NULL);
+       /*
+        * Validate YANG contents of the source DS and get the diff
+        * between source and destination DS contents.
+        */
+       char err_buf[1024] = {0};
+       nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
+       nb_ctx.user = (void *)txn;
+       ret = nb_candidate_validate_yang(nb_config, false, err_buf,
+                                        sizeof(err_buf) - 1);
+       if (ret != NB_OK) {
+               if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+                       strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+               (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+                                                     err_buf);
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+       /*
+        * Perform application level validations locally on the MGMTD
+        * process by calling application specific validation routines
+        * loaded onto MGMTD process using libraries.
+        */
+       ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
+                                        sizeof(err_buf) - 1);
+       if (ret != NB_OK) {
+               if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+                       strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+               (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+                                                     err_buf);
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
+               /*
+                * This was a validate-only COMMIT request; return success.
+                */
+               (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
+                                                     NULL);
+               goto mgmt_txn_prepare_config_done;
+       }
+#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+
+mgmt_txn_prep_config_validation_done:
+
+       if (mm->perf_stats_en)
+               gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+                                     ->prep_cfg_start,
+                            NULL);
+
+       /*
+        * Iterate over the diffs and create ordered batches of config
+        * commands to be validated.
+        */
+       ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
+       if (ret != 0) {
+               ret = -1;
+               goto mgmt_txn_prepare_config_done;
+       }
+
+       /* Move to the Transaction Create Phase */
+       txn->commit_cfg_req->req.commit_cfg.curr_phase =
+               MGMTD_COMMIT_PHASE_TXN_CREATE;
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+       /*
+        * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
+        * backend.
+        */
+       mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
+mgmt_txn_prepare_config_done:
+
+       if (cfg_chgs && del_cfg_chgs)
+               nb_config_diff_del_changes(cfg_chgs);
+
+       return ret;
+}
+
+static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
+{
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_adapter *adapter;
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+       assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
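+       /*
+        * Ask every backend client that subscribed to any of the changed
+        * xpaths to create a transaction, and mark all of its config
+        * batches as being in the TXN_CREATE phase.
+        */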
+       FOREACH_MGMTD_BE_CLIENT_ID (id) {
+               if (cmtcfg_req->subscr_info.xpath_subscr[id].subscribed) {
+                       adapter = mgmt_be_get_adapter_by_id(id);
+                       if (mgmt_be_create_txn(adapter, txn->txn_id)
+                           != 0) {
+                               (void)mgmt_txn_send_commit_cfg_reply(
+                                       txn, MGMTD_INTERNAL_ERROR,
+                                       "Could not send TXN_CREATE to backend adapter");
+                               return -1;
+                       }
+
+                       FOREACH_TXN_CFG_BATCH_IN_LIST (
+                               &txn->commit_cfg_req->req.commit_cfg
+                                        .curr_batches[id],
+                               cfg_btch)
+                               cfg_btch->comm_phase =
+                                       MGMTD_COMMIT_PHASE_TXN_CREATE;
+               }
+       }
+
+       txn->commit_cfg_req->req.commit_cfg.next_phase =
+               MGMTD_COMMIT_PHASE_SEND_CFG;
+
+       /*
+        * Don't move the commit to the next phase yet. Wait for the TXN_REPLY to
+        * come back.
+        */
+
+       MGMTD_TXN_DBG(
+               "Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')", txn,
+               (unsigned long long)txn->session_id,
+               mgmt_txn_commit_phase_str(txn, true),
+               mgmt_txn_commit_phase_str(txn, false));
+
+       return 0;
+}
+
+static int
+mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
+                             struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+       struct mgmt_be_cfgreq cfg_req = {0};
+       size_t num_batches, indx;
+
+       assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+       assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed);
+
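+       /*
+        * Walk every config batch queued for this adapter and push its
+        * config data. The boolean argument flags the last batch in the
+        * list, and each batch sent is moved to the SEND_CFG phase.
+        */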
+       indx = 0;
+       num_batches =
+               mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
+       FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
+                                      cfg_btch) {
+               assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
+
+               cfg_req.cfgdata_reqs = cfg_btch->cfg_datap;
+               cfg_req.num_reqs = cfg_btch->num_cfg_data;
+               indx++;
+               if (mgmt_be_send_cfg_data_create_req(
+                           adapter, txn->txn_id, cfg_btch->batch_id, &cfg_req,
+                           indx == num_batches ? true : false)
+                   != 0) {
+                       (void)mgmt_txn_send_commit_cfg_reply(
+                               txn, MGMTD_INTERNAL_ERROR,
+                               "Internal Error! Could not send config data to backend!");
+                       MGMTD_TXN_ERR(
+                               "Could not send CFGDATA_CREATE for Txn %p Batch %p to client '%s'",
+                               txn, cfg_btch, adapter->name);
+                       return -1;
+               }
+
+               cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
+               mgmt_move_txn_cfg_batch_to_next(
+                       cmtcfg_req, cfg_btch,
+                       &cmtcfg_req->curr_batches[adapter->id],
+                       &cmtcfg_req->next_batches[adapter->id], true,
+                       MGMTD_COMMIT_PHASE_SEND_CFG);
+       }
+
+       /*
+        * This could be the last backend client to send CFGDATA_CREATE_REQ to.
+        * Try moving the commit to the next phase.
+        */
+       mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+       return 0;
+}
+
+static int
+mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+                                struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+       assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+       if (cmtcfg_req->subscr_info.xpath_subscr[adapter->id].subscribed) {
+               adapter = mgmt_be_get_adapter_by_id(adapter->id);
+               (void)mgmt_be_destroy_txn(adapter, txn->txn_id);
+
+               FOREACH_TXN_CFG_BATCH_IN_LIST (
+                       &txn->commit_cfg_req->req.commit_cfg
+                                .curr_batches[adapter->id],
+                       cfg_btch)
+                       cfg_btch->comm_phase = MGMTD_COMMIT_PHASE_TXN_DELETE;
+       }
+
+       return 0;
+}
+
+static void mgmt_txn_cfg_commit_timedout(struct thread *thread)
+{
+       struct mgmt_txn_ctx *txn;
+
+       txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
+       assert(txn);
+
+       assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+       if (!txn->commit_cfg_req)
+               return;
+
+       MGMTD_TXN_ERR(
+               "Backend operations for Config Txn %p have timed out! Aborting commit!",
+               txn);
+
+       /*
+        * Send a COMMIT_CONFIG_REPLY with failure.
+        * NOTE: The transaction cleanup will be triggered from the
+        * front-end adapter.
+        */
+       mgmt_txn_send_commit_cfg_reply(
+               txn, MGMTD_INTERNAL_ERROR,
+               "Operation on the backend timed-out. Aborting commit!");
+}
+
+/*
+ * Send CFG_APPLY_REQs to all the backend clients.
+ *
+ * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
+ * for all backend clients have been generated. Please see
+ * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
+ * for details.
+ */
+static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
+{
+       enum mgmt_be_client_id id;
+       struct mgmt_be_client_adapter *adapter;
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       struct mgmt_txn_batches_head *btch_list;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+
+       assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+       if (cmtcfg_req->validate_only) {
+               /*
+                * If this was a validate-only COMMIT request, return success.
+                */
+               (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS,
+                                                     NULL);
+               return 0;
+       }
+
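+       /*
+        * Send CFG_APPLY_REQ to every backend client that asked to be
+        * notified for these config changes, clear its CFG_SYNCED flag,
+        * and move its batches to the APPLY_CFG phase.
+        */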
+       FOREACH_MGMTD_BE_CLIENT_ID (id) {
+               if (cmtcfg_req->subscr_info.xpath_subscr[id].notify_config) {
+                       adapter = mgmt_be_get_adapter_by_id(id);
+                       if (!adapter)
+                               return -1;
+
+                       btch_list = &cmtcfg_req->curr_batches[id];
+                       if (mgmt_be_send_cfg_apply_req(adapter, txn->txn_id)
+                           != 0) {
+                               (void)mgmt_txn_send_commit_cfg_reply(
+                                       txn, MGMTD_INTERNAL_ERROR,
+                                       "Could not send CFG_APPLY_REQ to backend adapter");
+                               return -1;
+                       }
+                       cmtcfg_req->cmt_stats->last_num_apply_reqs++;
+
+                       UNSET_FLAG(adapter->flags,
+                                  MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+
+                       FOREACH_TXN_CFG_BATCH_IN_LIST (btch_list, cfg_btch)
+                               cfg_btch->comm_phase =
+                                       MGMTD_COMMIT_PHASE_APPLY_CFG;
+               }
+       }
+
+       txn->commit_cfg_req->req.commit_cfg.next_phase =
+               MGMTD_COMMIT_PHASE_TXN_DELETE;
+
+       /*
+        * Don't move the commit to the next phase yet. Wait for all VALIDATE_REPLIES
+        * to come back.
+        */
+
+       return 0;
+}
+
+static void mgmt_txn_process_commit_cfg(struct thread *thread)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+
+       txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
+       assert(txn);
+
+       MGMTD_TXN_DBG(
+               "Processing COMMIT_CONFIG for Txn:%p Session:0x%llx, Phase(Current:'%s', Next: '%s')",
+               txn, (unsigned long long)txn->session_id,
+               mgmt_txn_commit_phase_str(txn, true),
+               mgmt_txn_commit_phase_str(txn, false));
+
+       assert(txn->commit_cfg_req);
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+       switch (cmtcfg_req->curr_phase) {
+       case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+               mgmt_txn_prepare_config(txn);
+               break;
+       case MGMTD_COMMIT_PHASE_TXN_CREATE:
+               if (mm->perf_stats_en)
+                       gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
+                                    NULL);
+               /*
+                * Send TXN_CREATE_REQ to all Backend now.
+                */
+               mgmt_txn_send_be_txn_create(txn);
+               break;
+       case MGMTD_COMMIT_PHASE_SEND_CFG:
+               if (mm->perf_stats_en)
+                       gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
+                                    NULL);
+               /*
+                * All CFGDATA_CREATE_REQs should have been sent to the
+                * backends by now.
+                */
+#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
+               assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
+               MGMTD_TXN_DBG(
+                       "Txn:%p Session:0x%llx, trigger sending CFG_VALIDATE_REQ to all backend clients",
+                       txn, (unsigned long long)txn->session_id);
+#else  /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+               assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
+               MGMTD_TXN_DBG(
+                       "Txn:%p Session:0x%llx, trigger sending CFG_APPLY_REQ to all backend clients",
+                       txn, (unsigned long long)txn->session_id);
+#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+               break;
+       case MGMTD_COMMIT_PHASE_APPLY_CFG:
+               if (mm->perf_stats_en)
+                       gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
+                                    NULL);
+               /*
+                * We should have received successful CFG_VALIDATE_REPLY from
+                * all concerned Backend Clients by now. Send out the
+                * CFG_APPLY_REQs now.
+                */
+               mgmt_txn_send_be_cfg_apply(txn);
+               break;
+       case MGMTD_COMMIT_PHASE_TXN_DELETE:
+               if (mm->perf_stats_en)
+                       gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
+                                    NULL);
+               /*
+                * We would have sent TXN_DELETE_REQ to all backends by now.
+                * Send a successful CONFIG_COMMIT_REPLY back to the front-end.
+                * NOTE: This should also trigger DS merge/unlock and Txn
+                * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
+                * more details.
+                */
+               THREAD_OFF(txn->comm_cfg_timeout);
+               mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+               break;
+       case MGMTD_COMMIT_PHASE_MAX:
+               break;
+       }
+
+       MGMTD_TXN_DBG(
+               "Txn:%p Session:0x%llx, Phase updated to (Current:'%s', Next: '%s')",
+               txn, (unsigned long long)txn->session_id,
+               mgmt_txn_commit_phase_str(txn, true),
+               mgmt_txn_commit_phase_str(txn, false));
+}
+
+static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+       size_t indx;
+
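+       /*
+        * Point each entry of reply_datap[] at its fixed slot in
+        * reply_data[].
+        */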
+       for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
+               get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
+}
+
+static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+       int indx;
+
+       for (indx = 0; indx < get_reply->num_reply; indx++) {
+               if (get_reply->reply_xpathp[indx]) {
+                       free(get_reply->reply_xpathp[indx]);
+                       get_reply->reply_xpathp[indx] = 0;
+               }
+               if (get_reply->reply_data[indx].xpath) {
+                       zlog_debug("%s free xpath %p", __func__,
+                                  get_reply->reply_data[indx].xpath);
+                       free(get_reply->reply_data[indx].xpath);
+                       get_reply->reply_data[indx].xpath = 0;
+               }
+       }
+
+       get_reply->num_reply = 0;
+       memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
+       memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
+       memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
+
+       memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
+
+       mgmt_init_get_data_reply(get_reply);
+}
+
+static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
+{
+       if (get_data->reply)
+               mgmt_reset_get_data_reply(get_data->reply);
+}
+
+static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
+                                            struct mgmt_get_data_req *get_req)
+{
+       struct mgmt_get_data_reply *get_reply;
+       Mgmtd__YangDataReply *data_reply;
+
+       get_reply = get_req->reply;
+       if (!get_reply)
+               return;
+
+       data_reply = &get_reply->data_reply;
+       mgmt_yang_data_reply_init(data_reply);
+       data_reply->n_data = get_reply->num_reply;
+       data_reply->data = get_reply->reply_datap;
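+       /*
+        * next_indx carries the running total of replies sent so far, or
+        * -1 when this is the last batch.
+        */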
+       data_reply->next_indx =
+               (!get_reply->last_batch ? get_req->total_reply : -1);
+
+       MGMTD_TXN_DBG("Sending %d Get-Config/Data replies (next-idx:%lld)",
+               (int) data_reply->n_data,
+               (long long)data_reply->next_indx);
+
+       switch (txn_req->req_event) {
+       case MGMTD_TXN_PROC_GETCFG:
+               if (mgmt_fe_send_get_cfg_reply(
+                           txn_req->txn->session_id, txn_req->txn->txn_id,
+                           get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
+                           data_reply, NULL)
+                   != 0) {
+                       MGMTD_TXN_ERR(
+                               "Failed to send GET-CONFIG-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
+                               txn_req->txn,
+                               (unsigned long long)txn_req->txn->session_id,
+                               (unsigned long long)txn_req->req_id);
+               }
+               break;
+       case MGMTD_TXN_PROC_GETDATA:
+               if (mgmt_fe_send_get_data_reply(
+                           txn_req->txn->session_id, txn_req->txn->txn_id,
+                           get_req->ds_id, txn_req->req_id, MGMTD_SUCCESS,
+                           data_reply, NULL)
+                   != 0) {
+                       MGMTD_TXN_ERR(
+                               "Failed to send GET-DATA-REPLY for Txn %p, Sessn: 0x%llx, Req: %llu",
+                               txn_req->txn,
+                               (unsigned long long)txn_req->txn->session_id,
+                               (unsigned long long)txn_req->req_id);
+               }
+               break;
+       case MGMTD_TXN_PROC_SETCFG:
+       case MGMTD_TXN_PROC_COMMITCFG:
+       case MGMTD_TXN_COMMITCFG_TIMEOUT:
+       case MGMTD_TXN_CLEANUP:
+               MGMTD_TXN_ERR("Invalid Txn-Req-Event %u",
+                              txn_req->req_event);
+               break;
+       }
+
+       /*
+        * Reset reply buffer for next reply.
+        */
+       mgmt_reset_get_data_reply_buf(get_req);
+}
+
+static void mgmt_txn_iter_and_send_get_cfg_reply(struct mgmt_ds_ctx *ds_ctx,
+                                                 char *xpath,
+                                                 struct lyd_node *node,
+                                                 struct nb_node *nb_node,
+                                                 void *ctx)
+{
+       struct mgmt_txn_req *txn_req;
+       struct mgmt_get_data_req *get_req;
+       struct mgmt_get_data_reply *get_reply;
+       Mgmtd__YangData *data;
+       Mgmtd__YangDataValue *data_value;
+
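+       /*
+        * Only terminal (leaf) nodes are collected. Each value is appended
+        * to the current reply batch, which is flushed to the frontend once
+        * MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH entries have accumulated.
+        */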
+       txn_req = (struct mgmt_txn_req *)ctx;
+       if (!txn_req)
+               goto mgmtd_ignore_get_cfg_reply_data;
+
+       if (!(node->schema->nodetype & LYD_NODE_TERM))
+               goto mgmtd_ignore_get_cfg_reply_data;
+
+       assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG
+              || txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+
+       get_req = txn_req->req.get_data;
+       assert(get_req);
+       get_reply = get_req->reply;
+       data = &get_reply->reply_data[get_reply->num_reply];
+       data_value = &get_reply->reply_value[get_reply->num_reply];
+
+       mgmt_yang_data_init(data);
+       data->xpath = xpath;
+       mgmt_yang_data_value_init(data_value);
+       data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+       data_value->encoded_str_val = (char *)lyd_get_value(node);
+       data->value = data_value;
+
+       get_reply->num_reply++;
+       get_req->total_reply++;
+       MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
+                      data->xpath, data_value->encoded_str_val);
+
+       if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
+               mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
+
+       return;
+
+mgmtd_ignore_get_cfg_reply_data:
+       if (xpath)
+               free(xpath);
+}
+
+static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
+                              struct mgmt_txn_req *txn_req,
+                              struct mgmt_ds_ctx *ds_ctx)
+{
+       struct mgmt_txn_reqs_head *req_list = NULL;
+       struct mgmt_txn_reqs_head *pending_list = NULL;
+       int indx;
+       struct mgmt_get_data_req *get_data;
+       struct mgmt_get_data_reply *get_reply;
+
+       switch (txn_req->req_event) {
+       case MGMTD_TXN_PROC_GETCFG:
+               req_list = &txn->get_cfg_reqs;
+               break;
+       case MGMTD_TXN_PROC_GETDATA:
+               req_list = &txn->get_data_reqs;
+               break;
+       case MGMTD_TXN_PROC_SETCFG:
+       case MGMTD_TXN_PROC_COMMITCFG:
+       case MGMTD_TXN_COMMITCFG_TIMEOUT:
+       case MGMTD_TXN_CLEANUP:
+               assert(!"Wrong txn request type!");
+               break;
+       }
+
+       get_data = txn_req->req.get_data;
+
+       if (!get_data->reply) {
+               get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+                                         sizeof(struct mgmt_get_data_reply));
+               if (!get_data->reply) {
+                       mgmt_fe_send_get_cfg_reply(
+                               txn->session_id, txn->txn_id,
+                               get_data->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, NULL,
+                               "Internal error: Unable to allocate reply buffers!");
+                       goto mgmt_txn_get_config_failed;
+               }
+       }
+
+       /*
+        * Read data contents from the DS and respond back directly.
+        * No need to go to backend for getting data.
+        */
+       get_reply = get_data->reply;
+       for (indx = 0; indx < get_data->num_xpaths; indx++) {
+               MGMTD_TXN_DBG("Trying to get all data under '%s'",
+                              get_data->xpaths[indx]);
+               mgmt_init_get_data_reply(get_reply);
+               if (mgmt_ds_iter_data(get_data->ds_ctx, get_data->xpaths[indx],
+                                     mgmt_txn_iter_and_send_get_cfg_reply,
+                                     (void *)txn_req, true)
+                   == -1) {
+                       MGMTD_TXN_DBG("Invalid Xpath '%s'",
+                                      get_data->xpaths[indx]);
+                       mgmt_fe_send_get_cfg_reply(
+                               txn->session_id, txn->txn_id,
+                               get_data->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, NULL, "Invalid xpath");
+                       goto mgmt_txn_get_config_failed;
+               }
+               MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
+                              get_reply->num_reply, get_data->xpaths[indx]);
+               get_reply->last_batch = true;
+               mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
+       }
+
+mgmt_txn_get_config_failed:
+
+       if (pending_list) {
+               /*
+                * Move the transaction to corresponding pending list.
+                */
+               if (req_list)
+                       mgmt_txn_reqs_del(req_list, txn_req);
+               txn_req->pending_be_proc = true;
+               mgmt_txn_reqs_add_tail(pending_list, txn_req);
+               MGMTD_TXN_DBG(
+                       "Moved Req: %p for Txn: %p from Req-List to Pending-List",
+                       txn_req, txn_req->txn);
+       } else {
+               /*
+                * Delete the txn request. It will also remove it from request
+                * list.
+                */
+               mgmt_txn_req_free(&txn_req);
+       }
+
+       return 0;
+}
+
+static void mgmt_txn_process_get_cfg(struct thread *thread)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       struct mgmt_ds_ctx *ds_ctx;
+       int num_processed = 0;
+       bool error;
+
+       txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
+       assert(txn);
+
+       MGMTD_TXN_DBG(
+               "Processing %d GET_CONFIG requests for Txn:%p Session:0x%llx",
+               (int)mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn,
+               (unsigned long long)txn->session_id);
+
+       FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
+               error = false;
+               assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
+               ds_ctx = txn_req->req.get_data->ds_ctx;
+               if (!ds_ctx) {
+                       mgmt_fe_send_get_cfg_reply(
+                               txn->session_id, txn->txn_id,
+                               txn_req->req.get_data->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, NULL,
+                               "No such datastore!");
+                       error = true;
+                       goto mgmt_txn_process_get_cfg_done;
+               }
+
+               if (mgmt_txn_get_config(txn, txn_req, ds_ctx) != 0) {
+                       MGMTD_TXN_ERR(
+                               "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
+                               txn_req->req.get_data->ds_id, txn,
+                               (unsigned long long)txn->session_id,
+                               (unsigned long long)txn_req->req_id);
+                       error = true;
+               }
+
+       mgmt_txn_process_get_cfg_done:
+
+               if (error) {
+                       /*
+                        * Delete the txn request.
+                        * Note: The following will remove it from the list
+                        * as well.
+                        */
+                       mgmt_txn_req_free(&txn_req);
+               }
+
+               /*
+                * Else the txn request would have already been deleted or
+                * moved to the corresponding pending list. No need to delete it.
+                */
+               num_processed++;
+               if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
+                       break;
+       }
+
+       if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
+               MGMTD_TXN_DBG(
+                       "Processed maximum number of Get-Config requests (%d/%d). Rescheduling for the rest.",
+                       num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
+               mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
+       }
+}
+
+static void mgmt_txn_process_get_data(struct thread *thread)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       struct mgmt_ds_ctx *ds_ctx;
+       int num_processed = 0;
+       bool error;
+
+       txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
+       assert(txn);
+
+       MGMTD_TXN_DBG(
+               "Processing %d GET_DATA requests for Txn:%p Session:0x%llx",
+               (int)mgmt_txn_reqs_count(&txn->get_data_reqs), txn,
+               (unsigned long long)txn->session_id);
+
+       FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
+               error = false;
+               assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+               ds_ctx = txn_req->req.get_data->ds_ctx;
+               if (!ds_ctx) {
+                       mgmt_fe_send_get_data_reply(
+                               txn->session_id, txn->txn_id,
+                               txn_req->req.get_data->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, NULL,
+                               "No such datastore!");
+                       error = true;
+                       goto mgmt_txn_process_get_data_done;
+               }
+
+               if (mgmt_ds_is_config(ds_ctx)) {
+                       if (mgmt_txn_get_config(txn, txn_req, ds_ctx)
+                           != 0) {
+                               MGMTD_TXN_ERR(
+                                       "Unable to retrieve Config from DS %d for Txn %p, Sessn: 0x%llx, Req: %llu!",
+                                       txn_req->req.get_data->ds_id, txn,
+                                       (unsigned long long)txn->session_id,
+                                       (unsigned long long)txn_req->req_id);
+                               error = true;
+                       }
+               } else {
+                       /*
+                        * TODO: Trigger GET procedures for the backend.
+                        * For now, return an error.
+                        */
+                       mgmt_fe_send_get_data_reply(
+                               txn->session_id, txn->txn_id,
+                               txn_req->req.get_data->ds_id, txn_req->req_id,
+                               MGMTD_INTERNAL_ERROR, NULL,
+                               "GET-DATA on Oper DS is not supported yet!");
+                       error = true;
+               }
+
+       mgmt_txn_process_get_data_done:
+
+               if (error) {
+                       /*
+                        * Delete the txn request.
+                        * Note: The following will remove it from the list
+                        * as well.
+                        */
+                       mgmt_txn_req_free(&txn_req);
+               }
+
+               /*
+                * Else the txn request would have already been deleted or
+                * moved to the corresponding pending list. No need to delete it.
+                */
+               num_processed++;
+               if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
+                       break;
+       }
+
+       if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
+               MGMTD_TXN_DBG(
+                       "Processed maximum number of Get-Data requests (%d/%d). Rescheduling for the rest.",
+                       num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
+               mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
+       }
+}
+
+static struct mgmt_txn_ctx *
+mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
+                                   enum mgmt_txn_type type)
+{
+       struct mgmt_txn_ctx *txn;
+
+       FOREACH_TXN_IN_LIST (cm, txn) {
+               if (txn->session_id == session_id && txn->type == type)
+                       return txn;
+       }
+
+       return NULL;
+}
+
+static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
+                                                  enum mgmt_txn_type type)
+{
+       struct mgmt_txn_ctx *txn = NULL;
+
+       /*
+        * For a 'CONFIG' transaction, check whether one has already
+        * been created.
+        */
+       if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
+               if (mgmt_config_txn_in_progress() == session_id)
+                       txn = mgmt_txn_mm->cfg_txn;
+               goto mgmt_create_txn_done;
+       }
+
+       txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id,
+                                                  type);
+       if (!txn) {
+               txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
+               assert(txn);
+
+               txn->session_id = session_id;
+               txn->type = type;
+               mgmt_txn_badapters_init(&txn->be_adapters);
+               mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
+               mgmt_txn_reqs_init(&txn->set_cfg_reqs);
+               mgmt_txn_reqs_init(&txn->get_cfg_reqs);
+               mgmt_txn_reqs_init(&txn->get_data_reqs);
+               mgmt_txn_reqs_init(&txn->pending_get_datas);
+               txn->commit_cfg_req = NULL;
+               txn->refcount = 0;
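+               /* Skip 0 so a freshly assigned txn-id is never zero. */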
+               if (!mgmt_txn_mm->next_txn_id)
+                       mgmt_txn_mm->next_txn_id++;
+               txn->txn_id = mgmt_txn_mm->next_txn_id++;
+               hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
+
+               MGMTD_TXN_DBG("Added new '%s' MGMTD Transaction '%p'",
+                              mgmt_txn_type2str(type), txn);
+
+               if (type == MGMTD_TXN_TYPE_CONFIG)
+                       mgmt_txn_mm->cfg_txn = txn;
+
+               MGMTD_TXN_LOCK(txn);
+       }
+
+mgmt_create_txn_done:
+       return txn;
+}
+
+static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
+{
+       MGMTD_TXN_UNLOCK(txn);
+}
+
+static unsigned int mgmt_txn_hash_key(const void *data)
+{
+       const struct mgmt_txn_ctx *txn = data;
+
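+       /* Hash the 64-bit txn-id as an array of two 32-bit words. */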
+       return jhash2((uint32_t *) &txn->txn_id,
+                     sizeof(txn->txn_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
+{
+       const struct mgmt_txn_ctx *txn1 = d1;
+       const struct mgmt_txn_ctx *txn2 = d2;
+
+       return (txn1->txn_id == txn2->txn_id);
+}
+
+static void mgmt_txn_hash_free(void *data)
+{
+       struct mgmt_txn_ctx *txn = data;
+
+       mgmt_txn_delete(&txn);
+}
+
+static void mgmt_txn_hash_init(void)
+{
+       if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
+               return;
+
+       mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
+                                             mgmt_txn_hash_cmp,
+                                             "MGMT Transactions");
+}
+
+static void mgmt_txn_hash_destroy(void)
+{
+       if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+               return;
+
+       hash_clean(mgmt_txn_mm->txn_hash,
+                  mgmt_txn_hash_free);
+       hash_free(mgmt_txn_mm->txn_hash);
+       mgmt_txn_mm->txn_hash = NULL;
+}
+
+static inline struct mgmt_txn_ctx *
+mgmt_txn_id2ctx(uint64_t txn_id)
+{
+       struct mgmt_txn_ctx key = {0};
+       struct mgmt_txn_ctx *txn;
+
+       if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+               return NULL;
+
+       key.txn_id = txn_id;
+       txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
+
+       return txn;
+}
+
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file,
+                          int line)
+{
+       txn->refcount++;
+       MGMTD_TXN_DBG("%s:%d --> Lock %s Txn %p, Count: %d", file, line,
+                      mgmt_txn_type2str(txn->type), txn, txn->refcount);
+}
+
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+                            int line)
+{
+       assert(*txn && (*txn)->refcount);
+
+       (*txn)->refcount--;
+       MGMTD_TXN_DBG("%s:%d --> Unlock %s Txn %p, Count: %d", file, line,
+                      mgmt_txn_type2str((*txn)->type), *txn,
+                      (*txn)->refcount);
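+       /*
+        * On the last reference: clear the global config-txn pointer if
+        * needed, cancel any pending timers, remove the transaction from
+        * the hash and list, and free it.
+        */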
+       if (!(*txn)->refcount) {
+               if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
+                       if (mgmt_txn_mm->cfg_txn == *txn)
+                               mgmt_txn_mm->cfg_txn = NULL;
+               THREAD_OFF((*txn)->proc_get_cfg);
+               THREAD_OFF((*txn)->proc_get_data);
+               THREAD_OFF((*txn)->proc_comm_cfg);
+               THREAD_OFF((*txn)->comm_cfg_timeout);
+               hash_release(mgmt_txn_mm->txn_hash, *txn);
+               mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
+
+               MGMTD_TXN_DBG("Deleted %s Txn %p for Sessn: 0x%llx",
+                             mgmt_txn_type2str((*txn)->type), *txn,
+                             (unsigned long long)(*txn)->session_id);
+
+               XFREE(MTYPE_MGMTD_TXN, *txn);
+       }
+
+       *txn = NULL;
+}
+
+static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
+{
+       /* TODO: Any other cleanup applicable */
+
+       mgmt_txn_delete(txn);
+}
+
+static void
+mgmt_txn_cleanup_all_txns(void)
+{
+       struct mgmt_txn_ctx *txn;
+
+       if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+               return;
+
+       FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
+               mgmt_txn_cleanup_txn(&txn);
+}
+
+static void mgmt_txn_cleanup(struct thread *thread)
+{
+       struct mgmt_txn_ctx *txn;
+
+       txn = (struct mgmt_txn_ctx *)THREAD_ARG(thread);
+       assert(txn);
+
+       mgmt_txn_cleanup_txn(&txn);
+}
+
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+                                    enum mgmt_txn_event event)
+{
+       struct timeval tv = {.tv_sec = 0,
+                            .tv_usec = MGMTD_TXN_PROC_DELAY_USEC};
+
+       assert(mgmt_txn_mm && mgmt_txn_tm);
+
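+       /*
+        * SETCFG/COMMITCFG/GETCFG/GETDATA processing is scheduled after a
+        * short MGMTD_TXN_PROC_DELAY_USEC delay, the commit timeout after
+        * MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC, and cleanup after
+        * MGMTD_TXN_CLEANUP_DELAY_USEC.
+        */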
+       switch (event) {
+       case MGMTD_TXN_PROC_SETCFG:
+               thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg,
+                                   txn, &tv, &txn->proc_set_cfg);
+               assert(txn->proc_set_cfg);
+               break;
+       case MGMTD_TXN_PROC_COMMITCFG:
+               thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
+                                   txn, &tv, &txn->proc_comm_cfg);
+               assert(txn->proc_comm_cfg);
+               break;
+       case MGMTD_TXN_PROC_GETCFG:
+               thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg,
+                                   txn, &tv, &txn->proc_get_cfg);
+               assert(txn->proc_get_cfg);
+               break;
+       case MGMTD_TXN_PROC_GETDATA:
+               thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data,
+                                   txn, &tv, &txn->proc_get_data);
+               assert(txn->proc_get_data);
+               break;
+       case MGMTD_TXN_COMMITCFG_TIMEOUT:
+               thread_add_timer_msec(mgmt_txn_tm,
+                                     mgmt_txn_cfg_commit_timedout, txn,
+                                     MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
+                                     &txn->comm_cfg_timeout);
+               assert(txn->comm_cfg_timeout);
+               break;
+       case MGMTD_TXN_CLEANUP:
+               tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
+               thread_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
+                                   &txn->clnup);
+               assert(txn->clnup);
+       }
+}
+
+int mgmt_txn_init(struct mgmt_master *mm, struct thread_master *tm)
+{
+       if (mgmt_txn_mm || mgmt_txn_tm)
+               assert(!"MGMTD TXN: Call txn_init() only once");
+
+       mgmt_txn_mm = mm;
+       mgmt_txn_tm = tm;
+       mgmt_txns_init(&mm->txn_list);
+       mgmt_txn_hash_init();
+       assert(!mm->cfg_txn);
+       mm->cfg_txn = NULL;
+
+       return 0;
+}
+
+void mgmt_txn_destroy(void)
+{
+       mgmt_txn_cleanup_all_txns();
+       mgmt_txn_hash_destroy();
+}
+
+uint64_t mgmt_config_txn_in_progress(void)
+{
+       if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
+               return mgmt_txn_mm->cfg_txn->session_id;
+
+       return MGMTD_SESSION_ID_NONE;
+}
+
+uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
+{
+       struct mgmt_txn_ctx *txn;
+
+       txn = mgmt_txn_create_new(session_id, type);
+       return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
+}
+
+bool mgmt_txn_id_is_valid(uint64_t txn_id)
+{
+       return mgmt_txn_id2ctx(txn_id) ? true : false;
+}
+
+void mgmt_destroy_txn(uint64_t *txn_id)
+{
+       struct mgmt_txn_ctx *txn;
+
+       txn = mgmt_txn_id2ctx(*txn_id);
+       if (!txn)
+               return;
+
+       mgmt_txn_delete(&txn);
+       *txn_id = MGMTD_TXN_ID_NONE;
+}
+
+enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id)
+{
+       struct mgmt_txn_ctx *txn;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn)
+               return MGMTD_TXN_TYPE_NONE;
+
+       return txn->type;
+}
+
+int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+                                 Mgmtd__DatastoreId ds_id,
+                                 struct mgmt_ds_ctx *ds_ctx,
+                                 Mgmtd__YangCfgDataReq **cfg_req,
+                                 size_t num_req, bool implicit_commit,
+                                 Mgmtd__DatastoreId dst_ds_id,
+                                 struct mgmt_ds_ctx *dst_ds_ctx)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       size_t indx;
+       uint16_t *num_chgs;
+       struct nb_cfg_change *cfg_chg;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn)
+               return -1;
+
+       if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
+               MGMTD_TXN_ERR(
+                       "Only one SETCFG-REQ is allowed for an implicit commit!");
+               return -1;
+       }
+
+       txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
+       txn_req->req.set_cfg->ds_id = ds_id;
+       txn_req->req.set_cfg->ds_ctx = ds_ctx;
+       num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
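+       /*
+        * Convert each frontend config request into a northbound change:
+        * DELETE maps to NB_OP_DESTROY, while SET becomes NB_OP_MODIFY if
+        * the node already exists in the datastore and NB_OP_CREATE
+        * otherwise. Any other request type is skipped.
+        */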
+       for (indx = 0; indx < num_req; indx++) {
+               cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
+
+               if (cfg_req[indx]->req_type
+                   == MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
+                       cfg_chg->operation = NB_OP_DESTROY;
+               else if (cfg_req[indx]->req_type
+                        == MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
+                       cfg_chg->operation =
+                               mgmt_ds_find_data_node_by_xpath(
+                                       ds_ctx, cfg_req[indx]->data->xpath)
+                                       ? NB_OP_MODIFY
+                                       : NB_OP_CREATE;
+               else
+                       continue;
+
+               MGMTD_TXN_DBG(
+                       "XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
+                       (cfg_req[indx]->data->value
+                                        && cfg_req[indx]
+                                                   ->data->value
+                                                   ->encoded_str_val
+                                ? cfg_req[indx]->data->value->encoded_str_val
+                                : "NULL"));
+               strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
+                       sizeof(cfg_chg->xpath));
+               cfg_chg->value = (cfg_req[indx]->data->value
+                                                 && cfg_req[indx]
+                                                            ->data->value
+                                                            ->encoded_str_val
+                                         ? strdup(cfg_req[indx]
+                                                          ->data->value
+                                                          ->encoded_str_val)
+                                         : NULL);
+               if (cfg_chg->value)
+                       MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
+                                      cfg_chg->value, cfg_chg->value);
+
+               (*num_chgs)++;
+       }
+       txn_req->req.set_cfg->implicit_commit = implicit_commit;
+       txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
+       txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
+       txn_req->req.set_cfg->setcfg_stats =
+               mgmt_fe_get_session_setcfg_stats(txn->session_id);
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+
+       return 0;
+}
+
+int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
+                                    Mgmtd__DatastoreId src_ds_id,
+                                    struct mgmt_ds_ctx *src_ds_ctx,
+                                    Mgmtd__DatastoreId dst_ds_id,
+                                    struct mgmt_ds_ctx *dst_ds_ctx,
+                                    bool validate_only, bool abort,
+                                    bool implicit)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn)
+               return -1;
+
+       if (txn->commit_cfg_req) {
+               MGMTD_TXN_ERR(
+                       "A commit is already in-progress for Txn %p, session 0x%llx. Cannot start another!",
+                       txn, (unsigned long long)txn->session_id);
+               return -1;
+       }
+
+       txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
+       txn_req->req.commit_cfg.src_ds_id = src_ds_id;
+       txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
+       txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
+       txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
+       txn_req->req.commit_cfg.validate_only = validate_only;
+       txn_req->req.commit_cfg.abort = abort;
+       txn_req->req.commit_cfg.implicit = implicit;
+       txn_req->req.commit_cfg.cmt_stats =
+               mgmt_fe_get_session_commit_stats(txn->session_id);
+
+       /*
+        * Trigger a COMMIT-CONFIG process.
+        */
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+       return 0;
+}
+
+int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+                                       bool connect)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       struct mgmt_commit_cfg_req *cmtcfg_req;
+       static struct mgmt_commit_stats dummy_stats;
+       struct nb_config_cbs *adapter_cfgs = NULL;
+
+       memset(&dummy_stats, 0, sizeof(dummy_stats));
+       if (connect) {
+               /* Get config for this single backend client */
+               mgmt_be_get_adapter_config(adapter, mm->running_ds,
+                                             &adapter_cfgs);
+
+               if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
+                       SET_FLAG(adapter->flags,
+                                MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+                       return 0;
+               }
+
+               /*
+                * Create a CONFIG transaction to push the config changes
+                * provided to the backend client.
+                */
+               txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
+               if (!txn) {
+                       MGMTD_TXN_ERR(
+                               "Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
+                               adapter->name);
+                       return -1;
+               }
+
+               MGMTD_TXN_DBG("Created initial txn %" PRIu64
+                             " for BE connection %s",
+                             txn->txn_id, adapter->name);
+               /*
+                * Set the changeset for transaction to commit and trigger the
+                * commit request.
+                */
+               txn_req =
+                       mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+               txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
+               txn_req->req.commit_cfg.src_ds_ctx = 0;
+               txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
+               txn_req->req.commit_cfg.dst_ds_ctx = 0;
+               txn_req->req.commit_cfg.validate_only = false;
+               txn_req->req.commit_cfg.abort = false;
+               txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
+               txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;
+
+               /*
+                * Trigger a COMMIT-CONFIG process.
+                */
+               mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+       } else {
+               /*
+                * Check if any on-going transaction involves this backend
+                * client. If so, report that the transaction has failed.
+                */
+               FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+                       if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
+                               cmtcfg_req = txn->commit_cfg_req
+                                                    ? &txn->commit_cfg_req
+                                                               ->req.commit_cfg
+                                                    : NULL;
+                               if (cmtcfg_req
+                                   && cmtcfg_req->subscr_info
+                                              .xpath_subscr[adapter->id]
+                                              .subscribed) {
+                                       mgmt_txn_send_commit_cfg_reply(
+                                               txn, MGMTD_INTERNAL_ERROR,
+                                               "Backend daemon disconnected while processing commit!");
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create,
+                                     bool success,
+                                     struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
+               return -1;
+
+       if (!create && !txn->commit_cfg_req)
+               return 0;
+
+       assert(txn->commit_cfg_req);
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+       if (create) {
+               if (success) {
+                       /*
+                        * Done with TXN_CREATE. Move the backend client to
+                        * the next phase.
+                        */
+                       assert(cmtcfg_req->curr_phase
+                              == MGMTD_COMMIT_PHASE_TXN_CREATE);
+
+                       /*
+                        * Send CFGDATA_CREATE-REQs to the backend immediately.
+                        */
+                       mgmt_txn_send_be_cfg_data(txn, adapter);
+               } else {
+                       mgmt_txn_send_commit_cfg_reply(
+                               txn, MGMTD_INTERNAL_ERROR,
+                               "Internal error! Failed to initiate transaction at backend!");
+               }
+       } else {
+               /*
+                * Done with TXN_DELETE. Move the backend client to the next phase.
+                */
+               if (false)
+                       mgmt_move_be_commit_to_next_phase(txn, adapter);
+       }
+
+       return 0;
+}
+
+int mgmt_txn_notify_be_cfgdata_reply(
+       uint64_t txn_id, uint64_t batch_id, bool success, char *error_if_any,
+       struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+       struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
+               return -1;
+
+       if (!txn->commit_cfg_req)
+               return -1;
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+       cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
+       if (!cfg_btch || cfg_btch->txn != txn)
+               return -1;
+
+       if (!success) {
+               MGMTD_TXN_ERR(
+                       "CFGDATA_CREATE_REQ sent to '%s' failed for Txn %p, Batch %p, Err: %s",
+                       adapter->name, txn, cfg_btch,
+                       error_if_any ? error_if_any : "None");
+               mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INTERNAL_ERROR,
+                       error_if_any ? error_if_any :
+                       "Internal error! Failed to download config data to backend!");
+               return 0;
+       }
+
+       MGMTD_TXN_DBG(
+               "CFGDATA_CREATE_REQ sent to '%s' was successful for Txn %p, Batch %p, Err: %s",
+               adapter->name, txn, cfg_btch,
+               error_if_any ? error_if_any : "None");
+       mgmt_move_txn_cfg_batch_to_next(
+               cmtcfg_req, cfg_btch, &cmtcfg_req->curr_batches[adapter->id],
+               &cmtcfg_req->next_batches[adapter->id], true,
+               MGMTD_COMMIT_PHASE_APPLY_CFG);
+
+       mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+       return 0;
+}
+
+int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+                                      uint64_t batch_ids[],
+                                      size_t num_batch_ids, char *error_if_any,
+                                      struct mgmt_be_client_adapter *adapter)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_be_cfg_batch *cfg_btch;
+       struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+       size_t indx;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG
+           || !txn->commit_cfg_req)
+               return -1;
+
+       cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+       if (!success) {
+               MGMTD_TXN_ERR(
+                       "CFGDATA_APPLY_REQ sent to '%s' failed for Txn %p, Batches [0x%llx - 0x%llx], Err: %s",
+                       adapter->name, txn, (unsigned long long)batch_ids[0],
+                       (unsigned long long)batch_ids[num_batch_ids - 1],
+                       error_if_any ? error_if_any : "None");
+               mgmt_txn_send_commit_cfg_reply(
+                       txn, MGMTD_INTERNAL_ERROR,
+                       error_if_any ? error_if_any :
+                       "Internal error! Failed to apply config data on backend!");
+               return 0;
+       }
+
+       for (indx = 0; indx < num_batch_ids; indx++) {
+               cfg_btch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
+               if (cfg_btch->txn != txn)
+                       return -1;
+               mgmt_move_txn_cfg_batch_to_next(
+                       cmtcfg_req, cfg_btch,
+                       &cmtcfg_req->curr_batches[adapter->id],
+                       &cmtcfg_req->next_batches[adapter->id], true,
+                       MGMTD_COMMIT_PHASE_TXN_DELETE);
+       }
+
+       if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
+               /*
+                * All configuration for the specific backend has been applied.
+                * Send TXN-DELETE to wrap up the transaction for this backend.
+                */
+               SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+               mgmt_txn_send_be_txn_delete(txn, adapter);
+       }
+
+       mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+       if (mm->perf_stats_en)
+               gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
+
+       return 0;
+}
+
+int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
+                                      enum mgmt_result result,
+                                      const char *error_if_any)
+{
+       struct mgmt_txn_ctx *txn;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn)
+               return -1;
+
+       if (!txn->commit_cfg_req) {
+               MGMTD_TXN_ERR(
+                       "NO commit in-progress for Txn %p, session 0x%llx!",
+                       txn, (unsigned long long)txn->session_id);
+               return -1;
+       }
+
+       return mgmt_txn_send_commit_cfg_reply(txn, result, error_if_any);
+}
+
+int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
+                                 Mgmtd__DatastoreId ds_id,
+                                 struct mgmt_ds_ctx *ds_ctx,
+                                 Mgmtd__YangGetDataReq **data_req,
+                                 size_t num_reqs)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       size_t indx;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn)
+               return -1;
+
+       txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETCFG);
+       txn_req->req.get_data->ds_id = ds_id;
+       txn_req->req.get_data->ds_ctx = ds_ctx;
+       for (indx = 0;
+            indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
+            indx++) {
+               MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
+               txn_req->req.get_data->xpaths[indx] =
+                       strdup(data_req[indx]->data->xpath);
+               txn_req->req.get_data->num_xpaths++;
+       }
+
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
+
+       return 0;
+}
+
+int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
+                               Mgmtd__DatastoreId ds_id,
+                               struct mgmt_ds_ctx *ds_ctx,
+                               Mgmtd__YangGetDataReq **data_req,
+                               size_t num_reqs)
+{
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       size_t indx;
+
+       txn = mgmt_txn_id2ctx(txn_id);
+       if (!txn)
+               return -1;
+
+       txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETDATA);
+       txn_req->req.get_data->ds_id = ds_id;
+       txn_req->req.get_data->ds_ctx = ds_ctx;
+       for (indx = 0;
+            indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
+            indx++) {
+               MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
+               txn_req->req.get_data->xpaths[indx] =
+                       strdup(data_req[indx]->data->xpath);
+               txn_req->req.get_data->num_xpaths++;
+       }
+
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
+
+       return 0;
+}
+
+void mgmt_txn_status_write(struct vty *vty)
+{
+       struct mgmt_txn_ctx *txn;
+
+       vty_out(vty, "MGMTD Transactions\n");
+
+       FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+               vty_out(vty, "  Txn: \t\t\t%p\n", txn);
+               vty_out(vty, "    Txn-Id: \t\t\t%llu\n",
+                       (unsigned long long)txn->txn_id);
+               vty_out(vty, "    Session-Id: \t\t%llu\n",
+                       (unsigned long long)txn->session_id);
+               vty_out(vty, "    Type: \t\t\t%s\n",
+                       mgmt_txn_type2str(txn->type));
+               vty_out(vty, "    Ref-Count: \t\t\t%d\n", txn->refcount);
+       }
+       vty_out(vty, "  Total: %d\n",
+               (int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
+}
+
+int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+                                       struct mgmt_ds_ctx *dst_ds_ctx)
+{
+       static struct nb_config_cbs changes;
+       struct nb_config_cbs *cfg_chgs = NULL;
+       struct mgmt_txn_ctx *txn;
+       struct mgmt_txn_req *txn_req;
+       static struct mgmt_commit_stats dummy_stats;
+
+       memset(&changes, 0, sizeof(changes));
+       memset(&dummy_stats, 0, sizeof(dummy_stats));
+       /*
+        * This could be the case when the config is directly
+        * loaded onto the candidate DS from a file. Get the
+        * diff from a full comparison of the candidate and
+        * running DSs.
+        */
+       nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
+                      mgmt_ds_get_nb_config(src_ds_ctx), &changes);
+       cfg_chgs = &changes;
+
+       if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+               /*
+                * This means there are no changes whatsoever to commit
+                * from the given source of config changes.
+                */
+               return -1;
+       }
+
+       /*
+        * Create a CONFIG transaction to push the config changes
+        * provided to the backend client.
+        */
+       txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
+       if (!txn) {
+               MGMTD_TXN_ERR(
+                       "Failed to create CONFIG Transaction for downloading CONFIGs");
+               return -1;
+       }
+
+       MGMTD_TXN_DBG("Created rollback txn %" PRIu64, txn->txn_id);
+
+       /*
+        * Set the changeset for transaction to commit and trigger the commit
+        * request.
+        */
+       txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+       txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
+       txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
+       txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
+       txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
+       txn_req->req.commit_cfg.validate_only = false;
+       txn_req->req.commit_cfg.abort = false;
+       txn_req->req.commit_cfg.rollback = true;
+       txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
+       txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;
+
+       /*
+        * Trigger a COMMIT-CONFIG process.
+        */
+       mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+       return 0;
+}
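
The notify_be_* handlers above are the entry points the backend-adapter code is expected to call once it has decoded a reply message from a backend client. A minimal, hypothetical sketch of such a caller (the wrapper function and its use of zlog_err() are assumptions for illustration, not code from this commit):

static void example_forward_cfg_apply_reply(struct mgmt_be_client_adapter *adapter,
                                            uint64_t txn_id, bool success,
                                            uint64_t batch_ids[],
                                            size_t num_batch_ids,
                                            char *error_if_any)
{
        /*
         * Hand the decoded reply to the transaction module; it returns -1
         * when no matching CONFIG transaction or batch exists any more.
         */
        if (mgmt_txn_notify_be_cfg_apply_reply(txn_id, success, batch_ids,
                                               num_batch_ids, error_if_any,
                                               adapter) != 0)
                zlog_err("stale CFG_APPLY reply from '%s' for txn-id %" PRIu64,
                         adapter->name, txn_id);
}
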
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
new file mode 100644 (file)
index 0000000..f026a39
--- /dev/null
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_TXN_H_
+#define _FRR_MGMTD_TXN_H_
+
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_TXN_PROC_DELAY_MSEC 5
+#define MGMTD_TXN_PROC_DELAY_USEC 10
+#define MGMTD_TXN_MAX_NUM_SETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETDATA_PROC 128
+
+#define MGMTD_TXN_SEND_CFGVALIDATE_DELAY_MSEC 100
+#define MGMTD_TXN_SEND_CFGAPPLY_DELAY_MSEC 100
+#define MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC 30000 /* 30 seconds */
+
+#define MGMTD_TXN_CLEANUP_DELAY_MSEC 100
+#define MGMTD_TXN_CLEANUP_DELAY_USEC 10
+
+/*
+ * The following definition enables local validation of config
+ * on the MGMTD process by loading client-defined NB callbacks
+ * and calling them locally before sending CFGDATA_APPLY_REQ to
+ * the backend for the actual apply of the configuration on the
+ * internal state of the backend application.
+ *
+ * #define MGMTD_LOCAL_VALIDATIONS_ENABLED
+ *
+ * Note: Enabled by default in configure.ac. If this needs to be
+ * disabled, pass --enable-mgmtd-local-validations=no as an
+ * argument to ./configure.
+ */
+
+PREDECL_LIST(mgmt_txns);
+
+struct mgmt_master;
+
+enum mgmt_txn_type {
+       MGMTD_TXN_TYPE_NONE = 0,
+       MGMTD_TXN_TYPE_CONFIG,
+       MGMTD_TXN_TYPE_SHOW
+};
+
+static inline const char *mgmt_txn_type2str(enum mgmt_txn_type type)
+{
+       switch (type) {
+       case MGMTD_TXN_TYPE_NONE:
+               return "None";
+       case MGMTD_TXN_TYPE_CONFIG:
+               return "CONFIG";
+       case MGMTD_TXN_TYPE_SHOW:
+               return "SHOW";
+       }
+
+       return "Unknown";
+}
+
+/* Initialise transaction module. */
+extern int mgmt_txn_init(struct mgmt_master *cm, struct thread_master *tm);
+
+/* Destroy the transaction module. */
+extern void mgmt_txn_destroy(void);
+
+/*
+ * Check if transaction is in progress.
+ *
+ * Returns:
+ *    session ID if in-progress, MGMTD_SESSION_ID_NONE otherwise.
+ */
+extern uint64_t mgmt_config_txn_in_progress(void);
+
+/*
+ * Create transaction.
+ *
+ * session_id
+ *    Session ID.
+ *
+ * type
+ *    Transaction type (CONFIG/SHOW/NONE)
+ *
+ * Returns:
+ *    transaction ID.
+ */
+extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type);
+
+/*
+ * Destroy transaction.
+ *
+ * txn_id
+ *     Unique transaction identifier.
+ */
+extern void mgmt_destroy_txn(uint64_t *txn_id);
+
+/*
+ * Check if transaction is valid given an ID.
+ */
+extern bool mgmt_txn_id_is_valid(uint64_t txn_id);
+
+/*
+ * Returns the type of transaction given an ID.
+ */
+extern enum mgmt_txn_type mgmt_get_txn_type(uint64_t txn_id);
+
+/*
+ * Send set-config request to be processed later in transaction.
+ *
+ * txn_id
+ *    Unique transaction identifier.
+ *
+ * req_id
+ *    Unique transaction request identifier.
+ *
+ * ds_id
+ *    Datastore ID.
+ *
+ * ds_hndl
+ *    Datastore handle.
+ *
+ * cfg_req
+ *    Config requests.
+ *
+ * num_req
+ *    Number of config requests.
+ *
+ * implicit_commit
+ *    TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * dst_ds_id
+ *    Destination datastore ID.
+ *
+ * dst_ds_handle
+ *    Destination datastore handle.
+ *
+ * Returns:
+ *    0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+                                        Mgmtd__DatastoreId ds_id,
+                                        struct mgmt_ds_ctx *ds_ctx,
+                                        Mgmtd__YangCfgDataReq **cfg_req,
+                                        size_t num_req, bool implicit_commit,
+                                        Mgmtd__DatastoreId dst_ds_id,
+                                        struct mgmt_ds_ctx *dst_ds_ctx);
+
+/*
+ * Send commit-config request to be processed later in transaction.
+ *
+ * txn_id
+ *    Unique transaction identifier.
+ *
+ * req_id
+ *    Unique transaction request identifier.
+ *
+ * src_ds_id
+ *    Source datastore ID.
+ *
+ * src_ds_hndl
+ *    Source Datastore handle.
+ *
+ * validate_only
+ *    TRUE if commit request needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ *    TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
+ *
+ * implicit
+ *    TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ *    0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
+                                           Mgmtd__DatastoreId src_ds_id,
+                                           struct mgmt_ds_ctx *dst_ds_ctx,
+                                           Mgmtd__DatastoreId dst_ds_id,
+                                           struct mgmt_ds_ctx *src_ds_ctx,
+                                           bool validate_only, bool abort,
+                                           bool implicit);
+
+extern int mgmt_txn_send_commit_config_reply(uint64_t txn_id,
+                                             enum mgmt_result result,
+                                             const char *error_if_any);
+
+/*
+ * Send get-config request to be processed later in transaction.
+ *
+ * Similar to set-config request.
+ */
+extern int mgmt_txn_send_get_config_req(uint64_t txn_id, uint64_t req_id,
+                                        Mgmtd__DatastoreId ds_id,
+                                        struct mgmt_ds_ctx *ds_ctx,
+                                        Mgmtd__YangGetDataReq **data_req,
+                                        size_t num_reqs);
+
+/*
+ * Send get-data request to be processed later in transaction.
+ *
+ * Similar to the get-config request, but here the data is fetched from the backend client.
+ */
+extern int mgmt_txn_send_get_data_req(uint64_t txn_id, uint64_t req_id,
+                                      Mgmtd__DatastoreId ds_id,
+                                      struct mgmt_ds_ctx *ds_ctx,
+                                      Mgmtd__YangGetDataReq **data_req,
+                                      size_t num_reqs);
+
+/*
+ * Notify the transaction module when a backend adapter connects or disconnects.
+ */
+extern int
+mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+                                   bool connect);
+
+/*
+ * Handle a reply from a backend adapter to a transaction create/delete request.
+ */
+extern int
+mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
+                                 struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Handle a reply from a backend adapter to a config data create request.
+ */
+extern int
+mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, uint64_t batch_id,
+                                    bool success, char *error_if_any,
+                                    struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Handle a reply from a backend adapter to a config data validate request.
+ */
+extern int mgmt_txn_notify_be_cfg_validate_reply(
+       uint64_t txn_id, bool success, uint64_t batch_ids[],
+       size_t num_batch_ids, char *error_if_any,
+       struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Handle a reply from a backend adapter to a config data apply request.
+ */
+extern int
+mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+                                      uint64_t batch_ids[],
+                                      size_t num_batch_ids, char *error_if_any,
+                                      struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Dump transaction status to vty.
+ */
+extern void mgmt_txn_status_write(struct vty *vty);
+
+/*
+ * Trigger rollback config apply.
+ *
+ * Creates a new transaction and commit request for rollback.
+ */
+extern int
+mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+                                    struct mgmt_ds_ctx *dst_ds_ctx);
+#endif /* _FRR_MGMTD_TXN_H_ */
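
Taken together, the declarations above form the transaction API that the frontend-adapter and rollback code drive. A rough usage sketch for committing the candidate datastore to running (the wrapper itself, and treating a zero txn-id as the failure value of mgmt_create_txn(), are assumptions, not part of this header):

static int example_commit_candidate(uint64_t session_id)
{
        struct mgmt_ds_ctx *src_ds = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
        struct mgmt_ds_ctx *dst_ds = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
        uint64_t txn_id;

        if (!src_ds || !dst_ds)
                return -1;

        /* Allow only one CONFIG transaction at a time. */
        if (mgmt_config_txn_in_progress() != MGMTD_SESSION_ID_NONE)
                return -1;

        txn_id = mgmt_create_txn(session_id, MGMTD_TXN_TYPE_CONFIG);
        if (!txn_id)        /* assumption: 0 means creation failed */
                return -1;

        /* Queue a full commit: not validate-only, not an abort. */
        if (mgmt_txn_send_commit_config_req(txn_id, 1, MGMTD_DS_CANDIDATE,
                                            dst_ds, MGMTD_DS_RUNNING, src_ds,
                                            false, false, false) != 0) {
                mgmt_destroy_txn(&txn_id);
                return -1;
        }
        return 0;
}
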
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
new file mode 100644 (file)
index 0000000..79fa54a
--- /dev/null
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD VTY Interface
+ *
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+
+#include "command.h"
+#include "json.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+#include "mgmtd/mgmt_vty_clippy.c"
+
+DEFPY(show_mgmt_be_adapter,
+      show_mgmt_be_adapter_cmd,
+      "show mgmt backend-adapter all",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_BE_ADAPTER_STR
+      "Display all Backend Adapters\n")
+{
+       mgmt_be_adapter_status_write(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_be_xpath_reg,
+      show_mgmt_be_xpath_reg_cmd,
+      "show mgmt backend-yang-xpath-registry",
+      SHOW_STR
+      MGMTD_STR
+      "Backend Adapter YANG Xpath Registry\n")
+{
+       mgmt_be_xpath_register_write(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter, show_mgmt_fe_adapter_cmd,
+      "show mgmt frontend-adapter all [detail$detail]",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_FE_ADAPTER_STR
+      "Display all Frontend Adapters\n"
+      "Display more details\n")
+{
+       mgmt_fe_adapter_status_write(vty, !!detail);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(mgmt_performance_measurement,
+            mgmt_performance_measurement_cmd,
+            "[no] mgmt performance-measurement",
+            NO_STR
+            MGMTD_STR
+            "Enable performance measurement\n")
+{
+       if (no)
+               mgmt_fe_adapter_perf_measurement(vty, false);
+       else
+               mgmt_fe_adapter_perf_measurement(vty, true);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_reset_performance_stats,
+      mgmt_reset_performance_stats_cmd,
+      "mgmt reset-statistics",
+      MGMTD_STR
+      "Reset the Performance measurement statistics\n")
+{
+       mgmt_fe_adapter_reset_perf_stats(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_txn,
+      show_mgmt_txn_cmd,
+      "show mgmt transaction all",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_TXN_STR
+      "Display all Transactions\n")
+{
+       mgmt_txn_status_write(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_ds,
+      show_mgmt_ds_cmd,
+      "show mgmt datastore [all|candidate|operational|running]$dsname",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_DS_STR
+      "All datastores (default)\n"
+      "Candidate datastore\n"
+      "Operational datastore\n"
+      "Running datastore\n")
+{
+       struct mgmt_ds_ctx *ds_ctx;
+
+       if (!dsname || dsname[0] == 'a') {
+               mgmt_ds_status_write(vty);
+               return CMD_SUCCESS;
+       }
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, mgmt_ds_name2id(dsname));
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access %s datastore!\n", dsname);
+               return CMD_ERR_NO_MATCH;
+       }
+       mgmt_ds_status_write_one(vty, ds_ctx);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_commit,
+      mgmt_commit_cmd,
+      "mgmt commit <check|apply|abort>$type",
+      MGMTD_STR
+      "Commit action\n"
+      "Validate the set of config commands\n"
+      "Validate and apply the set of config commands\n"
+      "Abort and drop the set of config commands recently added\n")
+{
+       bool validate_only = type[0] == 'c';
+       bool abort = type[1] == 'b';
+
+       if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
+               return CMD_WARNING_CONFIG_FAILED;
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
+      "mgmt set-config WORD$path VALUE",
+      MGMTD_STR
+      "Set configuration data\n"
+      "XPath expression specifying the YANG data path\n"
+      "Value of the data to set\n")
+{
+       strlcpy(vty->cfg_changes[0].xpath, path,
+               sizeof(vty->cfg_changes[0].xpath));
+       vty->cfg_changes[0].value = value;
+       vty->cfg_changes[0].operation = NB_OP_CREATE;
+       vty->num_cfg_changes = 1;
+
+       vty->no_implicit_commit = true;
+       vty_mgmt_send_config_data(vty);
+       vty->no_implicit_commit = false;
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
+      "mgmt delete-config WORD$path",
+      MGMTD_STR
+      "Delete configuration data\n"
+      "XPath expression specifying the YANG data path\n")
+{
+
+       strlcpy(vty->cfg_changes[0].xpath, path,
+               sizeof(vty->cfg_changes[0].xpath));
+       vty->cfg_changes[0].value = NULL;
+       vty->cfg_changes[0].operation = NB_OP_DESTROY;
+       vty->num_cfg_changes = 1;
+
+       vty->no_implicit_commit = true;
+       vty_mgmt_send_config_data(vty);
+       vty->no_implicit_commit = false;
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
+      "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
+      SHOW_STR MGMTD_STR
+      "Get configuration data from a specific configuration datastore\n"
+      "Candidate datastore (default)\n"
+      "Operational datastore\n"
+      "Running datastore\n"
+      "XPath expression specifying the YANG data path\n")
+{
+       const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+       Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+
+       if (dsname)
+               datastore = mgmt_ds_name2id(dsname);
+
+       xpath_list[0] = path;
+       vty_mgmt_send_get_config(vty, datastore, xpath_list, 1);
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
+      "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
+      SHOW_STR MGMTD_STR
+      "Get data from a specific datastore\n"
+      "Candidate datastore\n"
+      "Operational datastore (default)\n"
+      "Running datastore\n"
+      "XPath expression specifying the YANG data path\n")
+{
+       const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+       Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;
+
+       if (dsname)
+               datastore = mgmt_ds_name2id(dsname);
+
+       xpath_list[0] = path;
+       vty_mgmt_send_get_data(vty, datastore, xpath_list, 1);
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_dump_data,
+      show_mgmt_dump_data_cmd,
+      "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
+      SHOW_STR
+      MGMTD_STR
+      "Get Datastore contents from a specific datastore\n"
+      "Candidate datastore (default)\n"
+      "Operational datastore\n"
+      "Running datastore\n"
+      "XPath expression specifying the YANG data path\n"
+      "XPath string\n"
+      "Dump the contents to a file\n"
+      "Full path of the file\n"
+      "json output\n"
+      "xml output\n")
+{
+       struct mgmt_ds_ctx *ds_ctx;
+       Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+       LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
+       FILE *f = NULL;
+
+       if (dsname)
+               datastore = mgmt_ds_name2id(dsname);
+
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access datastore!\n");
+               return CMD_ERR_NO_MATCH;
+       }
+
+       if (filepath) {
+               f = fopen(filepath, "w");
+               if (!f) {
+                       vty_out(vty,
+                               "Could not open file pointed by filepath %s\n",
+                               filepath);
+                       return CMD_SUCCESS;
+               }
+       }
+
+       mgmt_ds_dump_tree(vty, ds_ctx, path, f, format);
+
+       if (f)
+               fclose(f);
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_map_xpath,
+      show_mgmt_map_xpath_cmd,
+      "show mgmt yang-xpath-subscription WORD$path",
+      SHOW_STR
+      MGMTD_STR
+      "Get YANG Backend Subscription\n"
+      "XPath expression specifying the YANG data path\n")
+{
+       mgmt_be_xpath_subscr_info_write(vty, path);
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_load_config,
+      mgmt_load_config_cmd,
+      "mgmt load-config WORD$filepath <merge|replace>$type",
+      MGMTD_STR
+      "Load configuration onto Candidate Datastore\n"
+      "Full path of the file\n"
+      "Merge configuration with contents of Candidate Datastore\n"
+      "Replace the existing contents of Candidate datastore\n")
+{
+       bool merge = type[0] == 'm';
+       struct mgmt_ds_ctx *ds_ctx;
+       int ret;
+
+       if (access(filepath, F_OK) == -1) {
+               vty_out(vty, "ERROR: File %s : %s\n", filepath,
+                       strerror(errno));
+               return CMD_ERR_NO_FILE;
+       }
+
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+               return CMD_ERR_NO_MATCH;
+       }
+
+       ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
+       if (ret != 0)
+               vty_out(vty, "Error with parsing the file with error code %d\n",
+                       ret);
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_save_config,
+      mgmt_save_config_cmd,
+      "mgmt save-config <candidate|running>$dsname WORD$filepath",
+      MGMTD_STR
+      "Save configuration from datastore\n"
+      "Candidate datastore\n"
+      "Running datastore\n"
+      "Full path of the file\n")
+{
+       Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
+       struct mgmt_ds_ctx *ds_ctx;
+       FILE *f;
+
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
+                       dsname);
+               return CMD_ERR_NO_MATCH;
+       }
+
+       if (!filepath) {
+               vty_out(vty, "ERROR: No file path mentioned!\n");
+               return CMD_ERR_NO_MATCH;
+       }
+
+       f = fopen(filepath, "w");
+       if (!f) {
+               vty_out(vty, "Could not open file pointed by filepath %s\n",
+                       filepath);
+               return CMD_SUCCESS;
+       }
+
+       mgmt_ds_dump_tree(vty, ds_ctx, "/", f, LYD_JSON);
+
+       fclose(f);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_cmt_hist,
+      show_mgmt_cmt_hist_cmd,
+      "show mgmt commit-history",
+      SHOW_STR
+      MGMTD_STR
+      "Show commit history\n")
+{
+       show_mgmt_cmt_history(vty);
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_rollback,
+      mgmt_rollback_cmd,
+      "mgmt rollback <commit-id WORD$commit | last [(1-10)]$last>",
+      MGMTD_STR
+      "Rollback commits\n"
+      "Rollback to commit ID\n"
+      "Commit-ID\n"
+      "Rollbak n commits\n"
+      "Number of commits\n")
+{
+       if (commit)
+               mgmt_history_rollback_by_id(vty, commit);
+       else
+               mgmt_history_rollback_n(vty, last);
+
+       return CMD_SUCCESS;
+}
+
+static int config_write_mgmt_debug(struct vty *vty);
+static struct cmd_node debug_node = {
+       .name = "debug",
+       .node = DEBUG_NODE,
+       .prompt = "",
+       .config_write = config_write_mgmt_debug,
+};
+
+static int config_write_mgmt_debug(struct vty *vty)
+{
+       int n = mgmt_debug_be + mgmt_debug_fe + mgmt_debug_ds + mgmt_debug_txn;
+       if (!n)
+               return 0;
+       if (n == 4) {
+               vty_out(vty, "debug mgmt all\n");
+               return 0;
+       }
+
+       vty_out(vty, "debug mgmt");
+       if (mgmt_debug_be)
+               vty_out(vty, " backend");
+       if (mgmt_debug_ds)
+               vty_out(vty, " datastore");
+       if (mgmt_debug_fe)
+               vty_out(vty, " frontend");
+       if (mgmt_debug_txn)
+               vty_out(vty, " transaction");
+
+       vty_out(vty, "\n");
+
+       return 0;
+}
+
+DEFPY(debug_mgmt,
+      debug_mgmt_cmd,
+      "[no$no] debug mgmt <all$all|{backend$be|datastore$ds|frontend$fe|transaction$txn}>",
+      NO_STR
+      DEBUG_STR
+      MGMTD_STR
+      "All debug\n"
+      "Back-end debug\n"
+      "Datastore debug\n"
+      "Front-end debug\n"
+      "Transaction debug\n")
+{
+       bool set = !no;
+       if (all)
+               be = fe = ds = txn = set ? all : NULL;
+
+       if (be)
+               mgmt_debug_be = set;
+       if (ds)
+               mgmt_debug_ds = set;
+       if (fe)
+               mgmt_debug_fe = set;
+       if (txn)
+               mgmt_debug_txn = set;
+
+       return CMD_SUCCESS;
+}
+
+void mgmt_vty_init(void)
+{
+       /*
+        * Initialize command handling from VTYSH connection.
+        * Call command initialization routines defined by
+        * backend components that are moved to new MGMTD infra
+        * here one by one.
+        */
+#if HAVE_STATICD
+       extern void static_vty_init(void);
+       static_vty_init();
+#endif
+
+       install_node(&debug_node);
+
+       install_element(VIEW_NODE, &show_mgmt_be_adapter_cmd);
+       install_element(VIEW_NODE, &show_mgmt_be_xpath_reg_cmd);
+       install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
+       install_element(VIEW_NODE, &show_mgmt_txn_cmd);
+       install_element(VIEW_NODE, &show_mgmt_ds_cmd);
+       install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
+       install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
+       install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+       install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
+       install_element(VIEW_NODE, &show_mgmt_cmt_hist_cmd);
+
+       install_element(CONFIG_NODE, &mgmt_commit_cmd);
+       install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
+       install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
+       install_element(CONFIG_NODE, &mgmt_load_config_cmd);
+       install_element(CONFIG_NODE, &mgmt_save_config_cmd);
+       install_element(CONFIG_NODE, &mgmt_rollback_cmd);
+
+       install_element(VIEW_NODE, &debug_mgmt_cmd);
+       install_element(CONFIG_NODE, &debug_mgmt_cmd);
+
+       /* Enable view */
+       install_element(ENABLE_NODE, &mgmt_performance_measurement_cmd);
+       install_element(ENABLE_NODE, &mgmt_reset_performance_stats_cmd);
+
+       /*
+        * TODO: Register and handlers for auto-completion here.
+        */
+}
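
Every command in this file follows the same DEFPY + install_element pattern, so extending mgmtd's CLI stays mechanical. A hypothetical sketch (the "show mgmt example" command and its handler are made-up names for illustration, not part of the commit):

DEFPY(show_mgmt_example, show_mgmt_example_cmd,
      "show mgmt example",
      SHOW_STR
      MGMTD_STR
      "Display example output\n")
{
        vty_out(vty, "example output\n");
        return CMD_SUCCESS;
}

/* ...and registered from mgmt_vty_init(): */
/* install_element(VIEW_NODE, &show_mgmt_example_cmd); */
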
diff --git a/mgmtd/mgmt_vty.c.safe b/mgmtd/mgmt_vty.c.safe
new file mode 100644 (file)
index 0000000..c43485c
--- /dev/null
@@ -0,0 +1,506 @@
+/*
+ * MGMTD VTY Interface
+ * Copyright (C) 2021  Vmware, Inc.
+ *                    Pushpasis Sarkar <spushpasis@vmware.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <zebra.h>
+
+#include "command.h"
+#include "json.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_server.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_server.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+#include "mgmtd/mgmt_vty_clippy.c"
+
+DEFPY(show_mgmt_be_adapter,
+      show_mgmt_be_adapter_cmd,
+      "show mgmt backend-adapter all",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_BE_ADAPTER_STR
+      "Display all Backend Adapters\n")
+{
+       mgmt_be_adapter_status_write(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_be_xpath_reg,
+      show_mgmt_be_xpath_reg_cmd,
+      "show mgmt backend-yang-xpath-registry",
+      SHOW_STR
+      MGMTD_STR
+      "Backend Adapter YANG Xpath Registry\n")
+{
+       mgmt_be_xpath_register_write(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter,
+      show_mgmt_fe_adapter_cmd,
+      "show mgmt frontend-adapter all",
+      SHOW_STR MGMTD_STR MGMTD_FE_ADAPTER_STR "Display all Frontend Adapters\n")
+{
+       mgmt_fe_adapter_status_write(vty, false);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter_detail, show_mgmt_fe_adapter_detail_cmd,
+      "show mgmt frontend-adapter all detail",
+      SHOW_STR MGMTD_STR MGMTD_FE_ADAPTER_STR
+      "Display all Frontend Adapters\n"
+      "Details of commit stats\n")
+{
+       mgmt_fe_adapter_status_write(vty, true);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(mgmt_performance_measurement,
+            mgmt_performance_measurement_cmd,
+            "[no] mgmt performance-measurement",
+            NO_STR
+            MGMTD_STR
+            "Enable performance measurement\n")
+{
+       if (no)
+               mgmt_fe_adapter_perf_measurement(vty, false);
+       else
+               mgmt_fe_adapter_perf_measurement(vty, true);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_reset_performance_stats,
+      mgmt_reset_performance_stats_cmd,
+      "mgmt reset-statistics",
+      MGMTD_STR
+      "Reset the Performance measurement statistics\n")
+{
+       mgmt_fe_adapter_reset_perf_stats(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_txn,
+      show_mgmt_txn_cmd,
+      "show mgmt transaction all",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_TXN_STR
+      "Display all Transactions\n")
+{
+       mgmt_txn_status_write(vty);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_ds,
+      show_mgmt_ds_cmd,
+      "show mgmt datastore [all|candidate|operational|running]$dsname",
+      SHOW_STR
+      MGMTD_STR
+      MGMTD_DS_STR
+      "All datastores (default)\n"
+      "Candidate datastore\n"
+      "Operational datastore\n"
+      "Running datastore\n")
+{
+       struct mgmt_ds_ctx *ds_ctx;
+
+       if (!dsname || dsname[0] == 'a') {
+               mgmt_ds_status_write(vty);
+               return CMD_SUCCESS;
+       }
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, mgmt_ds_name2id(dsname));
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access %s datastore!\n", dsname);
+               return CMD_ERR_NO_MATCH;
+       }
+       mgmt_ds_status_write_one(vty, ds_ctx);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_commit,
+      mgmt_commit_cmd,
+      "mgmt commit <check|apply|abort>$type",
+      MGMTD_STR
+      "Commit action\n"
+      "Validate the set of config commands\n"
+      "Validate and apply the set of config commands\n"
+      "Abort and drop the set of config commands recently added\n")
+{
+       bool validate_only = type[0] == 'c';
+       bool abort = type[1] == 'b';
+
+       if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
+               return CMD_WARNING_CONFIG_FAILED;
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
+      "mgmt set-config WORD$path VALUE",
+      MGMTD_STR
+      "Set configuration data\n"
+      "XPath expression specifying the YANG data path\n"
+      "Value of the data to set\n")
+{
+       strlcpy(vty->cfg_changes[0].xpath, path,
+               sizeof(vty->cfg_changes[0].xpath));
+       vty->cfg_changes[0].value = value;
+       vty->cfg_changes[0].operation = NB_OP_CREATE;
+       vty->num_cfg_changes = 1;
+
+       vty->no_implicit_commit = true;
+       vty_mgmt_send_config_data(vty);
+       vty->no_implicit_commit = false;
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
+      "mgmt delete-config WORD$path",
+      MGMTD_STR
+      "Delete configuration data\n"
+      "XPath expression specifying the YANG data path\n")
+{
+
+       strlcpy(vty->cfg_changes[0].xpath, path,
+               sizeof(vty->cfg_changes[0].xpath));
+       vty->cfg_changes[0].value = NULL;
+       vty->cfg_changes[0].operation = NB_OP_DESTROY;
+       vty->num_cfg_changes = 1;
+
+       vty->no_implicit_commit = true;
+       vty_mgmt_send_config_data(vty);
+       vty->no_implicit_commit = false;
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
+      "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
+      SHOW_STR MGMTD_STR
+      "Get configuration data from a specific configuration datastore\n"
+      "Candidate datastore (default)\n"
+      "Operational datastore\n"
+      "Running datastore\n"
+      "XPath expression specifying the YANG data path\n")
+{
+       const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+       Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+
+       if (dsname)
+               datastore = mgmt_ds_name2id(dsname);
+
+       xpath_list[0] = path;
+       vty_mgmt_send_get_config(vty, datastore, xpath_list, 1);
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
+      "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
+      SHOW_STR MGMTD_STR
+      "Get data from a specific datastore\n"
+      "Candidate datastore\n"
+      "Operational datastore (default)\n"
+      "Running datastore\n"
+      "XPath expression specifying the YANG data path\n")
+{
+       const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+       Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;
+
+       if (dsname)
+               datastore = mgmt_ds_name2id(dsname);
+
+       xpath_list[0] = path;
+       vty_mgmt_send_get_data(vty, datastore, xpath_list, 1);
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_dump_data,
+      show_mgmt_dump_data_cmd,
+      "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
+      SHOW_STR
+      MGMTD_STR
+      "Get Datastore contents from a specific datastore\n"
+      "Candidate datastore (default)\n"
+      "Operational datastore\n"
+      "Running datastore\n"
+      "XPath expression specifying the YANG data path\n"
+      "XPath string\n"
+      "Dump the contents to a file\n"
+      "Full path of the file\n"
+      "json output\n"
+      "xml output\n")
+{
+       struct mgmt_ds_ctx *ds_ctx;
+       Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+       LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
+       FILE *f = NULL;
+
+       if (dsname)
+               datastore = mgmt_ds_name2id(dsname);
+
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access datastore!\n");
+               return CMD_ERR_NO_MATCH;
+       }
+
+       if (filepath) {
+               f = fopen(filepath, "w");
+               if (!f) {
+                       vty_out(vty,
+                               "Could not open file pointed by filepath %s\n",
+                               filepath);
+                       return CMD_SUCCESS;
+               }
+       }
+
+       mgmt_ds_dump_tree(vty, ds_ctx, path, f, format);
+
+       if (f)
+               fclose(f);
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_map_xpath,
+      show_mgmt_map_xpath_cmd,
+      "show mgmt yang-xpath-subscription WORD$path",
+      SHOW_STR
+      MGMTD_STR
+      "Get YANG Backend Subscription\n"
+      "XPath expression specifying the YANG data path\n")
+{
+       mgmt_be_xpath_subscr_info_write(vty, path);
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_load_config,
+      mgmt_load_config_cmd,
+      "mgmt load-config WORD$filepath <merge|replace>$type",
+      MGMTD_STR
+      "Load configuration onto Candidate Datastore\n"
+      "Full path of the file\n"
+      "Merge configuration with contents of Candidate Datastore\n"
+      "Replace the existing contents of Candidate datastore\n")
+{
+       bool merge = type[0] == 'm';
+       struct mgmt_ds_ctx *ds_ctx;
+       int ret;
+
+       if (access(filepath, F_OK) == -1) {
+               vty_out(vty, "ERROR: File %s : %s\n", filepath,
+                       strerror(errno));
+               return CMD_ERR_NO_FILE;
+       }
+
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+               return CMD_ERR_NO_MATCH;
+       }
+
+       ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
+       if (ret != 0)
+               vty_out(vty, "Error with parsing the file with error code %d\n",
+                       ret);
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_save_config,
+      mgmt_save_config_cmd,
+      "mgmt save-config <candidate|running>$dsname WORD$filepath",
+      MGMTD_STR
+      "Save configuration from datastore\n"
+      "Candidate datastore\n"
+      "Running datastore\n"
+      "Full path of the file\n")
+{
+       Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
+       struct mgmt_ds_ctx *ds_ctx;
+       FILE *f;
+
+       ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+       if (!ds_ctx) {
+               vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
+                       dsname);
+               return CMD_ERR_NO_MATCH;
+       }
+
+       if (!filepath) {
+               vty_out(vty, "ERROR: No file path mentioned!\n");
+               return CMD_ERR_NO_MATCH;
+       }
+
+       f = fopen(filepath, "w");
+       if (!f) {
+               vty_out(vty, "Could not open file pointed by filepath %s\n",
+                       filepath);
+               return CMD_SUCCESS;
+       }
+
+       mgmt_ds_dump_tree(vty, ds_ctx, "/", f, LYD_JSON);
+
+       fclose(f);
+
+       return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_cmt_hist,
+      show_mgmt_cmt_hist_cmd,
+      "show mgmt commit-history",
+      SHOW_STR
+      MGMTD_STR
+      "Show commit history\n")
+{
+       show_mgmt_cmt_history(vty);
+       return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_rollback,
+      mgmt_rollback_cmd,
+      "mgmt rollback <commit-id WORD$commit | last [(1-10)]$last>",
+      MGMTD_STR
+      "Rollback commits\n"
+      "Rollback to commit ID\n"
+      "Commit-ID\n"
+      "Rollbak n commits\n"
+      "Number of commits\n")
+{
+       if (commit)
+               mgmt_history_rollback_by_id(vty, commit);
+       else
+               mgmt_history_rollback_n(vty, last);
+
+       return CMD_SUCCESS;
+}
+
+static int config_write_mgmt_debug(struct vty *vty);
+static struct cmd_node debug_node = {
+       .name = "debug",
+       .node = DEBUG_NODE,
+       .prompt = "",
+       .config_write = config_write_mgmt_debug,
+};
+
+static int config_write_mgmt_debug(struct vty *vty)
+{
+       int n = mgmt_debug_be + mgmt_debug_fe + mgmt_debug_ds + mgmt_debug_txn;
+       if (!n)
+               return 0;
+       if (n == 4) {
+               vty_out(vty, "debug mgmt all\n");
+               return 0;
+       }
+
+       vty_out(vty, "debug mgmt");
+       if (mgmt_debug_be)
+               vty_out(vty, " backend");
+       if (mgmt_debug_ds)
+               vty_out(vty, " datastore");
+       if (mgmt_debug_fe)
+               vty_out(vty, " frontend");
+       if (mgmt_debug_txn)
+               vty_out(vty, " transaction");
+
+       vty_out(vty, "\n");
+
+       return 0;
+}
+
+DEFPY(debug_mgmt,
+      debug_mgmt_cmd,
+      "[no$no] debug mgmt <all$all|{backend$be|datastore$ds|frontend$fe|transaction$txn}>",
+      NO_STR
+      DEBUG_STR
+      MGMTD_STR
+      "All debug\n"
+      "Back-end debug\n"
+      "Datastore debug\n"
+      "Front-end debug\n"
+      "Transaction debug\n")
+{
+       bool set = !no;
+       if (all)
+               be = fe = ds = txn = set ? all : NULL;
+
+       if (be)
+               mgmt_debug_be = set;
+       if (ds)
+               mgmt_debug_ds = set;
+       if (fe)
+               mgmt_debug_fe = set;
+       if (txn)
+               mgmt_debug_txn = set;
+
+       return CMD_SUCCESS;
+}
+
+void mgmt_vty_init(void)
+{
+       /*
+        * Initialize command handling from VTYSH connection.
+        * Call command initialization routines defined by
+        * backend components that are moved to new MGMTD infra
+        * here one by one.
+        */
+#if HAVE_STATICD
+       extern void static_vty_init(void);
+       static_vty_init();
+#endif
+
+       install_node(&debug_node);
+
+       install_element(VIEW_NODE, &show_mgmt_be_adapter_cmd);
+       install_element(VIEW_NODE, &show_mgmt_be_xpath_reg_cmd);
+       install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
+       install_element(VIEW_NODE, &show_mgmt_fe_adapter_detail_cmd);
+       install_element(VIEW_NODE, &show_mgmt_txn_cmd);
+       install_element(VIEW_NODE, &show_mgmt_ds_cmd);
+       install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
+       install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
+       install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+       install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
+       install_element(VIEW_NODE, &show_mgmt_cmt_hist_cmd);
+
+       install_element(CONFIG_NODE, &mgmt_commit_cmd);
+       install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
+       install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
+       install_element(CONFIG_NODE, &mgmt_load_config_cmd);
+       install_element(CONFIG_NODE, &mgmt_save_config_cmd);
+       install_element(CONFIG_NODE, &mgmt_rollback_cmd);
+
+       install_element(VIEW_NODE, &debug_mgmt_cmd);
+       install_element(CONFIG_NODE, &debug_mgmt_cmd);
+
+       /* Enable view */
+       install_element(ENABLE_NODE, &mgmt_performance_measurement_cmd);
+       install_element(ENABLE_NODE, &mgmt_reset_performance_stats_cmd);
+
+       /*
+        * TODO: Register and handlers for auto-completion here.
+        */
+}
diff --git a/mgmtd/subdir.am b/mgmtd/subdir.am
new file mode 100644 (file)
index 0000000..a93f8f9
--- /dev/null
@@ -0,0 +1,68 @@
+#
+# mgmtd -- Management Daemon
+#
+
+# dist_examples_DATA += \
+       # end
+
+vtysh_daemons += mgmtd
+
+# man8 += $(MANBUILD)/frr-mgmtd.8
+# endif
+
+clippy_scan += \
+       mgmtd/mgmt_vty.c \
+       # end
+
+lib_LTLIBRARIES += mgmtd/libmgmt_be_nb.la
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES = \
+       # end
+mgmtd_libmgmt_be_nb_la_CFLAGS = $(AM_CFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_CPPFLAGS = $(AM_CPPFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_LDFLAGS = -version-info 0:0:0
+
+noinst_LIBRARIES += mgmtd/libmgmtd.a
+mgmtd_libmgmtd_a_SOURCES = \
+       mgmtd/mgmt.c \
+       mgmtd/mgmt_ds.c \
+       mgmtd/mgmt_be_server.c \
+       mgmtd/mgmt_be_adapter.c \
+       mgmtd/mgmt_fe_server.c \
+       mgmtd/mgmt_fe_adapter.c \
+       mgmtd/mgmt_history.c \
+       mgmtd/mgmt_memory.c \
+       mgmtd/mgmt_txn.c \
+       mgmtd/mgmt_vty.c \
+       # end
+
+mgmtdheaderdir = $(pkgincludedir)/mgmtd
+mgmtdheader_HEADERS = \
+       mgmtd/mgmt_defines.h \
+       # end
+
+noinst_HEADERS += \
+       mgmtd/mgmt.h \
+       mgmtd/mgmt_be_server.h \
+       mgmtd/mgmt_be_adapter.h \
+       mgmtd/mgmt_ds.h \
+       mgmtd/mgmt_fe_server.h \
+       mgmtd/mgmt_fe_adapter.h \
+       mgmtd/mgmt_history.h \
+       mgmtd/mgmt_memory.h \
+       mgmtd/mgmt_txn.h \
+       # end
+
+sbin_PROGRAMS += mgmtd/mgmtd
+
+mgmtd_mgmtd_SOURCES = \
+       mgmtd/mgmt_main.c \
+       # end
+mgmtd_mgmtd_CFLAGS = $(AM_CFLAGS) -I ./
+mgmtd_mgmtd_LDADD = mgmtd/libmgmtd.a lib/libfrr.la $(LIBCAP) $(LIBM) $(LIBYANG_LIBS) $(UST_LIBS)
+mgmtd_mgmtd_LDADD += mgmtd/libmgmt_be_nb.la
+
+if STATICD
+$(mgmtd_mgmtd_OBJECTS): yang/frr-staticd.yang.c
+CLEANFILES += yang/frr-staticd.yang.c
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES += staticd/static_vty.c
+endif
diff --git a/pkgsrc/mgmtd.sh.in b/pkgsrc/mgmtd.sh.in
new file mode 100644 (file)
index 0000000..fb57c0a
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/sh
+#
+# mgmtd is part of the FRRouting (FRR) suite
+#
+# PROVIDE: mgmtd
+# REQUIRE: none
+##
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin:@prefix@/sbin:@prefix@/bin
+export PATH
+
+if [ -f /etc/rc.subr ]
+then
+       . /etc/rc.subr
+fi
+
+name="mgmtd"
+rcvar=$name
+required_files="@sysconfdir@/${name}.conf"
+command="@prefix@/sbin/${name}"
+command_args="-d"
+
+start_precmd="zebra_precmd"
+socket_dir=@localstatedir@
+pidfile="${socket_dir}/${name}.pid"
+
+zebra_precmd()
+{
+    rc_flags="$(
+       set -- $rc_flags
+       while [ $# -ne 0 ]; do
+           if [ X"$1" = X-P -o X"$1" = X-A ]; then
+               break
+           fi
+           shift
+       done
+       if [ $# -eq 0 ]; then
+           echo "-P 0"
+       fi
+       ) $rc_flags"
+}
+
+load_rc_config $name
+run_rc_command "$1"
index a4f8560bb07fb7a94d250dadf652ebb6c223acbc..b5873a3aac5b48e53bb735f9476e6609c5ad0c72 100644 (file)
@@ -325,7 +325,17 @@ class CommandEntry:
     def load(cls, xref):
         nodes = NodeDict()
 
+        mgmtname = "mgmtd/libmgmt_be_nb.la"
         for cmd_name, origins in xref.get("cli", {}).items():
+            # If mgmtd has a YANG version of a CLI command, make it the only daemon
+            # to handle it.  For now, daemons may still compile their commands into
+            # their binaries so they can run standalone with CLI config files. When
+            # they do, those commands also show up in the xref file, but we want to
+            # ignore them in vtysh.
+            if "yang" in origins.get(mgmtname, {}).get("attrs", []):
+                CommandEntry.process(nodes, cmd_name, mgmtname, origins[mgmtname])
+                continue
+
             for origin, spec in origins.items():
                 CommandEntry.process(nodes, cmd_name, origin, spec)
         return nodes
index e897822ecc1205dcf43a3145038d986b895fc075..21aa84df5c21bd416b5c060983bb5e75478ad7d5 100644 (file)
@@ -29,23 +29,3 @@ CLEANFILES += \
        # end
 
 EXTRA_DIST += qpb/qpb.proto
-SUFFIXES += .proto .pb-c.c .pb-c.h
-
-if HAVE_PROTOBUF
-
-# Rules
-.proto.pb.h:
-       $(PROTOC) -I$(top_srcdir) --cpp_out=$(top_builddir) $^
-
-AM_V_PROTOC_C = $(am__v_PROTOC_C_$(V))
-am__v_PROTOC_C_ = $(am__v_PROTOC_C_$(AM_DEFAULT_VERBOSITY))
-am__v_PROTOC_C_0 = @echo "  PROTOC_C" $@;
-am__v_PROTOC_C_1 =
-
-.proto.pb-c.c:
-       $(AM_V_PROTOC_C)$(PROTOC_C) -I$(top_srcdir) --c_out=$(top_builddir) $^
-       $(AM_V_GEN)$(SED) -e '1i#include "config.h"' -i $@
-.pb-c.c.pb-c.h:
-       @/bin/true
-
-endif  # HAVE_PROTOBUF
index 14973ba890e48e94487c03bb88f8a50b49173e2d..4dec84b8fb3ce09c0122d29813c26a398f996de5 100644 (file)
@@ -667,6 +667,7 @@ fi
 %{_sbindir}/ospfd
 %{_sbindir}/ripd
 %{_sbindir}/bgpd
+%{_sbindir}/mgmtd
 %exclude %{_sbindir}/ssd
 %if %{with_watchfrr}
     %{_sbindir}/watchfrr
@@ -716,6 +717,9 @@ fi
 %{_libdir}/frr/modules/dplane_fpm_nl.so
 %{_libdir}/frr/modules/zebra_irdp.so
 %{_libdir}/frr/modules/bgpd_bmp.so
+%{_libdir}/libfrr_pb.so*
+%{_libdir}/libfrrfpm_pb.so*
+%{_libdir}/libmgmt_be_nb.so*
 %{_bindir}/*
 %config(noreplace) %{configdir}/[!v]*.conf*
 %config(noreplace) %attr(750,%{frr_user},%{frr_user}) %{configdir}/daemons
@@ -775,6 +779,8 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons
 %{_libdir}/lib*.so
 %dir %{_includedir}/%{name}
 %{_includedir}/%{name}/*.h
+%dir %{_includedir}/%{name}/mgmtd
+%{_includedir}/%{name}/mgmtd/*.h
 %dir %{_includedir}/%{name}/ospfd
 %{_includedir}/%{name}/ospfd/*.h
 %if %{with_bfdd}
index bc501b3d6947c38888b6eee1d39c160896a25742..d429162196c5c79579fec113473cb611764b6b7a 100644 (file)
@@ -27,6 +27,8 @@
 #include "static_debug.h"
 #include "static_nb.h"
 
+#include "mgmt_be_client.h"
+
 char backup_config_file[256];
 
 bool mpls_enabled;
@@ -51,6 +53,8 @@ struct option longopts[] = { { 0 } };
 /* Master of threads. */
 struct thread_master *master;
 
+uintptr_t mgmt_lib_hndl;
+
 static struct frr_daemon_info staticd_di;
 /* SIGHUP handler. */
 static void sighup(void)
@@ -64,6 +68,11 @@ static void sigint(void)
 {
        zlog_notice("Terminating on signal");
 
+       /* Disable BFD events to avoid wasting processing. */
+       bfd_protocol_integration_set_shutdown(true);
+
+       mgmt_be_client_lib_destroy(mgmt_lib_hndl);
+
        static_vrf_terminate();
 
        static_zebra_stop();
@@ -97,6 +106,51 @@ struct frr_signal_t static_signals[] = {
        },
 };
 
+static void static_mgmt_be_client_connect(uintptr_t lib_hndl,
+                                         uintptr_t usr_data, bool connected)
+{
+       (void)usr_data;
+
+       assert(lib_hndl == mgmt_lib_hndl);
+
+       zlog_debug("Got %s %s MGMTD Backend Client Server",
+                  connected ? "connected" : "disconnected",
+                  connected ? "to" : "from");
+
+       if (connected)
+               (void)mgmt_be_subscribe_yang_data(mgmt_lib_hndl, NULL, 0);
+}
+
+#if 0
+static void
+static_mgmt_txn_notify(uintptr_t lib_hndl, uintptr_t usr_data,
+                       struct mgmt_be_client_txn_ctx *txn_ctx,
+                       bool destroyed)
+{
+       zlog_debug("Got Txn %s Notify from MGMTD server",
+                  destroyed ? "DESTROY" : "CREATE");
+
+       if (!destroyed) {
+               /*
+                * TODO: Allocate and install a private scratchpad for this
+                * transaction if required
+                */
+       } else {
+               /*
+                * TODO: Uninstall and deallocate the private scratchpad for
+                * this transaction if installed earlier.
+                */
+       }
+}
+#endif
+
+static struct mgmt_be_client_params mgmt_params = {
+       .name = "staticd",
+       .conn_retry_intvl_sec = 3,
+       .client_connect_notify = static_mgmt_be_client_connect,
+       .txn_notify = NULL, /* static_mgmt_txn_notify */
+};
+
 static const struct frr_yang_module_info *const staticd_yang_modules[] = {
        &frr_filter_info,
        &frr_interface_info,
@@ -147,6 +201,10 @@ int main(int argc, char **argv, char **envp)
        static_zebra_init();
        static_vty_init();
 
+       /* Initialize MGMT backend functionalities */
+       mgmt_lib_hndl = mgmt_be_client_lib_init(&mgmt_params, master);
+       assert(mgmt_lib_hndl);
+
        hook_register(routing_conf_event,
                      routing_control_plane_protocols_name_validate);
 
index 3399686a90d05d3d01b021158bb2c4cd148180a5..1fbbf7e99d34fa6b40dfbe48970fbbd559389abc 100644 (file)
@@ -757,30 +757,6 @@ void static_ifindex_update(struct interface *ifp, bool up)
        static_ifindex_update_af(ifp, up, AFI_IP6, SAFI_MULTICAST);
 }
 
-void static_get_nh_type(enum static_nh_type stype, char *type, size_t size)
-{
-       switch (stype) {
-       case STATIC_IFNAME:
-               strlcpy(type, "ifindex", size);
-               break;
-       case STATIC_IPV4_GATEWAY:
-               strlcpy(type, "ip4", size);
-               break;
-       case STATIC_IPV4_GATEWAY_IFNAME:
-               strlcpy(type, "ip4-ifindex", size);
-               break;
-       case STATIC_BLACKHOLE:
-               strlcpy(type, "blackhole", size);
-               break;
-       case STATIC_IPV6_GATEWAY:
-               strlcpy(type, "ip6", size);
-               break;
-       case STATIC_IPV6_GATEWAY_IFNAME:
-               strlcpy(type, "ip6-ifindex", size);
-               break;
-       };
-}
-
 struct stable_info *static_get_stable_info(struct route_node *rn)
 {
        struct route_table *table;
index eb7953db2cfb79dab27c097b0cc2b541a7eaa4f9..4fd7a1342ff8cdcc1f5a2ea630a31971271db7b9 100644 (file)
@@ -159,6 +159,31 @@ static_route_info_from_rnode(struct route_node *rn)
        return (struct static_route_info *)(rn->info);
 }
 
+static inline void static_get_nh_type(enum static_nh_type stype, char *type,
+                                     size_t size)
+{
+       switch (stype) {
+       case STATIC_IFNAME:
+               strlcpy(type, "ifindex", size);
+               break;
+       case STATIC_IPV4_GATEWAY:
+               strlcpy(type, "ip4", size);
+               break;
+       case STATIC_IPV4_GATEWAY_IFNAME:
+               strlcpy(type, "ip4-ifindex", size);
+               break;
+       case STATIC_BLACKHOLE:
+               strlcpy(type, "blackhole", size);
+               break;
+       case STATIC_IPV6_GATEWAY:
+               strlcpy(type, "ip6", size);
+               break;
+       case STATIC_IPV6_GATEWAY_IFNAME:
+               strlcpy(type, "ip6-ifindex", size);
+               break;
+       };
+}
+
 extern bool mpls_enabled;
 extern uint32_t zebra_ecmp_count;
 
@@ -192,8 +217,6 @@ extern struct static_path *static_add_path(struct route_node *rn,
                                           uint32_t table_id, uint8_t distance);
 extern void static_del_path(struct static_path *pn);
 
-extern void static_get_nh_type(enum static_nh_type stype, char *type,
-                              size_t size);
 extern bool static_add_nexthop_validate(const char *nh_vrf_name,
                                        enum static_nh_type type,
                                        struct ipaddr *ipaddr);
index d87ca16c6198de10e8b1bb669b71a048dfaa90fd..386b255a855464b6bc8d671f2272ef516d79758b 100644 (file)
@@ -285,10 +285,10 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
                                nb_cli_enqueue_change(vty, ab_xpath,
                                                      NB_OP_MODIFY, "false");
                }
-               if (type == STATIC_IPV4_GATEWAY
-                   || type == STATIC_IPV6_GATEWAY
-                   || type == STATIC_IPV4_GATEWAY_IFNAME
-                   || type == STATIC_IPV6_GATEWAY_IFNAME) {
+               if (type == STATIC_IPV4_GATEWAY ||
+                   type == STATIC_IPV6_GATEWAY ||
+                   type == STATIC_IPV4_GATEWAY_IFNAME ||
+                   type == STATIC_IPV6_GATEWAY_IFNAME) {
                        strlcpy(ab_xpath, xpath_nexthop, sizeof(ab_xpath));
                        strlcat(ab_xpath, FRR_STATIC_ROUTE_NH_COLOR_XPATH,
                                sizeof(ab_xpath));
@@ -368,25 +368,51 @@ static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
 
                ret = nb_cli_apply_changes(vty, "%s", xpath_prefix);
        } else {
-               if (args->source)
-                       snprintf(ab_xpath, sizeof(ab_xpath),
-                                FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH,
-                                "frr-staticd:staticd", "staticd", args->vrf,
-                                buf_prefix,
-                                yang_afi_safi_value2identity(args->afi,
-                                                             args->safi),
-                                buf_src_prefix, table_id, buf_nh_type,
-                                args->nexthop_vrf, buf_gate_str,
-                                args->interface_name);
-               else
-                       snprintf(ab_xpath, sizeof(ab_xpath),
-                                FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
-                                "frr-staticd:staticd", "staticd", args->vrf,
-                                buf_prefix,
-                                yang_afi_safi_value2identity(args->afi,
-                                                             args->safi),
-                                table_id, buf_nh_type, args->nexthop_vrf,
-                                buf_gate_str, args->interface_name);
+               if (args->source) {
+                       if (args->distance)
+                               snprintf(ab_xpath, sizeof(ab_xpath),
+                                        FRR_DEL_S_ROUTE_SRC_NH_KEY_XPATH,
+                                        "frr-staticd:staticd", "staticd",
+                                        args->vrf, buf_prefix,
+                                        yang_afi_safi_value2identity(
+                                                args->afi, args->safi),
+                                        buf_src_prefix, table_id, distance,
+                                        buf_nh_type, args->nexthop_vrf,
+                                        buf_gate_str, args->interface_name);
+                       else
+                               snprintf(
+                                       ab_xpath, sizeof(ab_xpath),
+                                       FRR_DEL_S_ROUTE_SRC_NH_KEY_NO_DISTANCE_XPATH,
+                                       "frr-staticd:staticd", "staticd",
+                                       args->vrf, buf_prefix,
+                                       yang_afi_safi_value2identity(
+                                               args->afi, args->safi),
+                                       buf_src_prefix, table_id, buf_nh_type,
+                                       args->nexthop_vrf, buf_gate_str,
+                                       args->interface_name);
+               } else {
+                       if (args->distance)
+                               snprintf(ab_xpath, sizeof(ab_xpath),
+                                        FRR_DEL_S_ROUTE_NH_KEY_XPATH,
+                                        "frr-staticd:staticd", "staticd",
+                                        args->vrf, buf_prefix,
+                                        yang_afi_safi_value2identity(
+                                                args->afi, args->safi),
+                                        table_id, distance, buf_nh_type,
+                                        args->nexthop_vrf, buf_gate_str,
+                                        args->interface_name);
+                       else
+                               snprintf(
+                                       ab_xpath, sizeof(ab_xpath),
+                                       FRR_DEL_S_ROUTE_NH_KEY_NO_DISTANCE_XPATH,
+                                       "frr-staticd:staticd", "staticd",
+                                       args->vrf, buf_prefix,
+                                       yang_afi_safi_value2identity(
+                                               args->afi, args->safi),
+                                       table_id, buf_nh_type,
+                                       args->nexthop_vrf, buf_gate_str,
+                                       args->interface_name);
+               }
 
                dnode = yang_dnode_get(vty->candidate_config->dnode, ab_xpath);
                if (!dnode) {
@@ -1452,15 +1478,18 @@ DEFPY_YANG(debug_staticd, debug_staticd_cmd,
           "Debug route\n"
           "Debug bfd\n")
 {
+#ifndef INCLUDE_MGMTD_CMDDEFS_ONLY
        /* If no specific category, change all */
        if (strmatch(argv[argc - 1]->text, "static"))
                static_debug_set(vty->node, !no, true, true, true);
        else
                static_debug_set(vty->node, !no, !!events, !!route, !!bfd);
+#endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */
 
        return CMD_SUCCESS;
 }
 
+#ifndef INCLUDE_MGMTD_CMDDEFS_ONLY
 DEFPY(staticd_show_bfd_routes, staticd_show_bfd_routes_cmd,
       "show bfd static route [json]$isjson",
       SHOW_STR
@@ -1496,9 +1525,15 @@ static struct cmd_node debug_node = {
        .config_write = static_config_write_debug,
 };
 
+#endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */
+
 void static_vty_init(void)
 {
+#ifndef INCLUDE_MGMTD_CMDDEFS_ONLY
        install_node(&debug_node);
+       install_element(ENABLE_NODE, &show_debugging_static_cmd);
+       install_element(ENABLE_NODE, &staticd_show_bfd_routes_cmd);
+#endif /* ifndef INCLUDE_MGMTD_CMDDEFS_ONLY */
 
        install_element(CONFIG_NODE, &ip_mroute_dist_cmd);
 
@@ -1516,9 +1551,6 @@ void static_vty_init(void)
        install_element(CONFIG_NODE, &ipv6_route_cmd);
        install_element(VRF_NODE, &ipv6_route_vrf_cmd);
 
-       install_element(ENABLE_NODE, &show_debugging_static_cmd);
        install_element(ENABLE_NODE, &debug_staticd_cmd);
        install_element(CONFIG_NODE, &debug_staticd_cmd);
-
-       install_element(ENABLE_NODE, &staticd_show_bfd_routes_cmd);
 }
index fe84d496ac0bcb347e090f936b8b5125f74be2fe..f7c3a4c19d29333b20a99379e43df9a84835efae 100644 (file)
@@ -82,6 +82,7 @@ def setup_module(module):
     #
     # Main router
     for i in range(1, 2):
+        net["r%s" % i].loadConf("mgmtd", "%s/r%s/zebra.conf" % (thisDir, i))
         net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
         net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i))
         net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i))
index b720218e9bc8a196fabcfc468790a5f677a10d2f..636dbf354d85dcc916f64b9608e7b8114b6708e2 100644 (file)
@@ -45,6 +45,7 @@ def setup_module(mod):
 
     router_list = tgen.routers()
     for rname, router in router_list.items():
+
         daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
         router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
 
index fec9d485b5d3b0fe039f81871741dbabb8cb35c2..8de6f9bf7082a0ef9bef54d856bf22aab1288811 100644 (file)
@@ -24,7 +24,7 @@ import math
 import os
 import sys
 import pytest
-
+from lib import topotest
 
 CWD = os.path.dirname(os.path.realpath(__file__))
 sys.path.append(os.path.join(CWD, "../"))
@@ -109,7 +109,9 @@ def test_static_timing():
                 router.logdir, rname, "{}-routes-{}.conf".format(iptype.lower(), optype)
             )
             with open(config_file, "w") as f:
-                for i, net in enumerate(get_ip_networks(super_prefix, base_count, count)):
+                for i, net in enumerate(
+                    get_ip_networks(super_prefix, base_count, count)
+                ):
                     if i in bad_indices:
                         if add:
                             f.write("ip route {} {} bad_input\n".format(net, via))
@@ -148,7 +150,6 @@ def test_static_timing():
 
         return tot_delta
 
-
     # Number of static routes
     router = tgen.gears["r1"]
     output = router.run("vtysh -h | grep address-sanitizer")
@@ -164,20 +165,49 @@ def test_static_timing():
         [u"2100:1111:2220::/44", u"2100:3333:4440::/44"],
     ]
 
+    # This sleep is apparently needed to allow the various mgmtd/staticd/zebra
+    # connections to form, which then SLOWS execution down. If we don't include
+    # this delay, the initial, baseline-establishing run is about twice as fast
+    # (e.g., 5s instead of 10s), but all later runs are slower and fail.
+    #
+    # This should be done differently, based on actual facts.
+    topotest.sleep(5)
+
     bad_indices = []
     for ipv6 in [False, True]:
         base_delta = do_config(
-            prefix_count, prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0]
+            prefix_count,
+            prefix_count,
+            bad_indices,
+            0,
+            0,
+            True,
+            ipv6,
+            prefix_base[ipv6][0],
         )
 
         # Another set of same number of prefixes
         do_config(
-            prefix_count, prefix_count, bad_indices, base_delta, 3, True, ipv6, prefix_base[ipv6][1]
+            prefix_count,
+            prefix_count,
+            bad_indices,
+            base_delta,
+            3,
+            True,
+            ipv6,
+            prefix_base[ipv6][1],
         )
 
         # Duplicate config
         do_config(
-            prefix_count, prefix_count, bad_indices, base_delta, 3, True, ipv6, prefix_base[ipv6][0]
+            prefix_count,
+            prefix_count,
+            bad_indices,
+            base_delta,
+            3,
+            True,
+            ipv6,
+            prefix_base[ipv6][0],
         )
 
         # Remove 1/2 of duplicate
@@ -194,15 +224,36 @@ def test_static_timing():
 
         # Add all back in so 1/2 replicate 1/2 new
         do_config(
-            prefix_count, prefix_count, bad_indices, base_delta, 3, True, ipv6, prefix_base[ipv6][0]
+            prefix_count,
+            prefix_count,
+            bad_indices,
+            base_delta,
+            3,
+            True,
+            ipv6,
+            prefix_base[ipv6][0],
         )
 
         # remove all
         delta = do_config(
-            prefix_count, prefix_count, bad_indices, base_delta, 3, False, ipv6, prefix_base[ipv6][0]
+            prefix_count,
+            prefix_count,
+            bad_indices,
+            base_delta,
+            3,
+            False,
+            ipv6,
+            prefix_base[ipv6][0],
         )
         delta += do_config(
-            prefix_count, prefix_count, bad_indices, base_delta, 3, False, ipv6, prefix_base[ipv6][1]
+            prefix_count,
+            prefix_count,
+            bad_indices,
+            base_delta,
+            3,
+            False,
+            ipv6,
+            prefix_base[ipv6][1],
         )
 
 
index 3e02769d873a5db0afb8595161456f1ea82c5ac0..e5a1e758379c1aa312c9fc00eb0678e1f95e38f4 100644 (file)
@@ -110,6 +110,7 @@ DEBUG_LOGS = {
         "debug zebra vxlan",
         "debug zebra nht",
     ],
+    "mgmt": [],
     "ospf": [
         "debug ospf event",
         "debug ospf ism",
@@ -450,6 +451,8 @@ def check_router_status(tgen):
             result = rnode.check_router_running()
             if result != "":
                 daemons = []
+                if "mgmtd" in result:
+                    daemons.append("mgmtd")
                 if "bgpd" in result:
                     daemons.append("bgpd")
                 if "zebra" in result:
@@ -1047,6 +1050,11 @@ def start_topology(tgen):
                     feature.add("ospf6")
                     break
 
+        # Loading empty mgmtd.conf file to router, to start the mgmtd daemon
+        router.load_config(
+            TopoRouter.RD_MGMTD, "{}/{}/mgmtd.conf".format(tgen.logdir, rname)
+        )
+
         # Loading empty zebra.conf file to router, to start the zebra deamon
         router.load_config(
             TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
@@ -2590,7 +2598,7 @@ def create_route_maps(tgen, input_dict, build=False):
                         nexthop = set_data.setdefault("nexthop", None)
                         origin = set_data.setdefault("origin", None)
                         ext_comm_list = set_data.setdefault("extcommunity", {})
-                        metrictype = set_data.setdefault("metric-type", {})
+                        metrictype = set_data.setdefault("metric-type", None)
 
                         # Local Preference
                         if local_preference:
index 16d89f079a720d9182c3055336b2796f91e80b41..41da660b7d027afd1fae978147784cfd072efd47 100644 (file)
@@ -485,7 +485,7 @@ class Topogen(object):
         memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get(
             self.CONFIG_SECTION, "memleak_path"
         )
-        if memleak_file == "" or memleak_file == None:
+        if memleak_file == "" or memleak_file is None:
             return False
         return True
 
@@ -713,6 +713,7 @@ class TopoRouter(TopoGear):
     RD_PATH = 17
     RD_SNMP = 18
     RD_PIM6 = 19
+    RD_MGMTD = 20
     RD = {
         RD_FRR: "frr",
         RD_ZEBRA: "zebra",
@@ -734,6 +735,7 @@ class TopoRouter(TopoGear):
         RD_PBRD: "pbrd",
         RD_PATH: "pathd",
         RD_SNMP: "snmpd",
+        RD_MGMTD: "mgmtd",
     }
 
     def __init__(self, tgen, cls, name, **params):
@@ -810,7 +812,7 @@ class TopoRouter(TopoGear):
         TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
         TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
         TopoRouter.RD_PIM, TopoRouter.RD_PIM6, TopoRouter.RD_PBR,
-        TopoRouter.RD_SNMP.
+        TopoRouter.RD_SNMP, TopoRouter.RD_MGMTD.
 
         Possible `source` values are `None` for an empty config file, a path name which is
         used directly, or a file name with no path components which is first looked for
@@ -1017,7 +1019,7 @@ class TopoRouter(TopoGear):
         memleak_file = (
             os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.params["memleak_path"]
         )
-        if memleak_file == "" or memleak_file == None:
+        if memleak_file == "" or memleak_file is None:
             return
 
         self.stop()
@@ -1174,7 +1176,7 @@ class TopoExaBGP(TopoHost):
         self.run("chown -R exabgp:exabgp /etc/exabgp")
 
         output = self.run(exacmd + " -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
-        if output == None or len(output) == 0:
+        if output is None or len(output) == 0:
             output = "<none>"
 
         logger.info("{} exabgp started, output={}".format(self.name, output))
@@ -1269,6 +1271,7 @@ def diagnose_env_linux(rundir):
             "pim6d",
             "ldpd",
             "pbrd",
+            "mgmtd",
         ]:
             path = os.path.join(frrdir, fname)
             if not os.path.isfile(path):
@@ -1283,9 +1286,10 @@ def diagnose_env_linux(rundir):
                 logger.error("could not find {} in {}".format(fname, frrdir))
                 ret = False
             else:
-                if fname != "zebra":
+                if fname != "zebra" or fname != "mgmtd":
                     continue
 
+                os.system("{} -v 2>&1 >{}/frr_mgmtd.txt".format(path, rundir))
                 os.system("{} -v 2>&1 >{}/frr_zebra.txt".format(path, rundir))
 
     # Test MPLS availability
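
With TopoRouter.RD_MGMTD added above, a topotest can opt into mgmtd explicitly. A minimal sketch under that assumption follows (the helper name and config file names are illustrative, mirroring the new static_simple test further below):

# Hypothetical helper; in a real test this would run inside a pytest fixture.
from lib.topogen import TopoRouter

def setup_routers(tgen):
    for _rname, router in tgen.routers().items():
        # An empty config is enough to have the daemon started.
        router.load_config(TopoRouter.RD_MGMTD)
        router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
        router.load_config(TopoRouter.RD_STATIC, "staticd.conf")
    tgen.start_router()
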
index 0cd60b228d0803fd8ddc32d7310ceca88d732e29..d35b908e120ed9851dcefdbdd5c674b0f8465a2c 100644 (file)
@@ -582,6 +582,7 @@ def iproute2_is_vrf_capable():
             pass
     return False
 
+
 def iproute2_is_fdb_get_capable():
     """
     Checks if the iproute2 version installed on the system is capable of
@@ -606,6 +607,7 @@ def iproute2_is_fdb_get_capable():
             pass
     return False
 
+
 def module_present_linux(module, load):
     """
     Returns whether `module` is present.
@@ -1371,6 +1373,7 @@ class Router(Node):
             "pbrd": 0,
             "pathd": 0,
             "snmpd": 0,
+            "mgmtd": 0,
         }
         self.daemons_options = {"zebra": ""}
         self.reportCores = True
@@ -1398,6 +1401,10 @@ class Router(Node):
         if not os.path.isfile(zebra_path):
             raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
 
+        mgmtd_path = os.path.join(self.daemondir, "mgmtd")
+        if not os.path.isfile(mgmtd_path):
+            raise Exception("FRR MGMTD binary doesn't exist at {}".format(mgmtd_path))
+
     # pylint: disable=W0221
     # Some params are only meaningful for the parent class.
     def config(self, **params):
@@ -1415,6 +1422,10 @@ class Router(Node):
             zpath = os.path.join(self.daemondir, "zebra")
             if not os.path.isfile(zpath):
                 raise Exception("No zebra binary found in {}".format(zpath))
+
+            cpath = os.path.join(self.daemondir, "mgmtd")
+            if not os.path.isfile(cpath):
+                raise Exception("No MGMTD binary found in {}".format(cpath))
             # Allow user to specify routertype when the path was specified.
             if params.get("routertype") is not None:
                 self.routertype = params.get("routertype")
@@ -1567,6 +1578,10 @@ class Router(Node):
                     self.cmd_raises("rm -f " + conf_file)
                     self.cmd_raises("touch " + conf_file)
             else:
+                # also copy zebra.conf to mgmtd.conf, so mgmtd has a config to read at startup
+                if daemon == "zebra":
+                    conf_file_mgmt = "/etc/{}/{}.conf".format(self.routertype, "mgmtd")
+                    self.cmd_raises("cp {} {}".format(source, conf_file_mgmt))
                 self.cmd_raises("cp {} {}".format(source, conf_file))
 
             if not self.unified_config or daemon == "frr":
@@ -1578,6 +1593,17 @@ class Router(Node):
                 self.cmd('echo "agentXSocket /etc/frr/agentx" >> /etc/snmp/frr.conf')
                 self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
 
+            if (daemon == "zebra") and (self.daemons["mgmtd"] == 0):
+                # Add mgmtd with zebra - if it exists
+                try:
+                    mgmtd_path = os.path.join(self.daemondir, "mgmtd")
+                except:
+                    pdb.set_trace()
+                if os.path.isfile(mgmtd_path):
+                    self.daemons["mgmtd"] = 1
+                    self.daemons_options["mgmtd"] = ""
+                    # Auto-Started mgmtd has no config, so it will read from zebra config
+
             if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                 # Add staticd with zebra - if it exists
                 try:
@@ -1589,6 +1615,7 @@ class Router(Node):
                     self.daemons["staticd"] = 1
                     self.daemons_options["staticd"] = ""
                     # Auto-Started staticd has no config, so it will read from zebra config
+
         else:
             logger.info("No daemon {} known".format(daemon))
         # print "Daemons after:", self.daemons
@@ -1834,7 +1861,13 @@ class Router(Node):
                 else:
                     logger.info("%s: %s %s started", self, self.routertype, daemon)
 
-        # Start Zebra first
+        # Start mgmtd first
+        if "mgmtd" in daemons_list:
+            start_daemon("mgmtd")
+            while "mgmtd" in daemons_list:
+                daemons_list.remove("mgmtd")
+
+        # Start Zebra after mgmtd
         if "zebra" in daemons_list:
             start_daemon("zebra", "-s 90000000")
             while "zebra" in daemons_list:
diff --git a/tests/topotests/mgmt_tests/test_yang_mgmt.py b/tests/topotests/mgmt_tests/test_yang_mgmt.py
new file mode 100644 (file)
index 0000000..06c18d7
--- /dev/null
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation,
+# Inc. ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+"""
+
+1. Verify mgmt commit check.
+2. Verify mgmt commit apply.
+3. Verify mgmt commit abort.
+4. Verify mgmt delete config.
+5. Kill mgmtd - verify that static routes are intact.
+6. Kill mgmtd - verify that watch frr restarts.
+7. Show and CLI - Execute all the newly introduced commands of mgmtd.
+8. Verify mgmt rollback functionality.
+
+"""
+import sys
+import time
+import os
+import pytest
+import platform
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topotest import version_cmp
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+    start_topology,
+    write_test_header,
+    write_test_footer,
+    reset_config_on_routers,
+    verify_rib,
+    create_static_routes,
+    check_address_types,
+    step,
+    shutdown_bringup_interface,
+    stop_router,
+    start_router,
+    apply_raw_config,
+    kill_router_daemons,
+    start_router_daemons,
+)
+from lib.topolog import logger
+from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
+from lib.topojson import build_config_from_json
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+# Global variables
+ADDR_TYPES = check_address_types()
+NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]}
+NETWORK2 = {"ipv4": "11.0.20.1/32", "ipv6": "2::1/128"}
+PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"}
+
+
+def setup_module(mod):
+    """
+    Sets up the pytest environment.
+
+    * `mod`: module name
+    """
+
+    testsuite_run_time = time.asctime(time.localtime(time.time()))
+    logger.info("Testsuite start time: {}".format(testsuite_run_time))
+    logger.info("=" * 40)
+
+    logger.info("Running setup_module to create topology")
+
+    # This function initiates the topology build with Topogen...
+    json_file = "{}/yang_mgmt.json".format(CWD)
+    tgen = Topogen(json_file, mod.__name__)
+    global topo
+    topo = tgen.json_topo
+    # ... and here it calls Mininet initialization functions.
+
+    # Starting topology, create tmp files which are loaded to routers
+    #  to start daemons and then start routers
+    start_topology(tgen)
+
+    # Creating configuration from JSON
+    build_config_from_json(tgen, topo)
+
+    if version_cmp(platform.release(), "4.19") < 0:
+        error_msg = (
+            'These tests will not run. (have kernel "{}", '
+            "requires kernel >= 4.19)".format(platform.release())
+        )
+        pytest.skip(error_msg)
+
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    logger.info("Running setup_module() done")
+
+
+def teardown_module(mod):
+    """
+    Teardown the pytest environment.
+
+    * `mod`: module name
+    """
+
+    logger.info("Running teardown_module to delete topology: %s", mod)
+
+    tgen = get_topogen()
+
+    # Stop topology and remove tmp files
+    tgen.stop_topology()
+
+    logger.info(
+        "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+    )
+    logger.info("=" * 40)
+
+
+def populate_nh():
+    """
+    Populate nexthops.
+    """
+
+    next_hop_ip = {
+        "nh1": {
+            "ipv4": topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0],
+            "ipv6": topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0],
+        },
+        "nh2": {
+            "ipv4": topo["routers"]["r1"]["links"]["r2-link1"]["ipv4"].split("/")[0],
+            "ipv6": topo["routers"]["r1"]["links"]["r2-link1"]["ipv6"].split("/")[0],
+        },
+    }
+    return next_hop_ip
+
+
+#####################################################
+#
+#   Testcases
+#
+#####################################################
+
+
+def test_mgmt_commit_check(request):
+    """
+    Verify mgmt commit check.
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    reset_config_on_routers(tgen)
+
+    step("Mgmt Commit check")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+                "mgmt commit check",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Mgmt Commit check")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.2/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+                "mgmt commit check",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify that the route is not configured, as commit apply not done.")
+
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {
+        "r2": {
+            "static_routes": [
+                {
+                    "network": "1192.1.1.2/32",
+                    "next_hop": "Null0",
+                }
+            ]
+        }
+    }
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    write_test_footer(tc_name)
+
+
+def test_mgmt_commit_apply(request):
+    """
+    Verify mgmt commit apply.
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    reset_config_on_routers(tgen)
+
+    step("Mgmt Commit apply with Valid Configuration")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Mgmt Commit apply with Invalid Configuration")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.20/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify that the route is configured")
+
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {"r2": {"static_routes": [{"network": "192.1.1.20/32"}]}}
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    write_test_footer(tc_name)
+
+
+def test_mgmt_commit_abort(request):
+    """
+    Verify mgmt commit abort.
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    reset_config_on_routers(tgen)
+
+    step("Mgmt Commit abort")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+                "mgmt commit abort",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify that the route is not configured")
+
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {
+        "r2": {
+            "static_routes": [
+                {
+                    "network": "192.1.1.3/32",
+                    "next_hop": "Null0",
+                }
+            ]
+        }
+    }
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    write_test_footer(tc_name)
+
+
+def test_mgmt_delete_config(request):
+    """
+    Verify mgmt delete config.
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    reset_config_on_routers(tgen)
+
+    step("Mgmt - Configure a static route using commit apply")
+
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/vrf default",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify that the route is added to RIB")
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {
+        "r2": {
+            "static_routes": [
+                {
+                    "network": "192.168.1.3/32",
+                    "next_hop": "Null0",
+                }
+            ]
+        }
+    }
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Mgmt delete config")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.168.1.3/32'][afi-safi='frr-routing:ipv4-unicast']",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Verify that the route is deleted from RIB")
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes is still present in RIB".format(tc_name)
+
+    write_test_footer(tc_name)
+
+
+def test_mgmt_chaos_stop_start_frr(request):
+    """
+    Restart FRR - verify that routes configured via mgmtd are intact.
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    reset_config_on_routers(tgen)
+    next_hop_ip = populate_nh()
+
+    step("Configure Static route with next hop null 0")
+
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify that the route is configured and present in the zebra")
+
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {"r2": {"static_routes": [{"network": "192.1.11.200/32"}]}}
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Restart frr")
+    stop_router(tgen, "r1")
+    start_router(tgen, "r1")
+    step("Verify routes are intact in zebra.")
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("delete the configured route and ")
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt  delete-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify that the route is deleted and deleted from zebra")
+
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {"r1": {"static_routes": [{"network": "192.1.11.200/32"}]}}
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is not True
+    ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
+
+    write_test_footer(tc_name)
+
+
+def test_mgmt_chaos_kill_daemon(request):
+    """
+    Kill mgmtd - verify that static routes are intact
+
+    """
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    tgen = get_topogen()
+    # Don't run this test if we have any failure.
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    reset_config_on_routers(tgen)
+    next_hop_ip = populate_nh()
+
+    step("Configure Static route with next hop null 0")
+
+    raw_config = {
+        "r1": {
+            "raw_config": [
+                "mgmt set-config /frr-routing:routing/control-plane-protocols/control-plane-protocol[type='frr-staticd:staticd'][name='staticd'][vrf='default']/frr-staticd:staticd/route-list[prefix='192.1.11.200/32'][afi-safi='frr-routing:ipv4-unicast']/path-list[table-id='0'][distance='1']/frr-nexthops/nexthop[nh-type='blackhole'][vrf='default'][gateway=''][interface='(null)']/bh-type unspec",
+                "mgmt commit apply",
+            ]
+        }
+    }
+
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify that the route is configured and present in the zebra")
+
+    dut = "r1"
+    protocol = "static"
+    input_dict_4 = {"r2": {"static_routes": [{"network": "192.1.11.200/32"}]}}
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Kill static daemon on R2.")
+    kill_router_daemons(tgen, "r1", ["staticd"])
+
+    step("Bring up staticd daemon on R2.")
+    start_router_daemons(tgen, "r1", ["staticd"])
+
+    step("Verify routes are intact in zebra.")
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    step("Kill mgmt daemon on R2.")
+    kill_router_daemons(tgen, "r1", ["mgmtd"])
+
+    step("Bring up zebra daemon on R2.")
+    start_router_daemons(tgen, "r1", ["mgmtd"])
+
+    step("Verify routes are intact in zebra.")
+    result = verify_rib(tgen, "ipv4", dut, input_dict_4, protocol=protocol)
+    assert (
+        result is True
+    ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
+
+    write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
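
The mgmt set-config commands above inline very long xpaths. Purely as a readability aid, here is a hedged sketch of how the same xpath could be assembled; the helper is hypothetical and not part of this commit:

# Hypothetical readability helper; the committed test inlines the full xpath.
STATICD = (
    "/frr-routing:routing/control-plane-protocols/"
    "control-plane-protocol[type='frr-staticd:staticd']"
    "[name='staticd'][vrf='default']/frr-staticd:staticd"
)

def blackhole_nh_xpath(prefix, leaf):
    """Return the xpath to `leaf` of the blackhole nexthop for `prefix`."""
    return (
        f"{STATICD}/route-list[prefix='{prefix}']"
        "[afi-safi='frr-routing:ipv4-unicast']"
        "/path-list[table-id='0'][distance='1']/frr-nexthops"
        "/nexthop[nh-type='blackhole'][vrf='default']"
        "[gateway=''][interface='(null)']/" + leaf
    )

# "mgmt set-config {} unspec".format(blackhole_nh_xpath("192.1.1.2/32", "bh-type"))
# reproduces the first command used in test_mgmt_commit_check() above.
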
diff --git a/tests/topotests/mgmt_tests/yang_mgmt.json b/tests/topotests/mgmt_tests/yang_mgmt.json
new file mode 100644 (file)
index 0000000..0fe3bb1
--- /dev/null
@@ -0,0 +1,157 @@
+{
+    "address_types": [
+        "ipv4",
+        "ipv6"
+    ],
+    "ipv4base": "10.0.0.0",
+    "ipv4mask": 30,
+    "ipv6base": "fd00::",
+    "ipv6mask": 64,
+    "link_ip_start": {
+        "ipv4": "10.0.0.0",
+        "v4mask": 30,
+        "ipv6": "fd00::",
+        "v6mask": 64
+    },
+    "lo_prefix": {
+        "ipv4": "1.0.",
+        "v4mask": 32,
+        "ipv6": "2001:db8:f::",
+        "v6mask": 128
+    },
+    "routers": {
+        "r1": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r2-link0": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r2-link1": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r3-link0": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r3-link1": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            }
+        },
+        "r2": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r1-link0": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r1-link1": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r3": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            },
+            "bgp": {
+                "local_as": "100",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {
+                                    "dest_link": {
+                                        "r2": {
+                                            "keepalivetimer": 1,
+                                            "holddowntimer": 4
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r3": {
+                                    "dest_link": {
+                                        "r2": {
+                                            "keepalivetimer": 1,
+                                            "holddowntimer": 4
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "r3": {
+            "links": {
+                "lo": {
+                    "ipv4": "auto",
+                    "ipv6": "auto",
+                    "type": "loopback"
+                },
+                "r2": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r1-link0": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                },
+                "r1-link1": {
+                    "ipv4": "auto",
+                    "ipv6": "auto"
+                }
+            },
+            "bgp": {
+                "local_as": "200",
+                "address_family": {
+                    "ipv4": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {
+                                    "dest_link": {
+                                        "r3": {
+                                            "keepalivetimer": 1,
+                                            "holddowntimer": 4
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    },
+                    "ipv6": {
+                        "unicast": {
+                            "neighbor": {
+                                "r2": {
+                                    "dest_link": {
+                                        "r3": {
+                                            "keepalivetimer": 1,
+                                            "holddowntimer": 4
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
index 2f31608e64223825f88830dfa5c377485afa3f02..6d9304d864ede82c890be67b33097502afc62933 100644 (file)
@@ -71,6 +71,7 @@ def setup_module(mod):
 
     router_list = tgen.routers()
     for rname, router in router_list.items():
+
         daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
         if os.path.isfile(daemon_file):
             router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
index 4a007e7d2074a2c7d92de730b45f4be3f23c5846..1af58b0a010fe48fc5dbb287237020772207c662 100755 (executable)
@@ -82,6 +82,7 @@ def setup_module(mod):
 
     router_list = tgen.routers()
     for rname, router in router_list.items():
+
         daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
         if os.path.isfile(daemon_file):
             router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
index 9ec0266aa0f48217c5c5d534a174754d400b4b60..f95f7bbe5e8b440b06494231b5e11b20fc0cc8d0 100644 (file)
@@ -129,6 +129,7 @@ def setup_module(mod):
 
     router_list = tgen.routers()
     for rname, router in router_list.items():
+
         daemon_file = "{}/{}/zebra.conf".format(CWD, rname)
         if os.path.isfile(daemon_file):
             router.load_config(TopoRouter.RD_ZEBRA, daemon_file)
diff --git a/tests/topotests/static_simple/r1/mgmtd.conf b/tests/topotests/static_simple/r1/mgmtd.conf
new file mode 100644 (file)
index 0000000..0f9f97c
--- /dev/null
@@ -0,0 +1 @@
+log timestamp precision 3
diff --git a/tests/topotests/static_simple/r1/staticd.conf b/tests/topotests/static_simple/r1/staticd.conf
new file mode 100644 (file)
index 0000000..0f9f97c
--- /dev/null
@@ -0,0 +1 @@
+log timestamp precision 3
diff --git a/tests/topotests/static_simple/r1/zebra.conf b/tests/topotests/static_simple/r1/zebra.conf
new file mode 100644 (file)
index 0000000..ec82761
--- /dev/null
@@ -0,0 +1,11 @@
+log timestamp precision 3
+
+interface r1-eth0
+  ip address 101.0.0.1/24
+  ipv6 address 2101::1/64
+exit
+
+interface r1-eth1 vrf red
+  ip address 102.0.0.1/24
+  ipv6 address 2102::1/64
+exit
diff --git a/tests/topotests/static_simple/test_static_simple.py b/tests/topotests/static_simple/test_static_simple.py
new file mode 100644 (file)
index 0000000..817336a
--- /dev/null
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2019-2020 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+"""
+Test static route functionality
+"""
+
+import datetime
+import ipaddress
+import math
+import os
+import sys
+import re
+
+import pytest
+from lib.topogen import TopoRouter, Topogen, get_topogen
+from lib.topolog import logger
+from lib.common_config import retry, step
+
+pytestmark = [pytest.mark.staticd]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+    "Setup/Teardown the environment and provide tgen argument to tests"
+
+    topodef = {"s1": ("r1",), "s2": ("r1",)}
+
+    tgen = Topogen(topodef, request.module.__name__)
+    tgen.start_topology()
+
+    router_list = tgen.routers()
+    for rname, router in router_list.items():
+        # Setup VRF red
+        router.net.add_l3vrf("red", 10)
+        router.net.add_loop("lo-red")
+        router.net.attach_iface_to_l3vrf("lo-red", "red")
+        router.net.attach_iface_to_l3vrf(rname + "-eth1", "red")
+        # and select daemons to run
+        router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+        router.load_config(TopoRouter.RD_MGMTD)
+        router.load_config(TopoRouter.RD_STATIC)
+
+    tgen.start_router()
+    yield tgen
+    tgen.stop_topology()
+
+
+def get_ip_networks(super_prefix, count):
+    count_log2 = math.log(count, 2)
+    if count_log2 != int(count_log2):
+        count_log2 = int(count_log2) + 1
+    else:
+        count_log2 = int(count_log2)
+    network = ipaddress.ip_network(super_prefix)
+    return tuple(network.subnets(count_log2))[0:count]
+
+
+def enable_debug(router):
+    router.vtysh_cmd("debug northbound callbacks configuration")
+
+
+def disable_debug(router):
+    router.vtysh_cmd("no debug northbound callbacks configuration")
+
+
+@retry(retry_timeout=3, initial_wait=0.1)
+def check_kernel(r1, super_prefix, count, add, is_blackhole, vrf, matchvia):
+    network = ipaddress.ip_network(super_prefix)
+    vrfstr = f" vrf {vrf}" if vrf else ""
+    if network.version == 6:
+        kernel = r1.run(f"ip -6 route show{vrfstr}")
+    else:
+        kernel = r1.run(f"ip -4 route show{vrfstr}")
+
+    logger.debug("checking kernel routing table%s:\n%s", vrfstr, kernel)
+    for i, net in enumerate(get_ip_networks(super_prefix, count)):
+        if not add:
+            assert str(net) not in kernel
+            continue
+
+        if is_blackhole:
+            route = f"blackhole {str(net)} proto (static|196) metric 20"
+        else:
+            route = (
+                f"{str(net)}(?: nhid [0-9]+)? {matchvia} "
+                "proto (static|196) metric 20"
+            )
+        assert re.search(route, kernel), f"Failed to find \n'{route}'\n in \n'{kernel}'"
+
+
+def do_config(
+    r1,
+    count,
+    add=True,
+    do_ipv6=False,
+    via=None,
+    vrf=None,
+    use_cli=False,
+):
+    optype = "adding" if add else "removing"
+    iptype = "IPv6" if do_ipv6 else "IPv4"
+
+    #
+    # Set the route details
+    #
+
+    if vrf:
+        super_prefix = "2002::/48" if do_ipv6 else "20.0.0.0/8"
+    else:
+        super_prefix = "2001::/48" if do_ipv6 else "10.0.0.0/8"
+
+    matchtype = ""
+    matchvia = ""
+    if via == "blackhole":
+        pass
+    elif via:
+        matchvia = f"dev {via}"
+    else:
+        if vrf:
+            via = "2102::2" if do_ipv6 else "102.0.0.2"
+            matchvia = f"via {via} dev r1-eth1"
+        else:
+            via = "2101::2" if do_ipv6 else "101.0.0.2"
+            matchvia = f"via {via} dev r1-eth0"
+
+    vrfdbg = " in vrf {}".format(vrf) if vrf else ""
+    logger.debug("{} {} static {} routes{}".format(optype, count, iptype, vrfdbg))
+
+    #
+    # Generate config file in a retrievable place
+    #
+
+    config_file = os.path.join(
+        r1.logdir, r1.name, "{}-routes-{}.conf".format(iptype.lower(), optype)
+    )
+    with open(config_file, "w") as f:
+        if use_cli:
+            f.write("configure terminal\n")
+        if vrf:
+            f.write("vrf {}\n".format(vrf))
+
+        for i, net in enumerate(get_ip_networks(super_prefix, count)):
+            if add:
+                f.write("ip route {} {}\n".format(net, via))
+            else:
+                f.write("no ip route {} {}\n".format(net, via))
+
+    #
+    # Load config file.
+    #
+
+    if use_cli:
+        load_command = 'vtysh < "{}"'.format(config_file)
+    else:
+        load_command = 'vtysh -f "{}"'.format(config_file)
+    tstamp = datetime.datetime.now()
+    output = r1.cmd_raises(load_command)
+    delta = (datetime.datetime.now() - tstamp).total_seconds()
+
+    #
+    # Verify the results are in the kernel
+    #
+    check_kernel(r1, super_prefix, count, add, via == "blackhole", vrf, matchvia)
+
+    optyped = "added" if add else "removed"
+    logger.debug(
+        "{} {} {} static routes under {}{} in {}s".format(
+            optyped, count, iptype.lower(), super_prefix, vrfdbg, delta
+        )
+    )
+
+
+def guts(tgen, vrf, use_cli):
+    if tgen.routers_have_failure():
+        pytest.skip(tgen.errors)
+
+    r1 = tgen.routers()["r1"]
+
+    step("add via gateway", reset=True)
+    do_config(r1, 1, True, False, vrf=vrf, use_cli=use_cli)
+    step("remove via gateway")
+    do_config(r1, 1, False, False, vrf=vrf, use_cli=use_cli)
+
+    via = f"lo-{vrf}" if vrf else "lo"
+    step("add via loopback")
+    do_config(r1, 1, True, False, via=via, vrf=vrf, use_cli=use_cli)
+    step("remove via loopback")
+    do_config(r1, 1, False, False, via=via, vrf=vrf, use_cli=use_cli)
+
+    step("add via blackhole")
+    do_config(r1, 1, True, False, via="blackhole", vrf=vrf, use_cli=use_cli)
+    step("remove via blackhole")
+    do_config(r1, 1, False, False, via="blackhole", vrf=vrf, use_cli=use_cli)
+
+
+def test_static_cli(tgen):
+    guts(tgen, "", True)
+
+
+def test_static_file(tgen):
+    guts(tgen, "", False)
+
+
+def test_static_vrf_cli(tgen):
+    guts(tgen, "red", True)
+
+
+def test_static_vrf_file(tgen):
+    guts(tgen, "red", False)
index 2427bfff7771fc343f242998f5006e215f5c0084..c487e7e5f28c0dd552d99902c50f5dcd92e28989 100644 (file)
@@ -40,6 +40,7 @@ pathd=no
 #
 vtysh_enable=yes
 zebra_options="  -A 127.0.0.1 -s 90000000"
+mgmtd_options="  -A 127.0.0.1"
 bgpd_options="   -A 127.0.0.1"
 ospfd_options="  -A 127.0.0.1"
 ospf6d_options=" -A ::1"
index 1ffdade54f21126891adc9721868a9f8c311b064..c9d48d0279fdab6dc35857e22061372e5bb2f58d 100755 (executable)
@@ -27,7 +27,7 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter
 # Local Daemon selection may be done by using /etc/frr/daemons.
 # See /usr/share/doc/frr/README.Debian.gz for further information.
 # Keep zebra first and do not list watchfrr!
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
+DAEMONS="mgmtd zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
 MAX_INSTANCES=5
 RELOAD_SCRIPT="$D_PATH/frr-reload.py"
 
index f1db3a73d528dd90624c251f1ed28ab8e8c5cb27..e26c2947146c1d106929f6292c3f4486ebff9927 100755 (executable)
@@ -35,7 +35,7 @@ FRR_DEFAULT_PROFILE="@DFLT_NAME@" # traditional / datacenter
 # - keep zebra first
 # - watchfrr does NOT belong in this list
 
-DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
+DAEMONS="zebra mgmtd bgpd ripd ripngd ospfd ospf6d isisd babeld pimd pim6d ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd pathd"
 RELOAD_SCRIPT="$D_PATH/frr-reload.py"
 
 #
@@ -99,7 +99,7 @@ daemon_list() {
        for daemon in $DAEMONS; do
                eval cfg=\$$daemon
                eval inst=\$${daemon}_instances
-               [ "$daemon" = zebra -o "$daemon" = staticd ] && cfg=yes
+               [ "$daemon" = zebra -o "$daemon" = staticd -o "$daemon" = mgmtd ] && cfg=yes
                if [ -n "$cfg" -a "$cfg" != "no" -a "$cfg" != "0" ]; then
                        if ! daemon_prep "$daemon" "$inst"; then
                                continue
index 200427fb6e1d48aa6bffb84d90a1fa5cef3fb03e..50970f26c91ba609da906028e3759ab9c0cce0b3 100644 (file)
@@ -120,6 +120,7 @@ static void vtysh_pager_envdef(bool fallback)
 /* --- */
 
 struct vtysh_client vtysh_client[] = {
+       {.name = "mgmtd", .flag = VTYSH_MGMTD},
        {.name = "zebra", .flag = VTYSH_ZEBRA},
        {.name = "ripd", .flag = VTYSH_RIPD},
        {.name = "ripngd", .flag = VTYSH_RIPNGD},
index 538837391b86bf8ab4ee7df9c79cc53ebb4f0bc0..2c202c3536da0194f7cbcede44763c4bdd0a63ca 100644 (file)
@@ -34,6 +34,7 @@ extern struct thread_master *master;
 #define VTYSH_VRRPD     0x40000
 #define VTYSH_PATHD     0x80000
 #define VTYSH_PIM6D     0x100000
+#define VTYSH_MGMTD 0x200000
 
 #define VTYSH_WAS_ACTIVE (-2)
 
@@ -42,7 +43,12 @@ extern struct thread_master *master;
 /* watchfrr is not in ALL since library CLI functions should not be
  * run on it (logging & co. should stay in a fixed/frozen config, and
  * things like prefix lists are not even initialised) */
-#define VTYSH_ALL        VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_LDPD|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_NHRPD|VTYSH_EIGRPD|VTYSH_BABELD|VTYSH_SHARPD|VTYSH_PBRD|VTYSH_STATICD|VTYSH_BFDD|VTYSH_FABRICD|VTYSH_VRRPD|VTYSH_PATHD
+#define VTYSH_ALL                                                              \
+       VTYSH_ZEBRA | VTYSH_RIPD | VTYSH_RIPNGD | VTYSH_OSPFD | VTYSH_OSPF6D | \
+               VTYSH_LDPD | VTYSH_BGPD | VTYSH_ISISD | VTYSH_PIMD |           \
+               VTYSH_PIM6D | VTYSH_NHRPD | VTYSH_EIGRPD | VTYSH_BABELD |      \
+               VTYSH_SHARPD | VTYSH_PBRD | VTYSH_STATICD | VTYSH_BFDD |       \
+               VTYSH_FABRICD | VTYSH_VRRPD | VTYSH_PATHD | VTYSH_MGMTD
 #define VTYSH_ACL         VTYSH_BFDD|VTYSH_BABELD|VTYSH_BGPD|VTYSH_EIGRPD|VTYSH_ISISD|VTYSH_FABRICD|VTYSH_LDPD|VTYSH_NHRPD|VTYSH_OSPF6D|VTYSH_OSPFD|VTYSH_PBRD|VTYSH_PIMD|VTYSH_PIM6D|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_VRRPD|VTYSH_ZEBRA
 #define VTYSH_AFFMAP VTYSH_ZEBRA | VTYSH_ISISD
 #define VTYSH_RMAP       VTYSH_ZEBRA|VTYSH_RIPD|VTYSH_RIPNGD|VTYSH_OSPFD|VTYSH_OSPF6D|VTYSH_BGPD|VTYSH_ISISD|VTYSH_PIMD|VTYSH_EIGRPD|VTYSH_FABRICD
@@ -52,7 +58,7 @@ extern struct thread_master *master;
                VTYSH_EIGRPD | VTYSH_BABELD | VTYSH_PBRD | VTYSH_FABRICD |     \
                VTYSH_VRRPD
 #define VTYSH_INTERFACE VTYSH_INTERFACE_SUBSET | VTYSH_BGPD
-#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD
+#define VTYSH_VRF VTYSH_INTERFACE_SUBSET | VTYSH_STATICD | VTYSH_MGMTD
 #define VTYSH_KEYS VTYSH_RIPD | VTYSH_EIGRPD | VTYSH_OSPF6D
 /* Daemons who can process nexthop-group configs */
 #define VTYSH_NH_GROUP    VTYSH_PBRD|VTYSH_SHARPD
index 8e288194eceddafc06aec513867525bcf986f537..666f2bb23561e73adf1dd477d5367e860964769b 100644 (file)
@@ -474,6 +474,27 @@ module frr-bgp-route-map {
       "ext-community link bandwidth types.";
   }
 
+  typedef asn-type {
+    type union {
+      type uint32 {
+        range "1..4294967295";
+      }
+      type string {
+        pattern '(0|[1-9][0-9]{0,3}|[1-5][0-9]{4}|'
+        +  '6[0-4][0-9]{3}|65[0-4][0-9]{2}|'
+        +  '655[0-2][0-9]|6553[0-5])\.'
+        +  '(0|[1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|'
+        +  '65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])' {
+          error-message "AS dot should be in the form [0..65535].[0..65535].";
+        }
+        pattern '^0\.0$' {
+          modifier "invert-match";
+          error-message "AS dot can't be equal to 0.0.";
+        }
+      }
+    }
+  }
+
   augment "/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:match-condition/frr-route-map:rmap-match-condition/frr-route-map:match-condition" {
     case local-preference {
       when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:match-condition/frr-route-map:condition, 'frr-bgp-route-map:match-local-preference')";
@@ -739,7 +760,7 @@ module frr-bgp-route-map {
       when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:distance')";
       leaf distance {
         type uint8 {
-          range "0..255";
+          range "1..255";
         }
       }
     }
@@ -1004,16 +1025,12 @@ module frr-bgp-route-map {
       when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:aggregator')";
       container aggregator {
         leaf aggregator-asn {
-          type uint32 {
-            range "1..4294967295";
-          }
+          type asn-type;
           description
             "ASN of the aggregator";
         }
 
         leaf aggregator-address {
-          when "../aggregator-asn > 0 or "
-             + "../aggregator-asn <= 4294967295";
           type inet:ipv4-address;
           description
             "IPv4 address of the aggregator";
index 19f1839ac7c4052791400046c17d05b2310b6352..153a5b8d8fbadd504d3cb2e29fbaf7cd03b3e314 100644 (file)
@@ -3819,6 +3819,8 @@ void zebra_vxlan_print_evpn(struct vty *vty, bool uj)
                json_object_int_add(json, "detectionTime", zvrf->dad_time);
                json_object_int_add(json, "detectionFreezeTime",
                                    zvrf->dad_freeze_time);
+               json_object_boolean_add(json, "isDetectionFreeze",
+                                       zvrf->dad_freeze);
                zebra_evpn_mh_json(json);
        } else {
                vty_out(vty, "L2 VNIs: %u\n", num_l2vnis);